From 3c23c207d0c378e513a968df20a5fd075bb33aca Mon Sep 17 00:00:00 2001 From: Marko Boben Date: Sun, 2 Feb 2025 19:15:34 +0100 Subject: [PATCH] Upgrade to Avalanche v1.10.0 --- README.md | 8 +- avalanchego/.gitignore | 2 + avalanchego/.golangci.yml | 71 +- avalanchego/Dockerfile | 6 +- avalanchego/LICENSE | 2 +- avalanchego/LICENSE.header | 4 +- avalanchego/README.md | 65 +- avalanchego/RELEASES.md | 726 +++++ avalanchego/SECURITY.md | 17 + avalanchego/api/admin/client.go | 31 +- avalanchego/api/admin/client_test.go | 22 +- avalanchego/api/admin/service.go | 118 +- avalanchego/api/admin/service_test.go | 34 +- avalanchego/api/auth/auth.go | 25 +- avalanchego/api/auth/auth_test.go | 51 +- avalanchego/api/auth/claims.go | 6 +- avalanchego/api/auth/response.go | 2 +- avalanchego/api/auth/service.go | 19 +- avalanchego/api/common_args_responses.go | 12 +- avalanchego/api/health/checker.go | 14 +- avalanchego/api/health/client.go | 64 +- avalanchego/api/health/client_test.go | 47 +- avalanchego/api/health/handler.go | 9 +- avalanchego/api/health/health.go | 62 +- avalanchego/api/health/health_test.go | 236 +- avalanchego/api/health/metrics.go | 2 +- avalanchego/api/health/mocks/Client.go | 137 - avalanchego/api/health/result.go | 2 +- avalanchego/api/health/service.go | 44 +- avalanchego/api/health/service_test.go | 166 +- avalanchego/api/health/worker.go | 90 +- avalanchego/api/info/client.go | 61 +- avalanchego/api/info/client_test.go | 71 + avalanchego/api/info/mocks/Client.go | 339 -- avalanchego/api/info/service.go | 167 +- avalanchego/api/info/service_test.go | 18 +- avalanchego/api/ipcs/client.go | 17 +- avalanchego/api/ipcs/service.go | 20 +- .../api/keystore/blockchain_keystore.go | 10 +- avalanchego/api/keystore/client.go | 22 +- avalanchego/api/keystore/codec.go | 2 +- .../api/keystore/gkeystore/keystore_client.go | 4 +- .../api/keystore/gkeystore/keystore_server.go | 27 +- avalanchego/api/keystore/keystore.go | 8 +- avalanchego/api/keystore/service.go | 29 +- 
avalanchego/api/keystore/service_test.go | 5 +- avalanchego/api/metrics/gatherer_test.go | 2 +- avalanchego/api/metrics/multi_gatherer.go | 19 +- .../api/metrics/multi_gatherer_test.go | 8 +- avalanchego/api/metrics/optional_gatherer.go | 4 +- .../api/metrics/optional_gatherer_test.go | 9 +- avalanchego/api/server/metrics.go | 74 + avalanchego/api/server/middleware_handler.go | 2 +- avalanchego/api/server/mock_server.go | 204 +- avalanchego/api/server/router.go | 21 +- avalanchego/api/server/router_test.go | 2 +- avalanchego/api/server/server.go | 199 +- avalanchego/api/server/wrapper.go | 2 +- avalanchego/api/traced_handler.go | 47 + avalanchego/app/app.go | 211 +- avalanchego/app/plugin/plugin.go | 56 - avalanchego/app/plugin/plugin_client.go | 41 - avalanchego/app/plugin/plugin_server.go | 41 - avalanchego/app/plugin/process.go | 49 - avalanchego/app/process/process.go | 183 -- avalanchego/app/runner/config.go | 15 - avalanchego/app/runner/runner.go | 48 - avalanchego/buf.lock | 10 - avalanchego/cache/cache.go | 25 +- avalanchego/cache/lru_cache.go | 127 +- avalanchego/cache/lru_cache_benchmark_test.go | 8 +- avalanchego/cache/lru_cache_test.go | 10 +- avalanchego/cache/metercacher/cache.go | 20 +- avalanchego/cache/metercacher/cache_test.go | 7 +- avalanchego/cache/metercacher/metrics.go | 2 +- avalanchego/cache/test_cacher.go | 8 +- avalanchego/cache/unique_cache.go | 28 +- avalanchego/cache/unique_cache_test.go | 31 +- avalanchego/chains/atomic/codec.go | 2 +- .../atomic/gsharedmemory/filtered_batch.go | 10 +- .../gsharedmemory/shared_memory_client.go | 262 +- .../gsharedmemory/shared_memory_server.go | 288 +- .../gsharedmemory/shared_memory_test.go | 38 +- avalanchego/chains/atomic/memory.go | 11 +- avalanchego/chains/atomic/memory_test.go | 14 +- .../chains/atomic/mock_shared_memory.go | 3 + avalanchego/chains/atomic/prefixes.go | 2 +- avalanchego/chains/atomic/shared_memory.go | 13 +- .../chains/atomic/shared_memory_test.go | 2 +- 
avalanchego/chains/atomic/state.go | 9 +- .../chains/atomic/test_shared_memory.go | 4 +- avalanchego/chains/atomic/writer.go | 2 +- avalanchego/chains/linearizable_vm.go | 80 + avalanchego/chains/manager.go | 926 ++++-- avalanchego/chains/mock_manager.go | 44 - avalanchego/chains/registrant.go | 9 +- avalanchego/chains/subnet.go | 87 - avalanchego/chains/subnet_test.go | 39 - avalanchego/chains/test_manager.go | 65 + avalanchego/codec/codec.go | 5 +- avalanchego/codec/general_codec.go | 2 +- avalanchego/codec/hierarchycodec/codec.go | 19 +- .../codec/hierarchycodec/codec_test.go | 2 +- avalanchego/codec/linearcodec/codec.go | 19 +- avalanchego/codec/linearcodec/codec_test.go | 2 +- avalanchego/codec/manager.go | 33 +- avalanchego/codec/mock_manager.go | 108 + .../codec/reflectcodec/struct_fielder.go | 4 +- avalanchego/codec/reflectcodec/type_codec.go | 189 +- avalanchego/codec/registry.go | 2 +- avalanchego/codec/test_codec.go | 805 ++--- avalanchego/config/config.go | 464 ++- avalanchego/config/config_test.go | 72 +- avalanchego/config/flags.go | 217 +- avalanchego/config/keys.go | 32 +- avalanchego/config/pflags.go | 31 - avalanchego/config/viper.go | 16 +- avalanchego/database/batch.go | 55 +- avalanchego/database/benchmark_database.go | 7 +- avalanchego/database/common.go | 2 +- avalanchego/database/corruptabledb/db.go | 15 +- avalanchego/database/corruptabledb/db_test.go | 20 +- avalanchego/database/database.go | 7 +- avalanchego/database/encdb/db.go | 86 +- avalanchego/database/encdb/db_test.go | 23 +- avalanchego/database/errors.go | 2 +- avalanchego/database/helpers.go | 7 +- avalanchego/database/iterator.go | 27 +- avalanchego/database/leveldb/db.go | 47 +- avalanchego/database/leveldb/db_test.go | 22 +- avalanchego/database/leveldb/metrics.go | 2 +- avalanchego/database/linkeddb/codec.go | 2 +- avalanchego/database/linkeddb/linkeddb.go | 54 +- .../database/linkeddb/linkeddb_test.go | 2 +- avalanchego/database/manager/manager.go | 18 +- 
avalanchego/database/manager/manager_test.go | 172 +- avalanchego/database/manager/mocks/manager.go | 197 -- .../database/manager/versioned_database.go | 21 +- avalanchego/database/memdb/db.go | 107 +- avalanchego/database/memdb/db_test.go | 8 +- avalanchego/database/meterdb/db.go | 14 +- avalanchego/database/meterdb/db_test.go | 15 +- avalanchego/database/meterdb/metrics.go | 2 +- avalanchego/database/mock_batch.go | 3 + avalanchego/database/mockdb/db.go | 126 - avalanchego/database/mockdb/db_test.go | 75 - avalanchego/database/nodb/db.go | 97 - avalanchego/database/prefixdb/db.go | 92 +- avalanchego/database/prefixdb/db_test.go | 8 +- avalanchego/database/rpcdb/db_client.go | 147 +- avalanchego/database/rpcdb/db_server.go | 79 +- avalanchego/database/rpcdb/db_test.go | 53 +- avalanchego/database/rpcdb/errors.go | 18 +- avalanchego/database/test_database.go | 1214 ++++---- avalanchego/database/versiondb/db.go | 114 +- avalanchego/database/versiondb/db_test.go | 11 +- avalanchego/genesis/aliases.go | 2 +- avalanchego/genesis/beacons.go | 2 +- avalanchego/genesis/config.go | 17 +- avalanchego/genesis/config_test.go | 67 + avalanchego/genesis/genesis.go | 47 +- avalanchego/genesis/genesis_local.go | 8 +- avalanchego/genesis/genesis_mainnet.go | 2 +- avalanchego/genesis/genesis_test.go | 32 +- avalanchego/genesis/params.go | 2 +- avalanchego/genesis/unparsed_config.go | 2 +- avalanchego/go.mod | 96 +- avalanchego/go.sum | 229 +- avalanchego/ids/aliases.go | 6 +- avalanchego/ids/aliases_test.go | 2 +- avalanchego/ids/bag.go | 158 - avalanchego/ids/bag_test.go | 201 -- avalanchego/ids/bit_set.go | 39 - avalanchego/ids/bits.go | 2 +- avalanchego/ids/bits_test.go | 2 +- .../ids/galiasreader/alias_reader_client.go | 4 +- .../ids/galiasreader/alias_reader_server.go | 4 +- .../ids/galiasreader/alias_reader_test.go | 37 +- avalanchego/ids/id.go | 39 +- avalanchego/ids/id_test.go | 56 +- avalanchego/ids/node_id.go | 29 +- avalanchego/ids/node_id_bag.go | 99 - 
avalanchego/ids/node_id_set.go | 159 - avalanchego/ids/node_id_test.go | 46 +- avalanchego/ids/request_id.go | 18 + avalanchego/ids/set.go | 190 -- avalanchego/ids/set_benchmark_test.go | 71 - avalanchego/ids/set_test.go | 217 -- avalanchego/ids/short.go | 48 +- avalanchego/ids/short_set.go | 163 - avalanchego/ids/short_set_test.go | 319 -- avalanchego/ids/short_test.go | 162 - avalanchego/ids/slice.go | 33 - avalanchego/ids/test_aliases.go | 8 +- avalanchego/ids/test_generator.go | 2 +- avalanchego/ids/unique_bag.go | 101 - avalanchego/ids/unique_bag_test.go | 126 - avalanchego/indexer/client.go | 42 +- avalanchego/indexer/client_test.go | 23 +- avalanchego/indexer/container.go | 2 +- avalanchego/indexer/examples/p-chain/main.go | 55 + .../indexer/examples/x-chain-blocks/main.go | 55 + avalanchego/indexer/index.go | 15 +- avalanchego/indexer/index_test.go | 5 +- avalanchego/indexer/indexer.go | 138 +- avalanchego/indexer/indexer_test.go | 179 +- avalanchego/indexer/service.go | 10 +- avalanchego/ipcs/chainipc.go | 44 +- avalanchego/ipcs/eventsocket.go | 52 +- avalanchego/ipcs/socket/socket.go | 2 +- avalanchego/ipcs/socket/socket_test.go | 6 +- avalanchego/ipcs/socket/socket_unix.go | 34 +- avalanchego/ipcs/socket/socket_windows.go | 3 +- avalanchego/main/main.go | 24 +- avalanchego/message/builder_test.go | 507 --- avalanchego/message/codec.go | 264 -- avalanchego/message/codec_test.go | 266 -- avalanchego/message/creator.go | 42 +- avalanchego/message/fields.go | 259 +- avalanchego/message/inbound_msg_builder.go | 496 +-- .../message/inbound_msg_builder_proto.go | 439 --- .../message/inbound_msg_builder_proto_test.go | 35 - .../message/inbound_msg_builder_test.go | 406 +++ avalanchego/message/internal_msg_builder.go | 534 +++- avalanchego/message/messages.go | 820 ++--- .../message/messages_benchmark_test.go | 136 +- avalanchego/message/messages_test.go | 1468 ++++----- avalanchego/message/mock_message.go | 93 + .../message/mock_outbound_message_builder.go | 386 
+++ avalanchego/message/ops.go | 584 ++-- avalanchego/message/outbound_msg_builder.go | 539 ++-- .../message/outbound_msg_builder_proto.go | 517 --- .../outbound_msg_builder_proto_test.go | 31 - .../message/outbound_msg_builder_test.go | 48 + avalanchego/message/test_message.go | 27 - avalanchego/nat/nat.go | 26 +- avalanchego/nat/no_router.go | 13 +- avalanchego/nat/pmp.go | 37 +- avalanchego/nat/upnp.go | 31 +- avalanchego/network/README.md | 162 + avalanchego/network/certs_test.go | 2 +- avalanchego/network/config.go | 30 +- avalanchego/network/conn_test.go | 4 +- avalanchego/network/dialer/dialer.go | 4 +- avalanchego/network/dialer/dialer_test.go | 74 +- avalanchego/network/dialer_test.go | 4 +- avalanchego/network/example_test.go | 154 + avalanchego/network/handler_test.go | 4 +- avalanchego/network/listener_test.go | 9 +- avalanchego/network/metrics.go | 122 +- avalanchego/network/network.go | 908 ++++-- avalanchego/network/network_test.go | 208 +- avalanchego/network/peer/config.go | 32 +- avalanchego/network/peer/example_test.go | 4 +- avalanchego/network/peer/gossip_tracker.go | 323 ++ .../network/peer/gossip_tracker_callback.go | 56 + .../network/peer/gossip_tracker_metrics.go | 42 + .../network/peer/gossip_tracker_test.go | 620 ++++ avalanchego/network/peer/info.go | 19 +- avalanchego/network/peer/ip.go | 14 +- avalanchego/network/{ => peer}/ip_signer.go | 37 +- .../network/{ => peer}/ip_signer_test.go | 31 +- avalanchego/network/peer/message_queue.go | 20 +- .../network/peer/message_queue_test.go | 104 +- avalanchego/network/peer/metrics.go | 58 +- .../network/peer/mock_gossip_tracker.go | 167 + avalanchego/network/peer/msg_length.go | 59 +- avalanchego/network/peer/msg_length_test.go | 178 +- avalanchego/network/peer/network.go | 35 +- avalanchego/network/peer/peer.go | 596 ++-- avalanchego/network/peer/peer_test.go | 219 +- avalanchego/network/peer/set.go | 28 +- avalanchego/network/peer/set_test.go | 23 +- avalanchego/network/peer/test_network.go | 
81 +- avalanchego/network/peer/test_peer.go | 55 +- avalanchego/network/peer/tls_config.go | 2 +- avalanchego/network/peer/upgrader.go | 6 +- avalanchego/network/peer/validator_id.go | 14 + avalanchego/network/test_network.go | 263 ++ .../network/throttling/bandwidth_throttler.go | 4 +- .../throttling/bandwidth_throttler_test.go | 2 +- avalanchego/network/throttling/common.go | 2 +- .../network/throttling/dial_throttler.go | 8 +- .../network/throttling/dial_throttler_test.go | 3 +- .../throttling/inbound_conn_throttler.go | 4 +- .../throttling/inbound_conn_throttler_test.go | 18 +- .../inbound_conn_upgrade_throttler.go | 27 +- .../inbound_conn_upgrade_throttler_test.go | 2 +- .../inbound_msg_buffer_throttler.go | 13 +- .../inbound_msg_buffer_throttler_test.go | 2 +- .../throttling/inbound_msg_byte_throttler.go | 24 +- .../inbound_msg_byte_throttler_test.go | 16 +- .../throttling/inbound_msg_throttler.go | 31 +- .../throttling/inbound_resource_throttler.go | 58 +- .../inbound_resource_throttler_test.go | 16 +- .../throttling/no_inbound_msg_throttler.go | 4 +- .../throttling/outbound_msg_throttler.go | 20 +- .../throttling/outbound_msg_throttler_test.go | 76 +- .../network/throttling/release_func.go | 2 +- avalanchego/network/tracked_ip.go | 14 +- avalanchego/network/tracked_ip_test.go | 2 +- avalanchego/node/beacon_manager.go | 41 + avalanchego/node/beacon_manager_test.go | 87 + avalanchego/node/config.go | 49 +- .../node/insecure_validator_manager.go | 41 + avalanchego/node/node.go | 470 +-- avalanchego/proto/Dockerfile.buf | 6 +- avalanchego/proto/README.md | 6 +- avalanchego/proto/appsender/appsender.proto | 21 + avalanchego/proto/io/reader/reader.proto | 4 +- avalanchego/proto/io/writer/writer.proto | 4 +- avalanchego/proto/messenger/messenger.proto | 8 +- avalanchego/proto/net/conn/conn.proto | 8 +- avalanchego/proto/p2p/p2p.proto | 76 +- .../proto/pb/aliasreader/aliasreader.pb.go | 2 +- .../proto/pb/appsender/appsender.pb.go | 263 +- 
.../proto/pb/appsender/appsender_grpc.pb.go | 72 + avalanchego/proto/pb/http/http.pb.go | 2 +- .../http/responsewriter/responsewriter.pb.go | 2 +- avalanchego/proto/pb/io/reader/reader.pb.go | 44 +- avalanchego/proto/pb/io/writer/writer.pb.go | 44 +- avalanchego/proto/pb/keystore/keystore.pb.go | 2 +- .../proto/pb/messenger/messenger.pb.go | 114 +- avalanchego/proto/pb/net/conn/conn.pb.go | 117 +- avalanchego/proto/pb/p2p/p2p.pb.go | 1235 +++++--- avalanchego/proto/pb/plugin/plugin.pb.go | 166 - avalanchego/proto/pb/plugin/plugin_grpc.pb.go | 178 -- avalanchego/proto/pb/rpcdb/rpcdb.pb.go | 508 +-- .../proto/pb/sharedmemory/sharedmemory.pb.go | 189 +- .../proto/pb/subnetlookup/subnetlookup.pb.go | 216 -- .../pb/subnetlookup/subnetlookup_grpc.pb.go | 105 - .../pb/validatorstate/validator_state.pb.go | 593 ++++ .../validatorstate/validator_state_grpc.pb.go | 226 ++ avalanchego/proto/pb/vm/runtime/runtime.pb.go | 170 + .../proto/pb/vm/runtime/runtime_grpc.pb.go | 108 + avalanchego/proto/pb/vm/vm.pb.go | 2095 ++++++++----- avalanchego/proto/pb/vm/vm_grpc.pb.go | 182 +- avalanchego/proto/pb/warp/message.pb.go | 234 ++ avalanchego/proto/pb/warp/message_grpc.pb.go | 105 + avalanchego/proto/plugin/plugin.proto | 17 - avalanchego/proto/rpcdb/rpcdb.proto | 27 +- .../proto/sharedmemory/sharedmemory.proto | 9 - .../proto/subnetlookup/subnetlookup.proto | 17 - .../validatorstate/validator_state.proto | 51 + avalanchego/proto/vm/runtime/runtime.proto | 21 + avalanchego/proto/vm/vm.proto | 153 +- avalanchego/proto/warp/message.proto | 19 + avalanchego/pubsub/connection.go | 4 +- avalanchego/pubsub/connections.go | 18 +- avalanchego/pubsub/filter_param.go | 14 +- avalanchego/pubsub/filter_test.go | 2 +- avalanchego/pubsub/filterer.go | 2 +- avalanchego/pubsub/messages.go | 10 +- avalanchego/pubsub/server.go | 17 +- avalanchego/scripts/build.sh | 28 +- avalanchego/scripts/build_avalanche.sh | 27 +- avalanchego/scripts/build_coreth.sh | 63 +- avalanchego/scripts/build_fuzz.sh | 25 + 
avalanchego/scripts/build_image.sh | 2 - avalanchego/scripts/build_local_dep_image.sh | 1 - avalanchego/scripts/build_local_image.sh | 3 - avalanchego/scripts/build_test.sh | 2 - avalanchego/scripts/constants.sh | 19 +- avalanchego/scripts/lint.sh | 2 +- avalanchego/scripts/local.Dockerfile | 4 +- avalanchego/scripts/mock.gen.sh | 27 + avalanchego/scripts/mocks.mockgen.txt | 51 + avalanchego/scripts/protobuf_codegen.sh | 6 +- avalanchego/scripts/tests.e2e.sh | 9 +- avalanchego/scripts/tests.upgrade.sh | 6 +- avalanchego/snow/acceptor.go | 12 +- avalanchego/snow/choices/decidable.go | 8 +- avalanchego/snow/choices/status.go | 2 +- avalanchego/snow/choices/status_test.go | 80 +- avalanchego/snow/choices/test_decidable.go | 21 +- .../snow/consensus/avalanche/consensus.go | 27 +- .../consensus/avalanche/consensus_test.go | 1013 ++++-- .../snow/consensus/avalanche/factory.go | 2 +- .../snow/consensus/avalanche/parameters.go | 2 +- .../consensus/avalanche/parameters_test.go | 2 +- .../avalanche/poll/early_term_no_traversal.go | 25 +- .../poll/early_term_no_traversal_test.go | 19 +- .../consensus/avalanche/poll/interfaces.go | 11 +- .../consensus/avalanche/poll/no_early_term.go | 29 +- .../avalanche/poll/no_early_term_test.go | 13 +- .../snow/consensus/avalanche/poll/set.go | 21 +- .../snow/consensus/avalanche/poll/set_test.go | 29 +- .../snow/consensus/avalanche/test_vertex.go | 43 +- .../snow/consensus/avalanche/topological.go | 215 +- .../consensus/avalanche/topological_test.go | 6 +- .../consensus/avalanche/traced_consensus.go | 55 + .../consensus/avalanche/transaction_vertex.go | 35 +- .../snow/consensus/avalanche/vertex.go | 12 +- avalanchego/snow/consensus/metrics/height.go | 4 +- avalanchego/snow/consensus/metrics/latency.go | 58 +- avalanchego/snow/consensus/metrics/polls.go | 4 +- .../snow/consensus/metrics/timestamp.go | 38 + .../snow/consensus/snowball/binary_slush.go | 20 +- .../consensus/snowball/binary_snowball.go | 4 +- .../snowball/binary_snowball_test.go 
| 122 +- .../consensus/snowball/binary_snowflake.go | 12 +- .../snowball/binary_snowflake_test.go | 36 +- .../snow/consensus/snowball/consensus.go | 8 +- .../snowball/consensus_performance_test.go | 2 +- .../snowball/consensus_reversibility_test.go | 2 +- .../snow/consensus/snowball/consensus_test.go | 68 +- .../snow/consensus/snowball/factory.go | 2 +- avalanchego/snow/consensus/snowball/flat.go | 15 +- .../snow/consensus/snowball/flat_test.go | 9 +- .../snow/consensus/snowball/network_test.go | 5 +- .../snow/consensus/snowball/nnary_slush.go | 20 +- .../snow/consensus/snowball/nnary_snowball.go | 4 +- .../consensus/snowball/nnary_snowball_test.go | 2 +- .../consensus/snowball/nnary_snowflake.go | 18 +- .../snowball/nnary_snowflake_test.go | 2 +- .../snow/consensus/snowball/parameters.go | 2 +- .../consensus/snowball/parameters_test.go | 2 +- avalanchego/snow/consensus/snowball/tree.go | 151 +- .../snow/consensus/snowball/tree_test.go | 57 +- .../snow/consensus/snowball/unary_snowball.go | 4 +- .../consensus/snowball/unary_snowball_test.go | 2 +- .../consensus/snowball/unary_snowflake.go | 16 +- .../snowball/unary_snowflake_test.go | 2 +- avalanchego/snow/consensus/snowman/block.go | 5 +- .../snow/consensus/snowman/consensus.go | 23 +- .../snow/consensus/snowman/consensus_test.go | 278 +- avalanchego/snow/consensus/snowman/factory.go | 2 +- .../snow/consensus/snowman/mock_block.go | 28 +- .../snow/consensus/snowman/network_test.go | 21 +- .../snow/consensus/snowman/oracle_block.go | 9 +- .../snowman/poll/early_term_no_traversal.go | 17 +- .../poll/early_term_no_traversal_test.go | 23 +- .../snow/consensus/snowman/poll/interfaces.go | 13 +- .../consensus/snowman/poll/no_early_term.go | 25 +- .../snowman/poll/no_early_term_test.go | 13 +- .../snow/consensus/snowman/poll/set.go | 21 +- .../snow/consensus/snowman/poll/set_test.go | 43 +- .../snow/consensus/snowman/snowman_block.go | 8 +- .../snow/consensus/snowman/test_block.go | 41 +- 
.../snow/consensus/snowman/topological.go | 117 +- .../consensus/snowman/topological_test.go | 6 +- .../consensus/snowman/traced_consensus.go | 50 + .../snow/consensus/snowstorm/acceptor.go | 25 +- .../consensus/snowstorm/benchmark_test.go | 2 +- .../snow/consensus/snowstorm/consensus.go | 27 +- .../consensus/snowstorm/consensus_test.go | 396 +-- .../snow/consensus/snowstorm/directed.go | 227 +- .../snow/consensus/snowstorm/directed_test.go | 6 +- .../snow/consensus/snowstorm/factory.go | 2 +- .../snow/consensus/snowstorm/network_test.go | 11 +- .../snow/consensus/snowstorm/rejector.go | 24 +- .../snow/consensus/snowstorm/snowball.go | 2 +- .../snow/consensus/snowstorm/stringer.go | 21 +- .../snow/consensus/snowstorm/stringer_test.go | 52 + .../snow/consensus/snowstorm/test_tx.go | 38 +- avalanchego/snow/consensus/snowstorm/tx.go | 9 +- avalanchego/snow/context.go | 118 +- .../avalanche/bootstrap/bootstrapper.go | 206 +- .../avalanche/bootstrap/bootstrapper_test.go | 245 +- .../snow/engine/avalanche/bootstrap/config.go | 4 +- .../engine/avalanche/bootstrap/metrics.go | 2 +- .../snow/engine/avalanche/bootstrap/tx_job.go | 34 +- .../engine/avalanche/bootstrap/vertex_job.go | 30 +- avalanchego/snow/engine/avalanche/config.go | 4 +- .../snow/engine/avalanche/config_test.go | 5 +- avalanchego/snow/engine/avalanche/engine.go | 6 +- .../snow/engine/avalanche/getter/getter.go | 51 +- .../engine/avalanche/getter/getter_test.go | 37 +- avalanchego/snow/engine/avalanche/issuer.go | 92 +- avalanchego/snow/engine/avalanche/metrics.go | 2 +- .../snow/engine/avalanche/mocks/engine.go | 580 ---- .../engine/avalanche/state/prefixed_state.go | 56 +- .../snow/engine/avalanche/state/serializer.go | 54 +- .../snow/engine/avalanche/state/state.go | 18 +- .../engine/avalanche/state/unique_vertex.go | 74 +- .../avalanche/state/unique_vertex_test.go | 104 +- ...est_avalanche_engine.go => test_engine.go} | 11 +- .../snow/engine/avalanche/traced_engine.go | 42 + 
.../snow/engine/avalanche/transitive.go | 340 +- .../snow/engine/avalanche/transitive_test.go | 1236 +++++--- .../snow/engine/avalanche/vertex/builder.go | 13 +- .../engine/avalanche/vertex/builder_test.go | 2 +- .../snow/engine/avalanche/vertex/codec.go | 2 +- .../snow/engine/avalanche/vertex/heap.go | 25 +- .../snow/engine/avalanche/vertex/heap_test.go | 2 +- .../snow/engine/avalanche/vertex/manager.go | 2 +- .../snow/engine/avalanche/vertex/mock_vm.go | 406 +++ .../engine/avalanche/vertex/mocks/dag_vm.go | 303 -- .../snow/engine/avalanche/vertex/parser.go | 6 +- .../engine/avalanche/vertex/parser_test.go | 2 +- .../snow/engine/avalanche/vertex/sorting.go | 28 - .../avalanche/vertex/stateless_vertex.go | 49 +- .../avalanche/vertex/stateless_vertex_test.go | 2 +- .../snow/engine/avalanche/vertex/storage.go | 10 +- .../engine/avalanche/vertex/test_builder.go | 21 +- .../engine/avalanche/vertex/test_manager.go | 4 +- .../engine/avalanche/vertex/test_parser.go | 15 +- .../engine/avalanche/vertex/test_storage.go | 23 +- .../snow/engine/avalanche/vertex/test_vm.go | 43 +- .../snow/engine/avalanche/vertex/vm.go | 53 +- avalanchego/snow/engine/avalanche/voter.go | 84 +- .../snow/engine/avalanche/voter_test.go | 105 +- .../common/appsender/appsender_client.go | 45 +- .../common/appsender/appsender_server.go | 43 +- .../snow/engine/common/bootstrap_tracker.go | 20 + .../snow/engine/common/bootstrapable.go | 6 +- .../snow/engine/common/bootstrapper.go | 130 +- avalanchego/snow/engine/common/config.go | 31 +- avalanchego/snow/engine/common/engine.go | 143 +- avalanchego/snow/engine/common/fetcher.go | 6 +- avalanchego/snow/engine/common/fx.go | 2 +- avalanchego/snow/engine/common/halter.go | 9 +- .../snow/engine/common/http_handler.go | 2 +- avalanchego/snow/engine/common/message.go | 4 +- avalanchego/snow/engine/common/mixed_query.go | 18 +- .../snow/engine/common/mixed_query_test.go | 30 +- avalanchego/snow/engine/common/mock_sender.go | 636 +--- 
.../snow/engine/common/no_ops_handlers.go | 133 +- avalanchego/snow/engine/common/queue/job.go | 11 +- avalanchego/snow/engine/common/queue/jobs.go | 92 +- .../snow/engine/common/queue/jobs_test.go | 143 +- .../snow/engine/common/queue/parser.go | 6 +- avalanchego/snow/engine/common/queue/state.go | 36 +- .../snow/engine/common/queue/test_job.go | 24 +- .../snow/engine/common/queue/test_parser.go | 13 +- avalanchego/snow/engine/common/requests.go | 6 +- .../snow/engine/common/requests_test.go | 2 +- avalanchego/snow/engine/common/sender.go | 80 +- .../snow/engine/common/state_syncer.go | 6 +- avalanchego/snow/engine/common/subnet.go | 17 - .../snow/engine/common/subnet_tracker.go | 15 - ...st_subnet.go => test_bootstrap_tracker.go} | 26 +- .../snow/engine/common/test_bootstrapable.go | 11 +- .../snow/engine/common/test_bootstrapper.go | 6 +- avalanchego/snow/engine/common/test_config.go | 15 +- avalanchego/snow/engine/common/test_engine.go | 286 +- avalanchego/snow/engine/common/test_sender.go | 151 +- avalanchego/snow/engine/common/test_timer.go | 4 +- avalanchego/snow/engine/common/test_vm.go | 196 +- avalanchego/snow/engine/common/timer.go | 2 +- .../common/traced_bootstrapable_engine.go | 43 + .../snow/engine/common/traced_engine.go | 420 +++ .../snow/engine/common/traced_state_syncer.go | 33 + .../snow/engine/common/tracker/accepted.go | 70 + .../engine/common/tracker/accepted_test.go | 41 + .../snow/engine/common/tracker/peers.go | 25 +- .../snow/engine/common/tracker/peers_test.go | 11 +- .../snow/engine/common/tracker/startup.go | 14 +- avalanchego/snow/engine/common/vm.go | 42 +- .../snow/engine/snowman/ancestor_tree.go | 7 +- .../snow/engine/snowman/ancestor_tree_test.go | 2 +- .../snow/engine/snowman/block/README.md | 4 +- .../snow/engine/snowman/block/batched_vm.go | 32 +- .../engine/snowman/block/batched_vm_test.go | 18 +- .../engine/snowman/block/block_context_vm.go | 68 + .../engine/snowman/block/height_indexed_vm.go | 7 +- 
.../mocks/build_block_with_context_vm.go | 55 + .../engine/snowman/block/mocks/chain_vm.go | 268 +- .../snowman/block/mocks/state_syncable_vm.go | 46 +- .../block/mocks/with_verify_context.go | 68 + .../engine/snowman/block/state_summary.go | 11 +- .../engine/snowman/block/state_sync_mode.go | 44 + .../engine/snowman/block/state_syncable_vm.go | 13 +- .../engine/snowman/block/test_batched_vm.go | 27 +- .../snowman/block/test_height_indexed_vm.go | 17 +- .../snowman/block/test_state_summary.go | 27 +- .../snowman/block/test_state_syncable_vm.go | 35 +- .../snow/engine/snowman/block/test_vm.go | 35 +- avalanchego/snow/engine/snowman/block/vm.go | 14 +- .../engine/snowman/bootstrap/block_job.go | 36 +- .../engine/snowman/bootstrap/bootstrapper.go | 153 +- .../snowman/bootstrap/bootstrapper_test.go | 326 +- .../snow/engine/snowman/bootstrap/config.go | 2 +- .../snow/engine/snowman/bootstrap/metrics.go | 2 +- avalanchego/snow/engine/snowman/config.go | 2 +- .../snow/engine/snowman/config_test.go | 5 +- avalanchego/snow/engine/snowman/engine.go | 2 +- .../snow/engine/snowman/getter/getter.go | 44 +- .../snow/engine/snowman/getter/getter_test.go | 45 +- avalanchego/snow/engine/snowman/issuer.go | 23 +- .../snow/engine/snowman/memory_block.go | 14 +- avalanchego/snow/engine/snowman/metrics.go | 2 +- .../snow/engine/snowman/mocks/engine.go | 580 ---- .../snow/engine/snowman/syncer/config.go | 5 +- .../engine/snowman/syncer/state_syncer.go | 233 +- .../snowman/syncer/state_syncer_test.go | 227 +- .../snow/engine/snowman/syncer/utils_test.go | 11 +- ...{test_snowman_engine.go => test_engine.go} | 11 +- .../snow/engine/snowman/traced_engine.go | 42 + avalanchego/snow/engine/snowman/transitive.go | 327 +- .../snow/engine/snowman/transitive_test.go | 852 +++-- avalanchego/snow/engine/snowman/voter.go | 50 +- avalanchego/snow/events/blockable.go | 13 +- avalanchego/snow/events/blocker.go | 33 +- avalanchego/snow/events/blocker_test.go | 63 +- 
.../snow/networking/benchlist/benchable.go | 2 +- .../snow/networking/benchlist/benchlist.go | 42 +- .../networking/benchlist/benchlist_test.go | 170 +- .../snow/networking/benchlist/manager.go | 32 +- .../snow/networking/benchlist/metrics.go | 2 +- .../networking/benchlist/test_benchable.go | 2 +- avalanchego/snow/networking/handler/engine.go | 55 + .../snow/networking/handler/engine_test.go | 71 + .../snow/networking/handler/handler.go | 1019 +++--- .../snow/networking/handler/handler_test.go | 465 ++- .../snow/networking/handler/message_queue.go | 85 +- .../handler/message_queue_metrics.go | 2 +- .../networking/handler/message_queue_test.go | 278 +- .../snow/networking/handler/metrics.go | 35 +- .../snow/networking/handler/mock_handler.go | 210 ++ avalanchego/snow/networking/handler/parser.go | 36 +- .../snow/networking/router/chain_router.go | 454 ++- .../networking/router/chain_router_metrics.go | 2 +- .../networking/router/chain_router_test.go | 1450 +++++++-- avalanchego/snow/networking/router/health.go | 2 +- .../snow/networking/router/inbound_handler.go | 12 +- .../snow/networking/router/mock_router.go | 173 ++ avalanchego/snow/networking/router/router.go | 21 +- .../snow/networking/router/traced_router.go | 155 + .../snow/networking/sender/external_sender.go | 14 +- .../networking/sender/mock_external_sender.go | 69 + avalanchego/snow/networking/sender/sender.go | 1116 +++++-- .../snow/networking/sender/sender_test.go | 1228 +++++++- .../networking/sender/test_external_sender.go | 22 +- .../snow/networking/sender/traced_sender.go | 264 ++ .../snow/networking/timeout/manager.go | 22 +- .../snow/networking/timeout/manager_test.go | 47 +- .../snow/networking/timeout/metrics.go | 2 +- .../snow/networking/timeout/mock_manager.go | 143 + .../tracker/mock_resource_tracker.go | 96 +- .../snow/networking/tracker/mock_targeter.go | 13 +- .../networking/tracker/resource_tracker.go | 4 +- .../tracker/resource_tracker_test.go | 2 +- 
.../snow/networking/tracker/targeter.go | 6 +- .../snow/networking/tracker/targeter_test.go | 6 +- .../snow/networking/worker/mock_pool.go | 22 - avalanchego/snow/networking/worker/pool.go | 4 +- avalanchego/snow/state.go | 13 +- avalanchego/snow/uptime/locked_calculator.go | 36 +- .../snow/uptime/locked_calculator_test.go | 66 +- avalanchego/snow/uptime/manager.go | 130 +- avalanchego/snow/uptime/manager_test.go | 343 +- avalanchego/snow/uptime/mock_calculator.go | 85 + avalanchego/snow/uptime/mocks/calculator.go | 85 - avalanchego/snow/uptime/no_op_calculator.go | 26 + avalanchego/snow/uptime/state.go | 38 +- avalanchego/snow/uptime/test_state.go | 29 +- avalanchego/snow/validators/connector.go | 12 +- avalanchego/snow/validators/custom.go | 192 -- avalanchego/snow/validators/custom_test.go | 32 - .../gvalidators/validator_state_client.go | 87 + .../gvalidators/validator_state_server.go | 79 + .../gvalidators/validator_state_test.go | 183 ++ avalanchego/snow/validators/logger.go | 91 + avalanchego/snow/validators/manager.go | 184 +- avalanchego/snow/validators/manager_test.go | 120 + avalanchego/snow/validators/mock_manager.go | 81 + avalanchego/snow/validators/mock_set.go | 236 ++ avalanchego/snow/validators/mock_state.go | 99 + .../snow/validators/mock_subnet_connector.go | 53 + avalanchego/snow/validators/set.go | 547 ++-- avalanchego/snow/validators/set_test.go | 730 +++-- avalanchego/snow/validators/state.go | 47 +- .../snow/validators/subnet_connector.go | 16 + avalanchego/snow/validators/test_state.go | 40 +- avalanchego/snow/validators/traced_state.go | 75 + .../validators/unhandled_subnet_connector.go | 23 + avalanchego/snow/validators/validator.go | 79 +- avalanchego/staking/tls.go | 9 +- avalanchego/staking/tls_test.go | 2 +- avalanchego/subnets/config.go | 60 + avalanchego/subnets/config_test.go | 77 + avalanchego/subnets/no_op_allower.go | 15 + avalanchego/subnets/subnet.go | 105 + avalanchego/subnets/subnet_test.go | 72 + avalanchego/tests/colors.go 
| 12 +- avalanchego/tests/e2e/banff/suites.go | 2 +- avalanchego/tests/e2e/describe.go | 2 +- avalanchego/tests/e2e/e2e.go | 64 +- avalanchego/tests/e2e/e2e_test.go | 2 +- .../tests/e2e/p/permissionless_subnets.go | 12 +- avalanchego/tests/e2e/p/workflow.go | 9 +- avalanchego/tests/e2e/ping/suites.go | 2 +- .../tests/e2e/static-handlers/suites.go | 10 +- avalanchego/tests/e2e/x/transfer/virtuous.go | 15 +- .../tests/e2e/x/whitelist-vtx/suites.go | 34 +- avalanchego/tests/http.go | 15 +- avalanchego/tests/keys.go | 24 +- avalanchego/tests/keys_test.go | 6 +- avalanchego/tests/upgrade/upgrade_test.go | 2 +- avalanchego/trace/exporter.go | 62 + avalanchego/trace/exporter_type.go | 45 + avalanchego/trace/noop.go | 25 + avalanchego/trace/tracer.go | 84 + avalanchego/utils/atomic.go | 27 + avalanchego/utils/atomic_bool.go | 22 - avalanchego/utils/atomic_interface.go | 31 - avalanchego/utils/atomic_interface_test.go | 22 - avalanchego/utils/atomic_test.go | 26 + avalanchego/utils/bag/bag.go | 170 + .../{ids => utils/bag}/bag_benchmark_test.go | 34 +- avalanchego/utils/bag/bag_test.go | 240 ++ avalanchego/utils/bag/unique_bag.go | 114 + avalanchego/utils/bag/unique_bag_test.go | 103 + avalanchego/utils/beacon/beacon.go | 13 +- avalanchego/utils/beacon/set.go | 8 +- avalanchego/utils/beacon/set_test.go | 2 +- avalanchego/utils/bloom/bloom_filter.go | 2 +- avalanchego/utils/bloom/bloom_filter_test.go | 2 +- avalanchego/utils/bloom/map_filter.go | 15 +- .../utils/buffer/bounded_nonblocking_queue.go | 90 + .../buffer/bounded_nonblocking_queue_test.go | 142 + .../utils/buffer/unbounded_blocking_deque.go | 169 + .../buffer/unbounded_blocking_deque_test.go | 99 + avalanchego/utils/buffer/unbounded_deque.go | 190 ++ .../utils/buffer/unbounded_deque_test.go | 672 ++++ avalanchego/utils/buffer/unbounded_queue.go | 103 - .../utils/buffer/unbounded_queue_test.go | 306 -- avalanchego/utils/bytes.go | 14 +- avalanchego/utils/bytes_test.go | 24 - avalanchego/utils/cb58/cb58.go | 2 +- 
avalanchego/utils/cb58/cb58_test.go | 18 +- avalanchego/utils/compare/compare.go | 27 + avalanchego/utils/compare/compare_test.go | 26 + avalanchego/utils/compression/compressor.go | 2 +- .../utils/compression/compressor_test.go | 248 ++ .../utils/compression/gzip_compressor.go | 68 +- .../utils/compression/gzip_compressor_test.go | 78 - .../utils/compression/gzip_zip_bomb.bin | Bin 0 -> 2096390 bytes .../utils/compression/no_compressor.go | 4 +- .../utils/compression/no_compressor_test.go | 2 +- avalanchego/utils/compression/type.go | 59 + avalanchego/utils/compression/type_test.go | 56 + .../utils/compression/zstd_compressor.go | 58 + .../utils/compression/zstd_zip_bomb.bin | Bin 0 -> 2097138 bytes avalanchego/utils/constants/aliases.go | 2 +- avalanchego/utils/constants/application.go | 2 +- avalanchego/utils/constants/network_ids.go | 2 +- .../utils/constants/network_ids_test.go | 2 +- avalanchego/utils/constants/networking.go | 98 +- avalanchego/utils/constants/vm_ids.go | 2 +- avalanchego/utils/context.go | 35 + .../utils/crypto/bls/bls_benchmark_test.go | 88 + avalanchego/utils/crypto/bls/bls_test.go | 4 +- avalanchego/utils/crypto/bls/public.go | 6 +- avalanchego/utils/crypto/bls/public_test.go | 2 +- avalanchego/utils/crypto/bls/secret.go | 13 +- avalanchego/utils/crypto/bls/secret_test.go | 2 +- avalanchego/utils/crypto/bls/signature.go | 2 +- .../utils/crypto/bls/signature_test.go | 2 +- avalanchego/utils/crypto/crypto.go | 39 - .../utils/crypto/crypto_benchmark_test.go | 104 - avalanchego/utils/crypto/ed25519.go | 95 - avalanchego/utils/crypto/errors.go | 13 - avalanchego/utils/crypto/keychain/keychain.go | 166 + .../utils/crypto/keychain/keychain_test.go | 432 +++ avalanchego/utils/crypto/keychain/ledger.go | 19 + .../utils/crypto/keychain/mock_ledger.go | 128 + avalanchego/utils/crypto/ledger/ledger.go | 130 + .../utils/crypto/ledger/ledger_test.go | 76 + avalanchego/utils/crypto/rsa.go | 126 - avalanchego/utils/crypto/rsapss.go | 123 - 
.../utils/crypto/secp256k1/rfc6979_test.go | 85 + .../{secp256k1r.go => secp256k1/secp256k1.go} | 138 +- .../secp256k1/secp256k1_benchmark_test.go | 35 + .../secp256k1_test.go} | 77 +- .../utils/crypto/{ => secp256k1}/test_keys.go | 14 +- .../utils/dynamicip/ifconfig_resolver.go | 14 +- avalanchego/utils/dynamicip/no_updater.go | 2 +- .../utils/dynamicip/opendns_resolver.go | 18 +- avalanchego/utils/dynamicip/resolver.go | 5 +- avalanchego/utils/dynamicip/resolver_test.go | 2 +- avalanchego/utils/dynamicip/updater.go | 37 +- avalanchego/utils/dynamicip/updater_test.go | 29 +- avalanchego/utils/filesystem/io.go | 2 +- avalanchego/utils/filesystem/mock_file.go | 2 +- avalanchego/utils/filesystem/mock_io.go | 6 +- avalanchego/utils/filesystem/rename.go | 2 +- avalanchego/utils/filesystem/rename_test.go | 10 +- .../utils/formatting/address/address.go | 2 +- .../utils/formatting/address/converter.go | 2 +- avalanchego/utils/formatting/address/tools.go | 11 - avalanchego/utils/formatting/encoding.go | 2 +- .../formatting/encoding_benchmark_test.go | 2 +- avalanchego/utils/formatting/encoding_test.go | 16 +- avalanchego/utils/formatting/int_format.go | 2 +- .../utils/formatting/int_format_test.go | 2 +- .../utils/formatting/prefixed_stringer.go | 2 +- .../utils/hashing/consistent/hashable.go | 2 +- avalanchego/utils/hashing/consistent/ring.go | 80 +- .../utils/hashing/consistent/ring_test.go | 8 +- avalanchego/utils/hashing/hasher.go | 2 +- avalanchego/utils/hashing/hashing.go | 36 +- avalanchego/utils/hashing/mock_hasher.go | 7 +- avalanchego/utils/ips/claimed_ip_port.go | 11 +- avalanchego/utils/ips/dynamic_ip_port.go | 4 +- avalanchego/utils/ips/ip_port.go | 10 +- avalanchego/utils/ips/ip_test.go | 2 +- avalanchego/utils/ips/lookup.go | 34 + avalanchego/utils/ips/lookup_test.go | 44 + avalanchego/utils/json/codec.go | 2 +- avalanchego/utils/json/float32.go | 2 +- avalanchego/utils/json/float32_test.go | 2 +- avalanchego/utils/json/float64.go | 2 +- 
avalanchego/utils/json/uint16.go | 2 +- avalanchego/utils/json/uint32.go | 2 +- avalanchego/utils/json/uint64.go | 2 +- avalanchego/utils/json/uint8.go | 2 +- avalanchego/utils/linkedhashmap/iterator.go | 13 +- .../utils/linkedhashmap/linkedhashmap.go | 8 +- .../utils/linkedhashmap/linkedhashmap_test.go | 2 +- avalanchego/utils/logging/color.go | 2 +- avalanchego/utils/logging/config.go | 2 +- avalanchego/utils/logging/factory.go | 12 +- avalanchego/utils/logging/format.go | 2 +- avalanchego/utils/logging/level.go | 2 +- avalanchego/utils/logging/log.go | 24 +- avalanchego/utils/logging/log_test.go | 2 +- avalanchego/utils/logging/logger.go | 5 +- avalanchego/utils/logging/mock_logger.go | 113 +- avalanchego/utils/logging/sanitize.go | 2 +- avalanchego/utils/logging/test_log.go | 41 +- avalanchego/utils/math/averager.go | 2 +- avalanchego/utils/math/averager_heap.go | 8 +- avalanchego/utils/math/averager_heap_test.go | 2 +- avalanchego/utils/math/continuous_averager.go | 2 +- .../continuous_averager_benchmark_test.go | 2 +- .../utils/math/continuous_averager_test.go | 2 +- .../utils/math/meter/continuous_meter.go | 6 +- avalanchego/utils/math/meter/factory.go | 2 +- avalanchego/utils/math/meter/meter.go | 2 +- .../utils/math/meter/meter_benchmark_test.go | 2 +- avalanchego/utils/math/meter/meter_test.go | 2 +- avalanchego/utils/math/safe_math.go | 49 +- avalanchego/utils/math/safe_math_test.go | 179 +- avalanchego/utils/math/sync_averager.go | 2 +- avalanchego/utils/metric/api_interceptor.go | 4 +- avalanchego/utils/metric/averager.go | 4 +- avalanchego/utils/password/hash.go | 2 +- avalanchego/utils/password/hash_test.go | 2 +- avalanchego/utils/password/password.go | 2 +- avalanchego/utils/password/password_test.go | 2 +- avalanchego/utils/perms/chmod.go | 2 +- avalanchego/utils/perms/create.go | 2 +- avalanchego/utils/perms/perms.go | 2 +- avalanchego/utils/perms/write_file.go | 2 +- avalanchego/utils/profiler/continuous.go | 14 +- 
avalanchego/utils/profiler/profiler.go | 6 +- avalanchego/utils/profiler/profiler_test.go | 2 +- avalanchego/utils/resource/mock_user.go | 47 +- avalanchego/utils/resource/no_usage.go | 14 +- avalanchego/utils/resource/usage.go | 4 +- avalanchego/utils/resource/usage_test.go | 2 +- avalanchego/utils/rpc/json.go | 2 +- avalanchego/utils/rpc/options.go | 2 +- avalanchego/utils/rpc/requester.go | 15 +- avalanchego/utils/sampler/rand.go | 2 +- avalanchego/utils/sampler/uniform.go | 6 +- .../utils/sampler/uniform_benchmark_test.go | 2 +- avalanchego/utils/sampler/uniform_best.go | 4 +- avalanchego/utils/sampler/uniform_replacer.go | 8 +- avalanchego/utils/sampler/uniform_resample.go | 16 +- avalanchego/utils/sampler/uniform_test.go | 6 +- avalanchego/utils/sampler/weighted.go | 2 +- avalanchego/utils/sampler/weighted_array.go | 39 +- .../utils/sampler/weighted_array_test.go | 27 + .../utils/sampler/weighted_benchmark_test.go | 6 +- avalanchego/utils/sampler/weighted_best.go | 4 +- avalanchego/utils/sampler/weighted_heap.go | 56 +- .../utils/sampler/weighted_heap_test.go | 58 +- avalanchego/utils/sampler/weighted_linear.go | 39 +- .../utils/sampler/weighted_linear_test.go | 27 + avalanchego/utils/sampler/weighted_test.go | 2 +- avalanchego/utils/sampler/weighted_uniform.go | 4 +- .../sampler/weighted_without_replacement.go | 2 +- ...hted_without_replacement_benchmark_test.go | 2 +- .../weighted_without_replacement_generic.go | 2 +- .../weighted_without_replacement_test.go | 7 +- avalanchego/utils/set/bits.go | 102 + avalanchego/utils/set/bits_64.go | 57 + .../set/bits_64_test.go} | 22 +- avalanchego/utils/set/bits_test.go | 508 +++ avalanchego/utils/set/set.go | 207 ++ avalanchego/utils/set/set_benchmark_test.go | 39 + avalanchego/utils/set/set_test.go | 194 ++ avalanchego/utils/sorting.go | 126 +- avalanchego/utils/sorting_test.go | 231 +- avalanchego/utils/stacktrace.go | 2 +- avalanchego/utils/storage/storage_common.go | 2 +- avalanchego/utils/storage/storage_unix.go 
| 2 +- avalanchego/utils/storage/storage_windows.go | 2 +- avalanchego/utils/subprocess/linux_new.go | 20 - avalanchego/utils/subprocess/non_linux_new.go | 13 - .../utils/timer/adaptive_timeout_manager.go | 70 +- .../timer/adaptive_timeout_manager_test.go | 9 +- avalanchego/utils/timer/eta.go | 2 +- avalanchego/utils/timer/meter.go | 2 +- avalanchego/utils/timer/mockable/clock.go | 2 +- .../utils/timer/mockable/clock_test.go | 2 +- avalanchego/utils/timer/staged_timer.go | 2 +- avalanchego/utils/timer/staged_timer_test.go | 2 +- avalanchego/utils/timer/timeout_manager.go | 138 - .../utils/timer/timeout_manager_test.go | 25 - avalanchego/utils/timer/timer.go | 2 +- avalanchego/utils/timer/timer_test.go | 4 +- avalanchego/utils/ulimit/ulimit_bsd.go | 2 +- avalanchego/utils/ulimit/ulimit_darwin.go | 2 +- avalanchego/utils/ulimit/ulimit_test.go | 18 - avalanchego/utils/ulimit/ulimit_unix.go | 2 +- avalanchego/utils/ulimit/ulimit_windows.go | 2 +- avalanchego/utils/units/avax.go | 2 +- avalanchego/utils/units/bytes.go | 2 +- avalanchego/utils/window/window.go | 18 +- avalanchego/utils/window/window_test.go | 2 +- avalanchego/utils/wrappers/closers.go | 4 +- avalanchego/utils/wrappers/errors.go | 8 +- avalanchego/utils/wrappers/packing.go | 367 +-- avalanchego/utils/wrappers/packing_test.go | 759 ++--- avalanchego/utils/zero.go | 4 +- avalanchego/version/application.go | 4 +- avalanchego/version/application_test.go | 2 +- avalanchego/version/compatibility.go | 8 +- avalanchego/version/compatibility.json | 46 + avalanchego/version/compatibility_test.go | 2 +- avalanchego/version/constants.go | 73 +- avalanchego/version/constants_test.go | 15 + avalanchego/version/parser.go | 2 +- avalanchego/version/parser_test.go | 2 +- avalanchego/version/string.go | 9 +- avalanchego/version/version.go | 4 +- avalanchego/version/version_test.go | 2 +- avalanchego/vms/avm/blocks/block.go | 33 + avalanchego/vms/avm/blocks/block_test.go | 104 + avalanchego/vms/avm/blocks/builder/builder.go 
| 182 ++ .../vms/avm/blocks/builder/builder_test.go | 642 ++++ avalanchego/vms/avm/blocks/executor/block.go | 332 ++ .../vms/avm/blocks/executor/block_test.go | 1072 +++++++ .../vms/avm/blocks/executor/manager.go | 210 ++ .../vms/avm/blocks/executor/manager_test.go | 349 +++ .../vms/avm/blocks/executor/mock_manager.go | 170 + avalanchego/vms/avm/blocks/mock_block.go | 166 + avalanchego/vms/avm/blocks/parser.go | 110 + avalanchego/vms/avm/blocks/standard_block.go | 92 + avalanchego/vms/avm/chain_state.go | 48 + avalanchego/vms/avm/client.go | 144 +- avalanchego/vms/avm/client_test.go | 10 +- avalanchego/vms/avm/config/config.go | 13 + avalanchego/vms/avm/dag_state.go | 84 + avalanchego/vms/avm/factory.go | 23 +- avalanchego/vms/avm/fx_test.go | 30 +- avalanchego/vms/avm/fxs/fx.go | 8 +- avalanchego/vms/avm/genesis.go | 21 +- avalanchego/vms/avm/genesis_test.go | 27 + avalanchego/vms/avm/health.go | 6 +- avalanchego/vms/avm/index_test.go | 78 +- avalanchego/vms/avm/metrics.go | 49 - avalanchego/vms/avm/metrics/metrics.go | 103 + avalanchego/vms/avm/metrics/mock_metrics.go | 131 + avalanchego/vms/avm/metrics/tx_metrics.go | 78 + avalanchego/vms/avm/network/atomic.go | 138 + avalanchego/vms/avm/network/network.go | 202 ++ avalanchego/vms/avm/network/network_test.go | 343 ++ avalanchego/vms/avm/pubsub_filterer.go | 10 +- avalanchego/vms/avm/pubsub_filterer_test.go | 2 +- avalanchego/vms/avm/service.go | 693 +++-- avalanchego/vms/avm/service_test.go | 1210 +++++--- avalanchego/vms/avm/state_test.go | 77 +- avalanchego/vms/avm/states/diff.go | 172 + avalanchego/vms/avm/states/mock_states.go | 767 +++++ avalanchego/vms/avm/states/state.go | 555 +++- avalanchego/vms/avm/states/state_test.go | 314 ++ avalanchego/vms/avm/states/tx_state.go | 91 - avalanchego/vms/avm/states/tx_state_test.go | 102 - avalanchego/vms/avm/states/versions.go | 14 + avalanchego/vms/avm/static_client.go | 9 +- avalanchego/vms/avm/static_service.go | 43 +- avalanchego/vms/avm/static_service_test.go | 
4 +- avalanchego/vms/avm/tx_execute.go | 91 - avalanchego/vms/avm/tx_init.go | 4 +- avalanchego/vms/avm/tx_semantic_verify.go | 139 - .../vms/avm/tx_semantic_verify_test.go | 1684 ---------- avalanchego/vms/avm/txs/base_tx.go | 37 +- avalanchego/vms/avm/txs/base_tx_test.go | 548 +--- avalanchego/vms/avm/txs/codec.go | 25 +- avalanchego/vms/avm/txs/create_asset_tx.go | 91 +- .../vms/avm/txs/create_asset_tx_test.go | 618 +--- avalanchego/vms/avm/txs/executor/backend.go | 26 + avalanchego/vms/avm/txs/executor/executor.go | 147 + .../vms/avm/txs/executor/semantic_verifier.go | 252 ++ .../txs/executor/semantic_verifier_test.go | 894 ++++++ .../avm/txs/executor/syntactic_verifier.go | 302 ++ .../txs/executor/syntactic_verifier_test.go | 2332 ++++++++++++++ avalanchego/vms/avm/txs/export_tx.go | 46 +- avalanchego/vms/avm/txs/export_tx_test.go | 658 +--- avalanchego/vms/avm/txs/import_tx.go | 59 +- avalanchego/vms/avm/txs/import_tx_test.go | 144 +- avalanchego/vms/avm/txs/initial_state.go | 50 +- avalanchego/vms/avm/txs/initial_state_test.go | 20 +- avalanchego/vms/avm/txs/mempool/mempool.go | 219 ++ .../vms/avm/txs/mempool/mempool_test.go | 173 ++ .../vms/avm/txs/mempool/mock_mempool.go | 145 + avalanchego/vms/avm/txs/mock_unsigned_tx.go | 163 + avalanchego/vms/avm/txs/operation.go | 41 +- avalanchego/vms/avm/txs/operation_test.go | 40 +- avalanchego/vms/avm/txs/operation_tx.go | 75 +- avalanchego/vms/avm/txs/parser.go | 78 +- avalanchego/vms/avm/txs/tx.go | 103 +- avalanchego/vms/avm/txs/tx_test.go | 231 -- avalanchego/vms/avm/txs/visitor.go | 4 +- avalanchego/vms/avm/unique_tx.go | 166 +- avalanchego/vms/avm/utxo/spender.go | 440 +++ avalanchego/vms/avm/vm.go | 808 ++--- avalanchego/vms/avm/vm_benchmark_test.go | 35 +- avalanchego/vms/avm/vm_regression_test.go | 151 + avalanchego/vms/avm/vm_test.go | 325 +- avalanchego/vms/avm/wallet_client.go | 21 +- avalanchego/vms/avm/wallet_service.go | 59 +- avalanchego/vms/avm/wallet_service_test.go | 14 +- 
avalanchego/vms/components/avax/addresses.go | 13 +- avalanchego/vms/components/avax/asset.go | 8 +- avalanchego/vms/components/avax/asset_test.go | 2 +- .../vms/components/avax/atomic_utxos.go | 9 +- avalanchego/vms/components/avax/base_tx.go | 33 +- .../vms/components/avax/flow_checker.go | 14 +- avalanchego/vms/components/avax/metadata.go | 16 +- .../vms/components/avax/metadata_test.go | 2 +- .../components/avax/mock_transferable_in.go | 93 + .../components/avax/mock_transferable_out.go | 3 + .../vms/components/avax/singleton_state.go | 42 - .../components/avax/singleton_state_test.go | 30 - avalanchego/vms/components/avax/state.go | 2 +- .../vms/components/avax/status_state.go | 93 - .../vms/components/avax/status_state_test.go | 51 - .../vms/components/avax/test_verifiable.go | 29 +- .../vms/components/avax/transferables.go | 94 +- .../vms/components/avax/transferables_test.go | 11 +- avalanchego/vms/components/avax/utxo.go | 4 +- .../vms/components/avax/utxo_fetching.go | 20 +- .../vms/components/avax/utxo_fetching_test.go | 7 +- .../vms/components/avax/utxo_handler.go | 32 + avalanchego/vms/components/avax/utxo_id.go | 64 +- .../vms/components/avax/utxo_id_test.go | 157 +- avalanchego/vms/components/avax/utxo_state.go | 37 +- .../vms/components/avax/utxo_state_test.go | 4 +- avalanchego/vms/components/avax/utxo_test.go | 2 +- avalanchego/vms/components/chain/block.go | 68 +- avalanchego/vms/components/chain/state.go | 223 +- .../vms/components/chain/state_test.go | 199 +- avalanchego/vms/components/index/index.go | 17 +- avalanchego/vms/components/index/metrics.go | 2 +- avalanchego/vms/components/keystore/codec.go | 2 +- avalanchego/vms/components/keystore/user.go | 49 +- .../vms/components/keystore/user_test.go | 14 +- .../message/codec.go | 8 +- .../message/handler.go | 2 +- .../message/handler_test.go | 6 +- .../message/message.go | 21 +- .../vms/components/message/message_test.go | 19 + avalanchego/vms/components/message/tx.go | 20 + 
.../message/tx_test.go} | 10 +- avalanchego/vms/components/state/builtin.go | 61 - avalanchego/vms/components/state/state.go | 243 -- .../vms/components/state/state_test.go | 509 --- avalanchego/vms/components/state/types.go | 19 - .../vms/components/verify/mock_verifiable.go | 3 + avalanchego/vms/components/verify/subnet.go | 19 +- .../vms/components/verify/subnet_test.go | 102 +- .../vms/components/verify/verification.go | 2 +- .../components/verify/verification_test.go | 6 +- avalanchego/vms/manager.go | 66 +- avalanchego/vms/metervm/batched_vm.go | 15 +- avalanchego/vms/metervm/block.go | 66 +- avalanchego/vms/metervm/block_metrics.go | 17 +- avalanchego/vms/metervm/block_vm.go | 64 +- .../metervm/build_block_with_context_vm.go | 31 + avalanchego/vms/metervm/height_indexed_vm.go | 12 +- avalanchego/vms/metervm/metrics.go | 2 +- avalanchego/vms/metervm/state_syncable_vm.go | 24 +- avalanchego/vms/metervm/vertex_metrics.go | 2 +- avalanchego/vms/metervm/vertex_vm.go | 59 +- avalanchego/vms/mock_manager.go | 74 +- avalanchego/vms/nftfx/credential.go | 2 +- avalanchego/vms/nftfx/credential_test.go | 2 +- avalanchego/vms/nftfx/factory.go | 10 +- avalanchego/vms/nftfx/factory_test.go | 6 +- avalanchego/vms/nftfx/fx.go | 6 +- avalanchego/vms/nftfx/fx_test.go | 64 +- avalanchego/vms/nftfx/mint_operation.go | 2 +- avalanchego/vms/nftfx/mint_operation_test.go | 2 +- avalanchego/vms/nftfx/mint_output.go | 4 +- avalanchego/vms/nftfx/mint_output_test.go | 2 +- avalanchego/vms/nftfx/transfer_operation.go | 2 +- .../vms/nftfx/transfer_operation_test.go | 2 +- avalanchego/vms/nftfx/transfer_output.go | 10 +- avalanchego/vms/nftfx/transfer_output_test.go | 2 +- .../vms/platformvm/api/static_client.go | 9 +- .../vms/platformvm/api/static_service.go | 130 +- .../vms/platformvm/api/static_service_test.go | 142 +- .../vms/platformvm/blocks/abort_block.go | 29 +- .../vms/platformvm/blocks/abort_block_test.go | 6 +- .../vms/platformvm/blocks/atomic_block.go | 15 +- 
.../platformvm/blocks/atomic_block_test.go | 8 +- avalanchego/vms/platformvm/blocks/block.go | 2 +- .../blocks/builder/apricot_builder.go | 113 - .../blocks/builder/apricot_builder_test.go | 360 --- .../blocks/builder/banff_builder.go | 61 - .../blocks/builder/banff_builder_test.go | 511 --- .../vms/platformvm/blocks/builder/builder.go | 156 +- .../platformvm/blocks/builder/builder_test.go | 455 ++- .../platformvm/blocks/builder/helpers_test.go | 100 +- .../vms/platformvm/blocks/builder/network.go | 45 +- .../platformvm/blocks/builder/network_test.go | 47 +- .../blocks/builder/standard_block_test.go | 20 +- avalanchego/vms/platformvm/blocks/codec.go | 2 +- .../vms/platformvm/blocks/commit_block.go | 26 +- .../platformvm/blocks/commit_block_test.go | 6 +- .../vms/platformvm/blocks/common_block.go | 21 +- .../platformvm/blocks/executor/acceptor.go | 22 +- .../blocks/executor/acceptor_test.go | 20 +- .../vms/platformvm/blocks/executor/backend.go | 2 +- .../blocks/executor/backend_test.go | 7 +- .../vms/platformvm/blocks/executor/block.go | 15 +- .../platformvm/blocks/executor/block_state.go | 5 +- .../platformvm/blocks/executor/block_test.go | 8 +- .../blocks/executor/helpers_test.go | 108 +- .../vms/platformvm/blocks/executor/manager.go | 4 +- .../blocks/executor/manager_test.go | 6 +- .../blocks/executor/mock_manager.go | 114 + .../vms/platformvm/blocks/executor/options.go | 4 +- .../blocks/executor/options_test.go | 2 +- .../blocks/executor/proposal_block_test.go | 307 +- .../platformvm/blocks/executor/rejector.go | 4 +- .../blocks/executor/rejector_test.go | 21 +- .../blocks/executor/standard_block_test.go | 173 +- .../platformvm/blocks/executor/verifier.go | 26 +- .../blocks/executor/verifier_test.go | 32 +- .../vms/platformvm/blocks/mock_block.go | 3 + avalanchego/vms/platformvm/blocks/parse.go | 2 +- .../vms/platformvm/blocks/parse_test.go | 12 +- .../vms/platformvm/blocks/proposal_block.go | 29 +- .../platformvm/blocks/proposal_block_test.go | 19 +- 
.../vms/platformvm/blocks/standard_block.go | 29 +- .../platformvm/blocks/standard_block_test.go | 19 +- avalanchego/vms/platformvm/blocks/visitor.go | 2 +- avalanchego/vms/platformvm/client.go | 137 +- .../client_permissionless_validator.go | 70 +- avalanchego/vms/platformvm/config/config.go | 43 +- .../vms/platformvm/docs/chain_time_update.md | 35 + avalanchego/vms/platformvm/factory.go | 10 +- avalanchego/vms/platformvm/fx/fx.go | 4 +- avalanchego/vms/platformvm/fx/mock_fx.go | 82 +- avalanchego/vms/platformvm/genesis/codec.go | 2 +- avalanchego/vms/platformvm/genesis/genesis.go | 6 +- avalanchego/vms/platformvm/health.go | 78 +- avalanchego/vms/platformvm/health_test.go | 110 + .../vms/platformvm/metrics/block_metrics.go | 4 +- avalanchego/vms/platformvm/metrics/metrics.go | 40 +- avalanchego/vms/platformvm/metrics/no_op.go | 10 +- .../vms/platformvm/metrics/tx_metrics.go | 8 +- .../vms/platformvm/reward/calculator.go | 4 +- .../vms/platformvm/reward/calculator_test.go | 59 +- avalanchego/vms/platformvm/reward/config.go | 2 +- avalanchego/vms/platformvm/service.go | 1192 ++++--- avalanchego/vms/platformvm/service_test.go | 550 ++-- avalanchego/vms/platformvm/signer/empty.go | 13 +- .../vms/platformvm/signer/empty_test.go | 3 +- .../platformvm/signer/proof_of_possession.go | 8 +- .../signer/proof_of_possession_test.go | 2 +- avalanchego/vms/platformvm/signer/signer.go | 2 +- .../platformvm/stakeable/stakeable_lock.go | 2 +- .../stakeable/stakeable_lock_test.go | 135 + avalanchego/vms/platformvm/state/diff.go | 147 +- avalanchego/vms/platformvm/state/diff_test.go | 15 +- .../vms/platformvm/state/empty_iterator.go | 14 +- .../platformvm/state/empty_iterator_test.go | 2 +- .../vms/platformvm/state/masked_iterator.go | 4 +- .../platformvm/state/masked_iterator_test.go | 2 +- .../vms/platformvm/state/merged_iterator.go | 6 +- .../platformvm/state/merged_iterator_test.go | 2 +- .../vms/platformvm/state/mock_chain.go | 32 + avalanchego/vms/platformvm/state/mock_diff.go 
| 38 +- .../platformvm/state/mock_staker_iterator.go | 3 + .../vms/platformvm/state/mock_state.go | 85 +- .../vms/platformvm/state/mock_versions.go | 3 + .../platformvm/state/slice_iterator_test.go | 6 +- avalanchego/vms/platformvm/state/staker.go | 32 +- .../platformvm/state/staker_diff_iterator.go | 8 +- .../state/staker_diff_iterator_test.go | 2 +- .../vms/platformvm/state/staker_status.go | 12 + .../vms/platformvm/state/staker_test.go | 32 +- avalanchego/vms/platformvm/state/stakers.go | 100 +- .../vms/platformvm/state/stakers_test.go | 45 +- avalanchego/vms/platformvm/state/state.go | 857 ++--- .../vms/platformvm/state/state_test.go | 364 ++- .../vms/platformvm/state/tree_iterator.go | 10 +- .../platformvm/state/tree_iterator_test.go | 9 +- avalanchego/vms/platformvm/state/utxos.go | 21 - .../platformvm/state/validator_metadata.go | 265 ++ .../state/validator_metadata_test.go | 303 ++ .../vms/platformvm/state/validator_uptimes.go | 165 + avalanchego/vms/platformvm/state/versions.go | 4 +- .../platformvm/status/blockchain_status.go | 2 +- .../status/blockchain_status_test.go | 2 +- avalanchego/vms/platformvm/status/status.go | 2 +- .../vms/platformvm/status/status_test.go | 2 +- .../vms/platformvm/txs/add_delegator_test.go | 19 +- .../vms/platformvm/txs/add_delegator_tx.go | 47 +- .../txs/add_permissionless_delegator_tx.go | 29 +- .../add_permissionless_delegator_tx_test.go | 34 +- .../txs/add_permissionless_validator_tx.go | 33 +- .../add_permissionless_validator_tx_test.go | 41 +- .../txs/add_subnet_validator_test.go | 27 +- .../platformvm/txs/add_subnet_validator_tx.go | 27 +- .../vms/platformvm/txs/add_validator_test.go | 13 +- .../vms/platformvm/txs/add_validator_tx.go | 59 +- .../vms/platformvm/txs/advance_time_tx.go | 29 +- avalanchego/vms/platformvm/txs/base_tx.go | 22 +- .../vms/platformvm/txs/base_tx_test.go | 25 +- .../vms/platformvm/txs/builder/builder.go | 76 +- .../platformvm/txs/builder/mock_builder.go | 21 +- 
avalanchego/vms/platformvm/txs/codec.go | 2 +- .../vms/platformvm/txs/create_chain_test.go | 49 +- .../vms/platformvm/txs/create_chain_tx.go | 7 +- .../vms/platformvm/txs/create_subnet_tx.go | 4 +- .../txs/executor/advance_time_test.go | 423 ++- .../txs/executor/atomic_tx_executor.go | 47 +- .../vms/platformvm/txs/executor/backend.go | 4 +- .../txs/executor/create_chain_test.go | 106 +- .../txs/executor/create_subnet_test.go | 6 +- .../platformvm/txs/executor/export_test.go | 16 +- .../platformvm/txs/executor/helpers_test.go | 138 +- .../platformvm/txs/executor/import_test.go | 75 +- .../txs/executor/proposal_tx_executor.go | 200 +- .../txs/executor/proposal_tx_executor_test.go | 538 ++-- .../txs/executor/reward_validator_test.go | 597 +++- .../txs/executor/staker_tx_verification.go | 57 +- .../executor/staker_tx_verification_test.go | 145 +- .../txs/executor/standard_tx_executor.go | 211 +- .../txs/executor/standard_tx_executor_test.go | 605 ++-- .../platformvm/txs/executor/state_changes.go | 18 +- .../txs/executor/subnet_tx_verification.go | 6 +- .../txs/executor/tx_mempool_verifier.go | 111 +- avalanchego/vms/platformvm/txs/export_tx.go | 4 +- avalanchego/vms/platformvm/txs/import_tx.go | 14 +- .../vms/platformvm/txs/mempool/issuer.go | 28 +- .../vms/platformvm/txs/mempool/mempool.go | 87 +- .../platformvm/txs/mempool/mempool_test.go | 17 +- .../platformvm/txs/mempool/mock_mempool.go | 40 +- .../vms/platformvm/txs/mempool/remover.go | 16 +- avalanchego/vms/platformvm/txs/mock_staker.go | 20 + .../vms/platformvm/txs/mock_unsigned_tx.go | 67 +- avalanchego/vms/platformvm/txs/priorities.go | 2 +- .../txs/remove_subnet_validator_tx.go | 4 +- .../txs/remove_subnet_validator_tx_test.go | 15 +- .../vms/platformvm/txs/reward_validator_tx.go | 30 +- avalanchego/vms/platformvm/txs/staker_tx.go | 28 +- .../{validator => txs}/subnet_validator.go | 8 +- .../platformvm/txs/subnet_validator_test.go | 38 + .../vms/platformvm/txs/transform_subnet_tx.go | 4 +- 
.../txs/transform_subnet_tx_test.go | 14 +- avalanchego/vms/platformvm/txs/tx.go | 59 +- .../vms/platformvm/txs/txheap/by_age.go | 4 +- .../vms/platformvm/txs/txheap/by_end_time.go | 4 +- .../platformvm/txs/txheap/by_end_time_test.go | 15 +- .../platformvm/txs/txheap/by_start_time.go | 4 +- .../txs/txheap/by_start_time_test.go | 15 +- avalanchego/vms/platformvm/txs/txheap/heap.go | 20 +- .../vms/platformvm/txs/txheap/with_metrics.go | 4 +- avalanchego/vms/platformvm/txs/unsigned_tx.go | 7 +- .../{validator => txs}/validator.go | 23 +- .../vms/platformvm/txs/validator_test.go | 91 + avalanchego/vms/platformvm/txs/visitor.go | 2 +- avalanchego/vms/platformvm/utxo/handler.go | 90 +- .../vms/platformvm/utxo/handler_test.go | 38 +- .../vms/platformvm/utxo/mock_verifier.go | 6 +- .../platformvm/validator/validator_test.go | 100 - avalanchego/vms/platformvm/vm.go | 355 ++- .../vms/platformvm/vm_regression_test.go | 822 +++-- avalanchego/vms/platformvm/vm_test.go | 2763 +++++++++-------- avalanchego/vms/platformvm/warp/codec.go | 31 + avalanchego/vms/platformvm/warp/constants.go | 15 + .../vms/platformvm/warp/gwarp/client.go | 34 + .../vms/platformvm/warp/gwarp/server.go | 50 + .../vms/platformvm/warp/gwarp/signer_test.go | 71 + avalanchego/vms/platformvm/warp/message.go | 51 + .../vms/platformvm/warp/message_test.go | 44 + avalanchego/vms/platformvm/warp/signature.go | 160 + .../vms/platformvm/warp/signature_test.go | 785 +++++ avalanchego/vms/platformvm/warp/signer.go | 48 + .../vms/platformvm/warp/signer_test.go | 25 + .../vms/platformvm/warp/test_signer.go | 57 + .../vms/platformvm/warp/unsigned_message.go | 71 + .../platformvm/warp/unsigned_message_test.go | 34 + avalanchego/vms/platformvm/warp/validator.go | 142 + .../vms/platformvm/warp/validator_test.go | 306 ++ avalanchego/vms/propertyfx/burn_operation.go | 8 +- .../vms/propertyfx/burn_operation_test.go | 2 +- avalanchego/vms/propertyfx/credential.go | 2 +- avalanchego/vms/propertyfx/credential_test.go | 2 +- 
avalanchego/vms/propertyfx/factory.go | 10 +- avalanchego/vms/propertyfx/factory_test.go | 6 +- avalanchego/vms/propertyfx/fx.go | 6 +- avalanchego/vms/propertyfx/fx_test.go | 50 +- avalanchego/vms/propertyfx/mint_operation.go | 2 +- .../vms/propertyfx/mint_operation_test.go | 2 +- avalanchego/vms/propertyfx/mint_output.go | 2 +- .../vms/propertyfx/mint_output_test.go | 2 +- avalanchego/vms/propertyfx/owned_output.go | 2 +- .../vms/propertyfx/owned_output_test.go | 2 +- avalanchego/vms/proposervm/batched_vm.go | 29 +- avalanchego/vms/proposervm/batched_vm_test.go | 388 ++- avalanchego/vms/proposervm/block.go | 122 +- avalanchego/vms/proposervm/block/block.go | 37 +- .../vms/proposervm/block/block_test.go | 15 +- avalanchego/vms/proposervm/block/build.go | 61 +- .../vms/proposervm/block/build_test.go | 6 +- avalanchego/vms/proposervm/block/codec.go | 41 +- avalanchego/vms/proposervm/block/header.go | 21 +- .../vms/proposervm/block/header_test.go | 2 +- avalanchego/vms/proposervm/block/option.go | 21 +- .../vms/proposervm/block/option_test.go | 2 +- avalanchego/vms/proposervm/block/parse.go | 23 +- .../vms/proposervm/block/parse_test.go | 19 +- avalanchego/vms/proposervm/block_server.go | 10 +- avalanchego/vms/proposervm/block_test.go | 87 + .../vms/proposervm/height_indexed_vm.go | 19 +- .../vms/proposervm/indexer/block_server.go | 6 +- .../proposervm/indexer/block_server_test.go | 11 +- .../vms/proposervm/indexer/height_indexer.go | 12 +- .../proposervm/indexer/height_indexer_test.go | 26 +- .../vms/proposervm/mock_post_fork_block.go | 321 ++ avalanchego/vms/proposervm/post_fork_block.go | 42 +- .../vms/proposervm/post_fork_block_test.go | 315 +- .../vms/proposervm/post_fork_option.go | 41 +- .../vms/proposervm/post_fork_option_test.go | 182 +- avalanchego/vms/proposervm/pre_fork_block.go | 53 +- .../vms/proposervm/pre_fork_block_test.go | 228 +- .../vms/proposervm/proposer/mock_windower.go | 70 + .../vms/proposervm/proposer/validators.go | 18 +- 
.../proposervm/proposer/validators_test.go | 26 + .../vms/proposervm/proposer/windower.go | 62 +- .../vms/proposervm/proposer/windower_test.go | 50 +- .../vms/proposervm/scheduler/scheduler.go | 2 +- .../proposervm/scheduler/scheduler_test.go | 4 +- .../proposervm/state/block_height_index.go | 136 +- .../state/block_height_index_test.go | 30 - .../vms/proposervm/state/block_state.go | 22 +- .../vms/proposervm/state/block_state_test.go | 4 +- .../vms/proposervm/state/chain_state.go | 4 +- .../vms/proposervm/state/chain_state_test.go | 2 +- avalanchego/vms/proposervm/state/codec.go | 2 +- .../vms/proposervm/state/mock_state.go | 63 +- avalanchego/vms/proposervm/state/state.go | 2 +- .../vms/proposervm/state/state_test.go | 2 +- avalanchego/vms/proposervm/state_summary.go | 31 +- .../vms/proposervm/state_syncable_vm.go | 43 +- .../vms/proposervm/state_syncable_vm_test.go | 240 +- avalanchego/vms/proposervm/summary/build.go | 2 +- .../vms/proposervm/summary/build_test.go | 2 +- avalanchego/vms/proposervm/summary/codec.go | 2 +- avalanchego/vms/proposervm/summary/parse.go | 2 +- .../vms/proposervm/summary/parse_test.go | 2 +- .../vms/proposervm/summary/state_summary.go | 28 +- avalanchego/vms/proposervm/tree/tree.go | 23 +- avalanchego/vms/proposervm/tree/tree_test.go | 9 +- avalanchego/vms/proposervm/vm.go | 358 +-- .../vms/proposervm/vm_byzantine_test.go | 202 +- avalanchego/vms/proposervm/vm_test.go | 1032 ++++-- avalanchego/vms/registry/mock_vm_getter.go | 4 +- .../vms/registry/mock_vm_registerer.go | 60 +- avalanchego/vms/registry/mock_vm_registry.go | 21 +- avalanchego/vms/registry/vm_getter.go | 7 +- avalanchego/vms/registry/vm_getter_test.go | 14 +- avalanchego/vms/registry/vm_registerer.go | 50 +- .../vms/registry/vm_registerer_test.go | 150 +- avalanchego/vms/registry/vm_registry.go | 26 +- avalanchego/vms/registry/vm_registry_test.go | 63 +- avalanchego/vms/rpcchainvm/batched_vm_test.go | 129 + avalanchego/vms/rpcchainvm/errors.go | 30 +- 
avalanchego/vms/rpcchainvm/factory.go | 99 +- .../vms/rpcchainvm/ghttp/gconn/conn_client.go | 21 +- .../vms/rpcchainvm/ghttp/gconn/conn_server.go | 20 +- .../rpcchainvm/ghttp/greader/reader_client.go | 8 +- .../rpcchainvm/ghttp/greader/reader_server.go | 10 +- .../ghttp/gresponsewriter/locked_writer.go | 12 +- .../ghttp/gresponsewriter/writer_client.go | 21 +- .../ghttp/gresponsewriter/writer_server.go | 51 +- .../rpcchainvm/ghttp/gwriter/writer_client.go | 8 +- .../rpcchainvm/ghttp/gwriter/writer_server.go | 10 +- .../vms/rpcchainvm/ghttp/http_client.go | 23 +- .../vms/rpcchainvm/ghttp/http_server.go | 9 +- avalanchego/vms/rpcchainvm/ghttp/http_test.go | 4 +- .../vms/rpcchainvm/grpcutils/client.go | 105 + .../vms/rpcchainvm/grpcutils/client_test.go | 87 + .../vms/rpcchainvm/grpcutils/listener.go | 21 - .../vms/rpcchainvm/grpcutils/server.go | 123 + .../vms/rpcchainvm/grpcutils/server_closer.go | 2 +- avalanchego/vms/rpcchainvm/grpcutils/util.go | 129 +- .../vms/rpcchainvm/gruntime/runtime_client.go | 31 + .../vms/rpcchainvm/gruntime/runtime_server.go | 32 + .../gsubnetlookup/subnet_lookup_client.go | 35 - .../gsubnetlookup/subnet_lookup_server.go | 43 - .../rpcchainvm/messenger/messenger_client.go | 4 +- .../rpcchainvm/messenger/messenger_server.go | 4 +- avalanchego/vms/rpcchainvm/mock_factory.go | 50 - .../vms/rpcchainvm/mock_plugin_directory.go | 52 - avalanchego/vms/rpcchainvm/plugin_test.go | 109 - avalanchego/vms/rpcchainvm/runtime/README.md | 45 + avalanchego/vms/rpcchainvm/runtime/manager.go | 48 + avalanchego/vms/rpcchainvm/runtime/runtime.go | 51 + .../runtime/subprocess/initializer.go | 48 + .../runtime/subprocess/linux_stopper.go | 62 + .../runtime/subprocess/non_linux_stopper.go | 31 + .../rpcchainvm/runtime/subprocess/runtime.go | 152 + .../rpcchainvm/runtime/subprocess/stopper.go | 32 + .../vms/rpcchainvm/state_syncable_vm_test.go | 294 +- avalanchego/vms/rpcchainvm/vm.go | 123 +- avalanchego/vms/rpcchainvm/vm_client.go | 562 ++-- 
avalanchego/vms/rpcchainvm/vm_server.go | 405 ++- avalanchego/vms/rpcchainvm/vm_test.go | 517 +-- .../vms/rpcchainvm/with_context_vm_test.go | 120 + avalanchego/vms/secp256k1fx/credential.go | 10 +- .../vms/secp256k1fx/credential_test.go | 8 +- avalanchego/vms/secp256k1fx/factory.go | 10 +- avalanchego/vms/secp256k1fx/factory_test.go | 6 +- avalanchego/vms/secp256k1fx/fx.go | 99 +- avalanchego/vms/secp256k1fx/fx_test.go | 257 +- avalanchego/vms/secp256k1fx/input.go | 4 +- avalanchego/vms/secp256k1fx/input_test.go | 66 +- avalanchego/vms/secp256k1fx/keychain.go | 72 +- avalanchego/vms/secp256k1fx/keychain_test.go | 38 +- avalanchego/vms/secp256k1fx/mint_operation.go | 2 +- .../vms/secp256k1fx/mint_operation_test.go | 121 +- avalanchego/vms/secp256k1fx/mint_output.go | 8 +- .../vms/secp256k1fx/mint_output_test.go | 52 +- avalanchego/vms/secp256k1fx/output_owners.go | 23 +- .../vms/secp256k1fx/output_owners_test.go | 164 +- avalanchego/vms/secp256k1fx/transfer_input.go | 12 +- .../vms/secp256k1fx/transfer_input_test.go | 4 +- .../vms/secp256k1fx/transfer_output.go | 22 +- .../vms/secp256k1fx/transfer_output_test.go | 4 +- avalanchego/vms/secp256k1fx/tx.go | 8 +- avalanchego/vms/secp256k1fx/vm.go | 25 +- avalanchego/vms/tracedvm/batched_vm.go | 70 + avalanchego/vms/tracedvm/block.go | 120 + avalanchego/vms/tracedvm/block_vm.go | 182 ++ .../tracedvm/build_block_with_context_vm.go | 28 + avalanchego/vms/tracedvm/height_indexed_vm.go | 39 + avalanchego/vms/tracedvm/state_syncable_vm.go | 73 + avalanchego/vms/tracedvm/tx.go | 50 + avalanchego/vms/tracedvm/vertex_vm.go | 94 + avalanchego/vms/types/blob_data.go | 2 +- avalanchego/wallet/chain/p/backend.go | 7 +- avalanchego/wallet/chain/p/backend_visitor.go | 13 +- avalanchego/wallet/chain/p/builder.go | 59 +- .../wallet/chain/p/builder_with_options.go | 17 +- avalanchego/wallet/chain/p/context.go | 53 +- avalanchego/wallet/chain/p/signer.go | 10 +- avalanchego/wallet/chain/p/signer_visitor.go | 72 +- 
avalanchego/wallet/chain/p/wallet.go | 33 +- .../wallet/chain/p/wallet_with_options.go | 15 +- avalanchego/wallet/chain/x/backend.go | 44 +- avalanchego/wallet/chain/x/backend_visitor.go | 64 + avalanchego/wallet/chain/x/builder.go | 18 +- .../wallet/chain/x/builder_with_options.go | 12 +- avalanchego/wallet/chain/x/constants.go | 8 +- avalanchego/wallet/chain/x/context.go | 28 +- avalanchego/wallet/chain/x/signer.go | 49 +- avalanchego/wallet/chain/x/wallet.go | 12 +- .../wallet/chain/x/wallet_with_options.go | 4 +- avalanchego/wallet/subnet/primary/api.go | 10 +- .../wallet/subnet/primary/common/options.go | 21 +- .../wallet/subnet/primary/common/spend.go | 5 +- .../wallet/subnet/primary/example_test.go | 12 +- .../add-permissioned-subnet-validator/main.go | 70 + .../examples/add-primary-validator/main.go | 79 + .../primary/examples/create-asset/main.go | 65 + .../primary/examples/create-chain/main.go | 63 + .../examples/create-locked-stakeable/main.go | 73 + .../primary/examples/create-subnet/main.go | 51 + .../examples/remove-subnet-validator/main.go | 57 + avalanchego/wallet/subnet/primary/utxos.go | 14 +- avalanchego/wallet/subnet/primary/wallet.go | 32 +- avalanchego/x/README.md | 3 + avalanchego/x/merkledb/README.md | 71 + avalanchego/x/merkledb/batch.go | 27 + avalanchego/x/merkledb/cache.go | 80 + avalanchego/x/merkledb/cache_test.go | 213 ++ avalanchego/x/merkledb/codec.go | 858 +++++ avalanchego/x/merkledb/codec_test.go | 698 +++++ avalanchego/x/merkledb/db.go | 1147 +++++++ avalanchego/x/merkledb/db_test.go | 1003 ++++++ avalanchego/x/merkledb/history.go | 264 ++ avalanchego/x/merkledb/history_test.go | 891 ++++++ avalanchego/x/merkledb/iterator.go | 63 + avalanchego/x/merkledb/maybe.go | 47 + avalanchego/x/merkledb/maybe_test.go | 31 + avalanchego/x/merkledb/metrics.go | 219 ++ avalanchego/x/merkledb/metrics_test.go | 74 + avalanchego/x/merkledb/node.go | 203 ++ avalanchego/x/merkledb/node_test.go | 69 + avalanchego/x/merkledb/path.go | 153 + 
avalanchego/x/merkledb/path_test.go | 77 + avalanchego/x/merkledb/proof.go | 689 ++++ avalanchego/x/merkledb/proof_test.go | 1660 ++++++++++ avalanchego/x/merkledb/trie.go | 84 + avalanchego/x/merkledb/trie_test.go | 1465 +++++++++ avalanchego/x/merkledb/trieview.go | 1413 +++++++++ avalanchego/x/sync/client.go | 201 ++ avalanchego/x/sync/client_test.go | 286 ++ avalanchego/x/sync/codec.go | 34 + avalanchego/x/sync/metrics.go | 96 + avalanchego/x/sync/mock_client.go | 69 + avalanchego/x/sync/network_client.go | 294 ++ avalanchego/x/sync/network_server.go | 212 ++ avalanchego/x/sync/peer_tracker.go | 230 ++ avalanchego/x/sync/request.go | 96 + avalanchego/x/sync/response_handler.go | 46 + avalanchego/x/sync/sync_test.go | 804 +++++ avalanchego/x/sync/syncmanager.go | 679 ++++ avalanchego/x/sync/syncworkheap.go | 198 ++ avalanchego/x/sync/syncworkheap_test.go | 233 ++ coreth/.golangci.yml | 22 +- coreth/Dockerfile | 6 +- coreth/README.md | 7 + coreth/RELEASES.md | 71 + coreth/accounts/abi/abi.go | 60 + coreth/accounts/abi/abi_test.go | 34 +- .../accounts/abi/bind/backends/simulated.go | 9 +- .../abi/bind/backends/simulated_test.go | 91 +- coreth/accounts/abi/bind/base.go | 4 +- coreth/accounts/abi/bind/bind_test.go | 2 +- coreth/accounts/abi/reflect.go | 35 +- coreth/accounts/abi/utils.go | 17 +- coreth/accounts/accounts.go | 6 +- coreth/accounts/hd.go | 2 +- .../accounts/keystore/account_cache_test.go | 8 +- coreth/accounts/keystore/keystore_test.go | 4 +- coreth/accounts/scwallet/wallet.go | 3 + coreth/accounts/url.go | 7 +- coreth/consensus/dummy/consensus.go | 50 +- coreth/consensus/misc/dao.go | 95 - coreth/core/bench_test.go | 16 +- coreth/core/blockchain.go | 493 ++- coreth/core/blockchain_reader.go | 9 + coreth/core/blockchain_repair_test.go | 17 +- coreth/core/blockchain_snapshot_test.go | 45 +- coreth/core/blockchain_test.go | 335 +- coreth/core/bounded_buffer.go | 56 +- coreth/core/chain_makers.go | 61 +- coreth/core/chain_makers_test.go | 5 +- 
coreth/core/dao_test.go | 197 -- coreth/core/error.go | 2 +- coreth/core/fifo_cache.go | 70 + coreth/core/genesis.go | 40 +- coreth/core/genesis_test.go | 14 +- coreth/core/headerchain.go | 28 +- coreth/core/headerchain_test.go | 11 +- coreth/core/mkalloc.go | 8 +- coreth/core/rawdb/accessors_chain.go | 20 + coreth/core/rawdb/accessors_state_sync.go | 24 + coreth/core/rawdb/chain_iterator.go | 311 ++ coreth/core/rawdb/chain_iterator_test.go | 218 ++ coreth/core/rawdb/database.go | 12 +- coreth/core/rawdb/schema.go | 8 + coreth/core/rlp_test.go | 10 +- coreth/core/state/database.go | 12 +- coreth/core/state/pruner/pruner.go | 6 +- coreth/core/state/snapshot/difflayer.go | 2 +- coreth/core/state/snapshot/difflayer_test.go | 4 +- coreth/core/state/snapshot/disklayer.go | 4 +- coreth/core/state/snapshot/generate.go | 13 +- coreth/core/state/snapshot/generate_test.go | 26 +- coreth/core/state/snapshot/journal.go | 3 +- coreth/core/state/snapshot/snapshot.go | 19 +- coreth/core/state/snapshot/snapshot_test.go | 18 +- coreth/core/state/statedb.go | 6 +- coreth/core/state/trie_prefetcher.go | 24 +- coreth/core/state_manager.go | 6 +- coreth/core/state_processor.go | 5 - coreth/core/state_processor_test.go | 286 +- coreth/core/state_transition.go | 89 +- coreth/core/state_transition_ext_test.go | 224 ++ coreth/core/state_transition_test.go | 248 ++ coreth/core/test_blockchain.go | 230 +- coreth/core/tx_noncer.go | 2 +- coreth/core/tx_pool.go | 12 +- coreth/core/types/block.go | 3 - coreth/core/types/gen_header_json.go | 6 - coreth/core/types/gen_header_rlp.go | 11 +- coreth/core/types/log.go | 9 + coreth/core/types/transaction_signing_test.go | 3 + coreth/core/vm/contracts.go | 82 +- coreth/core/vm/eips.go | 8 + coreth/core/vm/evm.go | 51 +- coreth/core/vm/gas_table.go | 96 +- coreth/core/vm/gas_table_test.go | 72 + coreth/core/vm/instructions.go | 61 +- coreth/core/vm/interpreter.go | 11 +- coreth/core/vm/jump_table.go | 19 + coreth/core/vm/operations_acl.go | 2 +- 
coreth/core/vm/runtime/runtime_test.go | 243 +- .../testdata/precompiles/bn256ScalarMul.json | 7 + coreth/eth/api.go | 4 +- coreth/eth/api_backend.go | 41 +- coreth/eth/api_backend_test.go | 73 + coreth/eth/backend.go | 68 +- coreth/eth/ethconfig/config.go | 77 +- coreth/eth/filters/bench_test.go | 201 ++ coreth/eth/filters/filter.go | 26 +- coreth/eth/filters/filter_system.go | 35 +- coreth/eth/filters/filter_system_test.go | 783 +++++ coreth/eth/filters/filter_test.go | 255 ++ coreth/eth/gasprice/fee_info_provider.go | 145 + coreth/eth/gasprice/fee_info_provider_test.go | 73 + coreth/eth/gasprice/feehistory.go | 9 +- coreth/eth/gasprice/feehistory_test.go | 4 +- coreth/eth/gasprice/gasprice.go | 155 +- coreth/eth/gasprice/gasprice_test.go | 278 +- coreth/eth/state_accessor.go | 107 +- coreth/eth/tracers/api.go | 462 ++- coreth/eth/tracers/api_test.go | 170 +- .../internal/tracetest/calltrace_test.go | 9 +- .../testdata/call_tracer_legacy/create.json | 58 + .../call_tracer_legacy/deep_calls.json | 415 +++ .../call_tracer_legacy/delegatecall.json | 97 + .../inner_create_oog_outer_throw.json | 77 + .../call_tracer_legacy/inner_instafail.json | 72 + .../inner_throw_outer_revert.json | 81 + .../testdata/call_tracer_legacy/oog.json | 60 + .../testdata/call_tracer_legacy/revert.json | 58 + .../call_tracer_legacy/revert_reason.json | 64 + .../call_tracer_legacy/selfdestruct.json | 73 + .../testdata/call_tracer_legacy/simple.json | 78 + .../testdata/call_tracer_legacy/throw.json | 62 + coreth/eth/tracers/js/bigint.go | 30 + coreth/eth/tracers/js/goja.go | 957 ++++++ .../internal/tracers/4byte_tracer_legacy.js | 86 + .../js/internal/tracers/bigram_tracer.js | 47 + .../js/internal/tracers/call_tracer_legacy.js | 252 ++ .../js/internal/tracers/evmdis_tracer.js | 93 + .../js/internal/tracers/noop_tracer_legacy.js | 29 + .../js/internal/tracers/opcount_tracer.js | 32 + .../tracers/prestate_tracer_legacy.js | 115 + .../tracers/js/internal/tracers/tracers.go | 59 + 
.../js/internal/tracers/trigram_tracer.js | 49 + .../js/internal/tracers/unigram_tracer.js | 41 + coreth/eth/tracers/js/tracer_test.go | 315 ++ coreth/eth/tracers/native/4byte.go | 17 +- coreth/eth/tracers/native/tracer.go | 32 +- coreth/ethclient/ethclient.go | 4 +- coreth/ethdb/leveldb/leveldb.go | 15 +- coreth/go.mod | 79 +- coreth/go.sum | 199 +- coreth/internal/ethapi/api.go | 69 +- coreth/internal/ethapi/backend.go | 8 +- coreth/internal/flags/helpers.go | 4 +- coreth/metrics/runtime_test.go | 13 +- coreth/miner/worker.go | 47 +- coreth/node/node.go | 2 - coreth/params/avalanche_params.go | 1 + coreth/params/config.go | 97 +- coreth/params/dao.go | 168 - coreth/params/denomination.go | 3 +- coreth/params/protocol_params.go | 4 +- coreth/params/version.go | 8 +- coreth/peer/client.go | 38 +- coreth/peer/network.go | 295 +- coreth/peer/network_test.go | 296 +- coreth/peer/peer_tracker.go | 9 +- coreth/peer/stats/stats.go | 10 +- coreth/peer/waiting_handler.go | 5 +- coreth/plugin/evm/atomic_syncer.go | 17 +- coreth/plugin/evm/atomic_trie.go | 2 +- .../plugin/evm/atomic_tx_repository_test.go | 3 +- coreth/plugin/evm/block.go | 42 +- coreth/plugin/evm/block_builder.go | 109 +- coreth/plugin/evm/block_builder_test.go | 8 +- coreth/plugin/evm/block_verification.go | 65 +- coreth/plugin/evm/client.go | 44 +- coreth/plugin/evm/config.go | 152 +- coreth/plugin/evm/config_test.go | 66 +- coreth/plugin/evm/export_tx.go | 25 +- coreth/plugin/evm/export_tx_test.go | 123 +- coreth/plugin/evm/factory.go | 4 +- coreth/plugin/evm/formatting.go | 12 +- coreth/plugin/evm/gossiper.go | 15 +- .../evm/gossiper_atomic_gossiping_test.go | 33 +- .../plugin/evm/gossiper_eth_gossiping_test.go | 35 +- coreth/plugin/evm/health.go | 4 +- coreth/plugin/evm/import_tx.go | 19 +- coreth/plugin/evm/import_tx_test.go | 50 +- coreth/plugin/evm/mempool.go | 6 +- .../evm/mempool_atomic_gossiping_test.go | 20 +- coreth/plugin/evm/message/codec.go | 21 +- 
.../plugin/evm/message/cross_chain_handler.go | 71 + coreth/plugin/evm/message/eth_call_request.go | 33 + coreth/plugin/evm/message/handler.go | 20 +- coreth/plugin/evm/message/request.go | 10 + coreth/plugin/evm/message/syncable.go | 9 +- coreth/plugin/evm/service.go | 11 +- coreth/plugin/evm/syncervm_client.go | 49 +- coreth/plugin/evm/syncervm_server.go | 9 +- coreth/plugin/evm/syncervm_test.go | 133 +- coreth/plugin/evm/test_tx.go | 5 +- coreth/plugin/evm/tx.go | 13 +- coreth/plugin/evm/tx_test.go | 7 +- coreth/plugin/evm/user.go | 22 +- coreth/plugin/evm/version.go | 2 +- coreth/plugin/evm/vm.go | 220 +- coreth/plugin/evm/vm_extra_state_root_test.go | 267 -- coreth/plugin/evm/vm_test.go | 963 +++--- coreth/plugin/main.go | 3 +- coreth/rpc/client.go | 45 +- coreth/rpc/client_opt.go | 116 + coreth/rpc/client_opt_test.go | 34 + coreth/rpc/client_test.go | 8 +- coreth/rpc/doc.go | 57 +- coreth/rpc/errors.go | 18 +- coreth/rpc/handler.go | 199 +- coreth/rpc/http.go | 90 +- coreth/rpc/json.go | 15 +- coreth/rpc/server_test.go | 2 +- coreth/rpc/service.go | 3 +- coreth/rpc/subscription_test.go | 2 +- coreth/rpc/testdata/internal-error.js | 7 + coreth/rpc/testdata/invalid-badversion.js | 19 + coreth/rpc/testservice_test.go | 14 + coreth/rpc/websocket.go | 78 +- coreth/rpc/websocket_test.go | 5 +- coreth/scripts/build.sh | 2 +- coreth/scripts/build_test.sh | 9 + coreth/scripts/constants.sh | 6 + coreth/scripts/versions.sh | 4 +- coreth/signer/core/apitypes/types.go | 2 +- coreth/sync/client/client.go | 4 +- coreth/sync/client/leaf_syncer.go | 23 +- coreth/sync/client/mock_network.go | 8 +- coreth/sync/handlers/block_request.go | 2 - coreth/sync/handlers/handler.go | 4 +- coreth/sync/statesync/code_syncer.go | 5 +- coreth/sync/statesync/sync_test.go | 157 +- coreth/sync/statesync/test_sync.go | 14 +- coreth/sync/statesync/trie_segments.go | 19 +- coreth/tests/init.go | 19 + coreth/tests/init_test.go | 287 -- coreth/tests/state_test_util.go | 92 - 
coreth/trie/committer.go | 18 +- coreth/trie/database.go | 91 +- coreth/trie/hasher.go | 4 +- coreth/trie/proof.go | 22 +- coreth/trie/secure_trie.go | 105 +- coreth/trie/stacktrie.go | 13 +- coreth/trie/test_trie.go | 2 +- coreth/trie/trie.go | 12 +- coreth/trie/util_test.go | 4 +- coreth/utils/metered_cache.go | 140 + coreth/vmerrs/vmerrs.go | 32 +- 1758 files changed, 106204 insertions(+), 54056 deletions(-) create mode 100644 avalanchego/SECURITY.md delete mode 100644 avalanchego/api/health/mocks/Client.go create mode 100644 avalanchego/api/info/client_test.go delete mode 100644 avalanchego/api/info/mocks/Client.go create mode 100644 avalanchego/api/server/metrics.go create mode 100644 avalanchego/api/traced_handler.go delete mode 100644 avalanchego/app/plugin/plugin.go delete mode 100644 avalanchego/app/plugin/plugin_client.go delete mode 100644 avalanchego/app/plugin/plugin_server.go delete mode 100644 avalanchego/app/plugin/process.go delete mode 100644 avalanchego/app/process/process.go delete mode 100644 avalanchego/app/runner/config.go delete mode 100644 avalanchego/app/runner/runner.go delete mode 100644 avalanchego/buf.lock create mode 100644 avalanchego/chains/linearizable_vm.go delete mode 100644 avalanchego/chains/mock_manager.go delete mode 100644 avalanchego/chains/subnet.go delete mode 100644 avalanchego/chains/subnet_test.go create mode 100644 avalanchego/chains/test_manager.go create mode 100644 avalanchego/codec/mock_manager.go delete mode 100644 avalanchego/config/pflags.go delete mode 100644 avalanchego/database/manager/mocks/manager.go delete mode 100644 avalanchego/database/mockdb/db.go delete mode 100644 avalanchego/database/mockdb/db_test.go delete mode 100644 avalanchego/database/nodb/db.go create mode 100644 avalanchego/genesis/config_test.go delete mode 100644 avalanchego/ids/bag.go delete mode 100644 avalanchego/ids/bag_test.go delete mode 100644 avalanchego/ids/bit_set.go delete mode 100644 avalanchego/ids/node_id_bag.go delete mode 
100644 avalanchego/ids/node_id_set.go create mode 100644 avalanchego/ids/request_id.go delete mode 100644 avalanchego/ids/set.go delete mode 100644 avalanchego/ids/set_benchmark_test.go delete mode 100644 avalanchego/ids/set_test.go delete mode 100644 avalanchego/ids/short_set.go delete mode 100644 avalanchego/ids/short_set_test.go delete mode 100644 avalanchego/ids/short_test.go delete mode 100644 avalanchego/ids/slice.go delete mode 100644 avalanchego/ids/unique_bag.go delete mode 100644 avalanchego/ids/unique_bag_test.go create mode 100644 avalanchego/indexer/examples/p-chain/main.go create mode 100644 avalanchego/indexer/examples/x-chain-blocks/main.go delete mode 100644 avalanchego/message/builder_test.go delete mode 100644 avalanchego/message/codec.go delete mode 100644 avalanchego/message/codec_test.go delete mode 100644 avalanchego/message/inbound_msg_builder_proto.go delete mode 100644 avalanchego/message/inbound_msg_builder_proto_test.go create mode 100644 avalanchego/message/inbound_msg_builder_test.go create mode 100644 avalanchego/message/mock_message.go create mode 100644 avalanchego/message/mock_outbound_message_builder.go delete mode 100644 avalanchego/message/outbound_msg_builder_proto.go delete mode 100644 avalanchego/message/outbound_msg_builder_proto_test.go create mode 100644 avalanchego/message/outbound_msg_builder_test.go delete mode 100644 avalanchego/message/test_message.go create mode 100644 avalanchego/network/README.md create mode 100644 avalanchego/network/example_test.go create mode 100644 avalanchego/network/peer/gossip_tracker.go create mode 100644 avalanchego/network/peer/gossip_tracker_callback.go create mode 100644 avalanchego/network/peer/gossip_tracker_metrics.go create mode 100644 avalanchego/network/peer/gossip_tracker_test.go rename avalanchego/network/{ => peer}/ip_signer.go (65%) rename avalanchego/network/{ => peer}/ip_signer_test.go (50%) create mode 100644 avalanchego/network/peer/mock_gossip_tracker.go create mode 
100644 avalanchego/network/peer/validator_id.go create mode 100644 avalanchego/network/test_network.go create mode 100644 avalanchego/node/beacon_manager.go create mode 100644 avalanchego/node/beacon_manager_test.go create mode 100644 avalanchego/node/insecure_validator_manager.go delete mode 100644 avalanchego/proto/pb/plugin/plugin.pb.go delete mode 100644 avalanchego/proto/pb/plugin/plugin_grpc.pb.go delete mode 100644 avalanchego/proto/pb/subnetlookup/subnetlookup.pb.go delete mode 100644 avalanchego/proto/pb/subnetlookup/subnetlookup_grpc.pb.go create mode 100644 avalanchego/proto/pb/validatorstate/validator_state.pb.go create mode 100644 avalanchego/proto/pb/validatorstate/validator_state_grpc.pb.go create mode 100644 avalanchego/proto/pb/vm/runtime/runtime.pb.go create mode 100644 avalanchego/proto/pb/vm/runtime/runtime_grpc.pb.go create mode 100644 avalanchego/proto/pb/warp/message.pb.go create mode 100644 avalanchego/proto/pb/warp/message_grpc.pb.go delete mode 100644 avalanchego/proto/plugin/plugin.proto delete mode 100644 avalanchego/proto/subnetlookup/subnetlookup.proto create mode 100644 avalanchego/proto/validatorstate/validator_state.proto create mode 100644 avalanchego/proto/vm/runtime/runtime.proto create mode 100644 avalanchego/proto/warp/message.proto create mode 100644 avalanchego/scripts/build_fuzz.sh create mode 100644 avalanchego/scripts/mock.gen.sh create mode 100644 avalanchego/scripts/mocks.mockgen.txt create mode 100644 avalanchego/snow/consensus/avalanche/traced_consensus.go create mode 100644 avalanchego/snow/consensus/metrics/timestamp.go create mode 100644 avalanchego/snow/consensus/snowman/traced_consensus.go create mode 100644 avalanchego/snow/consensus/snowstorm/stringer_test.go delete mode 100644 avalanchego/snow/engine/avalanche/mocks/engine.go rename avalanchego/snow/engine/avalanche/{test_avalanche_engine.go => test_engine.go} (67%) create mode 100644 avalanchego/snow/engine/avalanche/traced_engine.go create mode 100644 
avalanchego/snow/engine/avalanche/vertex/mock_vm.go delete mode 100644 avalanchego/snow/engine/avalanche/vertex/mocks/dag_vm.go delete mode 100644 avalanchego/snow/engine/avalanche/vertex/sorting.go create mode 100644 avalanchego/snow/engine/common/bootstrap_tracker.go delete mode 100644 avalanchego/snow/engine/common/subnet.go delete mode 100644 avalanchego/snow/engine/common/subnet_tracker.go rename avalanchego/snow/engine/common/{test_subnet.go => test_bootstrap_tracker.go} (58%) create mode 100644 avalanchego/snow/engine/common/traced_bootstrapable_engine.go create mode 100644 avalanchego/snow/engine/common/traced_engine.go create mode 100644 avalanchego/snow/engine/common/traced_state_syncer.go create mode 100644 avalanchego/snow/engine/common/tracker/accepted.go create mode 100644 avalanchego/snow/engine/common/tracker/accepted_test.go create mode 100644 avalanchego/snow/engine/snowman/block/block_context_vm.go create mode 100644 avalanchego/snow/engine/snowman/block/mocks/build_block_with_context_vm.go create mode 100644 avalanchego/snow/engine/snowman/block/mocks/with_verify_context.go create mode 100644 avalanchego/snow/engine/snowman/block/state_sync_mode.go delete mode 100644 avalanchego/snow/engine/snowman/mocks/engine.go rename avalanchego/snow/engine/snowman/{test_snowman_engine.go => test_engine.go} (68%) create mode 100644 avalanchego/snow/engine/snowman/traced_engine.go create mode 100644 avalanchego/snow/networking/handler/engine.go create mode 100644 avalanchego/snow/networking/handler/engine_test.go create mode 100644 avalanchego/snow/networking/handler/mock_handler.go create mode 100644 avalanchego/snow/networking/router/mock_router.go create mode 100644 avalanchego/snow/networking/router/traced_router.go create mode 100644 avalanchego/snow/networking/sender/mock_external_sender.go create mode 100644 avalanchego/snow/networking/sender/traced_sender.go create mode 100644 avalanchego/snow/networking/timeout/mock_manager.go delete mode 100644 
avalanchego/snow/networking/worker/mock_pool.go create mode 100644 avalanchego/snow/uptime/mock_calculator.go delete mode 100644 avalanchego/snow/uptime/mocks/calculator.go create mode 100644 avalanchego/snow/uptime/no_op_calculator.go delete mode 100644 avalanchego/snow/validators/custom.go delete mode 100644 avalanchego/snow/validators/custom_test.go create mode 100644 avalanchego/snow/validators/gvalidators/validator_state_client.go create mode 100644 avalanchego/snow/validators/gvalidators/validator_state_server.go create mode 100644 avalanchego/snow/validators/gvalidators/validator_state_test.go create mode 100644 avalanchego/snow/validators/logger.go create mode 100644 avalanchego/snow/validators/manager_test.go create mode 100644 avalanchego/snow/validators/mock_manager.go create mode 100644 avalanchego/snow/validators/mock_set.go create mode 100644 avalanchego/snow/validators/mock_state.go create mode 100644 avalanchego/snow/validators/mock_subnet_connector.go create mode 100644 avalanchego/snow/validators/subnet_connector.go create mode 100644 avalanchego/snow/validators/traced_state.go create mode 100644 avalanchego/snow/validators/unhandled_subnet_connector.go create mode 100644 avalanchego/subnets/config.go create mode 100644 avalanchego/subnets/config_test.go create mode 100644 avalanchego/subnets/no_op_allower.go create mode 100644 avalanchego/subnets/subnet.go create mode 100644 avalanchego/subnets/subnet_test.go create mode 100644 avalanchego/trace/exporter.go create mode 100644 avalanchego/trace/exporter_type.go create mode 100644 avalanchego/trace/noop.go create mode 100644 avalanchego/trace/tracer.go create mode 100644 avalanchego/utils/atomic.go delete mode 100644 avalanchego/utils/atomic_bool.go delete mode 100644 avalanchego/utils/atomic_interface.go delete mode 100644 avalanchego/utils/atomic_interface_test.go create mode 100644 avalanchego/utils/atomic_test.go create mode 100644 avalanchego/utils/bag/bag.go rename avalanchego/{ids => 
utils/bag}/bag_benchmark_test.go (57%) create mode 100644 avalanchego/utils/bag/bag_test.go create mode 100644 avalanchego/utils/bag/unique_bag.go create mode 100644 avalanchego/utils/bag/unique_bag_test.go create mode 100644 avalanchego/utils/buffer/bounded_nonblocking_queue.go create mode 100644 avalanchego/utils/buffer/bounded_nonblocking_queue_test.go create mode 100644 avalanchego/utils/buffer/unbounded_blocking_deque.go create mode 100644 avalanchego/utils/buffer/unbounded_blocking_deque_test.go create mode 100644 avalanchego/utils/buffer/unbounded_deque.go create mode 100644 avalanchego/utils/buffer/unbounded_deque_test.go delete mode 100644 avalanchego/utils/buffer/unbounded_queue.go delete mode 100644 avalanchego/utils/buffer/unbounded_queue_test.go delete mode 100644 avalanchego/utils/bytes_test.go create mode 100644 avalanchego/utils/compare/compare.go create mode 100644 avalanchego/utils/compare/compare_test.go create mode 100644 avalanchego/utils/compression/compressor_test.go delete mode 100644 avalanchego/utils/compression/gzip_compressor_test.go create mode 100644 avalanchego/utils/compression/gzip_zip_bomb.bin create mode 100644 avalanchego/utils/compression/type.go create mode 100644 avalanchego/utils/compression/type_test.go create mode 100644 avalanchego/utils/compression/zstd_compressor.go create mode 100644 avalanchego/utils/compression/zstd_zip_bomb.bin create mode 100644 avalanchego/utils/context.go create mode 100644 avalanchego/utils/crypto/bls/bls_benchmark_test.go delete mode 100644 avalanchego/utils/crypto/crypto.go delete mode 100644 avalanchego/utils/crypto/crypto_benchmark_test.go delete mode 100644 avalanchego/utils/crypto/ed25519.go delete mode 100644 avalanchego/utils/crypto/errors.go create mode 100644 avalanchego/utils/crypto/keychain/keychain.go create mode 100644 avalanchego/utils/crypto/keychain/keychain_test.go create mode 100644 avalanchego/utils/crypto/keychain/ledger.go create mode 100644 
avalanchego/utils/crypto/keychain/mock_ledger.go create mode 100644 avalanchego/utils/crypto/ledger/ledger.go create mode 100644 avalanchego/utils/crypto/ledger/ledger_test.go delete mode 100644 avalanchego/utils/crypto/rsa.go delete mode 100644 avalanchego/utils/crypto/rsapss.go create mode 100644 avalanchego/utils/crypto/secp256k1/rfc6979_test.go rename avalanchego/utils/crypto/{secp256k1r.go => secp256k1/secp256k1.go} (56%) create mode 100644 avalanchego/utils/crypto/secp256k1/secp256k1_benchmark_test.go rename avalanchego/utils/crypto/{secp256k1r_test.go => secp256k1/secp256k1_test.go} (75%) rename avalanchego/utils/crypto/{ => secp256k1}/test_keys.go (66%) delete mode 100644 avalanchego/utils/formatting/address/tools.go create mode 100644 avalanchego/utils/ips/lookup.go create mode 100644 avalanchego/utils/ips/lookup_test.go create mode 100644 avalanchego/utils/sampler/weighted_array_test.go create mode 100644 avalanchego/utils/sampler/weighted_linear_test.go create mode 100644 avalanchego/utils/set/bits.go create mode 100644 avalanchego/utils/set/bits_64.go rename avalanchego/{ids/bit_set_test.go => utils/set/bits_64_test.go} (91%) create mode 100644 avalanchego/utils/set/bits_test.go create mode 100644 avalanchego/utils/set/set.go create mode 100644 avalanchego/utils/set/set_benchmark_test.go create mode 100644 avalanchego/utils/set/set_test.go delete mode 100644 avalanchego/utils/subprocess/linux_new.go delete mode 100644 avalanchego/utils/subprocess/non_linux_new.go delete mode 100644 avalanchego/utils/timer/timeout_manager.go delete mode 100644 avalanchego/utils/timer/timeout_manager_test.go delete mode 100644 avalanchego/utils/ulimit/ulimit_test.go create mode 100644 avalanchego/version/compatibility.json create mode 100644 avalanchego/version/constants_test.go create mode 100644 avalanchego/vms/avm/blocks/block.go create mode 100644 avalanchego/vms/avm/blocks/block_test.go create mode 100644 avalanchego/vms/avm/blocks/builder/builder.go create mode 
100644 avalanchego/vms/avm/blocks/builder/builder_test.go create mode 100644 avalanchego/vms/avm/blocks/executor/block.go create mode 100644 avalanchego/vms/avm/blocks/executor/block_test.go create mode 100644 avalanchego/vms/avm/blocks/executor/manager.go create mode 100644 avalanchego/vms/avm/blocks/executor/manager_test.go create mode 100644 avalanchego/vms/avm/blocks/executor/mock_manager.go create mode 100644 avalanchego/vms/avm/blocks/mock_block.go create mode 100644 avalanchego/vms/avm/blocks/parser.go create mode 100644 avalanchego/vms/avm/blocks/standard_block.go create mode 100644 avalanchego/vms/avm/chain_state.go create mode 100644 avalanchego/vms/avm/config/config.go create mode 100644 avalanchego/vms/avm/dag_state.go create mode 100644 avalanchego/vms/avm/genesis_test.go delete mode 100644 avalanchego/vms/avm/metrics.go create mode 100644 avalanchego/vms/avm/metrics/metrics.go create mode 100644 avalanchego/vms/avm/metrics/mock_metrics.go create mode 100644 avalanchego/vms/avm/metrics/tx_metrics.go create mode 100644 avalanchego/vms/avm/network/atomic.go create mode 100644 avalanchego/vms/avm/network/network.go create mode 100644 avalanchego/vms/avm/network/network_test.go create mode 100644 avalanchego/vms/avm/states/diff.go create mode 100644 avalanchego/vms/avm/states/mock_states.go create mode 100644 avalanchego/vms/avm/states/state_test.go delete mode 100644 avalanchego/vms/avm/states/tx_state.go delete mode 100644 avalanchego/vms/avm/states/tx_state_test.go create mode 100644 avalanchego/vms/avm/states/versions.go delete mode 100644 avalanchego/vms/avm/tx_execute.go delete mode 100644 avalanchego/vms/avm/tx_semantic_verify.go delete mode 100644 avalanchego/vms/avm/tx_semantic_verify_test.go create mode 100644 avalanchego/vms/avm/txs/executor/backend.go create mode 100644 avalanchego/vms/avm/txs/executor/executor.go create mode 100644 avalanchego/vms/avm/txs/executor/semantic_verifier.go create mode 100644 
avalanchego/vms/avm/txs/executor/semantic_verifier_test.go create mode 100644 avalanchego/vms/avm/txs/executor/syntactic_verifier.go create mode 100644 avalanchego/vms/avm/txs/executor/syntactic_verifier_test.go create mode 100644 avalanchego/vms/avm/txs/mempool/mempool.go create mode 100644 avalanchego/vms/avm/txs/mempool/mempool_test.go create mode 100644 avalanchego/vms/avm/txs/mempool/mock_mempool.go create mode 100644 avalanchego/vms/avm/txs/mock_unsigned_tx.go delete mode 100644 avalanchego/vms/avm/txs/tx_test.go create mode 100644 avalanchego/vms/avm/utxo/spender.go create mode 100644 avalanchego/vms/avm/vm_regression_test.go create mode 100644 avalanchego/vms/components/avax/mock_transferable_in.go delete mode 100644 avalanchego/vms/components/avax/singleton_state.go delete mode 100644 avalanchego/vms/components/avax/singleton_state_test.go delete mode 100644 avalanchego/vms/components/avax/status_state.go delete mode 100644 avalanchego/vms/components/avax/status_state_test.go create mode 100644 avalanchego/vms/components/avax/utxo_handler.go rename avalanchego/vms/{platformvm => components}/message/codec.go (77%) rename avalanchego/vms/{platformvm => components}/message/handler.go (89%) rename avalanchego/vms/{platformvm => components}/message/handler_test.go (85%) rename avalanchego/vms/{platformvm => components}/message/message.go (64%) create mode 100644 avalanchego/vms/components/message/message_test.go create mode 100644 avalanchego/vms/components/message/tx.go rename avalanchego/vms/{platformvm/message/message_test.go => components/message/tx_test.go} (73%) delete mode 100644 avalanchego/vms/components/state/builtin.go delete mode 100644 avalanchego/vms/components/state/state.go delete mode 100644 avalanchego/vms/components/state/state_test.go delete mode 100644 avalanchego/vms/components/state/types.go create mode 100644 avalanchego/vms/metervm/build_block_with_context_vm.go delete mode 100644 
avalanchego/vms/platformvm/blocks/builder/apricot_builder.go delete mode 100644 avalanchego/vms/platformvm/blocks/builder/apricot_builder_test.go delete mode 100644 avalanchego/vms/platformvm/blocks/builder/banff_builder.go delete mode 100644 avalanchego/vms/platformvm/blocks/builder/banff_builder_test.go create mode 100644 avalanchego/vms/platformvm/blocks/executor/mock_manager.go create mode 100644 avalanchego/vms/platformvm/docs/chain_time_update.md create mode 100644 avalanchego/vms/platformvm/health_test.go create mode 100644 avalanchego/vms/platformvm/stakeable/stakeable_lock_test.go create mode 100644 avalanchego/vms/platformvm/state/staker_status.go delete mode 100644 avalanchego/vms/platformvm/state/utxos.go create mode 100644 avalanchego/vms/platformvm/state/validator_metadata.go create mode 100644 avalanchego/vms/platformvm/state/validator_metadata_test.go create mode 100644 avalanchego/vms/platformvm/state/validator_uptimes.go rename avalanchego/vms/platformvm/{validator => txs}/subnet_validator.go (81%) create mode 100644 avalanchego/vms/platformvm/txs/subnet_validator_test.go rename avalanchego/vms/platformvm/{validator => txs}/validator.go (75%) create mode 100644 avalanchego/vms/platformvm/txs/validator_test.go delete mode 100644 avalanchego/vms/platformvm/validator/validator_test.go create mode 100644 avalanchego/vms/platformvm/warp/codec.go create mode 100644 avalanchego/vms/platformvm/warp/constants.go create mode 100644 avalanchego/vms/platformvm/warp/gwarp/client.go create mode 100644 avalanchego/vms/platformvm/warp/gwarp/server.go create mode 100644 avalanchego/vms/platformvm/warp/gwarp/signer_test.go create mode 100644 avalanchego/vms/platformvm/warp/message.go create mode 100644 avalanchego/vms/platformvm/warp/message_test.go create mode 100644 avalanchego/vms/platformvm/warp/signature.go create mode 100644 avalanchego/vms/platformvm/warp/signature_test.go create mode 100644 avalanchego/vms/platformvm/warp/signer.go create mode 100644 
avalanchego/vms/platformvm/warp/signer_test.go create mode 100644 avalanchego/vms/platformvm/warp/test_signer.go create mode 100644 avalanchego/vms/platformvm/warp/unsigned_message.go create mode 100644 avalanchego/vms/platformvm/warp/unsigned_message_test.go create mode 100644 avalanchego/vms/platformvm/warp/validator.go create mode 100644 avalanchego/vms/platformvm/warp/validator_test.go create mode 100644 avalanchego/vms/proposervm/block_test.go create mode 100644 avalanchego/vms/proposervm/mock_post_fork_block.go create mode 100644 avalanchego/vms/proposervm/proposer/mock_windower.go create mode 100644 avalanchego/vms/proposervm/proposer/validators_test.go delete mode 100644 avalanchego/vms/proposervm/state/block_height_index_test.go create mode 100644 avalanchego/vms/rpcchainvm/batched_vm_test.go create mode 100644 avalanchego/vms/rpcchainvm/grpcutils/client.go create mode 100644 avalanchego/vms/rpcchainvm/grpcutils/client_test.go delete mode 100644 avalanchego/vms/rpcchainvm/grpcutils/listener.go create mode 100644 avalanchego/vms/rpcchainvm/grpcutils/server.go create mode 100644 avalanchego/vms/rpcchainvm/gruntime/runtime_client.go create mode 100644 avalanchego/vms/rpcchainvm/gruntime/runtime_server.go delete mode 100644 avalanchego/vms/rpcchainvm/gsubnetlookup/subnet_lookup_client.go delete mode 100644 avalanchego/vms/rpcchainvm/gsubnetlookup/subnet_lookup_server.go delete mode 100644 avalanchego/vms/rpcchainvm/mock_factory.go delete mode 100644 avalanchego/vms/rpcchainvm/mock_plugin_directory.go delete mode 100644 avalanchego/vms/rpcchainvm/plugin_test.go create mode 100644 avalanchego/vms/rpcchainvm/runtime/README.md create mode 100644 avalanchego/vms/rpcchainvm/runtime/manager.go create mode 100644 avalanchego/vms/rpcchainvm/runtime/runtime.go create mode 100644 avalanchego/vms/rpcchainvm/runtime/subprocess/initializer.go create mode 100644 avalanchego/vms/rpcchainvm/runtime/subprocess/linux_stopper.go create mode 100644 
avalanchego/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go create mode 100644 avalanchego/vms/rpcchainvm/runtime/subprocess/runtime.go create mode 100644 avalanchego/vms/rpcchainvm/runtime/subprocess/stopper.go create mode 100644 avalanchego/vms/rpcchainvm/with_context_vm_test.go create mode 100644 avalanchego/vms/tracedvm/batched_vm.go create mode 100644 avalanchego/vms/tracedvm/block.go create mode 100644 avalanchego/vms/tracedvm/block_vm.go create mode 100644 avalanchego/vms/tracedvm/build_block_with_context_vm.go create mode 100644 avalanchego/vms/tracedvm/height_indexed_vm.go create mode 100644 avalanchego/vms/tracedvm/state_syncable_vm.go create mode 100644 avalanchego/vms/tracedvm/tx.go create mode 100644 avalanchego/vms/tracedvm/vertex_vm.go create mode 100644 avalanchego/wallet/chain/x/backend_visitor.go create mode 100644 avalanchego/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go create mode 100644 avalanchego/wallet/subnet/primary/examples/add-primary-validator/main.go create mode 100644 avalanchego/wallet/subnet/primary/examples/create-asset/main.go create mode 100644 avalanchego/wallet/subnet/primary/examples/create-chain/main.go create mode 100644 avalanchego/wallet/subnet/primary/examples/create-locked-stakeable/main.go create mode 100644 avalanchego/wallet/subnet/primary/examples/create-subnet/main.go create mode 100644 avalanchego/wallet/subnet/primary/examples/remove-subnet-validator/main.go create mode 100644 avalanchego/x/README.md create mode 100644 avalanchego/x/merkledb/README.md create mode 100644 avalanchego/x/merkledb/batch.go create mode 100644 avalanchego/x/merkledb/cache.go create mode 100644 avalanchego/x/merkledb/cache_test.go create mode 100644 avalanchego/x/merkledb/codec.go create mode 100644 avalanchego/x/merkledb/codec_test.go create mode 100644 avalanchego/x/merkledb/db.go create mode 100644 avalanchego/x/merkledb/db_test.go create mode 100644 avalanchego/x/merkledb/history.go create mode 100644 
avalanchego/x/merkledb/history_test.go create mode 100644 avalanchego/x/merkledb/iterator.go create mode 100644 avalanchego/x/merkledb/maybe.go create mode 100644 avalanchego/x/merkledb/maybe_test.go create mode 100644 avalanchego/x/merkledb/metrics.go create mode 100644 avalanchego/x/merkledb/metrics_test.go create mode 100644 avalanchego/x/merkledb/node.go create mode 100644 avalanchego/x/merkledb/node_test.go create mode 100644 avalanchego/x/merkledb/path.go create mode 100644 avalanchego/x/merkledb/path_test.go create mode 100644 avalanchego/x/merkledb/proof.go create mode 100644 avalanchego/x/merkledb/proof_test.go create mode 100644 avalanchego/x/merkledb/trie.go create mode 100644 avalanchego/x/merkledb/trie_test.go create mode 100644 avalanchego/x/merkledb/trieview.go create mode 100644 avalanchego/x/sync/client.go create mode 100644 avalanchego/x/sync/client_test.go create mode 100644 avalanchego/x/sync/codec.go create mode 100644 avalanchego/x/sync/metrics.go create mode 100644 avalanchego/x/sync/mock_client.go create mode 100644 avalanchego/x/sync/network_client.go create mode 100644 avalanchego/x/sync/network_server.go create mode 100644 avalanchego/x/sync/peer_tracker.go create mode 100644 avalanchego/x/sync/request.go create mode 100644 avalanchego/x/sync/response_handler.go create mode 100644 avalanchego/x/sync/sync_test.go create mode 100644 avalanchego/x/sync/syncmanager.go create mode 100644 avalanchego/x/sync/syncworkheap.go create mode 100644 avalanchego/x/sync/syncworkheap_test.go delete mode 100644 coreth/consensus/misc/dao.go delete mode 100644 coreth/core/dao_test.go create mode 100644 coreth/core/fifo_cache.go create mode 100644 coreth/core/rawdb/chain_iterator.go create mode 100644 coreth/core/rawdb/chain_iterator_test.go create mode 100644 coreth/core/state_transition_ext_test.go create mode 100644 coreth/core/state_transition_test.go create mode 100644 coreth/eth/api_backend_test.go create mode 100644 coreth/eth/filters/bench_test.go 
create mode 100644 coreth/eth/filters/filter_system_test.go create mode 100644 coreth/eth/filters/filter_test.go create mode 100644 coreth/eth/gasprice/fee_info_provider.go create mode 100644 coreth/eth/gasprice/fee_info_provider_test.go create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json create mode 100644 coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json create mode 100644 coreth/eth/tracers/js/bigint.go create mode 100644 coreth/eth/tracers/js/goja.go create mode 100644 coreth/eth/tracers/js/internal/tracers/4byte_tracer_legacy.js create mode 100644 coreth/eth/tracers/js/internal/tracers/bigram_tracer.js create mode 100644 coreth/eth/tracers/js/internal/tracers/call_tracer_legacy.js create mode 100644 coreth/eth/tracers/js/internal/tracers/evmdis_tracer.js create mode 100644 coreth/eth/tracers/js/internal/tracers/noop_tracer_legacy.js create mode 100644 
coreth/eth/tracers/js/internal/tracers/opcount_tracer.js create mode 100644 coreth/eth/tracers/js/internal/tracers/prestate_tracer_legacy.js create mode 100644 coreth/eth/tracers/js/internal/tracers/tracers.go create mode 100644 coreth/eth/tracers/js/internal/tracers/trigram_tracer.js create mode 100644 coreth/eth/tracers/js/internal/tracers/unigram_tracer.js create mode 100644 coreth/eth/tracers/js/tracer_test.go delete mode 100644 coreth/params/dao.go create mode 100644 coreth/plugin/evm/message/cross_chain_handler.go create mode 100644 coreth/plugin/evm/message/eth_call_request.go delete mode 100644 coreth/plugin/evm/vm_extra_state_root_test.go create mode 100644 coreth/rpc/client_opt.go create mode 100644 coreth/rpc/client_opt_test.go create mode 100644 coreth/rpc/testdata/internal-error.js create mode 100644 coreth/rpc/testdata/invalid-badversion.js delete mode 100644 coreth/tests/init_test.go create mode 100644 coreth/utils/metered_cache.go diff --git a/README.md b/README.md index 73491c3a..265e0d41 100644 --- a/README.md +++ b/README.md @@ -1,12 +1,6 @@ # go-flare -go-flare is a modified version of [avalanchego@v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0) and [coreth@v0.11.0](https://github.com/ava-labs/coreth/releases/tag/v0.11.0), incorporating specific features for Flare and Songbird networks. These features include prioritized contract handling and the invocation of the daemon contract. - -All nodes should upgrade to the version 1.9.1 **before the following dates**: -- Coston2 network: November 26, 2024 at 12:00:00 UTC -- Flare network: December 17, 2024 at 12:00:00 UTC -- Coston network: January 7, 2025 at 12:00:00 UTC -- Songbird network: January 28, 2025 at 12:00:00 UTC +go-flare is a modified version of [avalanchego@v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0) and [coreth@v0.12.0](https://github.com/ava-labs/coreth/releases/tag/v0.12.0), incorporating specific features for Flare and Songbird networks. 
These features include prioritized contract handling and the invocation of the daemon contract. ## System Requirements - go version 1.21.8 diff --git a/avalanchego/.gitignore b/avalanchego/.gitignore index eef0dc70..c5a3cad3 100644 --- a/avalanchego/.gitignore +++ b/avalanchego/.gitignore @@ -60,3 +60,5 @@ tests/e2e/e2e.test tests/upgrade/upgrade.test vendor + +**/testdata diff --git a/avalanchego/.golangci.yml b/avalanchego/.golangci.yml index ffdec4c5..78f38391 100644 --- a/avalanchego/.golangci.yml +++ b/avalanchego/.golangci.yml @@ -4,6 +4,7 @@ run: # skip auto-generated files. skip-files: - ".*\\.pb\\.go$" + - ".*mock.*" issues: # Maximum count of issues with the same text. Set to 0 to disable. Default is 3. @@ -15,6 +16,7 @@ linters: disable-all: true enable: - asciicheck + - bodyclose - depguard - errcheck - exportloopref @@ -23,35 +25,79 @@ linters: - gofmt - gofumpt - goimports - - revive + - goprintffuncname - gosec - gosimple - govet - ineffassign - misspell - nakedret + - noctx - nolintlint - prealloc + - revive + - staticcheck - stylecheck + - typecheck - unconvert - unparam - unused - - unconvert - whitespace - - staticcheck - # - bodyclose - # - structcheck - # - lll - # - gomnd - # - goprintffuncname - # - interfacer - # - typecheck + # - errorlint (TODO: re-enable in go1.20 migration) # - goerr113 - # - noctx + # - gomnd + # - lll linters-settings: + errorlint: + # Check for plain type assertions and type switches. + asserts: false + # Check for plain error comparisons. 
+ comparison: false + revive: + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr + - name: bool-literal-in-expr + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return + - name: early-return + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines + - name: empty-lines + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format + - name: string-format + disabled: false + arguments: + - ["fmt.Errorf[0]", "/.*%.*/", "no format directive, use errors.New instead"] + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag + - name: struct-tag + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-naming + - name: unexported-naming + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error + - name: unhandled-error + disabled: false + arguments: + - "fmt\\.Fprint" + - "fmt\\.Fprintf" + - "fmt\\.Print" + - "fmt\\.Printf" + - "fmt\\.Println" + - "math/rand\\.Read" + - "strings\\.Builder\\.WriteString" + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-receiver + - name: unused-receiver + disabled: false + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break + - name: useless-break + disabled: false staticcheck: - go: "1.18" + go: "1.19" # https://staticcheck.io/docs/options#checks checks: - "all" @@ -65,4 +111,5 @@ linters-settings: list-type: blacklist packages-with-error-message: - io/ioutil: 'io/ioutil is deprecated. Use package io or os instead.' + - github.com/stretchr/testify/assert: 'github.com/stretchr/testify/require should be used instead.' 
include-go-root: true diff --git a/avalanchego/Dockerfile b/avalanchego/Dockerfile index 8700a76f..20c06bf4 100644 --- a/avalanchego/Dockerfile +++ b/avalanchego/Dockerfile @@ -5,8 +5,8 @@ # README.md # go.mod # ============= Compilation Stage ================ -FROM golang:1.18.5-buster AS builder -RUN apt-get update && apt-get install -y --no-install-recommends bash=5.0-4 git=1:2.20.1-2+deb10u3 make=4.2.1-1.2 gcc=4:8.3.0-1 musl-dev=1.1.21-2 ca-certificates=20200601~deb10u2 linux-headers-amd64 +FROM golang:1.19.6-buster AS builder +RUN apt-get update && apt-get install -y --no-install-recommends bash=5.0-4 make=4.2.1-1.2 gcc=4:8.3.0-1 musl-dev=1.1.21-2 ca-certificates=20200601~deb10u2 linux-headers-amd64 WORKDIR /build # Copy and download avalanche dependencies using go mod @@ -17,7 +17,7 @@ RUN go mod download # Copy the code into the container COPY . . -# Build avalanchego and plugins +# Build avalanchego RUN ./scripts/build.sh # ============= Cleanup Stage ================ diff --git a/avalanchego/LICENSE b/avalanchego/LICENSE index 1d314a7b..c9be72c5 100644 --- a/avalanchego/LICENSE +++ b/avalanchego/LICENSE @@ -1,6 +1,6 @@ BSD 3-Clause License -Copyright (c) 2020, Ava Labs, Inc. +Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/avalanchego/LICENSE.header b/avalanchego/LICENSE.header index f72620fe..1be34461 100644 --- a/avalanchego/LICENSE.header +++ b/avalanchego/LICENSE.header @@ -1,2 +1,2 @@ -Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -See the file LICENSE for licensing terms. +Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +See the file LICENSE for licensing terms. 
\ No newline at end of file diff --git a/avalanchego/README.md b/avalanchego/README.md index 44a0ba37..38b18a2b 100644 --- a/avalanchego/README.md +++ b/avalanchego/README.md @@ -17,16 +17,18 @@ The minimum recommended hardware specification for nodes connected to Mainnet is - CPU: Equivalent of 8 AWS vCPU - RAM: 16 GiB - Storage: 1 TiB -- OS: Ubuntu 20.04 or macOS >= 12 +- OS: Ubuntu 20.04/22.04 or macOS >= 12 - Network: Reliable IPv4 or IPv6 network connection, with an open public port. If you plan to build AvalancheGo from source, you will also need the following software: -- [Go](https://golang.org/doc/install) version >= 1.18.1 +- [Go](https://golang.org/doc/install) version >= 1.19.6 - [gcc](https://gcc.gnu.org/) - g++ -### Native Install +### Building From Source + +#### Clone The Repository Clone the AvalancheGo repository: @@ -35,17 +37,17 @@ git clone git@github.com:ava-labs/avalanchego.git cd avalanchego ``` -This will clone and checkout to `master` branch. +This will clone and checkout the `master` branch. -#### Building the Avalanche Executable +#### Building AvalancheGo -Build Avalanche by running the build script: +Build AvalancheGo by running the build script: ```sh ./scripts/build.sh ``` -The output of the script will be the Avalanche binary named `avalanchego`. It is located in the build directory: +The `avalanchego` binary is now in the `build` directory. To run: ```sh ./build/avalanchego @@ -57,20 +59,20 @@ Install AvalancheGo using an `apt` repository. #### Adding the APT Repository -If you have already added the APT repository, you do not need to add it again. +If you already have the APT repository added, you do not need to add it again. 
-To add the repository on Ubuntu 20.04 (Focal), run: +To add the repository on Ubuntu, run: ```sh sudo su - -wget -O - https://downloads.avax.network/avalanchego.gpg.key | apt-key add - -echo "deb https://downloads.avax.network/apt focal main" > /etc/apt/sources.list.d/avalanche.list +wget -qO - https://downloads.avax.network/avalanchego.gpg.key | tee /etc/apt/trusted.gpg.d/avalanchego.asc +source /etc/os-release && echo "deb https://downloads.avax.network/apt $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/avalanche.list exit ``` #### Installing the Latest Version -After adding the APT repository, install avalanchego by running: +After adding the APT repository, install `avalanchego` by running: ```sh sudo apt update @@ -85,9 +87,9 @@ The Avalanche binary to be executed is named `avalanchego`. ### Docker Install -Make sure docker is installed on the machine - so commands like `docker run` etc. are available. +Make sure Docker is installed on the machine - so commands like `docker run` etc. are available. -Building the docker image of latest avalanchego branch can be done by running: +Building the Docker image of latest `avalanchego` branch can be done by running: ```sh ./scripts/build_image.sh @@ -99,7 +101,7 @@ To check the built image, run: docker image ls ``` -The image should be tagged as `avaplatform/avalanchego:xxxxxxxx`, where `xxxxxxxx` is the shortened commit of the Avalanche source it was built from. To run the avalanche node, run: +The image should be tagged as `avaplatform/avalanchego:xxxxxxxx`, where `xxxxxxxx` is the shortened commit of the Avalanche source it was built from. To run the Avalanche node, run: ```sh docker run -ti -p 9650:9650 -p 9651:9651 avaplatform/avalanchego:xxxxxxxx /avalanchego/build/avalanchego @@ -133,7 +135,7 @@ See [this tutorial.](https://docs.avax.network/build/tutorials/platform/create-a ## Bootstrapping -A node needs to catch up to the latest network state before it can participate in consensus and serve API calls. 
This process, called bootstrapping, currently takes several days for a new node connected to Mainnet. +A node needs to catch up to the latest network state before it can participate in consensus and serve API calls. This process (called bootstrapping) currently takes several days for a new node connected to Mainnet. A node will not [report healthy](https://docs.avax.network/build/avalanchego-apis/health) until it is done bootstrapping. @@ -143,7 +145,7 @@ The bottleneck during bootstrapping is typically database IO. Using a more power ## Generating Code -Avalanchego uses multiple tools to generate efficient and boilerplate code. +AvalancheGo uses multiple tools to generate efficient and boilerplate code. ### Running protobuf codegen @@ -151,7 +153,7 @@ To regenerate the protobuf go code, run `scripts/protobuf_codegen.sh` from the r This should only be necessary when upgrading protobuf versions or modifying .proto definition files. -To use this script, you must have [buf](https://docs.buf.build/installation) (v1.7.0), protoc-gen-go (v1.28.0) and protoc-gen-go-grpc (v1.2.0) installed. +To use this script, you must have [buf](https://docs.buf.build/installation) (v1.11.0), protoc-gen-go (v1.28.0) and protoc-gen-go-grpc (v1.2.0) installed. To install the buf dependencies: @@ -186,17 +188,28 @@ docker run -t -i -v $(pwd):/opt/avalanche -w/opt/avalanche avalanche:protobuf_co ### Running mock codegen -Going forward, AvalancheGo will use [gomock](https://github.com/golang/mock) for mocking in unit tests. +To regenerate the [gomock](https://github.com/golang/mock) code, run `scripts/mock.gen.sh` from the root of the repo. -Example usage: +This should only be necessary when modifying exported interfaces or after modifying `scripts/mock.mockgen.txt`. 
-```sh -mockgen -destination vms/platformvm/state/mock_diff.go -package state github.com/ava-labs/avalanchego/vms/platformvm/state Diff -``` +## Versioning + +### Version Semantics + +AvalancheGo is first and foremost a client for the Avalanche network. The versioning of AvalancheGo follows that of the Avalanche network. + +- `v0.x.x` indicates a development network version. +- `v1.x.x` indicates a production network version. +- `vx.[Upgrade].x` indicates the number of network upgrades that have occurred. +- `vx.x.[Patch]` indicates the number of client upgrades that have occurred since the last network upgrade. + +### Library Compatibility Guarantees + +Because AvalancheGo's version denotes the network version, it is expected that interfaces exported by AvalancheGo's packages may change in `Patch` version updates. -This makes a mock implementation of the `Diff` interface from `github.com/ava-labs/avalanchego/vms/platformvm/state`and puts it at `vms/platformvm/state/mock_diff.go`. The struct implementing the mock will be in the `state` package. +### API Compatibility Guarantees -See `gomock` documentation for more information. +APIs exposed when running AvalancheGo will maintain backwards compatibility, unless the functionality is explicitly deprecated and announced when removed. ## Supported Platforms @@ -232,4 +245,4 @@ To officially support a new platform, one must satisfy the following requirement **We and our community welcome responsible disclosures.** -If you've discovered a security vulnerability, please report it via our [bug bounty program](https://hackenproof.com/avalanche/). Valid reports will be eligible for a reward (terms and conditions apply). +Please refer to our [Security Policy](SECURITY.md) and [Security Advisories](https://github.com/ava-labs/avalanchego/security/advisories). 
diff --git a/avalanchego/RELEASES.md b/avalanchego/RELEASES.md index a8ee5fc3..1a3d929e 100644 --- a/avalanchego/RELEASES.md +++ b/avalanchego/RELEASES.md @@ -1,5 +1,731 @@ # Release Notes +## [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0) + +[This upgrade](https://medium.com/avalancheavax/cortina-x-chain-linearization-a1d9305553f6) linearizes the X-chain, introduces delegation batching to the P-chain, and increases the maximum block size on the C-chain. + +The changes in the upgrade go into effect at 11 AM ET, April 25th 2023 on Mainnet. + +**All Mainnet nodes should upgrade before 11 AM ET, April 25th 2023.** + +The supported plugin version is `25`. + +### What's Changed + +- Add CODEOWNERS for the x/ package by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1260 +- Feature Spec Template by @richardpringle in https://github.com/ava-labs/avalanchego/pull/1258 +- Standardize CI triggers by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1265 +- special case no sent/received message in network health check by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1263 +- Fix bug template by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1268 +- Replace `flags` usage with `pflags` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1270 +- Fixed grammatical errors in `README.md` by @krakxn in https://github.com/ava-labs/avalanchego/pull/1102 +- Add tests for race conditions in merkledb by @kyl27 in https://github.com/ava-labs/avalanchego/pull/1256 +- Add P-chain indexer API example by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1271 +- use `require` in `snow/choices` tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1279 +- use `require` in `utils/wrappers` tests by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1280 +- add support for tracking delegatee rewards to validator metadata by @dhrubabasu in 
https://github.com/ava-labs/avalanchego/pull/1273 +- defer delegatee rewards until end of validator staking period by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1262 +- Initialize UptimeCalculator in TestPeer by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/1283 +- Add Avalanche liveness health checks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1287 +- Skip AMI generation with Fuji tags by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1288 +- Use `maps.Equal` in `set.Equals` by @danlaine in https://github.com/ava-labs/avalanchego/pull/1290 +- return accrued delegator rewards in `GetCurrentValidators` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1291 +- Add zstd compression by @danlaine in https://github.com/ava-labs/avalanchego/pull/1278 +- implement `txs.Visitor` in X chain wallet by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/1299 +- Parallelize gzip compression by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1293 +- Add zip bomb tests by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1300 +- Gossip Avalanche frontier after the linearization by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1303 +- Add fine grained metrics+logging for handling, processing, and grab l… by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/1301 +- Persist stateless block in AVM state by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1305 +- Initialize FxID fields in GetBlock and GetBlockByHeight by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1306 +- Filterable Health Tags by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1304 +- increase health await timeout by @ceyonur in https://github.com/ava-labs/avalanchego/pull/1317 +- Expose GetEngineManager from the chain Handler by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1316 +- Add BLS 
benchmarks by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1318 +- Encode codec version in merkledb by @danlaine in https://github.com/ava-labs/avalanchego/pull/1313 +- Expose consensus-app-concurrency by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1322 +- Adjust Logic In Merkle DB History by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1310 +- Fix Concurrency Bug In CommitToParent by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/1320 +- Cleanup goroutines on health.Stop by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/1325 + +### New Contributors + +- @richardpringle made their first contribution in https://github.com/ava-labs/avalanchego/pull/1258 +- @ceyonur made their first contribution in https://github.com/ava-labs/avalanchego/pull/1263 +- @krakxn made their first contribution in https://github.com/ava-labs/avalanchego/pull/1102 +- @kyl27 made their first contribution in https://github.com/ava-labs/avalanchego/pull/1256 +- @dhrubabasu made their first contribution in https://github.com/ava-labs/avalanchego/pull/1279 +- @joshua-kim made their first contribution in https://github.com/ava-labs/avalanchego/pull/1283 +- @dboehm-avalabs made their first contribution in https://github.com/ava-labs/avalanchego/pull/1310 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.9.16...v1.10.0 + +## [v1.9.16](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.16) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `24`. 
+ +- Removed unnecessary repoll after rejecting vertices +- Improved snowstorm lookup error handling +- Removed rejected vertices from the Avalanche frontier more aggressively +- Reduced default health check values for processing decisions + +## [v1.9.15](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.15) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `24`. + +- Fixed `x/merkledb.ChangeProof#getLargestKey` to correctly handle no changes +- Added test for `avm/txs/executor.SemanticVerifier#verifyFxUsage` with multiple valid fxs +- Fixed CPU + bandwidth performance regression during vertex processing +- Added example usage of the `/ext/index/X/block` API +- Reduced the default value of `--snow-optimal-processing` from `50` to `10` +- Updated the year in the license header + +## [v1.9.14](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.14) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `24`. + +## [v1.9.13](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.13) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `24`. + +## [v1.9.12](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.12) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `24`. 
+ +### Networking + +- Removed linger setting on P2P connections +- Improved error message when failing to calculate peer uptimes +- Removed `EngineType` from P2P response messages +- Added context cancellation during dynamic IP updates +- Reduced the maximum P2P reconnect delay from 1 hour to 1 minute + +### Consensus + +- Added support to switch from `Avalanche` consensus to `Snowman` consensus +- Added support for routing consensus messages to either `Avalanche` or `Snowman` consensus on the same chain +- Removed usage of deferred evaluation of the `handler.Consensus` in the `Avalanche` `OnFinished` callback +- Dropped inbound `Avalanche` consensus messages after switching to `Snowman` consensus +- Renamed the `Avalanche` VM metrics prefix from `avalanche_{chainID}_vm_` to `avalanche_{chainID}_vm_avalanche` +- Replaced `consensus` and `decision` dispatchers with `block`, `tx`, and `vertex` dispatchers +- Removed `Avalanche` bootstrapping restarts during the switch to `Snowman` consensus + +### AVM + +- Added `avm` block execution manager +- Added `avm` block builder +- Refactored `avm` transaction syntactic verification +- Refactored `avm` transaction semantic verification +- Refactored `avm` transaction execution +- Added `avm` mempool gossip +- Removed block timer interface from `avm` `mempool` +- Moved `toEngine` channel into the `avm` `mempool` +- Added `GetUTXOFromID` to the `avm` `state.Chain` interface +- Added unpopulated `MerkleRoot` to `avm` blocks +- Added `avm` transaction based metrics +- Replaced error strings with error interfaces in the `avm` mempool + +### PlatformVM + +- Added logs when the local nodes stake amount changes +- Moved `platformvm` `message` package into `components` +- Replaced error strings with error interfaces in the `platformvm` mempool + +### Warp + +- Added `ID` method to `warp.UnsignedMessage` +- Improved `warp.Signature` verification error descriptions + +### Miscellaneous + +- Improved `merkledb` locking to allow 
concurrent read access through `trieView`s +- Fixed `Banff` transaction signing with ledger when using the wallet +- Emitted github artifacts after successful builds +- Added non-blocking bounded queue +- Converted the `x.Parser` helper to be a `block.Parser` interface from a `tx.Parser` interface + +### Cleanup + +- Separated dockerhub image publishing from the kurtosis test workflow +- Exported various errors to use in testing +- Removed the `vms/components/state` package +- Replaced ad-hoc linked hashmaps with the standard data-structure +- Removed `usr/local/lib/avalanche` from deb packages +- Standardized usage of `constants.UnitTestID` + +### Examples + +- Added P-chain `RemoveSubnetValidatorTx` example using the wallet +- Added X-chain `CreateAssetTx` example using the wallet + +### Configs + +- Added support to specify `HTTP` server timeouts + - `--http-read-timeout` + - `--http-read-header-timeout` + - `--http-write-timeout` + - `--http-idle-timeout` + +### APIs + +- Added `avm` block APIs + - `avm.getBlock` + - `avm.getBlockByHeight` + - `avm.getHeight` +- Converted `avm` APIs to only surface accepted state +- Deprecated all `ipcs` APIs + - `ipcs.publishBlockchain` + - `ipcs.unpublishBlockchain` + - `ipcs.getPublishedBlockchains` +- Deprecated all `keystore` APIs + - `keystore.createUser` + - `keystore.deleteUser` + - `keystore.listUsers` + - `keystore.importUser` + - `keystore.exportUser` +- Deprecated the `avm/pubsub` API endpoint +- Deprecated various `avm` APIs + - `avm.getAddressTxs` + - `avm.getBalance` + - `avm.getAllBalances` + - `avm.createAsset` + - `avm.createFixedCapAsset` + - `avm.createVariableCapAsset` + - `avm.createNFTAsset` + - `avm.createAddress` + - `avm.listAddresses` + - `avm.exportKey` + - `avm.importKey` + - `avm.mint` + - `avm.sendNFT` + - `avm.mintNFT` + - `avm.import` + - `avm.export` + - `avm.send` + - `avm.sendMultiple` +- Deprecated the `avm/wallet` API endpoint + - `wallet.issueTx` + - `wallet.send` + - `wallet.sendMultiple` 
+- Deprecated various `platformvm` APIs + - `platform.exportKey` + - `platform.importKey` + - `platform.getBalance` + - `platform.createAddress` + - `platform.listAddresses` + - `platform.getSubnets` + - `platform.addValidator` + - `platform.addDelegator` + - `platform.addSubnetValidator` + - `platform.createSubnet` + - `platform.exportAVAX` + - `platform.importAVAX` + - `platform.createBlockchain` + - `platform.getBlockchains` + - `platform.getStake` + - `platform.getMaxStakeAmount` + - `platform.getRewardUTXOs` +- Deprecated the `stake` field in the `platform.getTotalStake` response in favor of `weight` + +## [v1.9.11](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.11) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `24`. + +### Plugins + +- Removed error from `logging.NoLog#Write` +- Added logging to the static VM factory usage +- Fixed incorrect error being returned from `subprocess.Bootstrap` + +### Ledger + +- Added ledger tx parsing support + +### MerkleDB + +- Added explicit consistency guarantees when committing multiple `merkledb.trieView`s to disk at once +- Removed reliance on premature root calculations for `merkledb.trieView` validity tracking +- Updated `x/merkledb/README.md` + +## [v1.9.10](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.10) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `24`. 
+ +### MerkleDB + +- Removed parent tracking from `merkledb.trieView` +- Removed `base` caches from `merkledb.trieView` +- Fixed error handling during `merkledb` intermediate node eviction +- Replaced values larger than `32` bytes with a hash in the `merkledb` hash representation + +### AVM + +- Refactored `avm` API tx creation into a standalone `Spender` implementation +- Migrated UTXO interfaces from the `platformvm` into the `components` for use in the `avm` +- Refactored `avm` `tx.SyntacticVerify` to expect the config rather than the fee fields + +### Miscellaneous + +- Updated the minimum golang version to `v1.19.6` +- Fixed `rpcchainvm` signal handling to only shutdown upon receipt of `SIGTERM` +- Added `warp.Signature#NumSigners` for better cost tracking support +- Added `snow.Context#PublicKey` to provide access to the local node's BLS public key inside the VM execution environment +- Renamed Avalanche consensus metric prefix to `avalanche_{chainID}_avalanche` +- Specified an explicit TCP `Linger` timeout of `15` seconds +- Updated the `secp256k1` library to `v4.1.0` + +### Cleanup + +- Removed support for the `--whitelisted-subnets` flag +- Removed unnecessary abstractions from the `app` package +- Removed `Factory` embedding from `platformvm.VM` and `avm.VM` +- Removed `validator` package from the `platformvm` +- Removed `timer.TimeoutManager` +- Replaced `snow.Context` in `Factory.New` with `logging.Logger` +- Renamed `set.Bits#Len` to `BitLen` and `set.Bits#HammingWeight` to `Len` to align with `set.Bits64` + +## [v1.9.9](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.9) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `23`. + +**Note: The `--whitelisted-subnets` flag was deprecated in `v1.9.6`. This is the last release in which it will be supported. 
Use `--track-subnets` instead.** + +### Monitoring + +- Added warning when the P2P server IP is private +- Added warning when the HTTP server IP is potentially publicly reachable +- Removed `merkledb.trieView#calculateIDs` tracing when no recalculation is needed + +### Databases + +- Capped the number of goroutines that `merkledb.trieView#calculateIDsConcurrent` will create +- Removed `nodb` package +- Refactored `Batch` implementations to share common code +- Added `Batch.Replay` invariant tests +- Converted to use `require` in all `database` interface tests + +### Cryptography + +- Moved the `secp256k1` implementations to a new `secp256k1` package out of the `crypto` package +- Added `rfc6979` compliance tests to the `secp256k1` signing implementation +- Removed unused cryptography implementations `ed25519`, `rsa`, and `rsapss` +- Removed unnecessary cryptography interfaces `crypto.Factory`, `crypto.RecoverableFactory`, `crypto.PublicKey`, and `crypto.PrivateKey` +- Added verification when parsing `secp256k1` public keys to ensure usage of the compressed format + +### API + +- Removed delegators from `platform.getCurrentValidators` unless a single `nodeID` is requested +- Added `delegatorCount` and `delegatorWeight` to the validators returned by `platform.getCurrentValidators` + +### Documentation + +- Improved documentation on the `block.WithVerifyContext` interface +- Fixed `--public-ip` and `--public-ip-resolution-service` CLI flag descriptions +- Updated `README.md` to explicitly reference `SECURITY.md` + +### Coreth + +- Enabled state sync by default when syncing from an empty database +- Increased block gas limit to 15M for `Cortina` Network Upgrade +- Added back file tracer endpoint +- Added back JS tracer + +### Miscellaneous + +- Added `allowedNodes` to the subnet config for `validatorOnly` subnets +- Removed the `hashicorp/go-plugin` dependency to improve plugin flexibility +- Replaced specialized `bag` implementations with generic `bag` implementations 
+- Added `mempool` package to the `avm` +- Added `chain.State#IsProcessing` to simplify integration with `block.WithVerifyContext` +- Added `StateSyncMinVersion` to `sync.ClientConfig` +- Added validity checks for `InitialStakeDuration` in a custom network genesis +- Removed unnecessary reflect call when marshalling an empty slice + +### Cleanup + +- Renamed `teleporter` package to `warp` +- Replaced `bool` flags in P-chain state diffs with an `enum` +- Refactored subnet configs to more closely align between the primary network and subnets +- Simplified the `utxo.Spender` interface +- Removed unused field `common.Config#Validators` + +## [v1.9.8](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.8) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `22`. + +### Networking + +- Added TCP proxy support for p2p network traffic +- Added p2p network client utility for directly messaging the p2p network + +### Consensus + +- Guaranteed delivery of App messages to the VM, regardless of sync status +- Added `EngineType` to consensus context + +### MerkleDB - Alpha + +- Added initial implementation of a path-based merkle-radix tree +- Added initial implementation of state sync powered by the merkledb + +### APIs + +- Updated `platform.getCurrentValidators` to return `uptime` as a percentage +- Updated `platform.get*Validators` to avoid iterating over the staker set when requesting specific nodeIDs +- Cached staker data in `platform.get*Validators` to significantly reduce DB IO +- Added `stakeAmount` and `weight` to all staker responses in P-chain APIs +- Deprecated `stakeAmount` in staker responses from P-chain APIs +- Removed `creationTxFee` from `info.GetTxFeeResponse` +- Removed `address` from `platformvm.GetBalanceRequest` + +### Fixes + +- Fixed `RemoveSubnetValidatorTx` weight diff corruption +- Released network lock before attempting 
to close a peer connection +- Fixed X-Chain last accepted block initialization to use the genesis block, not the stop vertex after linearization +- Removed plugin directory handling from AMI generation +- Removed copy of plugins directory from tar script + +### Cleanup + +- Removed unused rpm packaging scripts +- Removed engine dependency from chain registrants +- Removed unused field from chain handler log +- Linted custom test `chains.Manager` +- Used generic btree implementation +- Deleted `utils.CopyBytes` +- Updated rjeczalik/notify from v0.9.2 to v0.9.3 + +### Miscellaneous + +- Added AVM `state.Chain` interface +- Added generic atomic value utility +- Added test for the AMI builder during RCs +- Converted cache implementations to use generics +- Added optional cache eviction callback + +## [v1.9.7](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.7) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `22`. + +### Fixes + +- Fixed subnet validator lookup regression + +## [v1.9.6](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.6) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `22`. + +### Consensus + +- Added `StateSyncMode` to the return of `StateSummary#Accept` to support syncing chain state while tracking the chain as a light client +- Added `AcceptedFrontier` to `Chits` messages +- Reduced unnecessary confidence resets during consensus by applying `AcceptedFrontier`s during `QueryFailed` handling +- Added EngineType for consensus messages in the p2p message definitions +- Updated `vertex.DAGVM` interface to support linearization + +### Configs + +- Added `--plugin-dir` flag. The default value is `[DATADIR]/plugins` +- Removed `--build-dir` flag. 
The location of the avalanchego binary is no longer considered when looking for the `plugins` directory. Subnet maintainers should ensure that their node is able to properly discover plugins, as the default location is likely changed. See `--plugin-dir` +- Changed the default value of `--api-keystore-enabled` to `false` +- Added `--track-subnets` flag as a replacement of `--whitelisted-subnets` + +### Fixes + +- Fixed NAT-PMP router discovery and port mapping +- Fixed `--staking-enabled=false` setting to correctly start subnet chains and report healthy +- Fixed message logging in the consensus handler + +### VMs + +- Populated non-trivial logger in the `rpcchainvm` `Server`'s `snow.Context` +- Updated `rpcchainvm` proto definitions to use enums +- Added `Block` format and definition to the `AVM` +- Removed `proposervm` height index reset + +### Metrics + +- Added `avalanche_network_peer_connected_duration_average` metric +- Added `avalanche_api_calls_processing` metric +- Added `avalanche_api_calls` metric +- Added `avalanche_api_calls_duration` metric + +### Documentation + +- Added wallet example to create `stakeable.LockOut` outputs +- Improved ubuntu deb install instructions + +### Miscellaneous + +- Updated ledger-avalanche to v0.6.5 +- Added linter to ban the usage of `fmt.Errorf` without format directives +- Added `List` to the `buffer#Deque` interface +- Added `Index` to the `buffer#Deque` interface +- Added `SetLevel` to the `Logger` interface +- Updated `auth` API to use the new `jwt` standard + +## [v1.9.5](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.5) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `21`. 
+ +### Subnet Messaging + +- Added subnet message serialization format +- Added subnet message signing +- Replaced `bls.SecretKey` with a `teleporter.Signer` in the `snow.Context` +- Moved `SNLookup` into the `validators.State` interface to support non-whitelisted chainID to subnetID lookups +- Added support for non-whitelisted subnetIDs for fetching the validator set at a given height +- Added subnet message verification +- Added `teleporter.AnycastID` to denote a subnet message not intended for a specific chain + +### Fixes + +- Added re-gossip of updated validator IPs +- Fixed `rpcchainvm.BatchedParseBlock` to correctly wrap returned blocks +- Removed incorrect `uintptr` handling in the generic codec +- Removed message latency tracking on messages being sent to itself + +### Coreth + +- Added support for eth_call over VM2VM messaging +- Added config flags for tx pool behavior + +### Miscellaneous + +- Added networking package README.md +- Removed pagination of large db messages over gRPC +- Added `Size` to the generic codec to reduce allocations +- Added `UnpackLimitedBytes` and `UnpackLimitedStr` to the manual packer +- Added SECURITY.md +- Exposed proposer list from the `proposervm`'s `Windower` interface +- Added health and bootstrapping client helpers that block until the node is healthy +- Moved bit sets from the `ids` package to the `set` package +- Added more wallet examples + +## [v1.9.4](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.4) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `20`. + +**This version modifies the db format. The db format is compatible with v1.9.3, but not v1.9.2 or earlier. 
After running a node with v1.9.4 attempting to run a node with a version earlier than v1.9.3 may report a fatal error on startup.** + +### PeerList Gossip Optimization + +- Added gossip tracking to the `peer` instance to only gossip new `IP`s to a connection +- Added `PeerListAck` message to report which `TxID`s provided by the `PeerList` message were tracked +- Added `TxID`s to the `PeerList` message to unique-ify nodeIDs across validation periods +- Added `TxID` mappings to the gossip tracker + +### Validator Set Tracking + +- Renamed `GetValidators` to `Get` on the `validators.Manager` interface +- Removed `Set`, `AddWeight`, `RemoveWeight`, and `Contains` from the `validators.Manager` interface +- Added `Add` to the `validators.Manager` interface +- Removed `Set` from the `validators.Set` interface +- Added `Add` and `Get` to the `validators.Set` interface +- Modified `validators.Set#Sample` to return `ids.NodeID` rather than `validators.Validator` +- Replaced the `validators.Validator` interface with a struct +- Added a `BLS` public key field to `validators.Validator` +- Added a `TxID` field to `validators.Validator` +- Improved and documented error handling within the `validators.Set` interface +- Added `BLS` public keys to the result of `GetValidatorSet` +- Added `BuildBlockWithContext` as an optional VM method to build blocks at a specific P-chain height +- Added `VerifyWithContext` as an optional block method to verify blocks at a specific P-chain height + +### Uptime Tracking + +- Added ConnectedSubnet message handling to the chain handler +- Added SubnetConnector interface and implemented it in the platformvm +- Added subnet uptimes to p2p `pong` messages +- Added subnet uptimes to `platform.getCurrentValidators` +- Added `subnetID` as an argument to `info.Uptime` + +### Fixes + +- Fixed incorrect context cancellation of escaped contexts from grpc servers +- Fixed race condition between API initialization and shutdown +- Fixed race condition between NAT
traversal initialization and shutdown +- Fixed race condition during beacon connection tracking +- Added race detection to the E2E tests +- Added additional message and sender tests + +### Coreth + +- Improved header and logs caching using maximum accepted depth cache +- Added config option to perform database inspection on startup +- Added configurable transaction indexing to reduce disk usage +- Added special case to allow transactions using Nick's Method to bypass API level replay protection +- Added counter metrics for number of accepted/processed logs + +### APIs + +- Added indices to the return values of `GetLastAccepted` and `GetContainerByID` on the `indexer` API client +- Removed unnecessary locking from the `info` API + +### Chain Data + +- Added `ChainDataDir` to the `snow.Context` to allow blockchains to canonically access disk outside avalanchego's database +- Added `--chain-data-dir` as a CLI flag to specify the base directory for all `ChainDataDir`s + +### Miscellaneous + +- Removed `Version` from the `peer.Network` interface +- Removed `Pong` from the `peer.Network` interface +- Reduced memory allocations inside the system throttler +- Added `CChainID` to the `snow.Context` +- Converted all sorting to utilize generics +- Converted all set management to utilize generics + +## [v1.9.3](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.3) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `19`. 
+ +### Tracing + +- Added `context.Context` to all `VM` interface functions +- Added `context.Context` to the `validators.State` interface +- Added additional message fields to `tracedRouter#HandleInbound` +- Added `tracedVM` implementations for `block.ChainVM` and `vertex.DAGVM` +- Added `tracedState` implementation for `validators.State` +- Added `tracedHandler` implementation for `http.Handler` +- Added `tracedConsensus` implementations for `snowman.Consensus` and `avalanche.Consensus` + +### Fixes + +- Fixed incorrect `NodeID` used in registered `AppRequest` timeouts +- Fixed panic when calling `encdb#NewBatch` after `encdb#Close` +- Fixed panic when calling `prefixdb#NewBatch` after `prefixdb#Close` + +### Configs + +- Added `proposerMinBlockDelay` support to subnet configs +- Added `providedFlags` field to the `initializing node` for easily observing custom node configs +- Added `--chain-aliases-file` and `--chain-aliases-file-content` CLI flags +- Added `--proposervm-use-current-height` CLI flag + +### Coreth + +- Added metric for number of processed and accepted transactions +- Added wait for state sync goroutines to complete on shutdown +- Increased go-ethereum dependency to v1.10.26 +- Increased soft cap on transaction size limits +- Added back isForkIncompatible checks for all existing forks +- Cleaned up Apricot Phase 6 code + +### Linting + +- Added `unused-receiver` linter +- Added `unused-parameter` linter +- Added `useless-break` linter +- Added `unhandled-error` linter +- Added `unexported-naming` linter +- Added `struct-tag` linter +- Added `bool-literal-in-expr` linter +- Added `early-return` linter +- Added `empty-lines` linter +- Added `error-lint` linter + +### Testing + +- Added `scripts/build_fuzz.sh` and initial fuzz tests +- Added additional `Fx` tests +- Added additional `messageQueue` tests +- Fixed `vmRegisterer` tests + +### Documentation + +- Documented `Database.Put` invariant for `nil` and empty slices +- Documented avalanchego's 
versioning scheme +- Improved `vm.proto` docs + +### Miscellaneous + +- Added peer gossip tracker +- Added `avalanche_P_vm_time_until_unstake` and `avalanche_P_vm_time_until_unstake_subnet` metrics +- Added `keychain.NewLedgerKeychainFromIndices` +- Removed usage of `Temporary` error handling after `listener#Accept` +- Removed `Parameters` from all `Consensus` interfaces +- Updated `avalanche-network-runner` to `v1.3.0` +- Added `ids.BigBitSet` to extend `ids.BitSet64` for arbitrarily large sets +- Added support for parsing future subnet uptime tracking data to the P-chain's state implementation +- Increased validator set cache size +- Added `avax.UTXOIDFromString` helper for managing `UTXOID`s more easily + +## [v1.9.2](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.2) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `19`. + +### Coreth + +- Added trie clean cache journaling to disk to improve processing time after restart +- Fixed regression where a snapshot could be marked as stale by the async acceptor during block processing +- Added fine-grained block processing metrics + +### RPCChainVM + +- Added `validators.State` to the rpcchainvm server's `snow.Context` +- Added `rpcProtocolVersion` to the output of `info.getNodeVersion` +- Added `rpcchainvm` protocol version to the output of the `--version` flag +- Added `version.RPCChainVMProtocolCompatibility` map to easily compare plugin compatibility against avalanchego versions + +### Builds + +- Downgraded `ubuntu` release binaries from `jammy` to `focal` +- Updated macos github runners to `macos-12` +- Added workflow dispatch to build release binaries + +### BLS + +- Added bls proof of possession to `platform.getCurrentValidators` and `platform.getPendingValidators` +- Added bls public key to in-memory staker objects +- Improved memory clearing of bls secret keys + +### 
Cleanup + +- Fixed issue where the chain manager would attempt to start chain creation multiple times +- Fixed race that caused the P-chain to finish bootstrapping before the primary network finished bootstrapping +- Converted inbound message handling to expect usage of types rather than maps of fields +- Simplified the `validators.Set` implementation +- Added a warning if synchronous consensus messages take too long + +## [v1.9.1](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.1) + +This version is backwards compatible to [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0). It is optional, but encouraged. The supported plugin version is `18`. + +### Features + +- Added cross-chain messaging support to the VM interface +- Added Ledger support to the Primary Network wallet +- Converted Bionic builds to Jammy builds +- Added `mock.gen.sh` to programmatically generate mock implementations +- Added BLS signer to the `snow.Context` +- Moved `base` from `rpc.NewEndpointRequester` to be included in the `method` in `SendRequest` +- Converted `UnboundedQueue` to `UnboundedDeque` + +### Observability + +- Added support for OpenTelemetry tracing +- Converted periodic bootstrapping status update to be time-based +- Removed duplicated fields from the json format of the node config +- Configured min connected stake health check based on the consensus parameters +- Added new consensus metrics +- Documented how chain time is advanced in the PlatformVM with `chain_time_update.md` + +### Cleanup + +- Converted chain creation to be handled asynchronously from the P-chain's execution environment +- Removed `SetLinger` usage of P2P TCP connections +- Removed `Banff` upgrade flow +- Fixed ProposerVM inner block caching after verification +- Fixed PlatformVM mempool verification to use an updated chain time +- Removed deprecated CLI flags: `--dynamic-update-duration`, `--dynamic-public-ip` +- Added unexpected Put bytes tests to the Avalanche and Snowman 
consensus engines +- Removed mockery generated mock implementations +- Converted safe math functions to use generics where possible +- Added linting to prevent usage of `assert` in unit tests +- Converted empty struct usage to `nil` for interface compliance checks +- Added CODEOWNERs to own first rounds of PR review + ## [v1.9.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0) This upgrade adds support for creating Proof-of-Stake Subnets. diff --git a/avalanchego/SECURITY.md b/avalanchego/SECURITY.md new file mode 100644 index 00000000..26e938c8 --- /dev/null +++ b/avalanchego/SECURITY.md @@ -0,0 +1,17 @@ +# Security Policy + +Avalanche takes the security of the platform and of its users very seriously. We and our community recognize the critical role of external security researchers and developers and welcome responsible disclosures. Valid reports will be eligible for a reward (terms and conditions apply). + +## Reporting a Vulnerability + +**Please do not file a public ticket** mentioning the vulnerability. To disclose a vulnerability submit it through our [Bug Bounty Program](https://hackenproof.com/avalanche). + +Vulnerabilities must be disclosed to us privately with reasonable time to respond, and avoid compromise of other users and accounts, or loss of funds that are not your own. We do not reward spam or social engineering vulnerabilities. + +Do not test for or validate any security issues in the live Avalanche networks (Mainnet and Fuji testnet), confirm all exploits in a local private testnet. + +Please refer to the [Bug Bounty Page](https://hackenproof.com/avalanche) for the most up-to-date program rules and scope. + +## Supported Versions + +Please use the [most recently released version](https://github.com/ava-labs/avalanchego/releases/latest) to perform testing and to validate security issues. 
diff --git a/avalanchego/api/admin/client.go b/avalanchego/api/admin/client.go index c6bda757..51cb0754 100644 --- a/avalanchego/api/admin/client.go +++ b/avalanchego/api/admin/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package admin @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ Client = &client{} +var _ Client = (*client)(nil) // Client interface for the Avalanche Platform Info API Endpoint type Client interface { @@ -39,36 +39,35 @@ type client struct { // NewClient returns a new Info API Client func NewClient(uri string) Client { return &client{requester: rpc.NewEndpointRequester( - uri+"/ext/admin", - "admin", + uri + "/ext/admin", )} } func (c *client) StartCPUProfiler(ctx context.Context, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "startCPUProfiler", struct{}{}, &api.EmptyReply{}, options...) + return c.requester.SendRequest(ctx, "admin.startCPUProfiler", struct{}{}, &api.EmptyReply{}, options...) } func (c *client) StopCPUProfiler(ctx context.Context, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "stopCPUProfiler", struct{}{}, &api.EmptyReply{}, options...) + return c.requester.SendRequest(ctx, "admin.stopCPUProfiler", struct{}{}, &api.EmptyReply{}, options...) } func (c *client) MemoryProfile(ctx context.Context, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "memoryProfile", struct{}{}, &api.EmptyReply{}, options...) + return c.requester.SendRequest(ctx, "admin.memoryProfile", struct{}{}, &api.EmptyReply{}, options...) } func (c *client) LockProfile(ctx context.Context, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "lockProfile", struct{}{}, &api.EmptyReply{}, options...) + return c.requester.SendRequest(ctx, "admin.lockProfile", struct{}{}, &api.EmptyReply{}, options...) 
} func (c *client) Alias(ctx context.Context, endpoint, alias string, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "alias", &AliasArgs{ + return c.requester.SendRequest(ctx, "admin.alias", &AliasArgs{ Endpoint: endpoint, Alias: alias, }, &api.EmptyReply{}, options...) } func (c *client) AliasChain(ctx context.Context, chain, alias string, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "aliasChain", &AliasChainArgs{ + return c.requester.SendRequest(ctx, "admin.aliasChain", &AliasChainArgs{ Chain: chain, Alias: alias, }, &api.EmptyReply{}, options...) @@ -76,19 +75,19 @@ func (c *client) AliasChain(ctx context.Context, chain, alias string, options .. func (c *client) GetChainAliases(ctx context.Context, chain string, options ...rpc.Option) ([]string, error) { res := &GetChainAliasesReply{} - err := c.requester.SendRequest(ctx, "getChainAliases", &GetChainAliasesArgs{ + err := c.requester.SendRequest(ctx, "admin.getChainAliases", &GetChainAliasesArgs{ Chain: chain, }, res, options...) return res.Aliases, err } func (c *client) Stacktrace(ctx context.Context, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "stacktrace", struct{}{}, &api.EmptyReply{}, options...) + return c.requester.SendRequest(ctx, "admin.stacktrace", struct{}{}, &api.EmptyReply{}, options...) } func (c *client) LoadVMs(ctx context.Context, options ...rpc.Option) (map[ids.ID][]string, map[ids.ID]string, error) { res := &LoadVMsReply{} - err := c.requester.SendRequest(ctx, "loadVMs", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "admin.loadVMs", struct{}{}, res, options...) 
return res.NewVMs, res.FailedVMs, err } @@ -116,7 +115,7 @@ func (c *client) SetLoggerLevel( return fmt.Errorf("couldn't parse %q to log level", displayLevel) } } - return c.requester.SendRequest(ctx, "setLoggerLevel", &SetLoggerLevelArgs{ + return c.requester.SendRequest(ctx, "admin.setLoggerLevel", &SetLoggerLevelArgs{ LoggerName: loggerName, LogLevel: &logLevelArg, DisplayLevel: &displayLevelArg, @@ -129,7 +128,7 @@ func (c *client) GetLoggerLevel( options ...rpc.Option, ) (map[string]LogAndDisplayLevels, error) { res := &GetLoggerLevelReply{} - err := c.requester.SendRequest(ctx, "getLoggerLevel", &GetLoggerLevelArgs{ + err := c.requester.SendRequest(ctx, "admin.getLoggerLevel", &GetLoggerLevelArgs{ LoggerName: loggerName, }, res, options...) return res.LoggerLevels, err @@ -137,6 +136,6 @@ func (c *client) GetLoggerLevel( func (c *client) GetConfig(ctx context.Context, options ...rpc.Option) (interface{}, error) { var res interface{} - err := c.requester.SendRequest(ctx, "getConfig", struct{}{}, &res, options...) + err := c.requester.SendRequest(ctx, "admin.getConfig", struct{}{}, &res, options...) return res, err } diff --git a/avalanchego/api/admin/client_test.go b/avalanchego/api/admin/client_test.go index 7119249b..e034e398 100644 --- a/avalanchego/api/admin/client_test.go +++ b/avalanchego/api/admin/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package admin @@ -16,6 +16,8 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) +var errTest = errors.New("non-nil error") + // SuccessResponseTest defines the expected result of an API call that returns SuccessResponse type SuccessResponseTest struct { Err error @@ -28,7 +30,7 @@ func GetSuccessResponseTests() []SuccessResponseTest { Err: nil, }, { - Err: errors.New("Non-nil error"), + Err: errTest, }, } } @@ -46,7 +48,7 @@ func NewMockClient(response interface{}, err error) rpc.EndpointRequester { } } -func (mc *mockClient) SendRequest(ctx context.Context, method string, params interface{}, reply interface{}, options ...rpc.Option) error { +func (mc *mockClient) SendRequest(_ context.Context, _ string, _ interface{}, reply interface{}, _ ...rpc.Option) error { if mc.err != nil { return mc.err } @@ -182,11 +184,11 @@ func TestGetChainAliases(t *testing.T) { }) t.Run("failure", func(t *testing.T) { - mockClient := client{requester: NewMockClient(&GetChainAliasesReply{}, errors.New("some error"))} + mockClient := client{requester: NewMockClient(&GetChainAliasesReply{}, errTest)} _, err := mockClient.GetChainAliases(context.Background(), "chain") - require.EqualError(t, err, "some error") + require.ErrorIs(t, err, errTest) }) } @@ -228,11 +230,11 @@ func TestReloadInstalledVMs(t *testing.T) { }) t.Run("failure", func(t *testing.T) { - mockClient := client{requester: NewMockClient(&LoadVMsReply{}, errors.New("some error"))} + mockClient := client{requester: NewMockClient(&LoadVMsReply{}, errTest)} _, _, err := mockClient.LoadVMs(context.Background()) - require.EqualError(t, err, "some error") + require.ErrorIs(t, err, errTest) }) } @@ -279,7 +281,7 @@ func TestSetLoggerLevel(t *testing.T) { require := require.New(t) var err error if tt.serviceErr { - err = errors.New("some error") + err = errTest } mockClient := client{requester: NewMockClient(&api.EmptyReply{}, err)} err = mockClient.SetLoggerLevel( @@ -328,7 +330,7 @@ func TestGetLoggerLevel(t *testing.T) { 
require := require.New(t) var err error if tt.serviceErr { - err = errors.New("some error") + err = errTest } mockClient := client{requester: NewMockClient(&GetLoggerLevelReply{LoggerLevels: tt.serviceResponse}, err)} res, err := mockClient.GetLoggerLevel( @@ -372,7 +374,7 @@ func TestGetConfig(t *testing.T) { require := require.New(t) var err error if tt.serviceErr { - err = errors.New("some error") + err = errTest } mockClient := client{requester: NewMockClient(tt.expectedResponse, err)} res, err := mockClient.GetConfig(context.Background()) diff --git a/avalanchego/api/admin/service.go b/avalanchego/api/admin/service.go index 72043369..57ee8bfa 100644 --- a/avalanchego/api/admin/service.go +++ b/avalanchego/api/admin/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package admin @@ -73,31 +73,43 @@ func NewService(config Config) (*common.HTTPHandler, error) { } // StartCPUProfiler starts a cpu profile writing to the specified file -func (service *Admin) StartCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { - service.Log.Debug("Admin: StartCPUProfiler called") +func (a *Admin) StartCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "startCPUProfiler"), + ) - return service.profiler.StartCPUProfiler() + return a.profiler.StartCPUProfiler() } // StopCPUProfiler stops the cpu profile -func (service *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { - service.Log.Debug("Admin: StopCPUProfiler called") +func (a *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "stopCPUProfiler"), + ) - return service.profiler.StopCPUProfiler() + return 
a.profiler.StopCPUProfiler() } // MemoryProfile runs a memory profile writing to the specified file -func (service *Admin) MemoryProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { - service.Log.Debug("Admin: MemoryProfile called") +func (a *Admin) MemoryProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "memoryProfile"), + ) - return service.profiler.MemoryProfile() + return a.profiler.MemoryProfile() } // LockProfile runs a mutex profile writing to the specified file -func (service *Admin) LockProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { - service.Log.Debug("Admin: LockProfile called") +func (a *Admin) LockProfile(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "lockProfile"), + ) - return service.profiler.LockProfile() + return a.profiler.LockProfile() } // AliasArgs are the arguments for calling Alias @@ -107,8 +119,10 @@ type AliasArgs struct { } // Alias attempts to alias an HTTP endpoint to a new name -func (service *Admin) Alias(_ *http.Request, args *AliasArgs, _ *api.EmptyReply) error { - service.Log.Debug("Admin: Alias called", +func (a *Admin) Alias(_ *http.Request, args *AliasArgs, _ *api.EmptyReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "alias"), logging.UserString("endpoint", args.Endpoint), logging.UserString("alias", args.Alias), ) @@ -117,7 +131,7 @@ func (service *Admin) Alias(_ *http.Request, args *AliasArgs, _ *api.EmptyReply) return errAliasTooLong } - return service.HTTPServer.AddAliasesWithReadLock(args.Endpoint, args.Alias) + return a.HTTPServer.AddAliasesWithReadLock(args.Endpoint, args.Alias) } // AliasChainArgs are the arguments for calling AliasChain @@ -127,8 +141,10 @@ type AliasChainArgs struct { } // AliasChain attempts to alias a chain to a new name -func 
(service *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, _ *api.EmptyReply) error { - service.Log.Debug("Admin: AliasChain called", +func (a *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, _ *api.EmptyReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "aliasChain"), logging.UserString("chain", args.Chain), logging.UserString("alias", args.Alias), ) @@ -136,18 +152,18 @@ func (service *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, _ *api.E if len(args.Alias) > maxAliasLength { return errAliasTooLong } - chainID, err := service.ChainManager.Lookup(args.Chain) + chainID, err := a.ChainManager.Lookup(args.Chain) if err != nil { return err } - if err := service.ChainManager.Alias(chainID, args.Alias); err != nil { + if err := a.ChainManager.Alias(chainID, args.Alias); err != nil { return err } endpoint := path.Join(constants.ChainAliasPrefix, chainID.String()) alias := path.Join(constants.ChainAliasPrefix, args.Alias) - return service.HTTPServer.AddAliasesWithReadLock(endpoint, alias) + return a.HTTPServer.AddAliasesWithReadLock(endpoint, alias) } // GetChainAliasesArgs are the arguments for calling GetChainAliases @@ -161,8 +177,10 @@ type GetChainAliasesReply struct { } // GetChainAliases returns the aliases of the chain -func (service *Admin) GetChainAliases(_ *http.Request, args *GetChainAliasesArgs, reply *GetChainAliasesReply) error { - service.Log.Debug("Admin: GetChainAliases called", +func (a *Admin) GetChainAliases(_ *http.Request, args *GetChainAliasesArgs, reply *GetChainAliasesReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "getChainAliases"), logging.UserString("chain", args.Chain), ) @@ -171,13 +189,16 @@ func (service *Admin) GetChainAliases(_ *http.Request, args *GetChainAliasesArgs return err } - reply.Aliases, err = service.ChainManager.Aliases(id) + reply.Aliases, err = a.ChainManager.Aliases(id) return err } // 
Stacktrace returns the current global stacktrace -func (service *Admin) Stacktrace(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { - service.Log.Debug("Admin: Stacktrace called") +func (a *Admin) Stacktrace(_ *http.Request, _ *struct{}, _ *api.EmptyReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "stacktrace"), + ) stacktrace := []byte(utils.GetStacktrace(true)) return perms.WriteFile(stacktraceFile, stacktrace, perms.ReadWrite) @@ -199,8 +220,10 @@ type SetLoggerLevelArgs struct { // Sets the display level of these loggers to args.LogLevel. // If args.DisplayLevel == nil, doesn't set the display level of these loggers. // If args.DisplayLevel != nil, must be a valid string representation of a log level. -func (service *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api.EmptyReply) error { - service.Log.Debug("Admin: SetLoggerLevel called", +func (a *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, _ *api.EmptyReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "setLoggerLevel"), logging.UserString("loggerName", args.LoggerName), zap.Stringer("logLevel", args.LogLevel), zap.Stringer("displayLevel", args.DisplayLevel), @@ -215,17 +238,17 @@ func (service *Admin) SetLoggerLevel(_ *http.Request, args *SetLoggerLevelArgs, loggerNames = []string{args.LoggerName} } else { // Empty name means all loggers - loggerNames = service.LogFactory.GetLoggerNames() + loggerNames = a.LogFactory.GetLoggerNames() } for _, name := range loggerNames { if args.LogLevel != nil { - if err := service.LogFactory.SetLogLevel(name, *args.LogLevel); err != nil { + if err := a.LogFactory.SetLogLevel(name, *args.LogLevel); err != nil { return err } } if args.DisplayLevel != nil { - if err := service.LogFactory.SetDisplayLevel(name, *args.DisplayLevel); err != nil { + if err := a.LogFactory.SetDisplayLevel(name, *args.DisplayLevel); err != nil { return 
err } } @@ -249,8 +272,10 @@ type GetLoggerLevelReply struct { } // GetLogLevel returns the log level and display level of all loggers. -func (service *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply *GetLoggerLevelReply) error { - service.Log.Debug("Admin: GetLoggerLevels called", +func (a *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, reply *GetLoggerLevelReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "getLoggerLevels"), logging.UserString("loggerName", args.LoggerName), ) reply.LoggerLevels = make(map[string]LogAndDisplayLevels) @@ -259,15 +284,15 @@ func (service *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, if len(args.LoggerName) > 0 { loggerNames = []string{args.LoggerName} } else { - loggerNames = service.LogFactory.GetLoggerNames() + loggerNames = a.LogFactory.GetLoggerNames() } for _, name := range loggerNames { - logLevel, err := service.LogFactory.GetLogLevel(name) + logLevel, err := a.LogFactory.GetLogLevel(name) if err != nil { return err } - displayLevel, err := service.LogFactory.GetDisplayLevel(name) + displayLevel, err := a.LogFactory.GetDisplayLevel(name) if err != nil { return err } @@ -280,9 +305,12 @@ func (service *Admin) GetLoggerLevel(_ *http.Request, args *GetLoggerLevelArgs, } // GetConfig returns the config that the node was started with. -func (service *Admin) GetConfig(_ *http.Request, args *struct{}, reply *interface{}) error { - service.Log.Debug("Admin: GetConfig called") - *reply = service.NodeConfig +func (a *Admin) GetConfig(_ *http.Request, _ *struct{}, reply *interface{}) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "getConfig"), + ) + *reply = a.NodeConfig return nil } @@ -295,10 +323,14 @@ type LoadVMsReply struct { } // LoadVMs loads any new VMs available to the node and returns the added VMs. 
-func (service *Admin) LoadVMs(_ *http.Request, _ *struct{}, reply *LoadVMsReply) error { - service.Log.Debug("Admin: LoadVMs called") +func (a *Admin) LoadVMs(r *http.Request, _ *struct{}, reply *LoadVMsReply) error { + a.Log.Debug("API called", + zap.String("service", "admin"), + zap.String("method", "loadVMs"), + ) - loadedVMs, failedVMs, err := service.VMRegistry.ReloadWithReadLock() + ctx := r.Context() + loadedVMs, failedVMs, err := a.VMRegistry.ReloadWithReadLock(ctx) if err != nil { return err } @@ -310,6 +342,6 @@ func (service *Admin) LoadVMs(_ *http.Request, _ *struct{}, reply *LoadVMsReply) } reply.FailedVMs = failedVMsParsed - reply.NewVMs, err = ids.GetRelevantAliases(service.VMManager, loadedVMs) + reply.NewVMs, err = ids.GetRelevantAliases(a.VMManager, loadedVMs) return err } diff --git a/avalanchego/api/admin/service_test.go b/avalanchego/api/admin/service_test.go index ce7a4e80..0cebe8d6 100644 --- a/avalanchego/api/admin/service_test.go +++ b/avalanchego/api/admin/service_test.go @@ -1,10 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package admin import ( - "errors" + "net/http" "testing" "github.com/golang/mock/gomock" @@ -17,8 +17,6 @@ import ( "github.com/ava-labs/avalanchego/vms/registry" ) -var errOops = errors.New("oops") - type loadVMsTest struct { admin *Admin ctrl *gomock.Controller @@ -57,7 +55,7 @@ func TestLoadVMsSuccess(t *testing.T) { newVMs := []ids.ID{id1, id2} failedVMs := map[ids.ID]error{ - ids.GenerateTestID(): errors.New("failed for some reason"), + ids.GenerateTestID(): errTest, } // every vm is at least aliased to itself. 
alias1 := []string{id1.String(), "vm1-alias-1", "vm1-alias-2"} @@ -68,14 +66,14 @@ func TestLoadVMsSuccess(t *testing.T) { id2: alias2[1:], } - resources.mockLog.EXPECT().Debug(gomock.Any()).Times(1) - resources.mockVMRegistry.EXPECT().ReloadWithReadLock().Times(1).Return(newVMs, failedVMs, nil) + resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) + resources.mockVMRegistry.EXPECT().ReloadWithReadLock(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(alias2, nil) // execute test reply := LoadVMsReply{} - err := resources.admin.LoadVMs(nil, nil, &reply) + err := resources.admin.LoadVMs(&http.Request{}, nil, &reply) require.Equal(t, expectedVMRegistry, reply.NewVMs) require.Equal(t, err, nil) @@ -86,14 +84,14 @@ func TestLoadVMsReloadFails(t *testing.T) { resources := initLoadVMsTest(t) defer resources.ctrl.Finish() - resources.mockLog.EXPECT().Debug(gomock.Any()).Times(1) + resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) // Reload fails - resources.mockVMRegistry.EXPECT().ReloadWithReadLock().Times(1).Return(nil, nil, errOops) + resources.mockVMRegistry.EXPECT().ReloadWithReadLock(gomock.Any()).Times(1).Return(nil, nil, errTest) reply := LoadVMsReply{} - err := resources.admin.LoadVMs(nil, nil, &reply) + err := resources.admin.LoadVMs(&http.Request{}, nil, &reply) - require.Equal(t, err, errOops) + require.Equal(t, err, errTest) } // Tests behavior for LoadVMs if we fail to fetch our aliases @@ -105,18 +103,18 @@ func TestLoadVMsGetAliasesFails(t *testing.T) { id2 := ids.GenerateTestID() newVMs := []ids.ID{id1, id2} failedVMs := map[ids.ID]error{ - ids.GenerateTestID(): errors.New("failed for some reason"), + ids.GenerateTestID(): errTest, } // every vm is at least aliased to itself. 
alias1 := []string{id1.String(), "vm1-alias-1", "vm1-alias-2"} - resources.mockLog.EXPECT().Debug(gomock.Any()).Times(1) - resources.mockVMRegistry.EXPECT().ReloadWithReadLock().Times(1).Return(newVMs, failedVMs, nil) + resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) + resources.mockVMRegistry.EXPECT().ReloadWithReadLock(gomock.Any()).Times(1).Return(newVMs, failedVMs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) - resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(nil, errOops) + resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(nil, errTest) reply := LoadVMsReply{} - err := resources.admin.LoadVMs(nil, nil, &reply) + err := resources.admin.LoadVMs(&http.Request{}, nil, &reply) - require.Equal(t, err, errOops) + require.Equal(t, err, errTest) } diff --git a/avalanchego/api/auth/auth.go b/avalanchego/api/auth/auth.go index 3b10674a..733f276e 100644 --- a/avalanchego/api/auth/auth.go +++ b/avalanchego/api/auth/auth.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth @@ -14,13 +14,14 @@ import ( "sync" "time" - "github.com/golang-jwt/jwt" + jwt "github.com/golang-jwt/jwt/v4" "github.com/gorilla/rpc/v2" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/password" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -53,7 +54,7 @@ var ( errNoEndpoints = errors.New("must name at least one endpoint") errTooManyEndpoints = fmt.Errorf("can only name at most %d endpoints", maxEndpoints) - _ Auth = &auth{} + _ Auth = (*auth)(nil) ) type Auth interface { @@ -99,14 +100,13 @@ type auth struct { // Can be changed via API call. 
password password.Hash // Set of token IDs that have been revoked - revoked map[string]struct{} + revoked set.Set[string] } func New(log logging.Logger, endpoint, pw string) (Auth, error) { a := &auth{ log: log, endpoint: endpoint, - revoked: make(map[string]struct{}), } return a, a.password.Set(pw) } @@ -116,7 +116,6 @@ func NewFromHash(log logging.Logger, endpoint string, pw password.Hash) Auth { log: log, endpoint: endpoint, password: pw, - revoked: make(map[string]struct{}), } } @@ -149,12 +148,12 @@ func (a *auth) NewToken(pw string, duration time.Duration, endpoints []string) ( if _, err := rand.Read(idBytes[:]); err != nil { return "", fmt.Errorf("failed to generate the unique token ID due to %w", err) } - id := base64.URLEncoding.EncodeToString(idBytes[:]) + id := base64.RawURLEncoding.EncodeToString(idBytes[:]) claims := endpointClaims{ - StandardClaims: jwt.StandardClaims{ - ExpiresAt: a.clock.Time().Add(duration).Unix(), - Id: id, + RegisteredClaims: jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(a.clock.Time().Add(duration)), + ID: id, }, } if canAccessAll { @@ -196,7 +195,7 @@ func (a *auth) RevokeToken(tokenStr, pw string) error { if !ok { return fmt.Errorf("expected auth token's claims to be type endpointClaims but is %T", token.Claims) } - a.revoked[claims.Id] = struct{}{} + a.revoked.Add(claims.ID) return nil } @@ -217,7 +216,7 @@ func (a *auth) AuthenticateToken(tokenStr, url string) error { return fmt.Errorf("expected auth token's claims to be type endpointClaims but is %T", token.Claims) } - _, revoked := a.revoked[claims.Id] + _, revoked := a.revoked[claims.ID] if revoked { return errTokenRevoked } @@ -250,7 +249,7 @@ func (a *auth) ChangePassword(oldPW, newPW string) error { // All the revoked tokens are now invalid; no need to mark specifically as // revoked. 
- a.revoked = make(map[string]struct{}) + a.revoked.Clear() return nil } diff --git a/avalanchego/api/auth/auth_test.go b/avalanchego/api/auth/auth_test.go index 46124c71..c86bcc22 100644 --- a/avalanchego/api/auth/auth_test.go +++ b/avalanchego/api/auth/auth_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth @@ -14,7 +14,7 @@ import ( "testing" "time" - "github.com/golang-jwt/jwt" + jwt "github.com/golang-jwt/jwt/v4" "github.com/stretchr/testify/require" @@ -26,6 +26,7 @@ var ( testPassword = "password!@#$%$#@!" hashedPassword = password.Hash{} unAuthorizedResponseRegex = "^{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32600,\"message\":\"(.*)\"},\"id\":1}" + errTest = errors.New("non-nil error") ) func init() { @@ -70,7 +71,7 @@ func TestNewTokenHappyPath(t *testing.T) { require.True(t, ok, "expected auth token's claims to be type endpointClaims but is different type") require.ElementsMatch(t, endpoints, claims.Endpoints, "token has wrong endpoint claims") - shouldExpireAt := now.Add(defaultTokenLifespan).Unix() + shouldExpireAt := jwt.NewNumericDate(now.Add(defaultTokenLifespan)) require.Equal(t, shouldExpireAt, claims.ExpiresAt, "token expiration time is wrong") } @@ -151,8 +152,8 @@ func TestWrapHandlerHappyPath(t *testing.T) { wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:9650%s", endpoint), strings.NewReader("")) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokenStr)) + req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", "Bearer "+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) require.Equal(t, http.StatusOK, rr.Code) @@ -173,8 +174,8 @@ func 
TestWrapHandlerRevokedToken(t *testing.T) { wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:9650%s", endpoint), strings.NewReader("")) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokenStr)) + req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", "Bearer "+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) require.Equal(t, http.StatusUnauthorized, rr.Code) @@ -196,8 +197,8 @@ func TestWrapHandlerExpiredToken(t *testing.T) { wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:9650%s", endpoint), strings.NewReader("")) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokenStr)) + req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", "Bearer "+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) require.Equal(t, http.StatusUnauthorized, rr.Code) @@ -233,8 +234,8 @@ func TestWrapHandlerUnauthorizedEndpoint(t *testing.T) { wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range unauthorizedEndpoints { - req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:9650%s", endpoint), strings.NewReader("")) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokenStr)) + req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", "Bearer "+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) require.Equal(t, http.StatusUnauthorized, rr.Code) @@ -253,7 +254,7 @@ func TestWrapHandlerAuthEndpoint(t *testing.T) { wrappedHandler := auth.WrapHandler(dummyHandler) req := 
httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650/ext/auth", strings.NewReader("")) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokenStr)) + req.Header.Add("Authorization", "Bearer "+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) require.Equal(t, http.StatusOK, rr.Code) @@ -269,8 +270,8 @@ func TestWrapHandlerAccessAll(t *testing.T) { wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:9650%s", endpoint), strings.NewReader("")) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokenStr)) + req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", "Bearer "+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) require.Equal(t, http.StatusOK, rr.Code) @@ -279,9 +280,9 @@ func TestWrapHandlerAccessAll(t *testing.T) { func TestWriteUnauthorizedResponse(t *testing.T) { rr := httptest.NewRecorder() - writeUnauthorizedResponse(rr, errors.New("example err")) + writeUnauthorizedResponse(rr, errTest) require.Equal(t, http.StatusUnauthorized, rr.Code) - require.Equal(t, "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32600,\"message\":\"example err\"},\"id\":1}\n", rr.Body.String()) + require.Equal(t, "{\"jsonrpc\":\"2.0\",\"error\":{\"code\":-32600,\"message\":\"non-nil error\"},\"id\":1}\n", rr.Body.String()) } func TestWrapHandlerMutatedRevokedToken(t *testing.T) { @@ -298,13 +299,11 @@ func TestWrapHandlerMutatedRevokedToken(t *testing.T) { wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:9650%s", endpoint), strings.NewReader("")) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokenStr+"=")) + req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) 
+ req.Header.Add("Authorization", fmt.Sprintf("Bearer %s=", tokenStr)) // The appended = at the end looks like padding rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) require.Equal(t, http.StatusUnauthorized, rr.Code) - require.Contains(t, rr.Body.String(), errTokenRevoked.Error()) - require.Regexp(t, unAuthorizedResponseRegex, rr.Body.String()) } } @@ -317,12 +316,12 @@ func TestWrapHandlerInvalidSigningMethod(t *testing.T) { if _, err := rand.Read(idBytes[:]); err != nil { t.Fatal(err) } - id := base64.URLEncoding.EncodeToString(idBytes[:]) + id := base64.RawURLEncoding.EncodeToString(idBytes[:]) claims := endpointClaims{ - StandardClaims: jwt.StandardClaims{ - ExpiresAt: auth.clock.Time().Add(defaultTokenLifespan).Unix(), - Id: id, + RegisteredClaims: jwt.RegisteredClaims{ + ExpiresAt: jwt.NewNumericDate(auth.clock.Time().Add(defaultTokenLifespan)), + ID: id, }, Endpoints: endpoints, } @@ -335,8 +334,8 @@ func TestWrapHandlerInvalidSigningMethod(t *testing.T) { wrappedHandler := auth.WrapHandler(dummyHandler) for _, endpoint := range endpoints { - req := httptest.NewRequest(http.MethodPost, fmt.Sprintf("http://127.0.0.1:9650%s", endpoint), strings.NewReader("")) - req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokenStr+"=")) + req := httptest.NewRequest(http.MethodPost, "http://127.0.0.1:9650"+endpoint, strings.NewReader("")) + req.Header.Add("Authorization", "Bearer "+tokenStr) rr := httptest.NewRecorder() wrappedHandler.ServeHTTP(rr, req) require.Equal(t, http.StatusUnauthorized, rr.Code) diff --git a/avalanchego/api/auth/claims.go b/avalanchego/api/auth/claims.go index 444583bb..e2bf55d3 100644 --- a/avalanchego/api/auth/claims.go +++ b/avalanchego/api/auth/claims.go @@ -1,15 +1,15 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package auth import ( - "github.com/golang-jwt/jwt" + jwt "github.com/golang-jwt/jwt/v4" ) // Custom claim type used for API access token type endpointClaims struct { - jwt.StandardClaims + jwt.RegisteredClaims // Each element is an endpoint that the token allows access to // If endpoints has an element "*", allows access to all API endpoints diff --git a/avalanchego/api/auth/response.go b/avalanchego/api/auth/response.go index 94592070..e87065c7 100644 --- a/avalanchego/api/auth/response.go +++ b/avalanchego/api/auth/response.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package auth diff --git a/avalanchego/api/auth/service.go b/avalanchego/api/auth/service.go index 2eebbf83..77517c17 100644 --- a/avalanchego/api/auth/service.go +++ b/avalanchego/api/auth/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package auth @@ -6,6 +6,8 @@ package auth import ( "net/http" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/api" ) @@ -33,7 +35,10 @@ type Token struct { } func (s *Service) NewToken(_ *http.Request, args *NewTokenArgs, reply *Token) error { - s.auth.log.Debug("Auth: NewToken called") + s.auth.log.Debug("API called", + zap.String("service", "auth"), + zap.String("method", "newToken"), + ) var err error reply.Token, err = s.auth.NewToken(args.Password.Password, defaultTokenLifespan, args.Endpoints) @@ -46,7 +51,10 @@ type RevokeTokenArgs struct { } func (s *Service) RevokeToken(_ *http.Request, args *RevokeTokenArgs, _ *api.EmptyReply) error { - s.auth.log.Debug("Auth: RevokeToken called") + s.auth.log.Debug("API called", + zap.String("service", "auth"), + zap.String("method", "revokeToken"), + ) return s.auth.RevokeToken(args.Token.Token, args.Password.Password) } @@ -57,7 +65,10 @@ type ChangePasswordArgs struct { } func (s *Service) ChangePassword(_ *http.Request, args *ChangePasswordArgs, _ *api.EmptyReply) error { - s.auth.log.Debug("Auth: ChangePassword called") + s.auth.log.Debug("API called", + zap.String("service", "auth"), + zap.String("method", "changePassword"), + ) return s.auth.ChangePassword(args.OldPassword, args.NewPassword) } diff --git a/avalanchego/api/common_args_responses.go b/avalanchego/api/common_args_responses.go index 6b6c2764..458fdbf3 100644 --- a/avalanchego/api/common_args_responses.go +++ b/avalanchego/api/common_args_responses.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package api @@ -67,6 +67,12 @@ type GetBlockArgs struct { Encoding formatting.Encoding `json:"encoding"` } +// GetBlockByHeightArgs is the parameters supplied to the GetBlockByHeight API +type GetBlockByHeightArgs struct { + Height uint64 `json:"height"` + Encoding formatting.Encoding `json:"encoding"` +} + // GetBlockResponse is the response object for the GetBlock API. type GetBlockResponse struct { Block interface{} `json:"block"` @@ -77,6 +83,10 @@ type GetBlockResponse struct { Encoding formatting.Encoding `json:"encoding"` } +type GetHeightResponse struct { + Height json.Uint64 `json:"height"` +} + // FormattedBlock defines a JSON formatted struct containing a block in Hex // format type FormattedBlock struct { diff --git a/avalanchego/api/health/checker.go b/avalanchego/api/health/checker.go index 64252e0f..efc89517 100644 --- a/avalanchego/api/health/checker.go +++ b/avalanchego/api/health/checker.go @@ -1,17 +1,23 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health +import "context" + +var _ Checker = CheckerFunc(nil) + // Checker can have its health checked type Checker interface { // HealthCheck returns health check results and, if not healthy, a non-nil // error // // It is expected that the results are json marshallable. 
- HealthCheck() (interface{}, error) + HealthCheck(context.Context) (interface{}, error) } -type CheckerFunc func() (interface{}, error) +type CheckerFunc func(context.Context) (interface{}, error) -func (f CheckerFunc) HealthCheck() (interface{}, error) { return f() } +func (f CheckerFunc) HealthCheck(ctx context.Context) (interface{}, error) { + return f(ctx) +} diff --git a/avalanchego/api/health/client.go b/avalanchego/api/health/client.go index f2a81f20..7c615757 100644 --- a/avalanchego/api/health/client.go +++ b/avalanchego/api/health/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health @@ -10,19 +10,18 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ Client = &client{} +var _ Client = (*client)(nil) // Client interface for Avalanche Health API Endpoint +// For helpers to wait for Readiness, Health, or Liveness, see AwaitReady, +// AwaitHealthy, and AwaitAlive. 
type Client interface { // Readiness returns if the node has finished initialization - Readiness(context.Context, ...rpc.Option) (*APIHealthReply, error) + Readiness(ctx context.Context, tags []string, options ...rpc.Option) (*APIReply, error) // Health returns a summation of the health of the node - Health(context.Context, ...rpc.Option) (*APIHealthReply, error) + Health(ctx context.Context, tags []string, options ...rpc.Option) (*APIReply, error) // Liveness returns if the node is in need of a restart - Liveness(context.Context, ...rpc.Option) (*APIHealthReply, error) - // AwaitHealthy queries the Health endpoint with a pause of [interval] - // in between checks and returns early if Health returns healthy - AwaitHealthy(ctx context.Context, freq time.Duration, options ...rpc.Option) (bool, error) + Liveness(ctx context.Context, tags []string, options ...rpc.Option) (*APIReply, error) } // Client implementation for Avalanche Health API Endpoint @@ -33,35 +32,58 @@ type client struct { // NewClient returns a client to interact with Health API endpoint func NewClient(uri string) Client { return &client{requester: rpc.NewEndpointRequester( - uri+"/ext/health", - "health", + uri + "/ext/health", )} } -func (c *client) Readiness(ctx context.Context, options ...rpc.Option) (*APIHealthReply, error) { - res := &APIHealthReply{} - err := c.requester.SendRequest(ctx, "readiness", struct{}{}, res, options...) +func (c *client) Readiness(ctx context.Context, tags []string, options ...rpc.Option) (*APIReply, error) { + res := &APIReply{} + err := c.requester.SendRequest(ctx, "health.readiness", &APIArgs{Tags: tags}, res, options...) return res, err } -func (c *client) Health(ctx context.Context, options ...rpc.Option) (*APIHealthReply, error) { - res := &APIHealthReply{} - err := c.requester.SendRequest(ctx, "health", struct{}{}, res, options...) 
+func (c *client) Health(ctx context.Context, tags []string, options ...rpc.Option) (*APIReply, error) { + res := &APIReply{} + err := c.requester.SendRequest(ctx, "health.health", &APIArgs{Tags: tags}, res, options...) return res, err } -func (c *client) Liveness(ctx context.Context, options ...rpc.Option) (*APIHealthReply, error) { - res := &APIHealthReply{} - err := c.requester.SendRequest(ctx, "liveness", struct{}{}, res, options...) +func (c *client) Liveness(ctx context.Context, tags []string, options ...rpc.Option) (*APIReply, error) { + res := &APIReply{} + err := c.requester.SendRequest(ctx, "health.liveness", &APIArgs{Tags: tags}, res, options...) return res, err } -func (c *client) AwaitHealthy(ctx context.Context, freq time.Duration, options ...rpc.Option) (bool, error) { +// AwaitReady polls the node every [freq] until the node reports ready. +// Only returns an error if [ctx] returns an error. +func AwaitReady(ctx context.Context, c Client, freq time.Duration, tags []string, options ...rpc.Option) (bool, error) { + return await(ctx, freq, c.Readiness, tags, options...) +} + +// AwaitHealthy polls the node every [freq] until the node reports healthy. +// Only returns an error if [ctx] returns an error. +func AwaitHealthy(ctx context.Context, c Client, freq time.Duration, tags []string, options ...rpc.Option) (bool, error) { + return await(ctx, freq, c.Health, tags, options...) +} + +// AwaitAlive polls the node every [freq] until the node reports liveness. +// Only returns an error if [ctx] returns an error. +func AwaitAlive(ctx context.Context, c Client, freq time.Duration, tags []string, options ...rpc.Option) (bool, error) { + return await(ctx, freq, c.Liveness, tags, options...) 
+} + +func await( + ctx context.Context, + freq time.Duration, + check func(ctx context.Context, tags []string, options ...rpc.Option) (*APIReply, error), + tags []string, + options ...rpc.Option, +) (bool, error) { ticker := time.NewTicker(freq) defer ticker.Stop() for { - res, err := c.Health(ctx, options...) + res, err := check(ctx, tags, options...) if err == nil && res.Healthy { return true, nil } diff --git a/avalanchego/api/health/client_test.go b/avalanchego/api/health/client_test.go index 42ec539a..88d0696e 100644 --- a/avalanchego/api/health/client_test.go +++ b/avalanchego/api/health/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health @@ -14,13 +14,13 @@ import ( ) type mockClient struct { - reply APIHealthReply + reply APIReply err error onCall func() } -func (mc *mockClient) SendRequest(ctx context.Context, method string, params interface{}, replyIntf interface{}, options ...rpc.Option) error { - reply := replyIntf.(*APIHealthReply) +func (mc *mockClient) SendRequest(_ context.Context, _ string, _ interface{}, replyIntf interface{}, _ ...rpc.Option) error { + reply := replyIntf.(*APIReply) *reply = mc.reply mc.onCall() return mc.err @@ -37,37 +37,45 @@ func TestClient(t *testing.T) { require := require.New(t) mc := &mockClient{ - reply: APIHealthReply{ + reply: APIReply{ Healthy: true, }, err: nil, onCall: func() {}, } - c := client{ + c := &client{ requester: mc, } { - readiness, err := c.Readiness(context.Background()) + readiness, err := c.Readiness(context.Background(), nil) require.NoError(err) require.True(readiness.Healthy) } { - health, err := c.Health(context.Background()) + health, err := c.Health(context.Background(), nil) require.NoError(err) require.True(health.Healthy) } { - liveness, err := c.Liveness(context.Background()) + liveness, err := 
c.Liveness(context.Background(), nil) require.NoError(err) require.True(liveness.Healthy) } { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - healthy, err := c.AwaitHealthy(ctx, time.Second) + healthy, err := AwaitHealthy(ctx, c, time.Second, nil) + cancel() + require.NoError(err) + require.True(healthy) + } + + { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + healthy, err := AwaitReady(ctx, c, time.Second, nil) cancel() require.NoError(err) require.True(healthy) @@ -77,7 +85,15 @@ func TestClient(t *testing.T) { { ctx, cancel := context.WithTimeout(context.Background(), 20*time.Microsecond) - healthy, err := c.AwaitHealthy(ctx, time.Microsecond) + healthy, err := AwaitHealthy(ctx, c, time.Microsecond, nil) + cancel() + require.Error(err) + require.False(healthy) + } + + { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Microsecond) + healthy, err := AwaitReady(ctx, c, time.Microsecond, nil) cancel() require.Error(err) require.False(healthy) @@ -88,7 +104,14 @@ func TestClient(t *testing.T) { } { - healthy, err := c.AwaitHealthy(context.Background(), time.Microsecond) + healthy, err := AwaitHealthy(context.Background(), c, time.Microsecond, nil) + require.NoError(err) + require.True(healthy) + } + + mc.reply.Healthy = false + { + healthy, err := AwaitReady(context.Background(), c, time.Microsecond, nil) require.NoError(err) require.True(healthy) } diff --git a/avalanchego/api/health/handler.go b/avalanchego/api/health/handler.go index ef4d3322..a8bd8269 100644 --- a/avalanchego/api/health/handler.go +++ b/avalanchego/api/health/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package health @@ -47,19 +47,20 @@ func NewGetAndPostHandler(log logging.Logger, reporter Reporter) (http.Handler, // NewGetHandler return a health handler that supports GET requests reporting // the result of the provided [reporter]. -func NewGetHandler(reporter func() (map[string]Result, bool)) http.Handler { +func NewGetHandler(reporter func(tags ...string) (map[string]Result, bool)) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Make sure the content type is set before writing the header. w.Header().Set("Content-Type", "application/json") - checks, healthy := reporter() + tags := r.URL.Query()["tag"] + checks, healthy := reporter(tags...) if !healthy { // If a health check has failed, we should return a 503. w.WriteHeader(http.StatusServiceUnavailable) } // The encoder will call write on the writer, which will write the // header with a 200. - _ = stdjson.NewEncoder(w).Encode(APIHealthReply{ + _ = stdjson.NewEncoder(w).Encode(APIReply{ Checks: checks, Healthy: healthy, }) diff --git a/avalanchego/api/health/health.go b/avalanchego/api/health/health.go index 712fcd55..874944e7 100644 --- a/avalanchego/api/health/health.go +++ b/avalanchego/api/health/health.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health import ( + "context" "time" "github.com/prometheus/client_golang/prometheus" @@ -13,7 +14,13 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" ) -var _ Health = &health{} +// GlobalTag is the tag that is returned for all health check results, +// regardless of the tags passed to the Reporter. +// Registering a health check with this tag will ensure that it is always +// included in the results. 
+const GlobalTag = "global" + +var _ Health = (*health)(nil) // Health defines the full health service interface for registering, reporting // and refreshing health checks. @@ -21,22 +28,27 @@ type Health interface { Registerer Reporter - Start(freq time.Duration) + // Start running periodic health checks at the specified frequency. + // Repeated calls to Start will be no-ops. + Start(ctx context.Context, freq time.Duration) + + // Stop running periodic health checks. Stop should only be called after + // Start. Once Stop returns, no more health checks will be executed. Stop() } // Registerer defines how to register new components to check the health of. type Registerer interface { - RegisterReadinessCheck(name string, checker Checker) error - RegisterHealthCheck(name string, checker Checker) error - RegisterLivenessCheck(name string, checker Checker) error + RegisterReadinessCheck(name string, checker Checker, tags ...string) error + RegisterHealthCheck(name string, checker Checker, tags ...string) error + RegisterLivenessCheck(name string, checker Checker, tags ...string) error } // Reporter returns the current health status. type Reporter interface { - Readiness() (map[string]Result, bool) - Health() (map[string]Result, bool) - Liveness() (map[string]Result, bool) + Readiness(tags ...string) (map[string]Result, bool) + Health(tags ...string) (map[string]Result, bool) + Liveness(tags ...string) (map[string]Result, bool) } type health struct { @@ -66,20 +78,20 @@ func New(log logging.Logger, registerer prometheus.Registerer) (Health, error) { }, err } -func (h *health) RegisterReadinessCheck(name string, checker Checker) error { - return h.readiness.RegisterMonotonicCheck(name, checker) +func (h *health) RegisterReadinessCheck(name string, checker Checker, tags ...string) error { + return h.readiness.RegisterMonotonicCheck(name, checker, tags...) 
} -func (h *health) RegisterHealthCheck(name string, checker Checker) error { - return h.health.RegisterCheck(name, checker) +func (h *health) RegisterHealthCheck(name string, checker Checker, tags ...string) error { + return h.health.RegisterCheck(name, checker, tags...) } -func (h *health) RegisterLivenessCheck(name string, checker Checker) error { - return h.liveness.RegisterCheck(name, checker) +func (h *health) RegisterLivenessCheck(name string, checker Checker, tags ...string) error { + return h.liveness.RegisterCheck(name, checker, tags...) } -func (h *health) Readiness() (map[string]Result, bool) { - results, healthy := h.readiness.Results() +func (h *health) Readiness(tags ...string) (map[string]Result, bool) { + results, healthy := h.readiness.Results(tags...) if !healthy { h.log.Warn("failing readiness check", zap.Reflect("reason", results), @@ -88,8 +100,8 @@ func (h *health) Readiness() (map[string]Result, bool) { return results, healthy } -func (h *health) Health() (map[string]Result, bool) { - results, healthy := h.health.Results() +func (h *health) Health(tags ...string) (map[string]Result, bool) { + results, healthy := h.health.Results(tags...) if !healthy { h.log.Warn("failing health check", zap.Reflect("reason", results), @@ -98,8 +110,8 @@ func (h *health) Health() (map[string]Result, bool) { return results, healthy } -func (h *health) Liveness() (map[string]Result, bool) { - results, healthy := h.liveness.Results() +func (h *health) Liveness(tags ...string) (map[string]Result, bool) { + results, healthy := h.liveness.Results(tags...) 
if !healthy { h.log.Warn("failing liveness check", zap.Reflect("reason", results), @@ -108,10 +120,10 @@ func (h *health) Liveness() (map[string]Result, bool) { return results, healthy } -func (h *health) Start(freq time.Duration) { - h.readiness.Start(freq) - h.health.Start(freq) - h.liveness.Start(freq) +func (h *health) Start(ctx context.Context, freq time.Duration) { + h.readiness.Start(ctx, freq) + h.health.Start(ctx, freq) + h.liveness.Start(ctx, freq) } func (h *health) Stop() { diff --git a/avalanchego/api/health/health_test.go b/avalanchego/api/health/health_test.go index 34a3c317..d8d13331 100644 --- a/avalanchego/api/health/health_test.go +++ b/avalanchego/api/health/health_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health import ( + "context" "errors" "fmt" "sync" @@ -19,44 +20,38 @@ import ( ) const ( - checkFreq = time.Millisecond - awaitFreq = 50 * time.Microsecond + checkFreq = time.Millisecond + awaitFreq = 50 * time.Microsecond + awaitTimeout = 30 * time.Second ) -func awaitReadiness(r Reporter) { - for { - _, ready := r.Readiness() - if ready { - return - } - time.Sleep(awaitFreq) - } +var errUnhealthy = errors.New("unhealthy") + +func awaitReadiness(t *testing.T, r Reporter, ready bool) { + require.Eventually(t, func() bool { + _, ok := r.Readiness() + return ok == ready + }, awaitTimeout, awaitFreq) } -func awaitHealthy(r Reporter, healthy bool) { - for { +func awaitHealthy(t *testing.T, r Reporter, healthy bool) { + require.Eventually(t, func() bool { _, ok := r.Health() - if ok == healthy { - return - } - time.Sleep(awaitFreq) - } + return ok == healthy + }, awaitTimeout, awaitFreq) } -func awaitLiveness(r Reporter, liveness bool) { - for { +func awaitLiveness(t *testing.T, r Reporter, liveness bool) { + require.Eventually(t, func() bool { _, ok := r.Liveness() - if ok == liveness 
{ - return - } - time.Sleep(awaitFreq) - } + return ok == liveness + }, awaitTimeout, awaitFreq) } func TestDuplicatedRegistations(t *testing.T) { require := require.New(t) - check := CheckerFunc(func() (interface{}, error) { + check := CheckerFunc(func(context.Context) (interface{}, error) { return "", nil }) @@ -82,7 +77,7 @@ func TestDuplicatedRegistations(t *testing.T) { func TestDefaultFailing(t *testing.T) { require := require.New(t) - check := CheckerFunc(func() (interface{}, error) { + check := CheckerFunc(func(context.Context) (interface{}, error) { return "", nil }) @@ -126,7 +121,7 @@ func TestDefaultFailing(t *testing.T) { func TestPassingChecks(t *testing.T) { require := require.New(t) - check := CheckerFunc(func() (interface{}, error) { + check := CheckerFunc(func(context.Context) (interface{}, error) { return "", nil }) @@ -140,11 +135,11 @@ func TestPassingChecks(t *testing.T) { err = h.RegisterLivenessCheck("check", check) require.NoError(err) - h.Start(checkFreq) + h.Start(context.Background(), checkFreq) defer h.Stop() { - awaitReadiness(h) + awaitReadiness(t, h, true) readinessResult, readiness := h.Readiness() require.Len(readinessResult, 1) @@ -158,7 +153,7 @@ func TestPassingChecks(t *testing.T) { } { - awaitHealthy(h, true) + awaitHealthy(t, h, true) healthResult, health := h.Health() require.Len(healthResult, 1) @@ -172,7 +167,7 @@ func TestPassingChecks(t *testing.T) { } { - awaitLiveness(h, true) + awaitLiveness(t, h, true) livenessResult, liveness := h.Liveness() require.Len(livenessResult, 1) @@ -189,13 +184,10 @@ func TestPassingChecks(t *testing.T) { func TestPassingThenFailingChecks(t *testing.T) { require := require.New(t) - var ( - shouldCheckErr utils.AtomicBool - checkErr = errors.New("unhealthy") - ) - check := CheckerFunc(func() (interface{}, error) { - if shouldCheckErr.GetValue() { - return checkErr.Error(), checkErr + var shouldCheckErr utils.Atomic[bool] + check := CheckerFunc(func(context.Context) (interface{}, error) { + 
if shouldCheckErr.Get() { + return errUnhealthy.Error(), errUnhealthy } return "", nil }) @@ -210,12 +202,12 @@ func TestPassingThenFailingChecks(t *testing.T) { err = h.RegisterLivenessCheck("check", check) require.NoError(err) - h.Start(checkFreq) + h.Start(context.Background(), checkFreq) defer h.Stop() - awaitReadiness(h) - awaitHealthy(h, true) - awaitLiveness(h, true) + awaitReadiness(t, h, true) + awaitHealthy(t, h, true) + awaitLiveness(t, h, true) { _, readiness := h.Readiness() @@ -228,10 +220,10 @@ func TestPassingThenFailingChecks(t *testing.T) { require.True(liveness) } - shouldCheckErr.SetValue(true) + shouldCheckErr.Set(true) - awaitHealthy(h, false) - awaitLiveness(h, false) + awaitHealthy(t, h, false) + awaitLiveness(t, h, false) { // Notice that Readiness is a monotonic check - so it still reports @@ -254,14 +246,14 @@ func TestDeadlockRegression(t *testing.T) { require.NoError(err) var lock sync.Mutex - check := CheckerFunc(func() (interface{}, error) { + check := CheckerFunc(func(context.Context) (interface{}, error) { lock.Lock() time.Sleep(time.Nanosecond) lock.Unlock() return "", nil }) - h.Start(time.Nanosecond) + h.Start(context.Background(), time.Nanosecond) defer h.Stop() for i := 0; i < 1000; i++ { @@ -271,5 +263,151 @@ func TestDeadlockRegression(t *testing.T) { require.NoError(err) } - awaitHealthy(h, true) + awaitHealthy(t, h, true) +} + +func TestTags(t *testing.T) { + require := require.New(t) + + check := CheckerFunc(func(context.Context) (interface{}, error) { + return "", nil + }) + + h, err := New(logging.NoLog{}, prometheus.NewRegistry()) + require.NoError(err) + err = h.RegisterHealthCheck("check1", check) + require.NoError(err) + err = h.RegisterHealthCheck("check2", check, "tag1") + require.NoError(err) + err = h.RegisterHealthCheck("check3", check, "tag2") + require.NoError(err) + err = h.RegisterHealthCheck("check4", check, "tag1", "tag2") + require.NoError(err) + err = h.RegisterHealthCheck("check5", check, GlobalTag) + 
require.NoError(err) + + // default checks + { + healthResult, health := h.Health() + require.Len(healthResult, 5) + require.Contains(healthResult, "check1") + require.Contains(healthResult, "check2") + require.Contains(healthResult, "check3") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.False(health) + + healthResult, health = h.Health("tag1") + require.Len(healthResult, 3) + require.Contains(healthResult, "check2") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.False(health) + + healthResult, health = h.Health("tag1", "tag2") + require.Len(healthResult, 4) + require.Contains(healthResult, "check2") + require.Contains(healthResult, "check3") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.False(health) + + healthResult, health = h.Health("nonExistentTag") + require.Len(healthResult, 1) + require.Contains(healthResult, "check5") + require.False(health) + + healthResult, health = h.Health("tag1", "tag2", "nonExistentTag") + require.Len(healthResult, 4) + require.Contains(healthResult, "check2") + require.Contains(healthResult, "check3") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.False(health) + } + + h.Start(context.Background(), checkFreq) + + awaitHealthy(t, h, true) + + { + healthResult, health := h.Health() + require.Len(healthResult, 5) + require.Contains(healthResult, "check1") + require.Contains(healthResult, "check2") + require.Contains(healthResult, "check3") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.True(health) + + healthResult, health = h.Health("tag1") + require.Len(healthResult, 3) + require.Contains(healthResult, "check2") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.True(health) + + healthResult, health = h.Health("tag1", "tag2") + 
require.Len(healthResult, 4) + require.Contains(healthResult, "check2") + require.Contains(healthResult, "check3") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.True(health) + + healthResult, health = h.Health("nonExistentTag") + require.Len(healthResult, 1) + require.Contains(healthResult, "check5") + require.True(health) + + healthResult, health = h.Health("tag1", "tag2", "nonExistentTag") + require.Len(healthResult, 4) + require.Contains(healthResult, "check2") + require.Contains(healthResult, "check3") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.True(health) + } + + // stop the health check + h.Stop() + + { + // now we'll add a new check which is unhealthy by default (notYetRunResult) + err = h.RegisterHealthCheck("check6", check, "tag1") + require.NoError(err) + + awaitHealthy(t, h, false) + + healthResult, health := h.Health("tag1") + require.Len(healthResult, 4) + require.Contains(healthResult, "check2") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.Contains(healthResult, "check6") + require.Equal(notYetRunResult, healthResult["check6"]) + require.False(health) + + healthResult, health = h.Health("tag2") + require.Len(healthResult, 3) + require.Contains(healthResult, "check3") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.True(health) + + // add global tag + err = h.RegisterHealthCheck("check7", check, GlobalTag) + require.NoError(err) + + awaitHealthy(t, h, false) + + healthResult, health = h.Health("tag2") + require.Len(healthResult, 4) + require.Contains(healthResult, "check3") + require.Contains(healthResult, "check4") + require.Contains(healthResult, "check5") + require.Contains(healthResult, "check7") + require.Equal(notYetRunResult, healthResult["check7"]) + require.False(health) + } } diff --git a/avalanchego/api/health/metrics.go 
b/avalanchego/api/health/metrics.go index 1d4948a5..f735ec14 100644 --- a/avalanchego/api/health/metrics.go +++ b/avalanchego/api/health/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/avalanchego/api/health/mocks/Client.go b/avalanchego/api/health/mocks/Client.go deleted file mode 100644 index b439fbde..00000000 --- a/avalanchego/api/health/mocks/Client.go +++ /dev/null @@ -1,137 +0,0 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. - -package mocks - -import ( - context "context" - - health "github.com/ava-labs/avalanchego/api/health" - mock "github.com/stretchr/testify/mock" - - rpc "github.com/ava-labs/avalanchego/utils/rpc" - - time "time" -) - -// Client is an autogenerated mock type for the Client type -type Client struct { - mock.Mock -} - -// AwaitHealthy provides a mock function with given fields: ctx, freq, options -func (_m *Client) AwaitHealthy(ctx context.Context, freq time.Duration, options ...rpc.Option) (bool, error) { - _va := make([]interface{}, len(options)) - for _i := range options { - _va[_i] = options[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, freq) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 bool - if rf, ok := ret.Get(0).(func(context.Context, time.Duration, ...rpc.Option) bool); ok { - r0 = rf(ctx, freq, options...) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, time.Duration, ...rpc.Option) error); ok { - r1 = rf(ctx, freq, options...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Health provides a mock function with given fields: _a0, _a1 -func (_m *Client) Health(_a0 context.Context, _a1 ...rpc.Option) (*health.APIHealthReply, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *health.APIHealthReply - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) *health.APIHealthReply); ok { - r0 = rf(_a0, _a1...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*health.APIHealthReply) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Liveness provides a mock function with given fields: _a0, _a1 -func (_m *Client) Liveness(_a0 context.Context, _a1 ...rpc.Option) (*health.APIHealthReply, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *health.APIHealthReply - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) *health.APIHealthReply); ok { - r0 = rf(_a0, _a1...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*health.APIHealthReply) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Readiness provides a mock function with given fields: _a0, _a1 -func (_m *Client) Readiness(_a0 context.Context, _a1 ...rpc.Option) (*health.APIHealthReply, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 *health.APIHealthReply - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) *health.APIHealthReply); ok { - r0 = rf(_a0, _a1...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*health.APIHealthReply) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/avalanchego/api/health/result.go b/avalanchego/api/health/result.go index 8fc3e1b1..f5253c66 100644 --- a/avalanchego/api/health/result.go +++ b/avalanchego/api/health/result.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health diff --git a/avalanchego/api/health/service.go b/avalanchego/api/health/service.go index 64780c55..368d986c 100644 --- a/avalanchego/api/health/service.go +++ b/avalanchego/api/health/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health @@ -6,6 +6,8 @@ package health import ( "net/http" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/utils/logging" ) @@ -14,29 +16,47 @@ type Service struct { health Reporter } -// APIHealthReply is the response for Health -type APIHealthReply struct { +// APIReply is the response for Readiness, Health, and Liveness. +type APIReply struct { Checks map[string]Result `json:"checks"` Healthy bool `json:"healthy"` } +// APIArgs is the arguments for Readiness, Health, and Liveness. 
+type APIArgs struct { + Tags []string `json:"tags"` +} + // Readiness returns if the node has finished initialization -func (s *Service) Readiness(_ *http.Request, _ *struct{}, reply *APIHealthReply) error { - s.log.Debug("Health.readiness called") - reply.Checks, reply.Healthy = s.health.Readiness() +func (s *Service) Readiness(_ *http.Request, args *APIArgs, reply *APIReply) error { + s.log.Debug("API called", + zap.String("service", "health"), + zap.String("method", "readiness"), + zap.Strings("tags", args.Tags), + ) + reply.Checks, reply.Healthy = s.health.Readiness(args.Tags...) return nil } // Health returns a summation of the health of the node -func (s *Service) Health(_ *http.Request, _ *struct{}, reply *APIHealthReply) error { - s.log.Debug("Health.health called") - reply.Checks, reply.Healthy = s.health.Health() +func (s *Service) Health(_ *http.Request, args *APIArgs, reply *APIReply) error { + s.log.Debug("API called", + zap.String("service", "health"), + zap.String("method", "health"), + zap.Strings("tags", args.Tags), + ) + + reply.Checks, reply.Healthy = s.health.Health(args.Tags...) return nil } // Liveness returns if the node is in need of a restart -func (s *Service) Liveness(_ *http.Request, _ *struct{}, reply *APIHealthReply) error { - s.log.Debug("Health.liveness called") - reply.Checks, reply.Healthy = s.health.Liveness() +func (s *Service) Liveness(_ *http.Request, args *APIArgs, reply *APIReply) error { + s.log.Debug("API called", + zap.String("service", "health"), + zap.String("method", "liveness"), + zap.Strings("tags", args.Tags), + ) + reply.Checks, reply.Healthy = s.health.Liveness(args.Tags...) return nil } diff --git a/avalanchego/api/health/service_test.go b/avalanchego/api/health/service_test.go index ff384067..c12bce1f 100644 --- a/avalanchego/api/health/service_test.go +++ b/avalanchego/api/health/service_test.go @@ -1,22 +1,25 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health import ( + "context" + "net/http" "testing" "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" ) func TestServiceResponses(t *testing.T) { require := require.New(t) - check := CheckerFunc(func() (interface{}, error) { + check := CheckerFunc(func(context.Context) (interface{}, error) { return "", nil }) @@ -36,8 +39,8 @@ func TestServiceResponses(t *testing.T) { require.NoError(err) { - reply := APIHealthReply{} - err = s.Readiness(nil, nil, &reply) + reply := APIReply{} + err = s.Readiness(nil, &APIArgs{}, &reply) require.NoError(err) require.Len(reply.Checks, 1) @@ -47,8 +50,8 @@ func TestServiceResponses(t *testing.T) { } { - reply := APIHealthReply{} - err = s.Health(nil, nil, &reply) + reply := APIReply{} + err = s.Health(nil, &APIArgs{}, &reply) require.NoError(err) require.Len(reply.Checks, 1) @@ -58,8 +61,8 @@ func TestServiceResponses(t *testing.T) { } { - reply := APIHealthReply{} - err = s.Liveness(nil, nil, &reply) + reply := APIReply{} + err = s.Liveness(nil, &APIArgs{}, &reply) require.NoError(err) require.Len(reply.Checks, 1) @@ -68,16 +71,16 @@ func TestServiceResponses(t *testing.T) { require.False(reply.Healthy) } - h.Start(checkFreq) + h.Start(context.Background(), checkFreq) defer h.Stop() - awaitReadiness(h) - awaitHealthy(h, true) - awaitLiveness(h, true) + awaitReadiness(t, h, true) + awaitHealthy(t, h, true) + awaitLiveness(t, h, true) { - reply := APIHealthReply{} - err = s.Readiness(nil, nil, &reply) + reply := APIReply{} + err = s.Readiness(nil, &APIArgs{}, &reply) require.NoError(err) result := reply.Checks["check"] @@ -88,8 +91,8 @@ func TestServiceResponses(t *testing.T) { } { - reply := APIHealthReply{} - err = s.Health(nil, nil, &reply) + reply := APIReply{} + err = s.Health(nil, 
&APIArgs{}, &reply) require.NoError(err) result := reply.Checks["check"] @@ -100,8 +103,8 @@ func TestServiceResponses(t *testing.T) { } { - reply := APIHealthReply{} - err = s.Liveness(nil, nil, &reply) + reply := APIReply{} + err = s.Liveness(nil, &APIArgs{}, &reply) require.NoError(err) result := reply.Checks["check"] @@ -111,3 +114,130 @@ func TestServiceResponses(t *testing.T) { require.True(reply.Healthy) } } + +func TestServiceTagResponse(t *testing.T) { + check := CheckerFunc(func(context.Context) (interface{}, error) { + return "", nil + }) + + subnetID1 := ids.GenerateTestID() + subnetID2 := ids.GenerateTestID() + + // test cases + type testMethods struct { + name string + register func(Health, string, Checker, ...string) error + check func(*Service, *http.Request, *APIArgs, *APIReply) error + await func(*testing.T, Reporter, bool) + } + + tests := []testMethods{ + { + name: "Readiness", + register: func(h Health, s1 string, c Checker, s2 ...string) error { + return h.RegisterReadinessCheck(s1, c, s2...) + }, + check: func(s *Service, req *http.Request, a1 *APIArgs, a2 *APIReply) error { + return s.Readiness(req, a1, a2) + }, + await: awaitReadiness, + }, + { + name: "Health", + register: func(h Health, s1 string, c Checker, s2 ...string) error { + return h.RegisterHealthCheck(s1, c, s2...) + }, + check: func(s *Service, r *http.Request, a1 *APIArgs, a2 *APIReply) error { + return s.Health(r, a1, a2) + }, + await: awaitHealthy, + }, + { + name: "Liveness", + register: func(h Health, s1 string, c Checker, s2 ...string) error { + return h.RegisterLivenessCheck(s1, c, s2...) 
+ }, + check: func(s *Service, r *http.Request, a1 *APIArgs, a2 *APIReply) error { + return s.Liveness(r, a1, a2) + }, + await: awaitLiveness, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + h, err := New(logging.NoLog{}, prometheus.NewRegistry()) + require.NoError(err) + err = test.register(h, "check1", check) + require.NoError(err) + err = test.register(h, "check2", check, subnetID1.String()) + require.NoError(err) + err = test.register(h, "check3", check, subnetID2.String()) + require.NoError(err) + err = test.register(h, "check4", check, subnetID1.String(), subnetID2.String()) + require.NoError(err) + + s := &Service{ + log: logging.NoLog{}, + health: h, + } + + // default checks + { + reply := APIReply{} + err = test.check(s, nil, &APIArgs{}, &reply) + require.NoError(err) + require.Len(reply.Checks, 4) + require.Contains(reply.Checks, "check1") + require.Contains(reply.Checks, "check2") + require.Contains(reply.Checks, "check3") + require.Contains(reply.Checks, "check4") + require.Equal(notYetRunResult, reply.Checks["check1"]) + require.False(reply.Healthy) + + err = test.check(s, nil, &APIArgs{Tags: []string{subnetID1.String()}}, &reply) + require.NoError(err) + require.Len(reply.Checks, 2) + require.Contains(reply.Checks, "check2") + require.Contains(reply.Checks, "check4") + require.Equal(notYetRunResult, reply.Checks["check2"]) + require.False(reply.Healthy) + } + + h.Start(context.Background(), checkFreq) + + test.await(t, h, true) + + { + reply := APIReply{} + err = test.check(s, nil, &APIArgs{Tags: []string{subnetID1.String()}}, &reply) + require.NoError(err) + require.Len(reply.Checks, 2) + require.Contains(reply.Checks, "check2") + require.Contains(reply.Checks, "check4") + require.True(reply.Healthy) + } + + // stop the health check + h.Stop() + + { + // now we'll add a new check which is unhealthy by default (notYetRunResult) + err = test.register(h, "check5", check, 
subnetID1.String()) + require.NoError(err) + + reply := APIReply{} + err = test.check(s, nil, &APIArgs{Tags: []string{subnetID1.String()}}, &reply) + require.NoError(err) + require.Len(reply.Checks, 3) + require.Contains(reply.Checks, "check2") + require.Contains(reply.Checks, "check4") + require.Contains(reply.Checks, "check5") + require.Equal(notYetRunResult, reply.Checks["check5"]) + require.False(reply.Healthy) + } + }) + } +} diff --git a/avalanchego/api/health/worker.go b/avalanchego/api/health/worker.go index cb165b11..9db01b82 100644 --- a/avalanchego/api/health/worker.go +++ b/avalanchego/api/health/worker.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package health import ( + "context" "errors" "fmt" "sync" @@ -11,7 +12,10 @@ import ( "github.com/prometheus/client_golang/prometheus" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/set" ) var errDuplicateCheck = errors.New("duplicated check") @@ -23,9 +27,11 @@ type worker struct { resultsLock sync.RWMutex results map[string]Result + tags map[string]set.Set[string] // tag -> set of check names startOnce sync.Once closeOnce sync.Once + wg sync.WaitGroup closer chan struct{} } @@ -36,10 +42,11 @@ func newWorker(namespace string, registerer prometheus.Registerer) (*worker, err checks: make(map[string]Checker), results: make(map[string]Result), closer: make(chan struct{}), + tags: make(map[string]set.Set[string]), }, err } -func (w *worker) RegisterCheck(name string, checker Checker) error { +func (w *worker) RegisterCheck(name string, checker Checker, tags ...string) error { w.checksLock.Lock() defer w.checksLock.Unlock() @@ -53,51 +60,86 @@ func (w *worker) RegisterCheck(name string, checker Checker) error { w.checks[name] = checker w.results[name] = notYetRunResult + // Add the check to the tag + for 
_, tag := range tags { + names := w.tags[tag] + names.Add(name) + w.tags[tag] = names + } + // Whenever a new check is added - it is failing w.metrics.failingChecks.Inc() return nil } -func (w *worker) RegisterMonotonicCheck(name string, checker Checker) error { - var result utils.AtomicInterface - return w.RegisterCheck(name, CheckerFunc(func() (interface{}, error) { - details := result.GetValue() +func (w *worker) RegisterMonotonicCheck(name string, checker Checker, tags ...string) error { + var result utils.Atomic[any] + return w.RegisterCheck(name, CheckerFunc(func(ctx context.Context) (any, error) { + details := result.Get() if details != nil { return details, nil } - details, err := checker.HealthCheck() + details, err := checker.HealthCheck(ctx) if err == nil { - result.SetValue(details) + result.Set(details) } return details, err - })) + }), tags...) } -func (w *worker) Results() (map[string]Result, bool) { +func (w *worker) Results(tags ...string) (map[string]Result, bool) { w.resultsLock.RLock() defer w.resultsLock.RUnlock() results := make(map[string]Result, len(w.results)) healthy := true - for name, result := range w.results { - results[name] = result - healthy = healthy && result.Error == nil + + // if tags are specified, iterate through registered check names in the tag + if len(tags) > 0 { + names := set.Set[string]{} + // prepare tagSet for global tag + tagSet := set.NewSet[string](len(tags) + 1) + tagSet.Add(tags...) 
+ // we always want to include the global tag + tagSet.Add(GlobalTag) + for tag := range tagSet { + if set, ok := w.tags[tag]; ok { + names.Union(set) + } + } + for name := range names { + if result, ok := w.results[name]; ok { + results[name] = result + healthy = healthy && result.Error == nil + } + } + } else { // if tags are not specified, iterate through all registered check names + for name, result := range w.results { + results[name] = result + healthy = healthy && result.Error == nil + } } + return results, healthy } -func (w *worker) Start(freq time.Duration) { +func (w *worker) Start(ctx context.Context, freq time.Duration) { w.startOnce.Do(func() { + detachedCtx := utils.Detach(ctx) + w.wg.Add(1) go func() { ticker := time.NewTicker(freq) - defer ticker.Stop() + defer func() { + ticker.Stop() + w.wg.Done() + }() - w.runChecks() + w.runChecks(detachedCtx) for { select { case <-ticker.C: - w.runChecks() + w.runChecks(detachedCtx) case <-w.closer: return } @@ -109,30 +151,28 @@ func (w *worker) Start(freq time.Duration) { func (w *worker) Stop() { w.closeOnce.Do(func() { close(w.closer) + w.wg.Wait() }) } -func (w *worker) runChecks() { +func (w *worker) runChecks(ctx context.Context) { w.checksLock.RLock() // Copy the [w.checks] map to collect the checks that we will be running // during this iteration. If [w.checks] is modified during this iteration of // [runChecks], then the added check will not be run until the next // iteration. 
- checks := make(map[string]Checker, len(w.checks)) - for name, checker := range w.checks { - checks[name] = checker - } + checks := maps.Clone(w.checks) w.checksLock.RUnlock() var wg sync.WaitGroup wg.Add(len(checks)) for name, check := range checks { - go w.runCheck(&wg, name, check) + go w.runCheck(ctx, &wg, name, check) } wg.Wait() } -func (w *worker) runCheck(wg *sync.WaitGroup, name string, check Checker) { +func (w *worker) runCheck(ctx context.Context, wg *sync.WaitGroup, name string, check Checker) { defer wg.Done() start := time.Now() @@ -140,7 +180,7 @@ func (w *worker) runCheck(wg *sync.WaitGroup, name string, check Checker) { // To avoid any deadlocks when [RegisterCheck] is called with a lock // that is grabbed by [check.HealthCheck], we ensure that no locks // are held when [check.HealthCheck] is called. - details, err := check.HealthCheck() + details, err := check.HealthCheck(ctx) end := time.Now() result := Result{ diff --git a/avalanchego/api/info/client.go b/avalanchego/api/info/client.go index 07305ad2..f952f62a 100644 --- a/avalanchego/api/info/client.go +++ b/avalanchego/api/info/client.go @@ -1,19 +1,21 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package info import ( "context" + "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/rpc" "github.com/ava-labs/avalanchego/vms/platformvm/signer" ) -var _ Client = &client{} +var _ Client = (*client)(nil) -// Client interface for an Info API Client +// Client interface for an Info API Client. +// See also AwaitBootstrapped. 
type Client interface { GetNodeVersion(context.Context, ...rpc.Option) (*GetNodeVersionReply, error) GetNodeID(context.Context, ...rpc.Option) (ids.NodeID, *signer.ProofOfPossession, error) @@ -24,7 +26,7 @@ type Client interface { Peers(context.Context, ...rpc.Option) ([]Peer, error) IsBootstrapped(context.Context, string, ...rpc.Option) (bool, error) GetTxFee(context.Context, ...rpc.Option) (*GetTxFeeResponse, error) - Uptime(context.Context, ...rpc.Option) (*UptimeResponse, error) + Uptime(context.Context, ids.ID, ...rpc.Option) (*UptimeResponse, error) GetVMs(context.Context, ...rpc.Option) (map[ids.ID][]string, error) } @@ -36,44 +38,43 @@ type client struct { // NewClient returns a new Info API Client func NewClient(uri string) Client { return &client{requester: rpc.NewEndpointRequester( - uri+"/ext/info", - "info", + uri + "/ext/info", )} } func (c *client) GetNodeVersion(ctx context.Context, options ...rpc.Option) (*GetNodeVersionReply, error) { res := &GetNodeVersionReply{} - err := c.requester.SendRequest(ctx, "getNodeVersion", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "info.getNodeVersion", struct{}{}, res, options...) return res, err } func (c *client) GetNodeID(ctx context.Context, options ...rpc.Option) (ids.NodeID, *signer.ProofOfPossession, error) { res := &GetNodeIDReply{} - err := c.requester.SendRequest(ctx, "getNodeID", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "info.getNodeID", struct{}{}, res, options...) return res.NodeID, res.NodePOP, err } func (c *client) GetNodeIP(ctx context.Context, options ...rpc.Option) (string, error) { res := &GetNodeIPReply{} - err := c.requester.SendRequest(ctx, "getNodeIP", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "info.getNodeIP", struct{}{}, res, options...) 
return res.IP, err } func (c *client) GetNetworkID(ctx context.Context, options ...rpc.Option) (uint32, error) { res := &GetNetworkIDReply{} - err := c.requester.SendRequest(ctx, "getNetworkID", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "info.getNetworkID", struct{}{}, res, options...) return uint32(res.NetworkID), err } func (c *client) GetNetworkName(ctx context.Context, options ...rpc.Option) (string, error) { res := &GetNetworkNameReply{} - err := c.requester.SendRequest(ctx, "getNetworkName", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "info.getNetworkName", struct{}{}, res, options...) return res.NetworkName, err } func (c *client) GetBlockchainID(ctx context.Context, alias string, options ...rpc.Option) (ids.ID, error) { res := &GetBlockchainIDReply{} - err := c.requester.SendRequest(ctx, "getBlockchainID", &GetBlockchainIDArgs{ + err := c.requester.SendRequest(ctx, "info.getBlockchainID", &GetBlockchainIDArgs{ Alias: alias, }, res, options...) return res.BlockchainID, err @@ -81,13 +82,13 @@ func (c *client) GetBlockchainID(ctx context.Context, alias string, options ...r func (c *client) Peers(ctx context.Context, options ...rpc.Option) ([]Peer, error) { res := &PeersReply{} - err := c.requester.SendRequest(ctx, "peers", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "info.peers", struct{}{}, res, options...) return res.Peers, err } func (c *client) IsBootstrapped(ctx context.Context, chainID string, options ...rpc.Option) (bool, error) { res := &IsBootstrappedResponse{} - err := c.requester.SendRequest(ctx, "isBootstrapped", &IsBootstrappedArgs{ + err := c.requester.SendRequest(ctx, "info.isBootstrapped", &IsBootstrappedArgs{ Chain: chainID, }, res, options...) return res.IsBootstrapped, err @@ -95,18 +96,42 @@ func (c *client) IsBootstrapped(ctx context.Context, chainID string, options ... 
func (c *client) GetTxFee(ctx context.Context, options ...rpc.Option) (*GetTxFeeResponse, error) { res := &GetTxFeeResponse{} - err := c.requester.SendRequest(ctx, "getTxFee", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "info.getTxFee", struct{}{}, res, options...) return res, err } -func (c *client) Uptime(ctx context.Context, options ...rpc.Option) (*UptimeResponse, error) { +func (c *client) Uptime(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (*UptimeResponse, error) { res := &UptimeResponse{} - err := c.requester.SendRequest(ctx, "uptime", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "info.uptime", &UptimeRequest{ + SubnetID: subnetID, + }, res, options...) return res, err } func (c *client) GetVMs(ctx context.Context, options ...rpc.Option) (map[ids.ID][]string, error) { res := &GetVMsReply{} - err := c.requester.SendRequest(ctx, "getVMs", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "info.getVMs", struct{}{}, res, options...) return res.VMs, err } + +// AwaitBootstrapped polls the node every [freq] to check if [chainID] has +// finished bootstrapping. Returns true once [chainID] reports that it has +// finished bootstrapping. +// Only returns an error if [ctx] returns an error. +func AwaitBootstrapped(ctx context.Context, c Client, chainID string, freq time.Duration, options ...rpc.Option) (bool, error) { + ticker := time.NewTicker(freq) + defer ticker.Stop() + + for { + res, err := c.IsBootstrapped(ctx, chainID, options...) + if err == nil && res { + return true, nil + } + + select { + case <-ticker.C: + case <-ctx.Done(): + return false, ctx.Err() + } + } +} diff --git a/avalanchego/api/info/client_test.go b/avalanchego/api/info/client_test.go new file mode 100644 index 00000000..292a1841 --- /dev/null +++ b/avalanchego/api/info/client_test.go @@ -0,0 +1,71 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package info + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/rpc" +) + +type mockClient struct { + reply IsBootstrappedResponse + err error + onCall func() +} + +func (mc *mockClient) SendRequest(_ context.Context, _ string, _ interface{}, replyIntf interface{}, _ ...rpc.Option) error { + reply := replyIntf.(*IsBootstrappedResponse) + *reply = mc.reply + mc.onCall() + return mc.err +} + +func TestNewClient(t *testing.T) { + require := require.New(t) + + c := NewClient("") + require.NotNil(c) +} + +func TestClient(t *testing.T) { + require := require.New(t) + + mc := &mockClient{ + reply: IsBootstrappedResponse{true}, + err: nil, + onCall: func() {}, + } + c := &client{ + requester: mc, + } + + { + bootstrapped, err := c.IsBootstrapped(context.Background(), "X") + require.NoError(err) + require.True(bootstrapped) + } + + mc.reply.IsBootstrapped = false + + { + bootstrapped, err := c.IsBootstrapped(context.Background(), "X") + require.NoError(err) + require.False(bootstrapped) + } + + mc.onCall = func() { + mc.reply.IsBootstrapped = true + } + + { + bootstrapped, err := AwaitBootstrapped(context.Background(), c, "X", time.Microsecond) + require.NoError(err) + require.True(bootstrapped) + } +} diff --git a/avalanchego/api/info/mocks/Client.go b/avalanchego/api/info/mocks/Client.go deleted file mode 100644 index 8a12defb..00000000 --- a/avalanchego/api/info/mocks/Client.go +++ /dev/null @@ -1,339 +0,0 @@ -// Code generated by mockery v2.10.6. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - - info "github.com/ava-labs/avalanchego/api/info" - ids "github.com/ava-labs/avalanchego/ids" - - mock "github.com/stretchr/testify/mock" - - rpc "github.com/ava-labs/avalanchego/utils/rpc" -) - -// Client is an autogenerated mock type for the Client type -type Client struct { - mock.Mock -} - -// GetBlockchainID provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Client) GetBlockchainID(_a0 context.Context, _a1 string, _a2 ...rpc.Option) (ids.ID, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 ids.ID - if rf, ok := ret.Get(0).(func(context.Context, string, ...rpc.Option) ids.ID); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(ids.ID) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNetworkID provides a mock function with given fields: _a0, _a1 -func (_m *Client) GetNetworkID(_a0 context.Context, _a1 ...rpc.Option) (uint32, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 uint32 - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) uint32); ok { - r0 = rf(_a0, _a1...) - } else { - r0 = ret.Get(0).(uint32) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNetworkName provides a mock function with given fields: _a0, _a1 -func (_m *Client) GetNetworkName(_a0 context.Context, _a1 ...rpc.Option) (string, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 string - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) string); ok { - r0 = rf(_a0, _a1...) - } else { - r0 = ret.Get(0).(string) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNodeID provides a mock function with given fields: _a0, _a1 -func (_m *Client) GetNodeID(_a0 context.Context, _a1 ...rpc.Option) (string, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 string - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) string); ok { - r0 = rf(_a0, _a1...) - } else { - r0 = ret.Get(0).(string) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNodeIP provides a mock function with given fields: _a0, _a1 -func (_m *Client) GetNodeIP(_a0 context.Context, _a1 ...rpc.Option) (string, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 string - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) string); ok { - r0 = rf(_a0, _a1...) 
- } else { - r0 = ret.Get(0).(string) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetNodeVersion provides a mock function with given fields: _a0, _a1 -func (_m *Client) GetNodeVersion(_a0 context.Context, _a1 ...rpc.Option) (*info.GetNodeVersionReply, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *info.GetNodeVersionReply - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) *info.GetNodeVersionReply); ok { - r0 = rf(_a0, _a1...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*info.GetNodeVersionReply) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetTxFee provides a mock function with given fields: _a0, _a1 -func (_m *Client) GetTxFee(_a0 context.Context, _a1 ...rpc.Option) (*info.GetTxFeeResponse, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *info.GetTxFeeResponse - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) *info.GetTxFeeResponse); ok { - r0 = rf(_a0, _a1...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*info.GetTxFeeResponse) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetVMs provides a mock function with given fields: _a0, _a1 -func (_m *Client) GetVMs(_a0 context.Context, _a1 ...rpc.Option) (map[ids.ID][]string, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 map[ids.ID][]string - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) map[ids.ID][]string); ok { - r0 = rf(_a0, _a1...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[ids.ID][]string) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IsBootstrapped provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Client) IsBootstrapped(_a0 context.Context, _a1 string, _a2 ...rpc.Option) (bool, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 bool - if rf, ok := ret.Get(0).(func(context.Context, string, ...rpc.Option) bool); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Peers provides a mock function with given fields: _a0, _a1 -func (_m *Client) Peers(_a0 context.Context, _a1 ...rpc.Option) ([]info.Peer, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - var r0 []info.Peer - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) []info.Peer); ok { - r0 = rf(_a0, _a1...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]info.Peer) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Uptime provides a mock function with given fields: _a0, _a1 -func (_m *Client) Uptime(_a0 context.Context, _a1 ...rpc.Option) (*info.UptimeResponse, error) { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 *info.UptimeResponse - if rf, ok := ret.Get(0).(func(context.Context, ...rpc.Option) *info.UptimeResponse); ok { - r0 = rf(_a0, _a1...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*info.UptimeResponse) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, ...rpc.Option) error); ok { - r1 = rf(_a0, _a1...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/avalanchego/api/info/service.go b/avalanchego/api/info/service.go index 59d2e574..b9b12062 100644 --- a/avalanchego/api/info/service.go +++ b/avalanchego/api/info/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package info @@ -10,6 +10,8 @@ import ( "github.com/gorilla/rpc/v2" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network" @@ -26,10 +28,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/signer" ) -var ( - errNoChainProvided = errors.New("argument 'chain' not given") - errNotValidator = errors.New("this is not a validator node") -) +var errNoChainProvided = errors.New("argument 'chain' not given") // Info is the API service for unprivileged info on a node type Info struct { @@ -87,28 +86,36 @@ func NewService( }, "info"); err != nil { return nil, err } - return &common.HTTPHandler{Handler: newServer}, nil + return &common.HTTPHandler{ + LockOptions: common.NoLock, + Handler: newServer, + }, nil } // GetNodeVersionReply are the results from calling GetNodeVersion type GetNodeVersionReply struct { - Version string `json:"version"` - DatabaseVersion string `json:"databaseVersion"` - GitCommit string `json:"gitCommit"` - VMVersions map[string]string `json:"vmVersions"` + Version string `json:"version"` + DatabaseVersion string `json:"databaseVersion"` + RPCProtocolVersion json.Uint32 `json:"rpcProtocolVersion"` + GitCommit string `json:"gitCommit"` + VMVersions map[string]string `json:"vmVersions"` } // GetNodeVersion returns the version this node is running -func (service *Info) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error { - service.log.Debug("Info: GetNodeVersion called") +func (i *Info) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "getNodeVersion"), + ) - vmVersions, err := service.vmManager.Versions() + vmVersions, err := i.vmManager.Versions() if err != nil { return err } - reply.Version = service.Version.String() + reply.Version = i.Version.String() reply.DatabaseVersion = version.CurrentDatabase.String() + 
reply.RPCProtocolVersion = json.Uint32(version.RPCChainVMProtocol) reply.GitCommit = version.GitCommit reply.VMVersions = vmVersions return nil @@ -121,11 +128,14 @@ type GetNodeIDReply struct { } // GetNodeID returns the node ID of this node -func (service *Info) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error { - service.log.Debug("Info: GetNodeID called") +func (i *Info) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "getNodeID"), + ) - reply.NodeID = service.NodeID - reply.NodePOP = service.NodePOP + reply.NodeID = i.NodeID + reply.NodePOP = i.NodePOP return nil } @@ -140,18 +150,24 @@ type GetNodeIPReply struct { } // GetNodeIP returns the IP of this node -func (service *Info) GetNodeIP(_ *http.Request, _ *struct{}, reply *GetNodeIPReply) error { - service.log.Debug("Info: GetNodeIP called") +func (i *Info) GetNodeIP(_ *http.Request, _ *struct{}, reply *GetNodeIPReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "getNodeIP"), + ) - reply.IP = service.myIP.IPPort().String() + reply.IP = i.myIP.IPPort().String() return nil } // GetNetworkID returns the network ID this node is running on -func (service *Info) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error { - service.log.Debug("Info: GetNetworkID called") +func (i *Info) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "getNetworkID"), + ) - reply.NetworkID = json.Uint32(service.NetworkID) + reply.NetworkID = json.Uint32(i.NetworkID) return nil } @@ -161,10 +177,13 @@ type GetNetworkNameReply struct { } // GetNetworkName returns the network name this node is running on -func (service *Info) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error { - service.log.Debug("Info: 
GetNetworkName called") +func (i *Info) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "getNetworkName"), + ) - reply.NetworkName = constants.NetworkName(service.NetworkID) + reply.NetworkName = constants.NetworkName(i.NetworkID) return nil } @@ -179,10 +198,13 @@ type GetBlockchainIDReply struct { } // GetBlockchainID returns the blockchain ID that resolves the alias that was supplied -func (service *Info) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error { - service.log.Debug("Info: GetBlockchainID called") +func (i *Info) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "getBlockchainID"), + ) - bID, err := service.chainManager.Lookup(args.Alias) + bID, err := i.chainManager.Lookup(args.Alias) reply.BlockchainID = bID return err } @@ -207,15 +229,18 @@ type PeersReply struct { } // Peers returns the list of current validators -func (service *Info) Peers(_ *http.Request, args *PeersArgs, reply *PeersReply) error { - service.log.Debug("Info: Peers called") +func (i *Info) Peers(_ *http.Request, args *PeersArgs, reply *PeersReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "peers"), + ) - peers := service.networking.PeerInfo(args.NodeIDs) + peers := i.networking.PeerInfo(args.NodeIDs) peerInfo := make([]Peer, len(peers)) - for i, peer := range peers { - peerInfo[i] = Peer{ + for index, peer := range peers { + peerInfo[index] = Peer{ Info: peer, - Benched: service.benchlist.GetBenched(peer.ID), + Benched: i.benchlist.GetBenched(peer.ID), } } @@ -239,19 +264,21 @@ type IsBootstrappedResponse struct { // IsBootstrapped returns nil and sets [reply.IsBootstrapped] == true iff [args.Chain] exists and is done bootstrapping // Returns 
an error if the chain doesn't exist -func (service *Info) IsBootstrapped(_ *http.Request, args *IsBootstrappedArgs, reply *IsBootstrappedResponse) error { - service.log.Debug("Info: IsBootstrapped called", +func (i *Info) IsBootstrapped(_ *http.Request, args *IsBootstrappedArgs, reply *IsBootstrappedResponse) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "isBootstrapped"), logging.UserString("chain", args.Chain), ) if args.Chain == "" { return errNoChainProvided } - chainID, err := service.chainManager.Lookup(args.Chain) + chainID, err := i.chainManager.Lookup(args.Chain) if err != nil { return fmt.Errorf("there is no chain with alias/ID '%s'", args.Chain) } - reply.IsBootstrapped = service.chainManager.IsBootstrapped(chainID) + reply.IsBootstrapped = i.chainManager.IsBootstrapped(chainID) return nil } @@ -272,11 +299,20 @@ type UptimeResponse struct { WeightedAveragePercentage json.Float64 `json:"weightedAveragePercentage"` } -func (service *Info) Uptime(_ *http.Request, _ *struct{}, reply *UptimeResponse) error { - service.log.Debug("Info: Uptime called") - result, isValidator := service.networking.NodeUptime() - if !isValidator { - return errNotValidator +type UptimeRequest struct { + // if omitted, defaults to primary network + SubnetID ids.ID `json:"subnetID"` +} + +func (i *Info) Uptime(_ *http.Request, args *UptimeRequest, reply *UptimeResponse) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "uptime"), + ) + + result, err := i.networking.NodeUptime(args.SubnetID) + if err != nil { + return fmt.Errorf("couldn't get node uptime: %w", err) } reply.WeightedAveragePercentage = json.Float64(result.WeightedAveragePercentage) reply.RewardingStakePercentage = json.Float64(result.RewardingStakePercentage) @@ -284,9 +320,7 @@ func (service *Info) Uptime(_ *http.Request, _ *struct{}, reply *UptimeResponse) } type GetTxFeeResponse struct { - TxFee json.Uint64 `json:"txFee"` - // 
TODO: remove [CreationTxFee] after enough time for dependencies to update - CreationTxFee json.Uint64 `json:"creationTxFee"` + TxFee json.Uint64 `json:"txFee"` CreateAssetTxFee json.Uint64 `json:"createAssetTxFee"` CreateSubnetTxFee json.Uint64 `json:"createSubnetTxFee"` TransformSubnetTxFee json.Uint64 `json:"transformSubnetTxFee"` @@ -298,17 +332,21 @@ type GetTxFeeResponse struct { } // GetTxFee returns the transaction fee in nAVAX. -func (service *Info) GetTxFee(_ *http.Request, args *struct{}, reply *GetTxFeeResponse) error { - reply.TxFee = json.Uint64(service.TxFee) - reply.CreationTxFee = json.Uint64(service.CreateAssetTxFee) - reply.CreateAssetTxFee = json.Uint64(service.CreateAssetTxFee) - reply.CreateSubnetTxFee = json.Uint64(service.CreateSubnetTxFee) - reply.TransformSubnetTxFee = json.Uint64(service.TransformSubnetTxFee) - reply.CreateBlockchainTxFee = json.Uint64(service.CreateBlockchainTxFee) - reply.AddPrimaryNetworkValidatorFee = json.Uint64(service.AddPrimaryNetworkValidatorFee) - reply.AddPrimaryNetworkDelegatorFee = json.Uint64(service.AddPrimaryNetworkDelegatorFee) - reply.AddSubnetValidatorFee = json.Uint64(service.AddSubnetValidatorFee) - reply.AddSubnetDelegatorFee = json.Uint64(service.AddSubnetDelegatorFee) +func (i *Info) GetTxFee(_ *http.Request, _ *struct{}, reply *GetTxFeeResponse) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "getTxFee"), + ) + + reply.TxFee = json.Uint64(i.TxFee) + reply.CreateAssetTxFee = json.Uint64(i.CreateAssetTxFee) + reply.CreateSubnetTxFee = json.Uint64(i.CreateSubnetTxFee) + reply.TransformSubnetTxFee = json.Uint64(i.TransformSubnetTxFee) + reply.CreateBlockchainTxFee = json.Uint64(i.CreateBlockchainTxFee) + reply.AddPrimaryNetworkValidatorFee = json.Uint64(i.AddPrimaryNetworkValidatorFee) + reply.AddPrimaryNetworkDelegatorFee = json.Uint64(i.AddPrimaryNetworkDelegatorFee) + reply.AddSubnetValidatorFee = json.Uint64(i.AddSubnetValidatorFee) + 
reply.AddSubnetDelegatorFee = json.Uint64(i.AddSubnetDelegatorFee) return nil } @@ -318,15 +356,18 @@ type GetVMsReply struct { } // GetVMs lists the virtual machines installed on the node -func (service *Info) GetVMs(_ *http.Request, _ *struct{}, reply *GetVMsReply) error { - service.log.Debug("Info: GetVMs called") +func (i *Info) GetVMs(_ *http.Request, _ *struct{}, reply *GetVMsReply) error { + i.log.Debug("API called", + zap.String("service", "info"), + zap.String("method", "getVMs"), + ) // Fetch the VMs registered on this node. - vmIDs, err := service.VMManager.ListFactories() + vmIDs, err := i.VMManager.ListFactories() if err != nil { return err } - reply.VMs, err = ids.GetRelevantAliases(service.VMManager, vmIDs) + reply.VMs, err = ids.GetRelevantAliases(i.VMManager, vmIDs) return err } diff --git a/avalanchego/api/info/service_test.go b/avalanchego/api/info/service_test.go index 1448e80b..3492b641 100644 --- a/avalanchego/api/info/service_test.go +++ b/avalanchego/api/info/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package info @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/vms" ) -var errOops = errors.New("oops") +var errTest = errors.New("non-nil error") type getVMsTest struct { info *Info @@ -61,7 +61,7 @@ func TestGetVMsSuccess(t *testing.T) { id2: alias2[1:], } - resources.mockLog.EXPECT().Debug(gomock.Any()).Times(1) + resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(vmIDs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(alias2, nil) @@ -78,13 +78,13 @@ func TestGetVMsVMsListFactoriesFails(t *testing.T) { resources := initGetVMsTest(t) defer resources.ctrl.Finish() - resources.mockLog.EXPECT().Debug(gomock.Any()).Times(1) - resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(nil, errOops) + resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) + resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(nil, errTest) reply := GetVMsReply{} err := resources.info.GetVMs(nil, nil, &reply) - require.Equal(t, errOops, err) + require.Equal(t, errTest, err) } // Tests GetVMs if we can't get our vm aliases. 
@@ -97,13 +97,13 @@ func TestGetVMsGetAliasesFails(t *testing.T) { vmIDs := []ids.ID{id1, id2} alias1 := []string{id1.String(), "vm1-alias-1", "vm1-alias-2"} - resources.mockLog.EXPECT().Debug(gomock.Any()).Times(1) + resources.mockLog.EXPECT().Debug(gomock.Any(), gomock.Any()).Times(1) resources.mockVMManager.EXPECT().ListFactories().Times(1).Return(vmIDs, nil) resources.mockVMManager.EXPECT().Aliases(id1).Times(1).Return(alias1, nil) - resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(nil, errOops) + resources.mockVMManager.EXPECT().Aliases(id2).Times(1).Return(nil, errTest) reply := GetVMsReply{} err := resources.info.GetVMs(nil, nil, &reply) - require.Equal(t, err, errOops) + require.Equal(t, err, errTest) } diff --git a/avalanchego/api/ipcs/client.go b/avalanchego/api/ipcs/client.go index 154ccae4..95391f0f 100644 --- a/avalanchego/api/ipcs/client.go +++ b/avalanchego/api/ipcs/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs @@ -11,9 +11,11 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ Client = &client{} +var _ Client = (*client)(nil) // Client interface for interacting with the IPCS endpoint +// +// Deprecated: The IPCs API is deprecated. The Index API should be used instead. type Client interface { // PublishBlockchain requests the node to begin publishing consensus and decision events PublishBlockchain(ctx context.Context, chainID string, options ...rpc.Option) (*PublishBlockchainReply, error) @@ -29,29 +31,30 @@ type client struct { } // NewClient returns a Client for interacting with the IPCS endpoint +// +// Deprecated: The IPCs API is deprecated. The Index API should be used instead. 
func NewClient(uri string) Client { return &client{requester: rpc.NewEndpointRequester( - uri+"/ext/ipcs", - "ipcs", + uri + "/ext/ipcs", )} } func (c *client) PublishBlockchain(ctx context.Context, blockchainID string, options ...rpc.Option) (*PublishBlockchainReply, error) { res := &PublishBlockchainReply{} - err := c.requester.SendRequest(ctx, "publishBlockchain", &PublishBlockchainArgs{ + err := c.requester.SendRequest(ctx, "ipcs.publishBlockchain", &PublishBlockchainArgs{ BlockchainID: blockchainID, }, res, options...) return res, err } func (c *client) UnpublishBlockchain(ctx context.Context, blockchainID string, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "unpublishBlockchain", &UnpublishBlockchainArgs{ + return c.requester.SendRequest(ctx, "ipcs.unpublishBlockchain", &UnpublishBlockchainArgs{ BlockchainID: blockchainID, }, &api.EmptyReply{}, options...) } func (c *client) GetPublishedBlockchains(ctx context.Context, options ...rpc.Option) ([]ids.ID, error) { res := &GetPublishedBlockchainsReply{} - err := c.requester.SendRequest(ctx, "getPublishedBlockchains", nil, res, options...) + err := c.requester.SendRequest(ctx, "ipcs.getPublishedBlockchains", nil, res, options...) return res.Chains, err } diff --git a/avalanchego/api/ipcs/service.go b/avalanchego/api/ipcs/service.go index 3ad5f426..65b09a18 100644 --- a/avalanchego/api/ipcs/service.go +++ b/avalanchego/api/ipcs/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ipcs @@ -58,8 +58,10 @@ type PublishBlockchainReply struct { } // PublishBlockchain publishes the finalized accepted transactions from the blockchainID over the IPC -func (ipc *IPCServer) PublishBlockchain(r *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error { - ipc.log.Debug("IPCs: PublishBlockchain called", +func (ipc *IPCServer) PublishBlockchain(_ *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error { + ipc.log.Warn("deprecated API called", + zap.String("service", "ipcs"), + zap.String("method", "publishBlockchain"), logging.UserString("blockchainID", args.BlockchainID), ) @@ -93,8 +95,10 @@ type UnpublishBlockchainArgs struct { } // UnpublishBlockchain closes publishing of a blockchainID -func (ipc *IPCServer) UnpublishBlockchain(r *http.Request, args *UnpublishBlockchainArgs, _ *api.EmptyReply) error { - ipc.log.Debug("IPCs: UnpublishBlockchain called", +func (ipc *IPCServer) UnpublishBlockchain(_ *http.Request, args *UnpublishBlockchainArgs, _ *api.EmptyReply) error { + ipc.log.Warn("deprecated API called", + zap.String("service", "ipcs"), + zap.String("method", "unpublishBlockchain"), logging.UserString("blockchainID", args.BlockchainID), ) @@ -124,7 +128,11 @@ type GetPublishedBlockchainsReply struct { } // GetPublishedBlockchains returns blockchains being published -func (ipc *IPCServer) GetPublishedBlockchains(r *http.Request, args *struct{}, reply *GetPublishedBlockchainsReply) error { +func (ipc *IPCServer) GetPublishedBlockchains(_ *http.Request, _ *struct{}, reply *GetPublishedBlockchainsReply) error { + ipc.log.Warn("deprecated API called", + zap.String("service", "ipcs"), + zap.String("method", "getPublishedBlockchains"), + ) reply.Chains = ipc.ipcs.GetPublishedBlockchains() return nil } diff --git a/avalanchego/api/keystore/blockchain_keystore.go b/avalanchego/api/keystore/blockchain_keystore.go index 03f0bccf..4c163b96 100644 --- a/avalanchego/api/keystore/blockchain_keystore.go 
+++ b/avalanchego/api/keystore/blockchain_keystore.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" ) -var _ BlockchainKeystore = &blockchainKeystore{} +var _ BlockchainKeystore = (*blockchainKeystore)(nil) type BlockchainKeystore interface { // Get a database that is able to read and write unencrypted values from the @@ -31,7 +31,8 @@ type blockchainKeystore struct { } func (bks *blockchainKeystore) GetDatabase(username, password string) (*encdb.Database, error) { - bks.ks.log.Debug("Keystore: GetDatabase called", + bks.ks.log.Warn("deprecated keystore called", + zap.String("method", "getDatabase"), logging.UserString("username", username), zap.Stringer("blockchainID", bks.blockchainID), ) @@ -40,7 +41,8 @@ func (bks *blockchainKeystore) GetDatabase(username, password string) (*encdb.Da } func (bks *blockchainKeystore) GetRawDatabase(username, password string) (database.Database, error) { - bks.ks.log.Debug("Keystore: GetRawDatabase called", + bks.ks.log.Warn("deprecated keystore called", + zap.String("method", "getRawDatabase"), logging.UserString("username", username), zap.Stringer("blockchainID", bks.blockchainID), ) diff --git a/avalanchego/api/keystore/client.go b/avalanchego/api/keystore/client.go index e170d16d..43442ace 100644 --- a/avalanchego/api/keystore/client.go +++ b/avalanchego/api/keystore/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package keystore @@ -11,9 +11,12 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ Client = &client{} +var _ Client = (*client)(nil) // Client interface for Avalanche Keystore API Endpoint +// +// Deprecated: The Keystore API is deprecated. Dedicated wallets should be used +// instead. type Client interface { CreateUser(context.Context, api.UserPass, ...rpc.Option) error // Returns the usernames of all keystore users @@ -31,20 +34,21 @@ type client struct { requester rpc.EndpointRequester } +// Deprecated: The Keystore API is deprecated. Dedicated wallets should be used +// instead. func NewClient(uri string) Client { return &client{requester: rpc.NewEndpointRequester( - uri+"/ext/keystore", - "keystore", + uri + "/ext/keystore", )} } func (c *client) CreateUser(ctx context.Context, user api.UserPass, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "createUser", &user, &api.EmptyReply{}, options...) + return c.requester.SendRequest(ctx, "keystore.createUser", &user, &api.EmptyReply{}, options...) } func (c *client) ListUsers(ctx context.Context, options ...rpc.Option) ([]string, error) { res := &ListUsersReply{} - err := c.requester.SendRequest(ctx, "listUsers", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "keystore.listUsers", struct{}{}, res, options...) return res.Users, err } @@ -52,7 +56,7 @@ func (c *client) ExportUser(ctx context.Context, user api.UserPass, options ...r res := &ExportUserReply{ Encoding: formatting.Hex, } - err := c.requester.SendRequest(ctx, "exportUser", &user, res, options...) + err := c.requester.SendRequest(ctx, "keystore.exportUser", &user, res, options...) 
if err != nil { return nil, err } @@ -65,7 +69,7 @@ func (c *client) ImportUser(ctx context.Context, user api.UserPass, account []by return err } - return c.requester.SendRequest(ctx, "importUser", &ImportUserArgs{ + return c.requester.SendRequest(ctx, "keystore.importUser", &ImportUserArgs{ UserPass: user, User: accountStr, Encoding: formatting.Hex, @@ -73,5 +77,5 @@ func (c *client) ImportUser(ctx context.Context, user api.UserPass, account []by } func (c *client) DeleteUser(ctx context.Context, user api.UserPass, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "deleteUser", &user, &api.EmptyReply{}, options...) + return c.requester.SendRequest(ctx, "keystore.deleteUser", &user, &api.EmptyReply{}, options...) } diff --git a/avalanchego/api/keystore/codec.go b/avalanchego/api/keystore/codec.go index f079c58d..df6c18ae 100644 --- a/avalanchego/api/keystore/codec.go +++ b/avalanchego/api/keystore/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore diff --git a/avalanchego/api/keystore/gkeystore/keystore_client.go b/avalanchego/api/keystore/gkeystore/keystore_client.go index 7902b73e..6bbfc6f9 100644 --- a/avalanchego/api/keystore/gkeystore/keystore_client.go +++ b/avalanchego/api/keystore/gkeystore/keystore_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gkeystore @@ -16,7 +16,7 @@ import ( rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) -var _ keystore.BlockchainKeystore = &Client{} +var _ keystore.BlockchainKeystore = (*Client)(nil) // Client is a snow.Keystore that talks over RPC. 
type Client struct { diff --git a/avalanchego/api/keystore/gkeystore/keystore_server.go b/avalanchego/api/keystore/gkeystore/keystore_server.go index 66e214e6..9244939d 100644 --- a/avalanchego/api/keystore/gkeystore/keystore_server.go +++ b/avalanchego/api/keystore/gkeystore/keystore_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gkeystore @@ -6,8 +6,6 @@ package gkeystore import ( "context" - "google.golang.org/grpc" - "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/rpcdb" @@ -17,7 +15,7 @@ import ( rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) -var _ keystorepb.KeystoreServer = &Server{} +var _ keystorepb.KeystoreServer = (*Server)(nil) // Server is a snow.Keystore that is managed over RPC. type Server struct { @@ -43,24 +41,19 @@ func (s *Server) GetDatabase( closer := dbCloser{Database: db} - // start the db server serverListener, err := grpcutils.NewListener() if err != nil { return nil, err } - serverAddr := serverListener.Addr().String() - go grpcutils.Serve(serverListener, func(opts []grpc.ServerOption) *grpc.Server { - if len(opts) == 0 { - opts = append(opts, grpcutils.DefaultServerOptions...) - } - server := grpc.NewServer(opts...) 
- closer.closer.Add(server) - db := rpcdb.NewServer(&closer) - rpcdbpb.RegisterDatabaseServer(server, db) - return server - }) - return &keystorepb.GetDatabaseResponse{ServerAddr: serverAddr}, nil + server := grpcutils.NewServer() + closer.closer.Add(server) + rpcdbpb.RegisterDatabaseServer(server, rpcdb.NewServer(&closer)) + + // start the db server + go grpcutils.Serve(serverListener, server) + + return &keystorepb.GetDatabaseResponse{ServerAddr: serverListener.Addr().String()}, nil } type dbCloser struct { diff --git a/avalanchego/api/keystore/keystore.go b/avalanchego/api/keystore/keystore.go index dbdc6425..fecd63b1 100644 --- a/avalanchego/api/keystore/keystore.go +++ b/avalanchego/api/keystore/keystore.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore @@ -34,7 +34,7 @@ var ( usersPrefix = []byte("users") bcsPrefix = []byte("bcs") - _ Keystore = &keystore{} + _ Keystore = (*keystore)(nil) ) type Keystore interface { @@ -242,12 +242,12 @@ func (ks *keystore) DeleteUser(username, pw string) error { defer it.Release() for it.Next() { - if err = dataBatch.Delete(it.Key()); err != nil { + if err := dataBatch.Delete(it.Key()); err != nil { return err } } - if err = it.Error(); err != nil { + if err := it.Error(); err != nil { return err } diff --git a/avalanchego/api/keystore/service.go b/avalanchego/api/keystore/service.go index 955af65b..c0e823c2 100644 --- a/avalanchego/api/keystore/service.go +++ b/avalanchego/api/keystore/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package keystore @@ -7,6 +7,8 @@ import ( "fmt" "net/http" + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/memdb" @@ -20,7 +22,9 @@ type service struct { } func (s *service) CreateUser(_ *http.Request, args *api.UserPass, _ *api.EmptyReply) error { - s.ks.log.Debug("Keystore: CreateUser called", + s.ks.log.Warn("deprecated API called", + zap.String("service", "keystore"), + zap.String("method", "createUser"), logging.UserString("username", args.Username), ) @@ -28,7 +32,9 @@ func (s *service) CreateUser(_ *http.Request, args *api.UserPass, _ *api.EmptyRe } func (s *service) DeleteUser(_ *http.Request, args *api.UserPass, _ *api.EmptyReply) error { - s.ks.log.Debug("Keystore: DeleteUser called", + s.ks.log.Warn("deprecated API called", + zap.String("service", "keystore"), + zap.String("method", "deleteUser"), logging.UserString("username", args.Username), ) @@ -39,8 +45,11 @@ type ListUsersReply struct { Users []string `json:"users"` } -func (s *service) ListUsers(_ *http.Request, args *struct{}, reply *ListUsersReply) error { - s.ks.log.Debug("Keystore: ListUsers called") +func (s *service) ListUsers(_ *http.Request, _ *struct{}, reply *ListUsersReply) error { + s.ks.log.Warn("deprecated API called", + zap.String("service", "keystore"), + zap.String("method", "listUsers"), + ) var err error reply.Users, err = s.ks.ListUsers() @@ -56,8 +65,10 @@ type ImportUserArgs struct { Encoding formatting.Encoding `json:"encoding"` } -func (s *service) ImportUser(r *http.Request, args *ImportUserArgs, _ *api.EmptyReply) error { - s.ks.log.Debug("Keystore: ImportUser called", +func (s *service) ImportUser(_ *http.Request, args *ImportUserArgs, _ *api.EmptyReply) error { + s.ks.log.Warn("deprecated API called", + zap.String("service", "keystore"), + zap.String("method", "importUser"), logging.UserString("username", args.Username), ) @@ -85,7 +96,9 @@ type ExportUserReply 
struct { } func (s *service) ExportUser(_ *http.Request, args *ExportUserArgs, reply *ExportUserReply) error { - s.ks.log.Debug("Keystore: ExportUser called", + s.ks.log.Warn("deprecated API called", + zap.String("service", "keystore"), + zap.String("method", "exportUser"), logging.UserString("username", args.Username), ) diff --git a/avalanchego/api/keystore/service_test.go b/avalanchego/api/keystore/service_test.go index b208d1e7..84c8980e 100644 --- a/avalanchego/api/keystore/service_test.go +++ b/avalanchego/api/keystore/service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore @@ -405,7 +405,8 @@ func TestServiceDeleteUser(t *testing.T) { } // deleted user details should be available to create user again. - if err = s.CreateUser(nil, &api.UserPass{Username: testUser, Password: password}, &api.EmptyReply{}); err != nil { + err := s.CreateUser(nil, &api.UserPass{Username: testUser, Password: password}, &api.EmptyReply{}) + if err != nil { t.Fatalf("failed to create user: %v", err) } } diff --git a/avalanchego/api/metrics/gatherer_test.go b/avalanchego/api/metrics/gatherer_test.go index 28349d98..2059c1ab 100644 --- a/avalanchego/api/metrics/gatherer_test.go +++ b/avalanchego/api/metrics/gatherer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics diff --git a/avalanchego/api/metrics/multi_gatherer.go b/avalanchego/api/metrics/multi_gatherer.go index a9c9c58e..eb357668 100644 --- a/avalanchego/api/metrics/multi_gatherer.go +++ b/avalanchego/api/metrics/multi_gatherer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package metrics @@ -6,18 +6,19 @@ package metrics import ( "errors" "fmt" - "sort" "sync" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + + "golang.org/x/exp/slices" ) var ( errDuplicatedPrefix = errors.New("duplicated prefix") - _ MultiGatherer = &multiGatherer{} + _ MultiGatherer = (*multiGatherer)(nil) ) // MultiGatherer extends the Gatherer interface by allowing additional gatherers @@ -84,10 +85,8 @@ func (g *multiGatherer) Register(namespace string, gatherer prometheus.Gatherer) return nil } -type sortMetricsData []*dto.MetricFamily - -func (m sortMetricsData) Less(i, j int) bool { return *m[i].Name < *m[j].Name } -func (m sortMetricsData) Len() int { return len(m) } -func (m sortMetricsData) Swap(i, j int) { m[j], m[i] = m[i], m[j] } - -func sortMetrics(m []*dto.MetricFamily) { sort.Sort(sortMetricsData(m)) } +func sortMetrics(m []*dto.MetricFamily) { + slices.SortFunc(m, func(i, j *dto.MetricFamily) bool { + return *i.Name < *j.Name + }) +} diff --git a/avalanchego/api/metrics/multi_gatherer_test.go b/avalanchego/api/metrics/multi_gatherer_test.go index 6ee29740..aead517d 100644 --- a/avalanchego/api/metrics/multi_gatherer_test.go +++ b/avalanchego/api/metrics/multi_gatherer_test.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metrics import ( - "errors" "testing" "github.com/stretchr/testify/require" @@ -43,16 +42,15 @@ func TestMultiGathererAddedError(t *testing.T) { g := NewMultiGatherer() - expected := errors.New(":(") tg := &testGatherer{ - err: expected, + err: errTest, } err := g.Register("", tg) require.NoError(err) mfs, err := g.Gather() - require.Equal(expected, err) + require.ErrorIs(err, errTest) require.Empty(mfs) } diff --git a/avalanchego/api/metrics/optional_gatherer.go b/avalanchego/api/metrics/optional_gatherer.go index 2355257a..4d917dfb 100644 --- a/avalanchego/api/metrics/optional_gatherer.go +++ b/avalanchego/api/metrics/optional_gatherer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -15,7 +15,7 @@ import ( var ( errDuplicatedRegister = errors.New("duplicated register") - _ OptionalGatherer = &optionalGatherer{} + _ OptionalGatherer = (*optionalGatherer)(nil) ) // OptionalGatherer extends the Gatherer interface by allowing the optional diff --git a/avalanchego/api/metrics/optional_gatherer_test.go b/avalanchego/api/metrics/optional_gatherer_test.go index 1d45b7d5..6b528886 100644 --- a/avalanchego/api/metrics/optional_gatherer_test.go +++ b/avalanchego/api/metrics/optional_gatherer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metrics @@ -12,6 +12,8 @@ import ( dto "github.com/prometheus/client_model/go" ) +var errTest = errors.New("non-nil error") + func TestOptionalGathererEmptyGather(t *testing.T) { require := require.New(t) @@ -40,16 +42,15 @@ func TestOptionalGathererAddedError(t *testing.T) { g := NewOptionalGatherer() - expected := errors.New(":(") tg := &testGatherer{ - err: expected, + err: errTest, } err := g.Register(tg) require.NoError(err) mfs, err := g.Gather() - require.Equal(expected, err) + require.ErrorIs(err, errTest) require.Empty(mfs) } diff --git a/avalanchego/api/server/metrics.go b/avalanchego/api/server/metrics.go new file mode 100644 index 00000000..6556c3a0 --- /dev/null +++ b/avalanchego/api/server/metrics.go @@ -0,0 +1,74 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package server + +import ( + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +type metrics struct { + numProcessing *prometheus.GaugeVec + numCalls *prometheus.CounterVec + totalDuration *prometheus.GaugeVec +} + +func newMetrics(namespace string, registerer prometheus.Registerer) (*metrics, error) { + m := &metrics{ + numProcessing: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "calls_processing", + Help: "The number of calls this API is currently processing", + }, + []string{"base"}, + ), + numCalls: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: namespace, + Name: "calls", + Help: "The number of calls this API has processed", + }, + []string{"base"}, + ), + totalDuration: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "calls_duration", + Help: "The total amount of time, in nanoseconds, spent handling API calls", + }, + []string{"base"}, + ), + } + + errs := wrappers.Errs{} + errs.Add( + registerer.Register(m.numProcessing), + 
registerer.Register(m.numCalls), + registerer.Register(m.totalDuration), + ) + return m, errs.Err +} + +func (m *metrics) wrapHandler(chainName string, handler http.Handler) http.Handler { + numProcessing := m.numProcessing.WithLabelValues(chainName) + numCalls := m.numCalls.WithLabelValues(chainName) + totalDuration := m.totalDuration.WithLabelValues(chainName) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + startTime := time.Now() + numProcessing.Inc() + + defer func() { + numProcessing.Dec() + numCalls.Inc() + totalDuration.Add(float64(time.Since(startTime))) + }() + + handler.ServeHTTP(w, r) + }) +} diff --git a/avalanchego/api/server/middleware_handler.go b/avalanchego/api/server/middleware_handler.go index 7abaf5f1..1e5b7192 100644 --- a/avalanchego/api/server/middleware_handler.go +++ b/avalanchego/api/server/middleware_handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server diff --git a/avalanchego/api/server/mock_server.go b/avalanchego/api/server/mock_server.go index 00789172..315e3d88 100644 --- a/avalanchego/api/server/mock_server.go +++ b/avalanchego/api/server/mock_server.go @@ -1,134 +1,21 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: api/server/server.go +// Source: github.com/ava-labs/avalanchego/api/server (interfaces: Server) // Package server is a generated GoMock package. 
package server import ( - io "io" reflect "reflect" sync "sync" - time "time" - ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" common "github.com/ava-labs/avalanchego/snow/engine/common" - logging "github.com/ava-labs/avalanchego/utils/logging" gomock "github.com/golang/mock/gomock" ) -// MockPathAdder is a mock of PathAdder interface. -type MockPathAdder struct { - ctrl *gomock.Controller - recorder *MockPathAdderMockRecorder -} - -// MockPathAdderMockRecorder is the mock recorder for MockPathAdder. -type MockPathAdderMockRecorder struct { - mock *MockPathAdder -} - -// NewMockPathAdder creates a new mock instance. -func NewMockPathAdder(ctrl *gomock.Controller) *MockPathAdder { - mock := &MockPathAdder{ctrl: ctrl} - mock.recorder = &MockPathAdderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPathAdder) EXPECT() *MockPathAdderMockRecorder { - return m.recorder -} - -// AddAliases mocks base method. -func (m *MockPathAdder) AddAliases(endpoint string, aliases ...string) error { - m.ctrl.T.Helper() - varargs := []interface{}{endpoint} - for _, a := range aliases { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "AddAliases", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddAliases indicates an expected call of AddAliases. -func (mr *MockPathAdderMockRecorder) AddAliases(endpoint interface{}, aliases ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{endpoint}, aliases...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAliases", reflect.TypeOf((*MockPathAdder)(nil).AddAliases), varargs...) -} - -// AddRoute mocks base method. 
-func (m *MockPathAdder) AddRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string, loggingWriter io.Writer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddRoute", handler, lock, base, endpoint, loggingWriter) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddRoute indicates an expected call of AddRoute. -func (mr *MockPathAdderMockRecorder) AddRoute(handler, lock, base, endpoint, loggingWriter interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoute", reflect.TypeOf((*MockPathAdder)(nil).AddRoute), handler, lock, base, endpoint, loggingWriter) -} - -// MockPathAdderWithReadLock is a mock of PathAdderWithReadLock interface. -type MockPathAdderWithReadLock struct { - ctrl *gomock.Controller - recorder *MockPathAdderWithReadLockMockRecorder -} - -// MockPathAdderWithReadLockMockRecorder is the mock recorder for MockPathAdderWithReadLock. -type MockPathAdderWithReadLockMockRecorder struct { - mock *MockPathAdderWithReadLock -} - -// NewMockPathAdderWithReadLock creates a new mock instance. -func NewMockPathAdderWithReadLock(ctrl *gomock.Controller) *MockPathAdderWithReadLock { - mock := &MockPathAdderWithReadLock{ctrl: ctrl} - mock.recorder = &MockPathAdderWithReadLockMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPathAdderWithReadLock) EXPECT() *MockPathAdderWithReadLockMockRecorder { - return m.recorder -} - -// AddAliasesWithReadLock mocks base method. -func (m *MockPathAdderWithReadLock) AddAliasesWithReadLock(endpoint string, aliases ...string) error { - m.ctrl.T.Helper() - varargs := []interface{}{endpoint} - for _, a := range aliases { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "AddAliasesWithReadLock", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddAliasesWithReadLock indicates an expected call of AddAliasesWithReadLock. 
-func (mr *MockPathAdderWithReadLockMockRecorder) AddAliasesWithReadLock(endpoint interface{}, aliases ...interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{endpoint}, aliases...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAliasesWithReadLock", reflect.TypeOf((*MockPathAdderWithReadLock)(nil).AddAliasesWithReadLock), varargs...) -} - -// AddRouteWithReadLock mocks base method. -func (m *MockPathAdderWithReadLock) AddRouteWithReadLock(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string, loggingWriter io.Writer) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddRouteWithReadLock", handler, lock, base, endpoint, loggingWriter) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddRouteWithReadLock indicates an expected call of AddRouteWithReadLock. -func (mr *MockPathAdderWithReadLockMockRecorder) AddRouteWithReadLock(handler, lock, base, endpoint, loggingWriter interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRouteWithReadLock", reflect.TypeOf((*MockPathAdderWithReadLock)(nil).AddRouteWithReadLock), handler, lock, base, endpoint, loggingWriter) -} - // MockServer is a mock of Server interface. type MockServer struct { ctrl *gomock.Controller @@ -153,10 +40,10 @@ func (m *MockServer) EXPECT() *MockServerMockRecorder { } // AddAliases mocks base method. -func (m *MockServer) AddAliases(endpoint string, aliases ...string) error { +func (m *MockServer) AddAliases(arg0 string, arg1 ...string) error { m.ctrl.T.Helper() - varargs := []interface{}{endpoint} - for _, a := range aliases { + varargs := []interface{}{arg0} + for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "AddAliases", varargs...) @@ -165,17 +52,17 @@ func (m *MockServer) AddAliases(endpoint string, aliases ...string) error { } // AddAliases indicates an expected call of AddAliases. 
-func (mr *MockServerMockRecorder) AddAliases(endpoint interface{}, aliases ...interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddAliases(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{endpoint}, aliases...) + varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAliases", reflect.TypeOf((*MockServer)(nil).AddAliases), varargs...) } // AddAliasesWithReadLock mocks base method. -func (m *MockServer) AddAliasesWithReadLock(endpoint string, aliases ...string) error { +func (m *MockServer) AddAliasesWithReadLock(arg0 string, arg1 ...string) error { m.ctrl.T.Helper() - varargs := []interface{}{endpoint} - for _, a := range aliases { + varargs := []interface{}{arg0} + for _, a := range arg1 { varargs = append(varargs, a) } ret := m.ctrl.Call(m, "AddAliasesWithReadLock", varargs...) @@ -184,52 +71,38 @@ func (m *MockServer) AddAliasesWithReadLock(endpoint string, aliases ...string) } // AddAliasesWithReadLock indicates an expected call of AddAliasesWithReadLock. -func (mr *MockServerMockRecorder) AddAliasesWithReadLock(endpoint interface{}, aliases ...interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddAliasesWithReadLock(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{endpoint}, aliases...) + varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddAliasesWithReadLock", reflect.TypeOf((*MockServer)(nil).AddAliasesWithReadLock), varargs...) } -// AddChainRoute mocks base method. -func (m *MockServer) AddChainRoute(handler *common.HTTPHandler, ctx *snow.ConsensusContext, base, endpoint string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddChainRoute", handler, ctx, base, endpoint) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddChainRoute indicates an expected call of AddChainRoute. 
-func (mr *MockServerMockRecorder) AddChainRoute(handler, ctx, base, endpoint interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChainRoute", reflect.TypeOf((*MockServer)(nil).AddChainRoute), handler, ctx, base, endpoint) -} - // AddRoute mocks base method. -func (m *MockServer) AddRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string) error { +func (m *MockServer) AddRoute(arg0 *common.HTTPHandler, arg1 *sync.RWMutex, arg2, arg3 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddRoute", handler, lock, base, endpoint) + ret := m.ctrl.Call(m, "AddRoute", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // AddRoute indicates an expected call of AddRoute. -func (mr *MockServerMockRecorder) AddRoute(handler, lock, base, endpoint interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddRoute(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoute", reflect.TypeOf((*MockServer)(nil).AddRoute), handler, lock, base, endpoint) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoute", reflect.TypeOf((*MockServer)(nil).AddRoute), arg0, arg1, arg2, arg3) } // AddRouteWithReadLock mocks base method. -func (m *MockServer) AddRouteWithReadLock(handler *common.HTTPHandler, lock *sync.RWMutex, base, endpoint string) error { +func (m *MockServer) AddRouteWithReadLock(arg0 *common.HTTPHandler, arg1 *sync.RWMutex, arg2, arg3 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddRouteWithReadLock", handler, lock, base, endpoint) + ret := m.ctrl.Call(m, "AddRouteWithReadLock", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // AddRouteWithReadLock indicates an expected call of AddRouteWithReadLock. 
-func (mr *MockServerMockRecorder) AddRouteWithReadLock(handler, lock, base, endpoint interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) AddRouteWithReadLock(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRouteWithReadLock", reflect.TypeOf((*MockServer)(nil).AddRouteWithReadLock), handler, lock, base, endpoint) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRouteWithReadLock", reflect.TypeOf((*MockServer)(nil).AddRouteWithReadLock), arg0, arg1, arg2, arg3) } // Dispatch mocks base method. @@ -247,46 +120,29 @@ func (mr *MockServerMockRecorder) Dispatch() *gomock.Call { } // DispatchTLS mocks base method. -func (m *MockServer) DispatchTLS(certBytes, keyBytes []byte) error { +func (m *MockServer) DispatchTLS(arg0, arg1 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DispatchTLS", certBytes, keyBytes) + ret := m.ctrl.Call(m, "DispatchTLS", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // DispatchTLS indicates an expected call of DispatchTLS. -func (mr *MockServerMockRecorder) DispatchTLS(certBytes, keyBytes interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DispatchTLS", reflect.TypeOf((*MockServer)(nil).DispatchTLS), certBytes, keyBytes) -} - -// Initialize mocks base method. -func (m *MockServer) Initialize(log logging.Logger, factory logging.Factory, host string, port uint16, allowedOrigins []string, shutdownTimeout time.Duration, nodeID ids.NodeID, wrappers ...Wrapper) { - m.ctrl.T.Helper() - varargs := []interface{}{log, factory, host, port, allowedOrigins, shutdownTimeout, nodeID} - for _, a := range wrappers { - varargs = append(varargs, a) - } - m.ctrl.Call(m, "Initialize", varargs...) -} - -// Initialize indicates an expected call of Initialize. 
-func (mr *MockServerMockRecorder) Initialize(log, factory, host, port, allowedOrigins, shutdownTimeout, nodeID interface{}, wrappers ...interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) DispatchTLS(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{log, factory, host, port, allowedOrigins, shutdownTimeout, nodeID}, wrappers...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockServer)(nil).Initialize), varargs...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DispatchTLS", reflect.TypeOf((*MockServer)(nil).DispatchTLS), arg0, arg1) } // RegisterChain mocks base method. -func (m *MockServer) RegisterChain(chainName string, engine common.Engine) { +func (m *MockServer) RegisterChain(arg0 string, arg1 *snow.ConsensusContext, arg2 common.VM) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RegisterChain", chainName, engine) + m.ctrl.Call(m, "RegisterChain", arg0, arg1, arg2) } // RegisterChain indicates an expected call of RegisterChain. -func (mr *MockServerMockRecorder) RegisterChain(chainName, engine interface{}) *gomock.Call { +func (mr *MockServerMockRecorder) RegisterChain(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterChain", reflect.TypeOf((*MockServer)(nil).RegisterChain), chainName, engine) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterChain", reflect.TypeOf((*MockServer)(nil).RegisterChain), arg0, arg1, arg2) } // Shutdown mocks base method. diff --git a/avalanchego/api/server/router.go b/avalanchego/api/server/router.go index 90474281..9732b8e6 100644 --- a/avalanchego/api/server/router.go +++ b/avalanchego/api/server/router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package server @@ -10,6 +10,8 @@ import ( "sync" "github.com/gorilla/mux" + + "github.com/ava-labs/avalanchego/utils/set" ) var ( @@ -22,7 +24,7 @@ type router struct { router *mux.Router routeLock sync.Mutex - reservedRoutes map[string]bool // Reserves routes so that there can't be alias that conflict + reservedRoutes set.Set[string] // Reserves routes so that there can't be alias that conflict aliases map[string][]string // Maps a route to a set of reserved routes routes map[string]map[string]http.Handler // Maps routes to a handler } @@ -30,7 +32,7 @@ type router struct { func newRouter() *router { return &router{ router: mux.NewRouter(), - reservedRoutes: make(map[string]bool), + reservedRoutes: set.Set[string]{}, aliases: make(map[string][]string), routes: make(map[string]map[string]http.Handler), } @@ -68,7 +70,7 @@ func (r *router) AddRouter(base, endpoint string, handler http.Handler) error { } func (r *router) addRouter(base, endpoint string, handler http.Handler) error { - if r.reservedRoutes[base] { + if r.reservedRoutes.Contains(base) { return fmt.Errorf("couldn't route to %s as that route is either aliased or already maps to a handler", base) } @@ -87,12 +89,13 @@ func (r *router) forceAddRouter(base, endpoint string, handler http.Handler) err endpoints[endpoint] = handler r.routes[base] = endpoints + // Name routes based on their URL for easy retrieval in the future - if route := r.router.Handle(url, handler); route != nil { - route.Name(url) - } else { + route := r.router.Handle(url, handler) + if route == nil { return fmt.Errorf("failed to create new route for %s", url) } + route.Name(url) var err error if aliases, exists := r.aliases[base]; exists { @@ -112,13 +115,13 @@ func (r *router) AddAlias(base string, aliases ...string) error { defer r.routeLock.Unlock() for _, alias := range aliases { - if r.reservedRoutes[alias] { + if r.reservedRoutes.Contains(alias) { return fmt.Errorf("couldn't alias to %s as that route is either already aliased or 
already maps to a handler", alias) } } for _, alias := range aliases { - r.reservedRoutes[alias] = true + r.reservedRoutes.Add(alias) } r.aliases[base] = append(r.aliases[base], aliases...) diff --git a/avalanchego/api/server/router_test.go b/avalanchego/api/server/router_test.go index 8b898320..dc53ccd2 100644 --- a/avalanchego/api/server/router_test.go +++ b/avalanchego/api/server/router_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server diff --git a/avalanchego/api/server/server.go b/avalanchego/api/server/server.go index fb697c57..e6e24b34 100644 --- a/avalanchego/api/server/server.go +++ b/avalanchego/api/server/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server @@ -17,28 +17,29 @@ import ( "github.com/NYTimes/gziphandler" + "github.com/prometheus/client_golang/prometheus" + "github.com/rs/cors" "go.uber.org/zap" + "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" ) -const ( - baseURL = "/ext" - readHeaderTimeout = 10 * time.Second -) +const baseURL = "/ext" var ( errUnknownLockOption = errors.New("invalid lock options") _ PathAdder = readPathAdder{} - _ Server = &server{} + _ Server = (*server)(nil) ) type PathAdder interface { @@ -63,50 +64,41 @@ type PathAdderWithReadLock interface { type Server interface { PathAdder PathAdderWithReadLock - // Initialize creates the API server at the provided host and port - Initialize(log logging.Logger, 
- factory logging.Factory, - host string, - port uint16, - allowedOrigins []string, - shutdownTimeout time.Duration, - nodeID ids.NodeID, - wrappers ...Wrapper) // Dispatch starts the API server Dispatch() error // DispatchTLS starts the API server with the provided TLS certificate DispatchTLS(certBytes, keyBytes []byte) error - // RegisterChain registers the API endpoints associated with this chain. That is, - // add pairs to server so that API calls can be made to the VM. - // This method runs in a goroutine to avoid a deadlock in the event that the caller - // holds the engine's context lock. Namely, this could happen when the P-Chain is - // creating a new chain and holds the P-Chain's lock when this function is held, - // and at the same time the server's lock is held due to an API call and is trying - // to grab the P-Chain's lock. - RegisterChain(chainName string, engine common.Engine) - // AddChainRoute registers a route to a chain's handler - AddChainRoute( - handler *common.HTTPHandler, - ctx *snow.ConsensusContext, - base, endpoint string, - ) error + // RegisterChain registers the API endpoints associated with this chain. + // That is, add pairs to server so that API calls can be + // made to the VM. 
+ RegisterChain(chainName string, ctx *snow.ConsensusContext, vm common.VM) // Shutdown this server Shutdown() error } +type HTTPConfig struct { + ReadTimeout time.Duration `json:"readTimeout"` + ReadHeaderTimeout time.Duration `json:"readHeaderTimeout"` + WriteTimeout time.Duration `json:"writeHeaderTimeout"` + IdleTimeout time.Duration `json:"idleTimeout"` +} + type server struct { // log this server writes to log logging.Logger // generates new logs for chains to write to factory logging.Factory - // points the the router handlers - handler http.Handler // Listens for HTTP traffic on this address listenHost string listenPort uint16 shutdownTimeout time.Duration + tracingEnabled bool + tracer trace.Tracer + + metrics *metrics + // Maps endpoints to handlers router *router @@ -114,11 +106,7 @@ type server struct { } // New returns an instance of a Server. -func New() Server { - return &server{} -} - -func (s *server) Initialize( +func New( log logging.Logger, factory logging.Factory, host string, @@ -126,25 +114,25 @@ func (s *server) Initialize( allowedOrigins []string, shutdownTimeout time.Duration, nodeID ids.NodeID, + tracingEnabled bool, + tracer trace.Tracer, + namespace string, + registerer prometheus.Registerer, + httpConfig HTTPConfig, wrappers ...Wrapper, -) { - s.log = log - s.factory = factory - s.listenHost = host - s.listenPort = port - s.shutdownTimeout = shutdownTimeout - s.router = newRouter() - - s.log.Info("API created", - zap.Strings("allowedOrigins", allowedOrigins), - ) +) (Server, error) { + m, err := newMetrics(namespace, registerer) + if err != nil { + return nil, err + } + router := newRouter() corsHandler := cors.New(cors.Options{ AllowedOrigins: allowedOrigins, AllowCredentials: true, - }).Handler(s.router) + }).Handler(router) gzipHandler := gziphandler.GzipHandler(corsHandler) - s.handler = http.HandlerFunc( + var handler http.Handler = http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { // Attach this node's ID as a 
header w.Header().Set("node-id", nodeID.String()) @@ -153,8 +141,31 @@ func (s *server) Initialize( ) for _, wrapper := range wrappers { - s.handler = wrapper.WrapHandler(s.handler) + handler = wrapper.WrapHandler(handler) } + + log.Info("API created", + zap.Strings("allowedOrigins", allowedOrigins), + ) + + return &server{ + log: log, + factory: factory, + listenHost: host, + listenPort: port, + shutdownTimeout: shutdownTimeout, + tracingEnabled: tracingEnabled, + tracer: tracer, + metrics: m, + router: router, + srv: &http.Server{ + Handler: handler, + ReadTimeout: httpConfig.ReadTimeout, + ReadHeaderTimeout: httpConfig.ReadHeaderTimeout, + WriteTimeout: httpConfig.WriteTimeout, + IdleTimeout: httpConfig.IdleTimeout, + }, + }, nil } func (s *server) Dispatch() error { @@ -176,10 +187,6 @@ func (s *server) Dispatch() error { ) } - s.srv = &http.Server{ - Handler: s.handler, - ReadHeaderTimeout: readHeaderTimeout, - } return s.srv.Serve(listener) } @@ -211,27 +218,17 @@ func (s *server) DispatchTLS(certBytes, keyBytes []byte) error { ) } - s.srv = &http.Server{ - Addr: listenAddress, - Handler: s.handler, - ReadHeaderTimeout: readHeaderTimeout, - } return s.srv.Serve(listener) } -func (s *server) RegisterChain(chainName string, engine common.Engine) { - go s.registerChain(chainName, engine) -} - -func (s *server) registerChain(chainName string, engine common.Engine) { +func (s *server) RegisterChain(chainName string, ctx *snow.ConsensusContext, vm common.VM) { var ( handlers map[string]*common.HTTPHandler err error ) - ctx := engine.Context() ctx.Lock.Lock() - handlers, err = engine.GetVM().CreateHandlers() + handlers, err = vm.CreateHandlers(context.TODO()) ctx.Lock.Unlock() if err != nil { s.log.Error("failed to create handlers", @@ -259,7 +256,7 @@ func (s *server) registerChain(chainName string, engine common.Engine) { ) continue } - if err := s.AddChainRoute(handler, ctx, defaultEndpoint, extension); err != nil { + if err := s.addChainRoute(chainName, handler, 
ctx, defaultEndpoint, extension); err != nil { s.log.Error("error adding route", zap.Error(err), ) @@ -267,19 +264,32 @@ func (s *server) registerChain(chainName string, engine common.Engine) { } } -func (s *server) AddChainRoute(handler *common.HTTPHandler, ctx *snow.ConsensusContext, base, endpoint string) error { +func (s *server) addChainRoute(chainName string, handler *common.HTTPHandler, ctx *snow.ConsensusContext, base, endpoint string) error { url := fmt.Sprintf("%s/%s", baseURL, base) s.log.Info("adding route", zap.String("url", url), zap.String("endpoint", endpoint), ) + if s.tracingEnabled { + handler = &common.HTTPHandler{ + LockOptions: handler.LockOptions, + Handler: api.TraceHandler(handler.Handler, chainName, s.tracer), + } + } // Apply middleware to grab/release chain's lock before/after calling API method - h, err := lockMiddleware(handler.Handler, handler.LockOptions, &ctx.Lock) + h, err := lockMiddleware( + handler.Handler, + handler.LockOptions, + s.tracingEnabled, + s.tracer, + &ctx.Lock, + ) if err != nil { return err } // Apply middleware to reject calls to the handler before the chain finishes bootstrapping h = rejectMiddleware(h, ctx) + h = s.metrics.wrapHandler(chainName, h) return s.router.AddRouter(url, endpoint, h) } @@ -299,41 +309,74 @@ func (s *server) addRoute(handler *common.HTTPHandler, lock *sync.RWMutex, base, zap.String("url", url), zap.String("endpoint", endpoint), ) + + if s.tracingEnabled { + handler = &common.HTTPHandler{ + LockOptions: handler.LockOptions, + Handler: api.TraceHandler(handler.Handler, url, s.tracer), + } + } + // Apply middleware to grab/release chain's lock before/after calling API method - h, err := lockMiddleware(handler.Handler, handler.LockOptions, lock) + h, err := lockMiddleware( + handler.Handler, + handler.LockOptions, + s.tracingEnabled, + s.tracer, + lock, + ) if err != nil { return err } + h = s.metrics.wrapHandler(base, h) return s.router.AddRouter(url, endpoint, h) } // Wraps a handler by 
grabbing and releasing a lock before calling the handler. -func lockMiddleware(handler http.Handler, lockOption common.LockOption, lock *sync.RWMutex) (http.Handler, error) { +func lockMiddleware( + handler http.Handler, + lockOption common.LockOption, + tracingEnabled bool, + tracer trace.Tracer, + lock *sync.RWMutex, +) (http.Handler, error) { + var ( + name string + lockedHandler http.Handler + ) switch lockOption { case common.WriteLock: - return middlewareHandler{ + name = "writeLock" + lockedHandler = middlewareHandler{ before: lock.Lock, after: lock.Unlock, handler: handler, - }, nil + } case common.ReadLock: - return middlewareHandler{ + name = "readLock" + lockedHandler = middlewareHandler{ before: lock.RLock, after: lock.RUnlock, handler: handler, - }, nil + } case common.NoLock: return handler, nil default: return nil, errUnknownLockOption } + + if !tracingEnabled { + return lockedHandler, nil + } + + return api.TraceHandler(lockedHandler, name, tracer), nil } // Reject middleware wraps a handler. If the chain that the context describes is // not done state-syncing/bootstrapping, writes back an error. func rejectMiddleware(handler http.Handler, ctx *snow.ConsensusContext) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // If chain isn't done bootstrapping, ignore API calls - if ctx.GetState() != snow.NormalOp { + if ctx.State.Get().State != snow.NormalOp { w.WriteHeader(http.StatusServiceUnavailable) // Doesn't matter if there's an error while writing. They'll get the StatusServiceUnavailable code. 
_, _ = w.Write([]byte("API call rejected because chain is not done bootstrapping")) @@ -363,10 +406,6 @@ func (s *server) AddAliasesWithReadLock(endpoint string, aliases ...string) erro } func (s *server) Shutdown() error { - if s.srv == nil { - return nil - } - ctx, cancel := context.WithTimeout(context.Background(), s.shutdownTimeout) err := s.srv.Shutdown(ctx) cancel() diff --git a/avalanchego/api/server/wrapper.go b/avalanchego/api/server/wrapper.go index 47f092d1..2a2a2763 100644 --- a/avalanchego/api/server/wrapper.go +++ b/avalanchego/api/server/wrapper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package server diff --git a/avalanchego/api/traced_handler.go b/avalanchego/api/traced_handler.go new file mode 100644 index 00000000..149be820 --- /dev/null +++ b/avalanchego/api/traced_handler.go @@ -0,0 +1,47 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package api + +import ( + "fmt" + "net/http" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/trace" +) + +var _ http.Handler = (*tracedHandler)(nil) + +type tracedHandler struct { + h http.Handler + serveHTTPTag string + tracer trace.Tracer +} + +func TraceHandler(h http.Handler, name string, tracer trace.Tracer) http.Handler { + return &tracedHandler{ + h: h, + serveHTTPTag: fmt.Sprintf("%s.ServeHTTP", name), + tracer: tracer, + } +} + +func (h *tracedHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + ctx, span := h.tracer.Start(ctx, h.serveHTTPTag, oteltrace.WithAttributes( + attribute.String("method", r.Method), + attribute.String("url", r.URL.Redacted()), + attribute.String("proto", r.Proto), + attribute.String("host", r.Host), + attribute.String("remoteAddr", r.RemoteAddr), + attribute.String("requestURI", r.RequestURI), + )) + defer span.End() + + r = r.WithContext(ctx) + h.h.ServeHTTP(w, r) +} diff --git a/avalanchego/app/app.go b/avalanchego/app/app.go index 90e28bac..d091e60d 100644 --- a/avalanchego/app/app.go +++ b/avalanchego/app/app.go @@ -1,14 +1,42 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package app import ( + "fmt" "os" "os/signal" + "sync" "syscall" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" + + "github.com/ava-labs/avalanchego/nat" + "github.com/ava-labs/avalanchego/node" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/ulimit" +) + +const ( + Header = ` _____ .__ .__ + / _ \___ _______ | | _____ ____ ____ | |__ ____ ,_ o + / /_\ \ \/ /\__ \ | | \__ \ / \_/ ___\| | \_/ __ \ / //\, + / | \ / / __ \| |__/ __ \| | \ \___| Y \ ___/ \>> | + \____|__ /\_/ (____ /____(____ /___| /\___ >___| /\___ > \\ + \/ \/ \/ \/ \/ \/ \/` +) + +var ( + stakingPortName = fmt.Sprintf("%s-staking", constants.AppName) + httpPortName = fmt.Sprintf("%s-http", constants.AppName) + + _ App = (*app)(nil) ) type App interface { @@ -26,6 +54,13 @@ type App interface { ExitCode() (int, error) } +func New(config node.Config) App { + return &app{ + config: config, + node: &node.Node{}, + } +} + func Run(app App) int { // start running the application if err := app.Start(); err != nil { @@ -61,3 +96,177 @@ func Run(app App) int { // return the exit code that the application reported return exitCode } + +// app is a wrapper around a node that runs in this process +type app struct { + config node.Config + node *node.Node + exitWG sync.WaitGroup +} + +// Start the business logic of the node (as opposed to config reading, etc). +// Does not block until the node is done. Errors returned from this method +// are not logged. +func (a *app) Start() error { + // Set the data directory permissions to be read write. 
+ if err := perms.ChmodR(a.config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) + } + if err := perms.ChmodR(a.config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { + return fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) + } + + // we want to create the logger after the plugin has started the app + logFactory := logging.NewFactory(a.config.LoggingConfig) + log, err := logFactory.Make("main") + if err != nil { + logFactory.Close() + return err + } + + // update fd limit + fdLimit := a.config.FdLimit + if err := ulimit.Set(fdLimit, log); err != nil { + log.Fatal("failed to set fd-limit", + zap.Error(err), + ) + logFactory.Close() + return err + } + + // Track if sybil control is enforced + if !a.config.EnableStaking { + log.Warn("sybil control is not enforced", + zap.String("reason", "staking is disabled"), + ) + } + + // TODO move this to config + // SupportsNAT() for NoRouter is false. + // Which means we tried to perform a NAT activity but we were not successful. + if a.config.AttemptedNATTraversal && !a.config.Nat.SupportsNAT() { + log.Warn("UPnP and NAT-PMP router attach failed, you may not be listening publicly. " + + "Please confirm the settings in your router") + } + + if ip := a.config.IPPort.IPPort().IP; ip.IsLoopback() || ip.IsPrivate() { + log.Warn("P2P IP is private, you will not be publicly discoverable", + zap.Stringer("ip", ip), + ) + } + + // An empty host is treated as a wildcard to match all addresses, so it is + // considered public. 
+ hostIsPublic := a.config.HTTPHost == "" + if !hostIsPublic { + ip, err := ips.Lookup(a.config.HTTPHost) + if err != nil { + log.Fatal("failed to lookup HTTP host", + zap.String("host", a.config.HTTPHost), + zap.Error(err), + ) + logFactory.Close() + return err + } + hostIsPublic = !ip.IsLoopback() && !ip.IsPrivate() + + log.Debug("finished HTTP host lookup", + zap.String("host", a.config.HTTPHost), + zap.Stringer("ip", ip), + zap.Bool("isPublic", hostIsPublic), + ) + } + + mapper := nat.NewPortMapper(log, a.config.Nat) + + // Open staking port we want for NAT traversal to have the external port + // (config.IP.Port) to connect to our internal listening port + // (config.InternalStakingPort) which should be the same in most cases. + if port := a.config.IPPort.IPPort().Port; port != 0 { + mapper.Map( + port, + port, + stakingPortName, + a.config.IPPort, + a.config.IPResolutionFreq, + ) + } + + // Don't open the HTTP port if the HTTP server is private + if hostIsPublic { + log.Warn("HTTP server is binding to a potentially public host. "+ + "You may be vulnerable to a DoS attack if your HTTP port is publicly accessible", + zap.String("host", a.config.HTTPHost), + ) + + // For NAT traversal we want to route from the external port + // (config.ExternalHTTPPort) to our internal port (config.HTTPPort). + if a.config.HTTPPort != 0 { + mapper.Map( + a.config.HTTPPort, + a.config.HTTPPort, + httpPortName, + nil, + a.config.IPResolutionFreq, + ) + } + } + + // Regularly update our public IP. + // Note that if the node config said to not dynamically resolve and + // update our public IP, [p.config.IPUdater] is a no-op implementation. 
+ go a.config.IPUpdater.Dispatch(log) + + if err := a.node.Initialize(&a.config, log, logFactory); err != nil { + log.Fatal("error initializing node", + zap.Error(err), + ) + mapper.UnmapAllPorts() + a.config.IPUpdater.Stop() + log.Stop() + logFactory.Close() + return err + } + + // [p.ExitCode] will block until [p.exitWG.Done] is called + a.exitWG.Add(1) + go func() { + defer func() { + if r := recover(); r != nil { + fmt.Println("caught panic", r) + } + log.Stop() + logFactory.Close() + a.exitWG.Done() + }() + defer func() { + mapper.UnmapAllPorts() + a.config.IPUpdater.Stop() + + // If [p.node.Dispatch()] panics, then we should log the panic and + // then re-raise the panic. This is why the above defer is broken + // into two parts. + log.StopOnPanic() + }() + + err := a.node.Dispatch() + log.Debug("dispatch returned", + zap.Error(err), + ) + }() + return nil +} + +// Stop attempts to shutdown the currently running node. This function will +// return immediately. +func (a *app) Stop() error { + a.node.Shutdown(0) + return nil +} + +// ExitCode returns the exit code that the node is reporting. This function +// blocks until the node has been shut down. +func (a *app) ExitCode() (int, error) { + a.exitWG.Wait() + return a.node.ExitCode(), nil +} diff --git a/avalanchego/app/plugin/plugin.go b/avalanchego/app/plugin/plugin.go deleted file mode 100644 index 3706169b..00000000 --- a/avalanchego/app/plugin/plugin.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package plugin - -import ( - "context" - - "google.golang.org/grpc" - - "github.com/hashicorp/go-plugin" - - "github.com/ava-labs/avalanchego/app" - - pluginpb "github.com/ava-labs/avalanchego/proto/pb/plugin" -) - -const Name = "nodeProcess" - -var ( - Handshake = plugin.HandshakeConfig{ - ProtocolVersion: 2, - MagicCookieKey: "NODE_PROCESS_PLUGIN", - MagicCookieValue: "dynamic", - } - - // PluginMap is the map of plugins we can dispense. - PluginMap = map[string]plugin.Plugin{ - Name: &appPlugin{}, - } - - _ plugin.Plugin = &appPlugin{} - _ plugin.GRPCPlugin = &appPlugin{} -) - -type appPlugin struct { - plugin.NetRPCUnsupportedPlugin - app app.App -} - -// New will be called by the server side of the plugin to pass into the server -// side PluginMap for dispatching. -func New(app app.App) plugin.Plugin { - return &appPlugin{app: app} -} - -// GRPCServer registers a new GRPC server. -func (p *appPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { - pluginpb.RegisterNodeServer(s, NewServer(p.app)) - return nil -} - -// GRPCClient returns a new GRPC client -func (p *appPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return NewClient(pluginpb.NewNodeClient(c)), nil -} diff --git a/avalanchego/app/plugin/plugin_client.go b/avalanchego/app/plugin/plugin_client.go deleted file mode 100644 index f1d5e232..00000000 --- a/avalanchego/app/plugin/plugin_client.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package plugin - -import ( - "context" - - "google.golang.org/protobuf/types/known/emptypb" - - pluginpb "github.com/ava-labs/avalanchego/proto/pb/plugin" -) - -type Client struct { - client pluginpb.NodeClient -} - -// NewServer returns an app instance connected to a remote app instance -func NewClient(node pluginpb.NodeClient) *Client { - return &Client{ - client: node, - } -} - -func (c *Client) Start() error { - _, err := c.client.Start(context.Background(), &emptypb.Empty{}) - return err -} - -func (c *Client) Stop() error { - _, err := c.client.Stop(context.Background(), &emptypb.Empty{}) - return err -} - -func (c *Client) ExitCode() (int, error) { - resp, err := c.client.ExitCode(context.Background(), &emptypb.Empty{}) - if err != nil { - return 0, err - } - return int(resp.ExitCode), nil -} diff --git a/avalanchego/app/plugin/plugin_server.go b/avalanchego/app/plugin/plugin_server.go deleted file mode 100644 index 64d15f3c..00000000 --- a/avalanchego/app/plugin/plugin_server.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package plugin - -import ( - "context" - - "google.golang.org/protobuf/types/known/emptypb" - - "github.com/ava-labs/avalanchego/app" - - pluginpb "github.com/ava-labs/avalanchego/proto/pb/plugin" -) - -// Server wraps a node so it can be served with the hashicorp plugin harness -type Server struct { - pluginpb.UnsafeNodeServer - app app.App -} - -func NewServer(app app.App) *Server { - return &Server{ - app: app, - } -} - -func (s *Server) Start(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { - return &emptypb.Empty{}, s.app.Start() -} - -func (s *Server) Stop(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { - return &emptypb.Empty{}, s.app.Stop() -} - -func (s *Server) ExitCode(context.Context, *emptypb.Empty) (*pluginpb.ExitCodeResponse, error) { - exitCode, err := s.app.ExitCode() - return &pluginpb.ExitCodeResponse{ - ExitCode: int32(exitCode), - }, err -} diff --git a/avalanchego/app/plugin/process.go b/avalanchego/app/plugin/process.go deleted file mode 100644 index e49cd007..00000000 --- a/avalanchego/app/plugin/process.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package plugin - -import ( - "fmt" - "os" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - - "github.com/ava-labs/avalanchego/app" - "github.com/ava-labs/avalanchego/utils/subprocess" -) - -func Exec(path string, args []string, forwardIO bool) (app.App, *plugin.Client, error) { - clientConfig := &plugin.ClientConfig{ - HandshakeConfig: Handshake, - Plugins: PluginMap, - Cmd: subprocess.New(path, args...), - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - Logger: hclog.New(&hclog.LoggerOptions{Level: hclog.Error}), - } - if forwardIO { - clientConfig.SyncStdout = os.Stdout - clientConfig.SyncStderr = os.Stderr - } - - client := plugin.NewClient(clientConfig) - rpcClient, err := client.Client() - if err != nil { - client.Kill() - return nil, nil, fmt.Errorf("couldn't get client at path %s: %w", path, err) - } - - raw, err := rpcClient.Dispense(Name) - if err != nil { - client.Kill() - return nil, nil, fmt.Errorf("couldn't dispense plugin at path %s': %w", path, err) - } - - app, ok := raw.(app.App) - if !ok { - client.Kill() - return nil, nil, fmt.Errorf("expected app.App but got %T", raw) - } - return app, client, nil -} diff --git a/avalanchego/app/process/process.go b/avalanchego/app/process/process.go deleted file mode 100644 index ee78d20f..00000000 --- a/avalanchego/app/process/process.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package process - -import ( - "fmt" - "sync" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/app" - "github.com/ava-labs/avalanchego/nat" - "github.com/ava-labs/avalanchego/node" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/perms" - "github.com/ava-labs/avalanchego/utils/ulimit" -) - -const ( - Header = ` _____ .__ .__ - / _ \___ _______ | | _____ ____ ____ | |__ ____ ,_ o - / /_\ \ \/ /\__ \ | | \__ \ / \_/ ___\| | \_/ __ \ / //\, - / | \ / / __ \| |__/ __ \| | \ \___| Y \ ___/ \>> | - \____|__ /\_/ (____ /____(____ /___| /\___ >___| /\___ > \\ - \/ \/ \/ \/ \/ \/ \/` -) - -var ( - stakingPortName = fmt.Sprintf("%s-staking", constants.AppName) - httpPortName = fmt.Sprintf("%s-http", constants.AppName) - - _ app.App = &process{} -) - -// process is a wrapper around a node that runs in this process -type process struct { - config node.Config - node *node.Node - exitWG sync.WaitGroup -} - -func NewApp(config node.Config) app.App { - return &process{ - config: config, - node: &node.Node{}, - } -} - -// Start the business logic of the node (as opposed to config reading, etc). -// Does not block until the node is done. Errors returned from this method -// are not logged. -func (p *process) Start() error { - // Set the data directory permissions to be read write. 
- if err := perms.ChmodR(p.config.DatabaseConfig.Path, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the database directory with: %w", err) - } - if err := perms.ChmodR(p.config.LoggingConfig.Directory, true, perms.ReadWriteExecute); err != nil { - return fmt.Errorf("failed to restrict the permissions of the log directory with: %w", err) - } - - // we want to create the logger after the plugin has started the app - logFactory := logging.NewFactory(p.config.LoggingConfig) - log, err := logFactory.Make("main") - if err != nil { - logFactory.Close() - return err - } - - // update fd limit - fdLimit := p.config.FdLimit - if err := ulimit.Set(fdLimit, log); err != nil { - log.Fatal("failed to set fd-limit", - zap.Error(err), - ) - logFactory.Close() - return err - } - - // Track if sybil control is enforced - if !p.config.EnableStaking { - log.Warn("sybil control is not enforced", - zap.String("reason", "staking is disabled"), - ) - } - - // TODO move this to config - // SupportsNAT() for NoRouter is false. - // Which means we tried to perform a NAT activity but we were not successful. - if p.config.AttemptedNATTraversal && !p.config.Nat.SupportsNAT() { - log.Warn("UPnP or NAT-PMP router attach failed, you may not be listening publicly. " + - "Please confirm the settings in your router") - } - - mapper := nat.NewPortMapper(log, p.config.Nat) - - // Open staking port we want for NAT traversal to have the external port - // (config.IP.Port) to connect to our internal listening port - // (config.InternalStakingPort) which should be the same in most cases. 
- if p.config.IPPort.IPPort().Port != 0 { - mapper.Map( - "TCP", - p.config.IPPort.IPPort().Port, - p.config.IPPort.IPPort().Port, - stakingPortName, - p.config.IPPort, - p.config.IPResolutionFreq, - ) - } - - // Open the HTTP port iff the HTTP server is not listening on localhost - if p.config.HTTPHost != "127.0.0.1" && p.config.HTTPHost != "localhost" && p.config.HTTPPort != 0 { - // For NAT traversal we want to route from the external port - // (config.ExternalHTTPPort) to our internal port (config.HTTPPort) - mapper.Map( - "TCP", - p.config.HTTPPort, - p.config.HTTPPort, - httpPortName, - nil, - p.config.IPResolutionFreq, - ) - } - - // Regularly update our public IP. - // Note that if the node config said to not dynamically resolve and - // update our public IP, [p.config.IPUdater] is a no-op implementation. - go p.config.IPUpdater.Dispatch(log) - - if err := p.node.Initialize(&p.config, log, logFactory); err != nil { - log.Fatal("error initializing node", - zap.Error(err), - ) - mapper.UnmapAllPorts() - p.config.IPUpdater.Stop() - log.Stop() - logFactory.Close() - return err - } - - // [p.ExitCode] will block until [p.exitWG.Done] is called - p.exitWG.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - fmt.Println("caught panic", r) - } - log.Stop() - logFactory.Close() - p.exitWG.Done() - }() - defer func() { - mapper.UnmapAllPorts() - p.config.IPUpdater.Stop() - - // If [p.node.Dispatch()] panics, then we should log the panic and - // then re-raise the panic. This is why the above defer is broken - // into two parts. - log.StopOnPanic() - }() - - err := p.node.Dispatch() - log.Debug("dispatch returned", - zap.Error(err), - ) - }() - return nil -} - -// Stop attempts to shutdown the currently running node. This function will -// return immediately. -func (p *process) Stop() error { - p.node.Shutdown(0) - return nil -} - -// ExitCode returns the exit code that the node is reporting. 
This function -// blocks until the node has been shut down. -func (p *process) ExitCode() (int, error) { - p.exitWG.Wait() - return p.node.ExitCode(), nil -} diff --git a/avalanchego/app/runner/config.go b/avalanchego/app/runner/config.go deleted file mode 100644 index 14197ff2..00000000 --- a/avalanchego/app/runner/config.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package runner - -type Config struct { - // If true, displays version and exits during startup - DisplayVersionAndExit bool - - // Path to the build directory - BuildDir string - - // If true, run as a plugin - PluginMode bool -} diff --git a/avalanchego/app/runner/runner.go b/avalanchego/app/runner/runner.go deleted file mode 100644 index b2589eb1..00000000 --- a/avalanchego/app/runner/runner.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package runner - -import ( - "fmt" - "os" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - - "golang.org/x/term" - - "github.com/ava-labs/avalanchego/app" - "github.com/ava-labs/avalanchego/app/process" - "github.com/ava-labs/avalanchego/node" - "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" - - appplugin "github.com/ava-labs/avalanchego/app/plugin" -) - -// Run an AvalancheGo node. -// If specified in the config, serves a hashicorp plugin that can be consumed by -// the daemon (see avalanchego/main). 
-func Run(config Config, nodeConfig node.Config) { - nodeApp := process.NewApp(nodeConfig) // Create node wrapper - if config.PluginMode { // Serve as a plugin - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: appplugin.Handshake, - Plugins: map[string]plugin.Plugin{ - appplugin.Name: appplugin.New(nodeApp), - }, - GRPCServer: grpcutils.NewDefaultServer, // A non-nil value here enables gRPC serving for this plugin - Logger: hclog.New(&hclog.LoggerOptions{ - Level: hclog.Error, - }), - }) - return - } - - if term.IsTerminal(int(os.Stdout.Fd())) { - fmt.Println(process.Header) - } - - exitCode := app.Run(nodeApp) - os.Exit(exitCode) -} diff --git a/avalanchego/buf.lock b/avalanchego/buf.lock deleted file mode 100644 index c8628dea..00000000 --- a/avalanchego/buf.lock +++ /dev/null @@ -1,10 +0,0 @@ -# Generated by buf. DO NOT EDIT. -version: v1 -deps: - - remote: buf.build - owner: prometheus - repository: client-model - branch: main - commit: 1d56a02d481a412a83b3c4984eb90c2e - digest: b1-qpEBhqZ9HZsskMFK3hfVMAA2b-XZmKb2WkhyhFej7Gs= - create_time: 2022-01-12T14:51:04.903729Z diff --git a/avalanchego/cache/cache.go b/avalanchego/cache/cache.go index f69dca5e..ea6c4168 100644 --- a/avalanchego/cache/cache.go +++ b/avalanchego/cache/cache.go @@ -1,39 +1,36 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache -// Cacher acts as a best effort key value store. Keys must be comparable, as -// defined by https://golang.org/ref/spec#Comparison_operators. -type Cacher interface { - // Put inserts an element into the cache. If spaced is required, elements will +// Cacher acts as a best effort key value store. +type Cacher[K comparable, V any] interface { + // Put inserts an element into the cache. If space is required, elements will // be evicted. 
- Put(key, value interface{}) + Put(key K, value V) // Get returns the entry in the cache with the key specified, if no value // exists, false is returned. - Get(key interface{}) (interface{}, bool) + Get(key K) (V, bool) // Evict removes the specified entry from the cache - Evict(key interface{}) + Evict(key K) // Flush removes all entries from the cache Flush() } // Evictable allows the object to be notified when it is evicted -type Evictable interface { - // Key must return a comparable value as defined by - // https://golang.org/ref/spec#Comparison_operators. - Key() interface{} +type Evictable[K comparable] interface { + Key() K Evict() } // Deduplicator acts as a best effort deduplication service -type Deduplicator interface { +type Deduplicator[K comparable, V Evictable[K]] interface { // Deduplicate returns either the provided value, or a previously provided // value with the same ID that hasn't yet been evicted - Deduplicate(Evictable) Evictable + Deduplicate(V) V // Flush removes all entries from the cache Flush() diff --git a/avalanchego/cache/lru_cache.go b/avalanchego/cache/lru_cache.go index 460da6d2..07c0d9b7 100644 --- a/avalanchego/cache/lru_cache.go +++ b/avalanchego/cache/lru_cache.go @@ -1,136 +1,99 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache import ( - "container/list" "sync" -) - -const minCacheSize = 32 -var _ Cacher = &LRU{} + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" +) -type entry struct { - Key interface{} - Value interface{} -} +var _ Cacher[struct{}, struct{}] = (*LRU[struct{}, struct{}])(nil) // LRU is a key value store with bounded size. If the size is attempted to be // exceeded, then an element is removed from the cache before the insertion is // done, based on evicting the least recently used value. 
-type LRU struct { - lock sync.Mutex - entryMap map[interface{}]*list.Element - entryList *list.List - Size int +type LRU[K comparable, V any] struct { + lock sync.Mutex + elements linkedhashmap.LinkedHashmap[K, V] + // If set to < 0, will be set internally to 1. + Size int } -func (c *LRU) Put(key, value interface{}) { +func (c *LRU[K, V]) Put(key K, value V) { c.lock.Lock() defer c.lock.Unlock() c.put(key, value) } -func (c *LRU) Get(key interface{}) (interface{}, bool) { +func (c *LRU[K, V]) Get(key K) (V, bool) { c.lock.Lock() defer c.lock.Unlock() return c.get(key) } -func (c *LRU) Evict(key interface{}) { +func (c *LRU[K, _]) Evict(key K) { c.lock.Lock() defer c.lock.Unlock() c.evict(key) } -func (c *LRU) Flush() { +func (c *LRU[_, _]) Flush() { c.lock.Lock() defer c.lock.Unlock() c.flush() } -func (c *LRU) init() { - if c.entryMap == nil { - c.entryMap = make(map[interface{}]*list.Element, minCacheSize) - } - if c.entryList == nil { - c.entryList = list.New() - } - if c.Size <= 0 { - c.Size = 1 - } -} - -func (c *LRU) resize() { - for c.entryList.Len() > c.Size { - e := c.entryList.Front() - c.entryList.Remove(e) - - val := e.Value.(*entry) - delete(c.entryMap, val.Key) - } -} - -func (c *LRU) put(key, value interface{}) { - c.init() +func (c *LRU[K, V]) put(key K, value V) { c.resize() - if e, ok := c.entryMap[key]; !ok { - if c.entryList.Len() >= c.Size { - e = c.entryList.Front() - c.entryList.MoveToBack(e) - - val := e.Value.(*entry) - delete(c.entryMap, val.Key) - val.Key = key - val.Value = value - } else { - e = c.entryList.PushBack(&entry{ - Key: key, - Value: value, - }) - } - c.entryMap[key] = e - } else { - c.entryList.MoveToBack(e) - - val := e.Value.(*entry) - val.Value = value + if c.elements.Len() == c.Size { + oldestKey, _, _ := c.elements.Oldest() + c.elements.Delete(oldestKey) } + c.elements.Put(key, value) } -func (c *LRU) get(key interface{}) (interface{}, bool) { - c.init() +func (c *LRU[K, V]) get(key K) (V, bool) { c.resize() - if e, 
ok := c.entryMap[key]; ok { - c.entryList.MoveToBack(e) - - val := e.Value.(*entry) - return val.Value, true + val, ok := c.elements.Get(key) + if !ok { + return utils.Zero[V](), false } - return struct{}{}, false + c.elements.Put(key, val) // Mark [k] as MRU. + return val, true } -func (c *LRU) evict(key interface{}) { - c.init() +func (c *LRU[K, _]) evict(key K) { c.resize() - if e, ok := c.entryMap[key]; ok { - c.entryList.Remove(e) - delete(c.entryMap, key) - } + c.elements.Delete(key) } -func (c *LRU) flush() { - c.init() +func (c *LRU[K, V]) flush() { + c.elements = linkedhashmap.New[K, V]() +} - c.entryMap = make(map[interface{}]*list.Element, minCacheSize) - c.entryList = list.New() +// Initializes [c.elements] if it's nil. +// Sets [c.size] to 1 if it's <= 0. +// Removes oldest elements to make number of elements +// in the cache == [c.size] if necessary. +func (c *LRU[K, V]) resize() { + if c.elements == nil { + c.elements = linkedhashmap.New[K, V]() + } + if c.Size <= 0 { + c.Size = 1 + } + for c.elements.Len() > c.Size { + oldestKey, _, _ := c.elements.Oldest() + c.elements.Delete(oldestKey) + } } diff --git a/avalanchego/cache/lru_cache_benchmark_test.go b/avalanchego/cache/lru_cache_benchmark_test.go index 20c45e62..73acf90b 100644 --- a/avalanchego/cache/lru_cache_benchmark_test.go +++ b/avalanchego/cache/lru_cache_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package cache @@ -12,7 +12,7 @@ import ( func BenchmarkLRUCachePutSmall(b *testing.B) { smallLen := 5 - cache := &LRU{Size: smallLen} + cache := &LRU[ids.ID, int]{Size: smallLen} for n := 0; n < b.N; n++ { for i := 0; i < smallLen; i++ { var id ids.ID @@ -29,7 +29,7 @@ func BenchmarkLRUCachePutSmall(b *testing.B) { func BenchmarkLRUCachePutMedium(b *testing.B) { mediumLen := 250 - cache := &LRU{Size: mediumLen} + cache := &LRU[ids.ID, int]{Size: mediumLen} for n := 0; n < b.N; n++ { for i := 0; i < mediumLen; i++ { var id ids.ID @@ -46,7 +46,7 @@ func BenchmarkLRUCachePutMedium(b *testing.B) { func BenchmarkLRUCachePutLarge(b *testing.B) { largeLen := 10000 - cache := &LRU{Size: largeLen} + cache := &LRU[ids.ID, int]{Size: largeLen} for n := 0; n < b.N; n++ { for i := 0; i < largeLen; i++ { var id ids.ID diff --git a/avalanchego/cache/lru_cache_test.go b/avalanchego/cache/lru_cache_test.go index 0228c42a..aaf4eb34 100644 --- a/avalanchego/cache/lru_cache_test.go +++ b/avalanchego/cache/lru_cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package cache @@ -10,19 +10,19 @@ import ( ) func TestLRU(t *testing.T) { - cache := &LRU{Size: 1} + cache := &LRU[ids.ID, int]{Size: 1} TestBasic(t, cache) } func TestLRUEviction(t *testing.T) { - cache := &LRU{Size: 2} + cache := &LRU[ids.ID, int]{Size: 2} TestEviction(t, cache) } func TestLRUResize(t *testing.T) { - cache := LRU{Size: 2} + cache := LRU[ids.ID, int]{Size: 2} id1 := ids.ID{1} id2 := ids.ID{2} @@ -41,6 +41,7 @@ func TestLRUResize(t *testing.T) { } cache.Size = 1 + // id1 evicted if _, found := cache.Get(id1); found { t.Fatalf("Retrieve value when none exists") @@ -51,6 +52,7 @@ func TestLRUResize(t *testing.T) { } cache.Size = 0 + // We reset the size to 1 in resize if _, found := cache.Get(id1); found { t.Fatalf("Retrieve value when none exists") diff --git a/avalanchego/cache/metercacher/cache.go b/avalanchego/cache/metercacher/cache.go index c8b809c3..56719312 100644 --- a/avalanchego/cache/metercacher/cache.go +++ b/avalanchego/cache/metercacher/cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metercacher @@ -10,32 +10,32 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" ) -var _ cache.Cacher = &Cache{} +var _ cache.Cacher[struct{}, struct{}] = (*Cache[struct{}, struct{}])(nil) -type Cache struct { +type Cache[K comparable, V any] struct { metrics - cache.Cacher + cache.Cacher[K, V] clock mockable.Clock } -func New( +func New[K comparable, V any]( namespace string, registerer prometheus.Registerer, - cache cache.Cacher, -) (cache.Cacher, error) { - meterCache := &Cache{Cacher: cache} + cache cache.Cacher[K, V], +) (cache.Cacher[K, V], error) { + meterCache := &Cache[K, V]{Cacher: cache} return meterCache, meterCache.metrics.Initialize(namespace, registerer) } -func (c *Cache) Put(key, value interface{}) { +func (c *Cache[K, V]) Put(key K, value V) { start := c.clock.Time() c.Cacher.Put(key, value) end := c.clock.Time() c.put.Observe(float64(end.Sub(start))) } -func (c *Cache) Get(key interface{}) (interface{}, bool) { +func (c *Cache[K, V]) Get(key K) (V, bool) { start := c.clock.Time() value, has := c.Cacher.Get(key) end := c.clock.Time() diff --git a/avalanchego/cache/metercacher/cache_test.go b/avalanchego/cache/metercacher/cache_test.go index 64c547e1..bb40fec0 100644 --- a/avalanchego/cache/metercacher/cache_test.go +++ b/avalanchego/cache/metercacher/cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metercacher @@ -9,12 +9,13 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" ) func TestInterface(t *testing.T) { for _, test := range cache.CacherTests { - cache := &cache.LRU{Size: test.Size} - c, err := New("", prometheus.NewRegistry(), cache) + cache := &cache.LRU[ids.ID, int]{Size: test.Size} + c, err := New[ids.ID, int]("", prometheus.NewRegistry(), cache) if err != nil { t.Fatal(err) } diff --git a/avalanchego/cache/metercacher/metrics.go b/avalanchego/cache/metercacher/metrics.go index 6353615e..c9ae019f 100644 --- a/avalanchego/cache/metercacher/metrics.go +++ b/avalanchego/cache/metercacher/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metercacher diff --git a/avalanchego/cache/test_cacher.go b/avalanchego/cache/test_cacher.go index 32c8e795..0fa81668 100644 --- a/avalanchego/cache/test_cacher.go +++ b/avalanchego/cache/test_cacher.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package cache @@ -12,13 +12,13 @@ import ( // CacherTests is a list of all Cacher tests var CacherTests = []struct { Size int - Func func(t *testing.T, c Cacher) + Func func(t *testing.T, c Cacher[ids.ID, int]) }{ {Size: 1, Func: TestBasic}, {Size: 2, Func: TestEviction}, } -func TestBasic(t *testing.T, cache Cacher) { +func TestBasic(t *testing.T, cache Cacher[ids.ID, int]) { id1 := ids.ID{1} if _, found := cache.Get(id1); found { t.Fatalf("Retrieved value when none exists") @@ -60,7 +60,7 @@ func TestBasic(t *testing.T, cache Cacher) { } } -func TestEviction(t *testing.T, cache Cacher) { +func TestEviction(t *testing.T, cache Cacher[ids.ID, int]) { id1 := ids.ID{1} id2 := ids.ID{2} id3 := ids.ID{3} diff --git a/avalanchego/cache/unique_cache.go b/avalanchego/cache/unique_cache.go index 59a26ffe..24052d79 100644 --- a/avalanchego/cache/unique_cache.go +++ b/avalanchego/cache/unique_cache.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache @@ -8,33 +8,33 @@ import ( "sync" ) -var _ Deduplicator = &EvictableLRU{} +var _ Deduplicator[struct{}, Evictable[struct{}]] = (*EvictableLRU[struct{}, Evictable[struct{}]])(nil) // EvictableLRU is an LRU cache that notifies the objects when they are evicted. 
-type EvictableLRU struct { +type EvictableLRU[K comparable, _ Evictable[K]] struct { lock sync.Mutex - entryMap map[interface{}]*list.Element + entryMap map[K]*list.Element entryList *list.List Size int } -func (c *EvictableLRU) Deduplicate(value Evictable) Evictable { +func (c *EvictableLRU[_, V]) Deduplicate(value V) V { c.lock.Lock() defer c.lock.Unlock() return c.deduplicate(value) } -func (c *EvictableLRU) Flush() { +func (c *EvictableLRU[_, _]) Flush() { c.lock.Lock() defer c.lock.Unlock() c.flush() } -func (c *EvictableLRU) init() { +func (c *EvictableLRU[K, _]) init() { if c.entryMap == nil { - c.entryMap = make(map[interface{}]*list.Element) + c.entryMap = make(map[K]*list.Element) } if c.entryList == nil { c.entryList = list.New() @@ -44,18 +44,18 @@ func (c *EvictableLRU) init() { } } -func (c *EvictableLRU) resize() { +func (c *EvictableLRU[_, V]) resize() { for c.entryList.Len() > c.Size { e := c.entryList.Front() c.entryList.Remove(e) - val := e.Value.(Evictable) + val := e.Value.(V) delete(c.entryMap, val.Key()) val.Evict() } } -func (c *EvictableLRU) deduplicate(value Evictable) Evictable { +func (c *EvictableLRU[_, V]) deduplicate(value V) V { c.init() c.resize() @@ -65,7 +65,7 @@ func (c *EvictableLRU) deduplicate(value Evictable) Evictable { e = c.entryList.Front() c.entryList.MoveToBack(e) - val := e.Value.(Evictable) + val := e.Value.(V) delete(c.entryMap, val.Key()) val.Evict() @@ -77,13 +77,13 @@ func (c *EvictableLRU) deduplicate(value Evictable) Evictable { } else { c.entryList.MoveToBack(e) - val := e.Value.(Evictable) + val := e.Value.(V) value = val } return value } -func (c *EvictableLRU) flush() { +func (c *EvictableLRU[_, _]) flush() { c.init() size := c.Size diff --git a/avalanchego/cache/unique_cache_test.go b/avalanchego/cache/unique_cache_test.go index 790f6c38..0094a47a 100644 --- a/avalanchego/cache/unique_cache_test.go +++ b/avalanchego/cache/unique_cache_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cache @@ -9,30 +9,35 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -type evictable struct { - id ids.ID +type evictable[K comparable] struct { + id K evicted int } -func (e *evictable) Key() interface{} { return e.id } -func (e *evictable) Evict() { e.evicted++ } +func (e *evictable[K]) Key() K { + return e.id +} + +func (e *evictable[_]) Evict() { + e.evicted++ +} func TestEvictableLRU(t *testing.T) { - cache := EvictableLRU{} + cache := EvictableLRU[ids.ID, *evictable[ids.ID]]{} - expectedValue1 := &evictable{id: ids.ID{1}} - if returnedValue := cache.Deduplicate(expectedValue1).(*evictable); returnedValue != expectedValue1 { + expectedValue1 := &evictable[ids.ID]{id: ids.ID{1}} + if returnedValue := cache.Deduplicate(expectedValue1); returnedValue != expectedValue1 { t.Fatalf("Returned unknown value") } else if expectedValue1.evicted != 0 { t.Fatalf("Value was evicted unexpectedly") - } else if returnedValue := cache.Deduplicate(expectedValue1).(*evictable); returnedValue != expectedValue1 { + } else if returnedValue := cache.Deduplicate(expectedValue1); returnedValue != expectedValue1 { t.Fatalf("Returned unknown value") } else if expectedValue1.evicted != 0 { t.Fatalf("Value was evicted unexpectedly") } - expectedValue2 := &evictable{id: ids.ID{2}} - returnedValue := cache.Deduplicate(expectedValue2).(*evictable) + expectedValue2 := &evictable[ids.ID]{id: ids.ID{2}} + returnedValue := cache.Deduplicate(expectedValue2) switch { case returnedValue != expectedValue2: t.Fatalf("Returned unknown value") @@ -44,8 +49,8 @@ func TestEvictableLRU(t *testing.T) { cache.Size = 2 - expectedValue3 := &evictable{id: ids.ID{2}} - returnedValue = cache.Deduplicate(expectedValue3).(*evictable) + expectedValue3 := &evictable[ids.ID]{id: ids.ID{2}} + returnedValue = cache.Deduplicate(expectedValue3) switch { case returnedValue != 
expectedValue2: t.Fatalf("Returned unknown value") diff --git a/avalanchego/chains/atomic/codec.go b/avalanchego/chains/atomic/codec.go index 0138d2f7..bc2e93c2 100644 --- a/avalanchego/chains/atomic/codec.go +++ b/avalanchego/chains/atomic/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/avalanchego/chains/atomic/gsharedmemory/filtered_batch.go b/avalanchego/chains/atomic/gsharedmemory/filtered_batch.go index e04564c4..df63e8df 100644 --- a/avalanchego/chains/atomic/gsharedmemory/filtered_batch.go +++ b/avalanchego/chains/atomic/gsharedmemory/filtered_batch.go @@ -1,20 +1,22 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gsharedmemory import ( + "github.com/ava-labs/avalanchego/utils/set" + sharedmemorypb "github.com/ava-labs/avalanchego/proto/pb/sharedmemory" ) type filteredBatch struct { writes map[string][]byte - deletes map[string]struct{} + deletes set.Set[string] } func (b *filteredBatch) Put(key []byte, value []byte) error { keyStr := string(key) - delete(b.deletes, keyStr) + b.deletes.Remove(keyStr) b.writes[keyStr] = value return nil } @@ -22,7 +24,7 @@ func (b *filteredBatch) Put(key []byte, value []byte) error { func (b *filteredBatch) Delete(key []byte) error { keyStr := string(key) delete(b.writes, keyStr) - b.deletes[keyStr] = struct{}{} + b.deletes.Add(keyStr) return nil } diff --git a/avalanchego/chains/atomic/gsharedmemory/shared_memory_client.go b/avalanchego/chains/atomic/gsharedmemory/shared_memory_client.go index 40e419fa..649503a0 100644 --- a/avalanchego/chains/atomic/gsharedmemory/shared_memory_client.go +++ b/avalanchego/chains/atomic/gsharedmemory/shared_memory_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, 
Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gsharedmemory @@ -6,31 +6,18 @@ package gsharedmemory import ( "context" - stdatomic "sync/atomic" - "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/units" sharedmemorypb "github.com/ava-labs/avalanchego/proto/pb/sharedmemory" ) -const ( - maxBatchSize = 128 * units.KiB - - // baseElementSize is an approximation of the protobuf encoding overhead per - // element - baseElementSize = 8 // bytes -) - -var _ atomic.SharedMemory = &Client{} +var _ atomic.SharedMemory = (*Client)(nil) // Client is atomic.SharedMemory that talks over RPC. type Client struct { client sharedmemorypb.SharedMemoryClient - - uniqueID int64 } // NewClient returns shared memory connected to remote shared memory @@ -39,49 +26,14 @@ func NewClient(client sharedmemorypb.SharedMemoryClient) *Client { } func (c *Client) Get(peerChainID ids.ID, keys [][]byte) ([][]byte, error) { - req := &sharedmemorypb.GetRequest{ + resp, err := c.client.Get(context.Background(), &sharedmemorypb.GetRequest{ PeerChainId: peerChainID[:], - Id: stdatomic.AddInt64(&c.uniqueID, 1), - Continues: true, - } - - currentSize := 0 - prevIndex := 0 - for i, key := range keys { - sizeChange := baseElementSize + len(key) - if newSize := currentSize + sizeChange; newSize > maxBatchSize { - _, err := c.client.Get(context.Background(), req) - if err != nil { - return nil, err - } - - currentSize = 0 - prevIndex = i - req.PeerChainId = nil - } - currentSize += sizeChange - - req.Keys = keys[prevIndex : i+1] - } - - req.Continues = false - resp, err := c.client.Get(context.Background(), req) + Keys: keys, + }) if err != nil { return nil, err } - - values := resp.Values - - req.PeerChainId = nil - req.Keys = nil - for resp.Continues { - resp, err = 
c.client.Get(context.Background(), req) - if err != nil { - return nil, err - } - values = append(values, resp.Values...) - } - return values, nil + return resp.Values, nil } func (c *Client) Indexed( @@ -96,213 +48,55 @@ func (c *Client) Indexed( []byte, error, ) { - req := &sharedmemorypb.IndexedRequest{ + resp, err := c.client.Indexed(context.Background(), &sharedmemorypb.IndexedRequest{ PeerChainId: peerChainID[:], + Traits: traits, StartTrait: startTrait, StartKey: startKey, Limit: int32(limit), - Id: stdatomic.AddInt64(&c.uniqueID, 1), - Continues: true, - } - - currentSize := 0 - prevIndex := 0 - for i, trait := range traits { - sizeChange := baseElementSize + len(trait) - if newSize := currentSize + sizeChange; newSize > maxBatchSize { - _, err := c.client.Indexed(context.Background(), req) - if err != nil { - return nil, nil, nil, err - } - - currentSize = 0 - prevIndex = i - req.PeerChainId = nil - req.StartTrait = nil - req.StartKey = nil - req.Limit = 0 - } - currentSize += sizeChange - - req.Traits = traits[prevIndex : i+1] - } - - req.Continues = false - resp, err := c.client.Indexed(context.Background(), req) + }) if err != nil { return nil, nil, nil, err } - lastTrait := resp.LastTrait - lastKey := resp.LastKey - values := resp.Values - - req.PeerChainId = nil - req.Traits = nil - req.StartTrait = nil - req.StartKey = nil - req.Limit = 0 - for resp.Continues { - resp, err = c.client.Indexed(context.Background(), req) - if err != nil { - return nil, nil, nil, err - } - values = append(values, resp.Values...) 
- } - return values, lastTrait, lastKey, nil + return resp.Values, resp.LastTrait, resp.LastKey, nil } -func (c *Client) Apply(requests map[ids.ID]*atomic.Requests, batch ...database.Batch) error { +func (c *Client) Apply(requests map[ids.ID]*atomic.Requests, batches ...database.Batch) error { req := &sharedmemorypb.ApplyRequest{ - Continues: true, - Id: stdatomic.AddInt64(&c.uniqueID, 1), + Requests: make([]*sharedmemorypb.AtomicRequest, 0, len(requests)), + Batches: make([]*sharedmemorypb.Batch, len(batches)), } - - currentSize := 0 for key, value := range requests { key := key chainReq := &sharedmemorypb.AtomicRequest{ - PeerChainId: key[:], + RemoveRequests: value.RemoveRequests, + PutRequests: make([]*sharedmemorypb.Element, len(value.PutRequests)), + PeerChainId: key[:], } - req.Requests = append(req.Requests, chainReq) - - for _, v := range value.PutRequests { - sizeChange := baseElementSize + len(v.Key) + len(v.Value) - for _, trait := range v.Traits { - sizeChange += len(trait) - } - - if newSize := sizeChange + currentSize; newSize > maxBatchSize { - currentSize = 0 - - if _, err := c.client.Apply(context.Background(), req); err != nil { - return err - } - - chainReq = &sharedmemorypb.AtomicRequest{ - PeerChainId: key[:], - } - req.Requests = []*sharedmemorypb.AtomicRequest{chainReq} - } - - currentSize += sizeChange - chainReq.PutRequests = append(chainReq.PutRequests, &sharedmemorypb.Element{ + for i, v := range value.PutRequests { + chainReq.PutRequests[i] = &sharedmemorypb.Element{ Key: v.Key, Value: v.Value, Traits: v.Traits, - }) - } - - for _, v := range value.RemoveRequests { - sizeChange := baseElementSize + len(v) - if newSize := sizeChange + currentSize; newSize > maxBatchSize { - currentSize = 0 - - if _, err := c.client.Apply(context.Background(), req); err != nil { - return err - } - - chainReq = &sharedmemorypb.AtomicRequest{ - PeerChainId: key[:], - } - req.Requests = []*sharedmemorypb.AtomicRequest{chainReq} } - - currentSize += 
sizeChange - chainReq.RemoveRequests = append(chainReq.RemoveRequests, v) - } - } - - batchGroups, err := c.makeBatches(batch, currentSize) - if err != nil { - return err - } - - for i, batches := range batchGroups { - req.Batches = batches - req.Continues = i < len(batchGroups)-1 - if _, err := c.client.Apply(context.Background(), req); err != nil { - return err - } - req.Requests = nil - } - - if len(batchGroups) == 0 { - req.Continues = false - if _, err := c.client.Apply(context.Background(), req); err != nil { - return err } + req.Requests = append(req.Requests, chainReq) } - return nil -} - -func (c *Client) makeBatches(rawBatches []database.Batch, currentSize int) ([][]*sharedmemorypb.Batch, error) { - batchGroups := [][]*sharedmemorypb.Batch(nil) - currentBatchGroup := []*sharedmemorypb.Batch(nil) - currentBatch := &sharedmemorypb.Batch{ - Id: stdatomic.AddInt64(&c.uniqueID, 1), - } - for _, batch := range rawBatches { + for i, batch := range batches { batch := batch.Inner() fb := filteredBatch{ - writes: make(map[string][]byte), - deletes: make(map[string]struct{}), + writes: make(map[string][]byte), } if err := batch.Replay(&fb); err != nil { - return nil, err - } - - puts := fb.PutRequests() - for _, p := range puts { - sizeChange := baseElementSize + len(p.Key) + len(p.Value) - if newSize := currentSize + sizeChange; newSize > maxBatchSize { - if len(currentBatch.Deletes)+len(currentBatch.Puts) > 0 { - currentBatchGroup = append(currentBatchGroup, currentBatch) - } - if len(currentBatchGroup) > 0 { - batchGroups = append(batchGroups, currentBatchGroup) - } - - currentSize = 0 - currentBatchGroup = nil - currentBatch = &sharedmemorypb.Batch{ - Id: currentBatch.Id, - } - } - currentSize += sizeChange - currentBatch.Puts = append(currentBatch.Puts, p) - } - - deletes := fb.DeleteRequests() - for _, d := range deletes { - sizeChange := baseElementSize + len(d.Key) - if newSize := currentSize + sizeChange; newSize > maxBatchSize { - if 
len(currentBatch.Deletes)+len(currentBatch.Puts) > 0 { - currentBatchGroup = append(currentBatchGroup, currentBatch) - } - if len(currentBatchGroup) > 0 { - batchGroups = append(batchGroups, currentBatchGroup) - } - - currentSize = 0 - currentBatchGroup = nil - currentBatch = &sharedmemorypb.Batch{ - Id: currentBatch.Id, - } - } - currentSize += sizeChange - currentBatch.Deletes = append(currentBatch.Deletes, d) - } - - if len(currentBatch.Deletes)+len(currentBatch.Puts) > 0 { - currentBatchGroup = append(currentBatchGroup, currentBatch) + return err } - currentBatch = &sharedmemorypb.Batch{ - Id: stdatomic.AddInt64(&c.uniqueID, 1), + req.Batches[i] = &sharedmemorypb.Batch{ + Puts: fb.PutRequests(), + Deletes: fb.DeleteRequests(), } } - if len(currentBatchGroup) > 0 { - batchGroups = append(batchGroups, currentBatchGroup) - } - return batchGroups, nil + + _, err := c.client.Apply(context.Background(), req) + return err } diff --git a/avalanchego/chains/atomic/gsharedmemory/shared_memory_server.go b/avalanchego/chains/atomic/gsharedmemory/shared_memory_server.go index 0725e7e6..3e2d0d38 100644 --- a/avalanchego/chains/atomic/gsharedmemory/shared_memory_server.go +++ b/avalanchego/chains/atomic/gsharedmemory/shared_memory_server.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gsharedmemory import ( "context" - "sync" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" @@ -14,293 +13,100 @@ import ( sharedmemorypb "github.com/ava-labs/avalanchego/proto/pb/sharedmemory" ) -var _ sharedmemorypb.SharedMemoryServer = &Server{} +var _ sharedmemorypb.SharedMemoryServer = (*Server)(nil) // Server is shared memory that is managed over RPC. 
type Server struct { sharedmemorypb.UnsafeSharedMemoryServer sm atomic.SharedMemory db database.Database - - getsLock sync.Mutex - gets map[int64]*getRequest - - indexedLock sync.Mutex - indexed map[int64]*indexedRequest - - applyLock sync.Mutex - apply map[int64]*applyRequest } // NewServer returns shared memory connected to remote shared memory func NewServer(sm atomic.SharedMemory, db database.Database) *Server { return &Server{ - sm: sm, - db: db, - gets: make(map[int64]*getRequest), - indexed: make(map[int64]*indexedRequest), - apply: make(map[int64]*applyRequest), + sm: sm, + db: db, } } -type getRequest struct { - peerChainID ids.ID - keys [][]byte - - executed bool - remainingValues [][]byte -} - func (s *Server) Get( _ context.Context, req *sharedmemorypb.GetRequest, ) (*sharedmemorypb.GetResponse, error) { - s.getsLock.Lock() - defer s.getsLock.Unlock() - - get, exists := s.gets[req.Id] - if !exists { - peerChainID, err := ids.ToID(req.PeerChainId) - if err != nil { - return nil, err - } - - get = &getRequest{ - peerChainID: peerChainID, - keys: make([][]byte, 0, len(req.Keys)), - } - } - - get.keys = append(get.keys, req.Keys...) 
- - if req.Continues { - s.gets[req.Id] = get - return &sharedmemorypb.GetResponse{}, nil - } - - if !get.executed { - values, err := s.sm.Get(get.peerChainID, get.keys) - if err != nil { - delete(s.gets, req.Id) - return nil, err - } - - get.executed = true - get.remainingValues = values - } - - currentSize := 0 - resp := &sharedmemorypb.GetResponse{} - for i, value := range get.remainingValues { - sizeChange := baseElementSize + len(value) - if newSize := currentSize + sizeChange; newSize > maxBatchSize && i > 0 { - break - } - currentSize += sizeChange - - resp.Values = append(resp.Values, value) - } - - get.remainingValues = get.remainingValues[len(resp.Values):] - resp.Continues = len(get.remainingValues) > 0 - - if resp.Continues { - s.gets[req.Id] = get - } else { - delete(s.gets, req.Id) + peerChainID, err := ids.ToID(req.PeerChainId) + if err != nil { + return nil, err } - return resp, nil -} -type indexedRequest struct { - peerChainID ids.ID - traits [][]byte - startTrait []byte - startKey []byte - limit int - - executed bool - remainingValues [][]byte - lastTrait []byte - lastKey []byte + values, err := s.sm.Get(peerChainID, req.Keys) + return &sharedmemorypb.GetResponse{ + Values: values, + }, err } func (s *Server) Indexed( _ context.Context, req *sharedmemorypb.IndexedRequest, ) (*sharedmemorypb.IndexedResponse, error) { - s.indexedLock.Lock() - defer s.indexedLock.Unlock() - - indexed, exists := s.indexed[req.Id] - if !exists { - peerChainID, err := ids.ToID(req.PeerChainId) - if err != nil { - return nil, err - } - - indexed = &indexedRequest{ - peerChainID: peerChainID, - traits: make([][]byte, 0, len(req.Traits)), - startTrait: req.StartTrait, - startKey: req.StartKey, - limit: int(req.Limit), - } - } - - indexed.traits = append(indexed.traits, req.Traits...) 
- - if req.Continues { - s.indexed[req.Id] = indexed - return &sharedmemorypb.IndexedResponse{}, nil - } - - if !indexed.executed { - values, lastTrait, lastKey, err := s.sm.Indexed( - indexed.peerChainID, - indexed.traits, - indexed.startTrait, - indexed.startKey, - indexed.limit, - ) - if err != nil { - delete(s.indexed, req.Id) - return nil, err - } - - indexed.executed = true - indexed.remainingValues = values - indexed.lastTrait = lastTrait - indexed.lastKey = lastKey - } - - currentSize := 0 - resp := &sharedmemorypb.IndexedResponse{ - LastTrait: indexed.lastTrait, - LastKey: indexed.lastKey, - } - for i, value := range indexed.remainingValues { - sizeChange := baseElementSize + len(value) - if newSize := currentSize + sizeChange; newSize > maxBatchSize && i > 0 { - break - } - currentSize += sizeChange - - resp.Values = append(resp.Values, value) - } - - indexed.remainingValues = indexed.remainingValues[len(resp.Values):] - resp.Continues = len(indexed.remainingValues) > 0 - - if resp.Continues { - indexed.lastTrait = nil - indexed.lastKey = nil - s.indexed[req.Id] = indexed - } else { - delete(s.indexed, req.Id) + peerChainID, err := ids.ToID(req.PeerChainId) + if err != nil { + return nil, err } - return resp, nil -} -type applyRequest struct { - requests map[ids.ID]*atomic.Requests - batches map[int64]database.Batch + values, lastTrait, lastKey, err := s.sm.Indexed( + peerChainID, + req.Traits, + req.StartTrait, + req.StartKey, + int(req.Limit), + ) + return &sharedmemorypb.IndexedResponse{ + Values: values, + LastTrait: lastTrait, + LastKey: lastKey, + }, err } func (s *Server) Apply( _ context.Context, req *sharedmemorypb.ApplyRequest, ) (*sharedmemorypb.ApplyResponse, error) { - s.applyLock.Lock() - defer s.applyLock.Unlock() - - apply, exists := s.apply[req.Id] - if !exists { - apply = &applyRequest{ - requests: make(map[ids.ID]*atomic.Requests), - batches: make(map[int64]database.Batch), - } - } - - if err := s.parseRequests(apply.requests, 
req.Requests); err != nil { - delete(s.apply, req.Id) - return nil, err - } - - if err := s.parseBatches(apply.batches, req.Batches); err != nil { - delete(s.apply, req.Id) - return nil, err - } - - if req.Continues { - s.apply[req.Id] = apply - return &sharedmemorypb.ApplyResponse{}, nil - } - - delete(s.apply, req.Id) - - batches := make([]database.Batch, len(apply.batches)) - i := 0 - for _, batch := range apply.batches { - batches[i] = batch - i++ - } - - return &sharedmemorypb.ApplyResponse{}, s.sm.Apply(apply.requests, batches...) -} - -func (s *Server) parseRequests( - requests map[ids.ID]*atomic.Requests, - rawRequests []*sharedmemorypb.AtomicRequest, -) error { - for _, value := range rawRequests { - peerChainID, err := ids.ToID(value.PeerChainId) + requests := make(map[ids.ID]*atomic.Requests, len(req.Requests)) + for _, request := range req.Requests { + peerChainID, err := ids.ToID(request.PeerChainId) if err != nil { - return err + return nil, err } - req, ok := requests[peerChainID] - if !ok { - req = &atomic.Requests{ - PutRequests: make([]*atomic.Element, 0, len(value.PutRequests)), - } - requests[peerChainID] = req + r := &atomic.Requests{ + RemoveRequests: request.RemoveRequests, + PutRequests: make([]*atomic.Element, len(request.PutRequests)), } - - req.RemoveRequests = append(req.RemoveRequests, value.RemoveRequests...) 
- for _, v := range value.PutRequests { - req.PutRequests = append(req.PutRequests, &atomic.Element{ - Key: v.Key, - Value: v.Value, - Traits: v.Traits, - }) + for i, put := range request.PutRequests { + r.PutRequests[i] = &atomic.Element{ + Key: put.Key, + Value: put.Value, + Traits: put.Traits, + } } + requests[peerChainID] = r } - return nil -} - -func (s *Server) parseBatches( - batches map[int64]database.Batch, - rawBatches []*sharedmemorypb.Batch, -) error { - for _, reqBatch := range rawBatches { - batch, exists := batches[reqBatch.Id] - if !exists { - batch = s.db.NewBatch() - batches[reqBatch.Id] = batch - } + batches := make([]database.Batch, len(req.Batches)) + for i, reqBatch := range req.Batches { + batch := s.db.NewBatch() for _, putReq := range reqBatch.Puts { if err := batch.Put(putReq.Key, putReq.Value); err != nil { - return err + return nil, err } } - for _, deleteReq := range reqBatch.Deletes { if err := batch.Delete(deleteReq.Key); err != nil { - return err + return nil, err } } + batches[i] = batch } - return nil + return &sharedmemorypb.ApplyResponse{}, s.sm.Apply(requests, batches...) } diff --git a/avalanchego/chains/atomic/gsharedmemory/shared_memory_test.go b/avalanchego/chains/atomic/gsharedmemory/shared_memory_test.go index 99aed9d8..715e0e43 100644 --- a/avalanchego/chains/atomic/gsharedmemory/shared_memory_test.go +++ b/avalanchego/chains/atomic/gsharedmemory/shared_memory_test.go @@ -1,34 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gsharedmemory import ( - "context" "io" - "net" "testing" "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" - "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" sharedmemorypb "github.com/ava-labs/avalanchego/proto/pb/sharedmemory" ) -const ( - bufSize = units.MiB -) - func TestInterface(t *testing.T) { require := require.New(t) @@ -56,27 +46,19 @@ func TestInterface(t *testing.T) { } func wrapSharedMemory(t *testing.T, sm atomic.SharedMemory, db database.Database) (atomic.SharedMemory, io.Closer) { - listener := bufconn.Listen(bufSize) - serverCloser := grpcutils.ServerCloser{} - - serverFunc := func(opts []grpc.ServerOption) *grpc.Server { - server := grpcutils.NewDefaultServer(opts) - sharedmemorypb.RegisterSharedMemoryServer(server, NewServer(sm, db)) - serverCloser.Add(server) - return server + listener, err := grpcutils.NewListener() + if err != nil { + t.Fatalf("Failed to create listener: %s", err) } + serverCloser := grpcutils.ServerCloser{} - go grpcutils.Serve(listener, serverFunc) + server := grpcutils.NewServer() + sharedmemorypb.RegisterSharedMemoryServer(server, NewServer(sm, db)) + serverCloser.Add(server) - dialer := grpc.WithContextDialer( - func(context.Context, string) (net.Conn, error) { - return listener.Dial() - }, - ) + go grpcutils.Serve(listener, server) - dopts := grpcutils.DefaultDialOptions - dopts = append(dopts, dialer) - conn, err := grpcutils.Dial("", dopts...) 
+ conn, err := grpcutils.Dial(listener.Addr().String()) if err != nil { t.Fatalf("Failed to dial: %s", err) } diff --git a/avalanchego/chains/atomic/memory.go b/avalanchego/chains/atomic/memory.go index daa85920..a8aa703f 100644 --- a/avalanchego/chains/atomic/memory.go +++ b/avalanchego/chains/atomic/memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic @@ -49,7 +49,7 @@ func (m *Memory) NewSharedMemory(chainID ids.ID) SharedMemory { // database // // Invariant: ReleaseSharedDatabase must be called after to free the database -// associated with [sharedID] +// associated with [sharedID] func (m *Memory) GetSharedDatabase(db database.Database, sharedID ids.ID) database.Database { lock := m.makeLock(sharedID) lock.Lock() @@ -59,9 +59,8 @@ func (m *Memory) GetSharedDatabase(db database.Database, sharedID ids.ID) databa // ReleaseSharedDatabase unlocks the provided DB // // Note: ReleaseSharedDatabase must be called only after a corresponding call to -// GetSharedDatabase. -// If ReleaseSharedDatabase is called without a corresponding one-to-one -// call with GetSharedDatabase, it will panic. +// GetSharedDatabase. If ReleaseSharedDatabase is called without a corresponding +// one-to-one call with GetSharedDatabase, it will panic. func (m *Memory) ReleaseSharedDatabase(sharedID ids.ID) { lock := m.releaseLock(sharedID) lock.Unlock() @@ -102,7 +101,7 @@ func (m *Memory) releaseLock(sharedID ids.ID) *sync.Mutex { } // sharedID calculates the ID of the shared memory space -func (m *Memory) sharedID(id1, id2 ids.ID) ids.ID { +func sharedID(id1, id2 ids.ID) ids.ID { // Swap IDs locally to ensure id1 <= id2. 
if bytes.Compare(id1[:], id2[:]) == 1 { id1, id2 = id2, id1 diff --git a/avalanchego/chains/atomic/memory_test.go b/avalanchego/chains/atomic/memory_test.go index 2dc337d9..faf461f8 100644 --- a/avalanchego/chains/atomic/memory_test.go +++ b/avalanchego/chains/atomic/memory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic @@ -15,11 +15,9 @@ var ( blockchainID1 = ids.Empty.Prefix(1) ) -func TestMemorySharedID(t *testing.T) { - m := NewMemory(memdb.New()) - - sharedID0 := m.sharedID(blockchainID0, blockchainID1) - sharedID1 := m.sharedID(blockchainID1, blockchainID0) +func TestSharedID(t *testing.T) { + sharedID0 := sharedID(blockchainID0, blockchainID1) + sharedID1 := sharedID(blockchainID1, blockchainID0) if sharedID0 != sharedID1 { t.Fatalf("SharedMemory.sharedID should be communitive") @@ -29,7 +27,7 @@ func TestMemorySharedID(t *testing.T) { func TestMemoryMakeReleaseLock(t *testing.T) { m := NewMemory(memdb.New()) - sharedID := m.sharedID(blockchainID0, blockchainID1) + sharedID := sharedID(blockchainID0, blockchainID1) lock0 := m.makeLock(sharedID) @@ -53,7 +51,7 @@ func TestMemoryMakeReleaseLock(t *testing.T) { func TestMemoryUnknownFree(t *testing.T) { m := NewMemory(memdb.New()) - sharedID := m.sharedID(blockchainID0, blockchainID1) + sharedID := sharedID(blockchainID0, blockchainID1) defer func() { if recover() == nil { diff --git a/avalanchego/chains/atomic/mock_shared_memory.go b/avalanchego/chains/atomic/mock_shared_memory.go index 691fa7e0..7f5d4b5a 100644 --- a/avalanchego/chains/atomic/mock_shared_memory.go +++ b/avalanchego/chains/atomic/mock_shared_memory.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/chains/atomic (interfaces: SharedMemory) diff --git a/avalanchego/chains/atomic/prefixes.go b/avalanchego/chains/atomic/prefixes.go index 95b2f5a7..08927384 100644 --- a/avalanchego/chains/atomic/prefixes.go +++ b/avalanchego/chains/atomic/prefixes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/avalanchego/chains/atomic/shared_memory.go b/avalanchego/chains/atomic/shared_memory.go index e77f8c10..7b2f8a56 100644 --- a/avalanchego/chains/atomic/shared_memory.go +++ b/avalanchego/chains/atomic/shared_memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic @@ -7,9 +7,10 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" ) -var _ SharedMemory = &sharedMemory{} +var _ SharedMemory = (*sharedMemory)(nil) type Requests struct { RemoveRequests [][]byte `serialize:"true"` @@ -62,7 +63,7 @@ type sharedMemory struct { } func (sm *sharedMemory) Get(peerChainID ids.ID, keys [][]byte) ([][]byte, error) { - sharedID := sm.m.sharedID(peerChainID, sm.thisChainID) + sharedID := sharedID(peerChainID, sm.thisChainID) db := sm.m.GetSharedDatabase(sm.m.db, sharedID) defer sm.m.ReleaseSharedDatabase(sharedID) @@ -88,7 +89,7 @@ func (sm *sharedMemory) Indexed( startKey []byte, limit int, ) ([][]byte, []byte, []byte, error) { - sharedID := sm.m.sharedID(peerChainID, sm.thisChainID) + sharedID := sharedID(peerChainID, sm.thisChainID) db := sm.m.GetSharedDatabase(sm.m.db, sharedID) defer sm.m.ReleaseSharedDatabase(sharedID) @@ -117,13 +118,13 @@ func (sm *sharedMemory) Apply(requests 
map[ids.ID]*Requests, batches ...database sharedIDs := make([]ids.ID, 0, len(requests)) sharedOperations := make(map[ids.ID]*Requests, len(requests)) for peerChainID, request := range requests { - sharedID := sm.m.sharedID(sm.thisChainID, peerChainID) + sharedID := sharedID(sm.thisChainID, peerChainID) sharedIDs = append(sharedIDs, sharedID) request.peerChainID = peerChainID sharedOperations[sharedID] = request } - ids.SortIDs(sharedIDs) + utils.Sort(sharedIDs) // Make sure all operations are committed atomically vdb := versiondb.New(sm.m.db) diff --git a/avalanchego/chains/atomic/shared_memory_test.go b/avalanchego/chains/atomic/shared_memory_test.go index afe9b434..bb3266d8 100644 --- a/avalanchego/chains/atomic/shared_memory_test.go +++ b/avalanchego/chains/atomic/shared_memory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/avalanchego/chains/atomic/state.go b/avalanchego/chains/atomic/state.go index b2279bb1..c16a0a2e 100644 --- a/avalanchego/chains/atomic/state.go +++ b/avalanchego/chains/atomic/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/set" ) var errDuplicatedOperation = errors.New("duplicated operation on provided value") @@ -196,13 +197,13 @@ func (s *state) getKeys(traits [][]byte, startTrait, startKey []byte, limit int) // this variable is declared, the map may not be initialized from the // start. The first add to the underlying map of the set would then // result in the map being initialized. 
- keySet := ids.Set{} + keySet := set.Set[ids.ID]{} keys := [][]byte(nil) lastTrait := startTrait lastKey := startKey // Iterate over the traits in order appending all of the keys that possess // the given [traits]. - utils.Sort2DBytes(traits) + utils.SortBytes(traits) for _, trait := range traits { switch bytes.Compare(trait, startTrait) { case -1: @@ -235,7 +236,7 @@ func (s *state) getKeys(traits [][]byte, startTrait, startKey []byte, limit int) // and adds keys that possess [trait] to [keys] until the iteration completes or // limit hits 0. If a key possesses multiple traits, it will be de-duplicated // with [keySet]. -func (s *state) appendTraitKeys(keys *[][]byte, keySet *ids.Set, limit *int, trait, startKey []byte) ([]byte, error) { +func (s *state) appendTraitKeys(keys *[][]byte, keySet *set.Set[ids.ID], limit *int, trait, startKey []byte) ([]byte, error) { lastKey := startKey // Create the prefixDB for the specified trait. diff --git a/avalanchego/chains/atomic/test_shared_memory.go b/avalanchego/chains/atomic/test_shared_memory.go index 1d8bac5f..0e7d918a 100644 --- a/avalanchego/chains/atomic/test_shared_memory.go +++ b/avalanchego/chains/atomic/test_shared_memory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package atomic @@ -287,7 +287,7 @@ func TestSharedMemoryCommitOnRemove(t *testing.T, _, chainID1 ids.ID, sm0, _ Sha } // TestPutAndRemoveBatch tests to make sure multiple put and remove requests work properly -func TestPutAndRemoveBatch(t *testing.T, chainID0, chainID1 ids.ID, _, sm1 SharedMemory, db database.Database) { +func TestPutAndRemoveBatch(t *testing.T, chainID0, _ ids.ID, _, sm1 SharedMemory, db database.Database) { require := require.New(t) batch := db.NewBatch() diff --git a/avalanchego/chains/atomic/writer.go b/avalanchego/chains/atomic/writer.go index 00b18d2a..9f3876a5 100644 --- a/avalanchego/chains/atomic/writer.go +++ b/avalanchego/chains/atomic/writer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package atomic diff --git a/avalanchego/chains/linearizable_vm.go b/avalanchego/chains/linearizable_vm.go new file mode 100644 index 00000000..abaf20ca --- /dev/null +++ b/avalanchego/chains/linearizable_vm.go @@ -0,0 +1,80 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package chains + +import ( + "context" + + "github.com/ava-labs/avalanchego/api/metrics" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + dbManager "github.com/ava-labs/avalanchego/database/manager" +) + +var ( + _ vertex.LinearizableVM = (*initializeOnLinearizeVM)(nil) + _ block.ChainVM = (*linearizeOnInitializeVM)(nil) +) + +// initializeOnLinearizeVM transforms the consensus engine's call to Linearize +// into a call to Initialize. This enables the proposervm to be initialized by +// the call to Linearize. 
This also provides the stopVertexID to the +// linearizeOnInitializeVM. +type initializeOnLinearizeVM struct { + vertex.DAGVM + vmToInitialize common.VM + vmToLinearize *linearizeOnInitializeVM + + registerer metrics.OptionalGatherer + ctx *snow.Context + dbManager dbManager.Manager + genesisBytes []byte + upgradeBytes []byte + configBytes []byte + toEngine chan<- common.Message + fxs []*common.Fx + appSender common.AppSender +} + +func (vm *initializeOnLinearizeVM) Linearize(ctx context.Context, stopVertexID ids.ID) error { + vm.vmToLinearize.stopVertexID = stopVertexID + vm.ctx.Metrics = vm.registerer + return vm.vmToInitialize.Initialize( + ctx, + vm.ctx, + vm.dbManager, + vm.genesisBytes, + vm.upgradeBytes, + vm.configBytes, + vm.toEngine, + vm.fxs, + vm.appSender, + ) +} + +// linearizeOnInitializeVM transforms the proposervm's call to Initialize into a +// call to Linearize. This enables the proposervm to provide its toEngine +// channel to the VM that is being linearized. +type linearizeOnInitializeVM struct { + vertex.LinearizableVMWithEngine + stopVertexID ids.ID +} + +func (vm *linearizeOnInitializeVM) Initialize( + ctx context.Context, + _ *snow.Context, + _ dbManager.Manager, + _ []byte, + _ []byte, + _ []byte, + toEngine chan<- common.Message, + _ []*common.Fx, + _ common.AppSender, +) error { + return vm.Linearize(ctx, vm.stopVertexID, toEngine) +} diff --git a/avalanchego/chains/manager.go b/avalanchego/chains/manager.go index e80ee733..b3614139 100644 --- a/avalanchego/chains/manager.go +++ b/avalanchego/chains/manager.go @@ -1,13 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package chains import ( + "context" "crypto" "crypto/tls" "errors" "fmt" + "os" + "path/filepath" "sync" "time" @@ -24,8 +27,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/network" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/engine/avalanche/state" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -38,12 +41,20 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/buffer" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/metervm" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" "github.com/ava-labs/avalanchego/vms/proposervm" + "github.com/ava-labs/avalanchego/vms/tracedvm" dbManager "github.com/ava-labs/avalanchego/database/manager" timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker" @@ -59,15 +70,30 @@ import ( snowgetter "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" ) -const defaultChannelSize = 1 +const ( + defaultChannelSize = 1 + initialQueueSize = 3 +) var ( - errUnknownChainID = errors.New("unknown chain ID") - errUnknownVMType = errors.New("the vm should have type avalanche.DAGVM or snowman.ChainVM") - errCreatePlatformVM = errors.New("attempted to create a chain running the 
PlatformVM") - errNotBootstrapped = errors.New("chains not bootstrapped") + // Commonly shared VM DB prefix + vmDBPrefix = []byte("vm") + + // Bootstrapping prefixes for LinearizableVMs + vertexDBPrefix = []byte("vertex") + vertexBootstrappingDBPrefix = []byte("vertex_bs") + txBootstrappingDBPrefix = []byte("tx_bs") + blockBootstrappingDBPrefix = []byte("block_bs") + + // Bootstrapping prefixes for ChainVMs + bootstrappingDB = []byte("bs") + + errUnknownVMType = errors.New("the vm should have type avalanche.DAGVM or snowman.ChainVM") + errCreatePlatformVM = errors.New("attempted to create a chain running the PlatformVM") + errNotBootstrapped = errors.New("subnets not bootstrapped") + errNoPlatformSubnetConfig = errors.New("subnet config for platform chain not found") - _ Manager = &manager{} + _ Manager = (*manager)(nil) ) // Manager manages the chains running on this node. @@ -82,11 +108,11 @@ type Manager interface { // Return the router this Manager is using to route consensus messages to chains Router() router.Router - // Create a chain in the future - CreateChain(ChainParameters) - - // Create a chain now - ForceCreateChain(ChainParameters) + // Queues a chain to be created in the future after chain creator is unblocked. + // This is only called from the P-chain thread to create other chains + // Queued chains are created only after P-chain is bootstrapped. + // This assumes only chains in tracked subnets are queued. + QueueChainCreation(ChainParameters) // Add a registrant [r]. Every time a chain is // created, [r].RegisterChain([new chain]) is called. 
@@ -98,12 +124,13 @@ type Manager interface { // Given an alias, return the ID of the VM associated with that alias LookupVM(string) (ids.ID, error) - // Returns the ID of the subnet that is validating the provided chain - SubnetID(chainID ids.ID) (ids.ID, error) - // Returns true iff the chain with the given ID exists and is finished bootstrapping IsBootstrapped(ids.ID) bool + // Starts the chain creator with the initial platform chain parameters, must + // be called once. + StartChainCreator(platformChain ChainParameters) error + Shutdown() } @@ -125,7 +152,8 @@ type ChainParameters struct { type chain struct { Name string - Engine common.Engine + Context *snow.ConsensusContext + VM common.VM Handler handler.Handler Beacons validators.Set } @@ -139,43 +167,45 @@ type ChainConfig struct { } type ManagerConfig struct { - StakingEnabled bool // True iff the network has staking enabled - StakingCert tls.Certificate // needed to sign snowman++ blocks + StakingEnabled bool // True iff the network has staking enabled + StakingCert tls.Certificate // needed to sign snowman++ blocks + StakingBLSKey *bls.SecretKey + TracingEnabled bool + // Must not be used unless [TracingEnabled] is true as this may be nil. + Tracer trace.Tracer Log logging.Logger LogFactory logging.Factory VMManager vms.Manager // Manage mappings from vm ID --> vm - DecisionAcceptorGroup snow.AcceptorGroup - ConsensusAcceptorGroup snow.AcceptorGroup + BlockAcceptorGroup snow.AcceptorGroup + TxAcceptorGroup snow.AcceptorGroup + VertexAcceptorGroup snow.AcceptorGroup DBManager dbManager.Manager - MsgCreator message.Creator // message creator, shared with network - MsgCreatorWithProto message.Creator // message creator using protobufs, shared with network - Router router.Router // Routes incoming messages to the appropriate chain - Net network.Network // Sends consensus messages to other validators - ConsensusParams avcon.Parameters // The consensus parameters (alpha, beta, etc.) 
for new chains - Validators validators.Manager // Validators validating on this chain - NodeID ids.NodeID // The ID of this node - NetworkID uint32 // ID of the network this node is connected to - Server server.Server // Handles HTTP API calls + MsgCreator message.OutboundMsgBuilder // message creator, shared with network + Router router.Router // Routes incoming messages to the appropriate chain + Net network.Network // Sends consensus messages to other validators + Validators validators.Manager // Validators validating on this chain + NodeID ids.NodeID // The ID of this node + NetworkID uint32 // ID of the network this node is connected to + Server server.Server // Handles HTTP API calls Keystore keystore.Keystore AtomicMemory *atomic.Memory AVAXAssetID ids.ID - XChainID ids.ID - CriticalChains ids.Set // Chains that can't exit gracefully - WhitelistedSubnets ids.Set // Subnets to validate + XChainID ids.ID // ID of the X-Chain, + CChainID ids.ID // ID of the C-Chain, + CriticalChains set.Set[ids.ID] // Chains that can't exit gracefully TimeoutManager timeout.Manager // Manages request timeouts when sending messages to other validators Health health.Registerer - RetryBootstrap bool // Should Bootstrap be retried - RetryBootstrapWarnFrequency int // Max number of times to retry bootstrap before warning the node operator - SubnetConfigs map[ids.ID]SubnetConfig // ID -> SubnetConfig - ChainConfigs map[string]ChainConfig // alias -> ChainConfig + RetryBootstrap bool // Should Bootstrap be retried + RetryBootstrapWarnFrequency int // Max number of times to retry bootstrap before warning the node operator + SubnetConfigs map[ids.ID]subnets.Config // ID -> SubnetConfig + ChainConfigs map[string]ChainConfig // alias -> ChainConfig // ShutdownNodeFunc allows the chain manager to issue a request to shutdown the node ShutdownNodeFunc func(exitCode int) MeterVMEnabled bool // Should each VM be wrapped with a MeterVM Metrics metrics.MultiGatherer ConsensusGossipFrequency 
time.Duration - - GossipConfig sender.GossipConfig + ConsensusAppConcurrency int // Max Time to spend fetching a container and its // ancestors when responding to a GetAncestors @@ -188,12 +218,13 @@ type ManagerConfig struct { ApricotPhase4Time time.Time ApricotPhase4MinPChainHeight uint64 - BanffTime time.Time // Tracks CPU/disk usage caused by each peer. ResourceTracker timetracker.ResourceTracker StateSyncBeacons []ids.NodeID + + ChainDataDir string } type manager struct { @@ -205,76 +236,96 @@ type manager struct { // Those notified when a chain is created registrants []Registrant - unblocked bool - blockedChains []ChainParameters + // queue that holds chain create requests + chainsQueue buffer.BlockingDeque[ChainParameters] + // unblocks chain creator to start processing the queue + unblockChainCreatorCh chan struct{} + chainCreatorShutdownCh chan struct{} + subnetsLock sync.Mutex // Key: Subnet's ID // Value: Subnet description - subnets map[ids.ID]Subnet + subnets map[ids.ID]subnets.Subnet chainsLock sync.Mutex // Key: Chain's ID // Value: The chain chains map[ids.ID]handler.Handler - // snowman++ related interface to allow validators retrival + // snowman++ related interface to allow validators retrieval validatorState validators.State } // New returns a new Manager func New(config *ManagerConfig) Manager { return &manager{ - Aliaser: ids.NewAliaser(), - ManagerConfig: *config, - subnets: make(map[ids.ID]Subnet), - chains: make(map[ids.ID]handler.Handler), + Aliaser: ids.NewAliaser(), + ManagerConfig: *config, + subnets: make(map[ids.ID]subnets.Subnet), + chains: make(map[ids.ID]handler.Handler), + chainsQueue: buffer.NewUnboundedBlockingDeque[ChainParameters](initialQueueSize), + unblockChainCreatorCh: make(chan struct{}), + chainCreatorShutdownCh: make(chan struct{}), } } // Router that this chain manager is using to route consensus messages to chains -func (m *manager) Router() router.Router { return m.ManagerConfig.Router } +func (m *manager) Router() 
router.Router { + return m.ManagerConfig.Router +} -// Create a chain -func (m *manager) CreateChain(chain ChainParameters) { - if !m.unblocked { - m.blockedChains = append(m.blockedChains, chain) - } else { - m.ForceCreateChain(chain) +// QueueChainCreation queues a chain creation request +// Invariant: Tracked Subnet must be checked before calling this function +func (m *manager) QueueChainCreation(chainParams ChainParameters) { + m.subnetsLock.Lock() + subnetID := chainParams.SubnetID + sb, exists := m.subnets[subnetID] + if !exists { + sbConfig, ok := m.SubnetConfigs[subnetID] + if !ok { + // default to primary subnet config + sbConfig = m.SubnetConfigs[constants.PrimaryNetworkID] + } + sb = subnets.New(m.NodeID, sbConfig) + m.subnets[chainParams.SubnetID] = sb } -} + addedChain := sb.AddChain(chainParams.ID) + m.subnetsLock.Unlock() -// Create a chain, this is only called from the P-chain thread, except for -// creating the P-chain. -func (m *manager) ForceCreateChain(chainParams ChainParameters) { - if m.StakingEnabled && chainParams.SubnetID != constants.PrimaryNetworkID && !m.WhitelistedSubnets.Contains(chainParams.SubnetID) { - m.Log.Debug("skipped creating non-whitelisted chain", + if !addedChain { + m.Log.Debug("skipping chain creation", + zap.String("reason", "chain already staged"), + zap.Stringer("subnetID", subnetID), zap.Stringer("chainID", chainParams.ID), zap.Stringer("vmID", chainParams.VMID), ) return } - // Assert that there isn't already a chain with an alias in [chain].Aliases - // (Recall that the string representation of a chain's ID is also an alias - // for a chain) - if alias, isRepeat := m.isChainWithAlias(chainParams.ID.String()); isRepeat { - m.Log.Debug("skipping chain creation", - zap.String("reason", "there is already a chain with same alias"), - zap.String("alias", alias), + + if ok := m.chainsQueue.PushRight(chainParams); !ok { + m.Log.Warn("skipping chain creation", + zap.String("reason", "couldn't enqueue chain"), + 
zap.Stringer("subnetID", subnetID), + zap.Stringer("chainID", chainParams.ID), + zap.Stringer("vmID", chainParams.VMID), ) - return } +} + +// createChain creates and starts the chain +// +// Note: it is expected for the subnet to already have the chain registered as +// bootstrapping before this function is called +func (m *manager) createChain(chainParams ChainParameters) { m.Log.Info("creating chain", + zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), zap.Stringer("vmID", chainParams.VMID), ) - sb, exists := m.subnets[chainParams.SubnetID] - if !exists { - sb = newSubnet() - m.subnets[chainParams.SubnetID] = sb - } - - sb.addChain(chainParams.ID) + m.subnetsLock.Lock() + sb := m.subnets[chainParams.SubnetID] + m.subnetsLock.Unlock() // Note: buildChain builds all chain's relevant objects (notably engine and handler) // but does not start their operations. Starting of the handler (which could potentially @@ -283,11 +334,12 @@ func (m *manager) ForceCreateChain(chainParams ChainParameters) { // upon start is dropped. chain, err := m.buildChain(chainParams, sb) if err != nil { - sb.removeChain(chainParams.ID) if m.CriticalChains.Contains(chainParams.ID) { // Shut down if we fail to create a required chain (i.e. X, P or C) m.Log.Fatal("error creating required chain", + zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), + zap.Stringer("vmID", chainParams.VMID), zap.Error(err), ) go m.ShutdownNodeFunc(1) @@ -296,7 +348,10 @@ func (m *manager) ForceCreateChain(chainParams ChainParameters) { chainAlias := m.PrimaryAliasOrDefault(chainParams.ID) m.Log.Error("error creating chain", + zap.Stringer("subnetID", chainParams.SubnetID), + zap.Stringer("chainID", chainParams.ID), zap.String("chainAlias", chainAlias), + zap.Stringer("vmID", chainParams.VMID), zap.Error(err), ) @@ -304,12 +359,20 @@ func (m *manager) ForceCreateChain(chainParams ChainParameters) { // created or not. 
This attempts to notify the node operator that their // node may not be properly validating the subnet they expect to be // validating. - healthCheckErr := fmt.Errorf("failed to create chain on whitelisted subnet: %s", chainParams.SubnetID) - if err := m.Health.RegisterHealthCheck(chainAlias, health.CheckerFunc(func() (interface{}, error) { - return nil, healthCheckErr - })); err != nil { + healthCheckErr := fmt.Errorf("failed to create chain on subnet: %s", chainParams.SubnetID) + err := m.Health.RegisterHealthCheck( + chainAlias, + health.CheckerFunc(func(context.Context) (interface{}, error) { + return nil, healthCheckErr + }), + chainParams.SubnetID.String(), + ) + if err != nil { m.Log.Error("failed to register failing health check", + zap.Stringer("subnetID", chainParams.SubnetID), + zap.Stringer("chainID", chainParams.ID), zap.String("chainAlias", chainAlias), + zap.Stringer("vmID", chainParams.VMID), zap.Error(err), ) } @@ -323,18 +386,20 @@ func (m *manager) ForceCreateChain(chainParams ChainParameters) { // Associate the newly created chain with its default alias if err := m.Alias(chainParams.ID, chainParams.ID.String()); err != nil { m.Log.Error("failed to alias the new chain with itself", + zap.Stringer("subnetID", chainParams.SubnetID), zap.Stringer("chainID", chainParams.ID), + zap.Stringer("vmID", chainParams.VMID), zap.Error(err), ) } // Notify those that registered to be notified when a new chain is created - m.notifyRegistrants(chain.Name, chain.Engine) + m.notifyRegistrants(chain.Name, chain.Context, chain.VM) // Allows messages to be routed to the new chain. If the handler hasn't been // started and a message is forwarded, then the message will block until the // handler is started. - m.ManagerConfig.Router.AddChain(chain.Handler) + m.ManagerConfig.Router.AddChain(context.TODO(), chain.Handler) // Register bootstrapped health checks after P chain has been added to // chains. 
@@ -344,22 +409,28 @@ func (m *manager) ForceCreateChain(chainParams ChainParameters) { // the manager. if chainParams.ID == constants.PlatformChainID { if err := m.registerBootstrappedHealthChecks(); err != nil { - chain.Handler.StopWithError(err) + chain.Handler.StopWithError(context.TODO(), err) } } // Tell the chain to start processing messages. // If the X, P, or C Chain panics, do not attempt to recover - chain.Handler.Start(!m.CriticalChains.Contains(chainParams.ID)) + chain.Handler.Start(context.TODO(), !m.CriticalChains.Contains(chainParams.ID)) } // Create a chain -func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, error) { +func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*chain, error) { if chainParams.ID != constants.PlatformChainID && chainParams.VMID == constants.PlatformVMID { return nil, errCreatePlatformVM } primaryAlias := m.PrimaryAliasOrDefault(chainParams.ID) + // Create this chain's data directory + chainDataDir := filepath.Join(m.ChainDataDir, chainParams.ID.String()) + if err := os.MkdirAll(chainDataDir, perms.ReadWriteExecute); err != nil { + return nil, fmt.Errorf("error while creating chain data directory %w", err) + } + // Create the log and context of the chain chainLog, err := m.LogFactory.MakeChain(primaryAlias) if err != nil { @@ -372,6 +443,15 @@ func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, er return nil, fmt.Errorf("error while registering chain's metrics %w", err) } + // This converts the prefix for all the Avalanche consensus metrics from + // `avalanche_{chainID}_` into `avalanche_{chainID}_avalanche_` so that + // there are no conflicts when registering the Snowman consensus metrics. 
+ avalancheConsensusMetrics := prometheus.NewRegistry() + avalancheDAGNamespace := fmt.Sprintf("%s_avalanche", chainNamespace) + if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil { + return nil, fmt.Errorf("error while registering DAG metrics %w", err) + } + vmMetrics := metrics.NewOptionalGatherer() vmNamespace := fmt.Sprintf("%s_vm", chainNamespace) if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil { @@ -384,33 +464,28 @@ func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, er SubnetID: chainParams.SubnetID, ChainID: chainParams.ID, NodeID: m.NodeID, + PublicKey: bls.PublicFromSecretKey(m.StakingBLSKey), XChainID: m.XChainID, + CChainID: m.CChainID, AVAXAssetID: m.AVAXAssetID, Log: chainLog, Keystore: m.Keystore.NewBlockchainKeyStore(chainParams.ID), SharedMemory: m.AtomicMemory.NewSharedMemory(chainParams.ID), BCLookup: m, - SNLookup: m, Metrics: vmMetrics, - ValidatorState: m.validatorState, - StakingCertLeaf: m.StakingCert.Leaf, - StakingLeafSigner: m.StakingCert.PrivateKey.(crypto.Signer), - }, - DecisionAcceptor: m.DecisionAcceptorGroup, - ConsensusAcceptor: m.ConsensusAcceptorGroup, - Registerer: consensusMetrics, - } - // We set the state to Initializing here because failing to set the state - // before it's first access would cause a panic. 
- ctx.SetState(snow.Initializing) + WarpSigner: warp.NewSigner(m.StakingBLSKey, chainParams.ID), - if sbConfigs, ok := m.SubnetConfigs[chainParams.SubnetID]; ok { - if sbConfigs.ValidatorOnly { - ctx.SetValidatorOnly() - } + ValidatorState: m.validatorState, + ChainDataDir: chainDataDir, + }, + BlockAcceptor: m.BlockAcceptorGroup, + TxAcceptor: m.TxAcceptorGroup, + VertexAcceptor: m.VertexAcceptorGroup, + Registerer: consensusMetrics, + AvalancheRegisterer: avalancheConsensusMetrics, } // Get a factory for the vm we want to use on our chain @@ -420,7 +495,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, er } // Create the chain - vm, err := vmFactory.New(ctx.Context) + vm, err := vmFactory.New(chainLog) if err != nil { return nil, fmt.Errorf("error while creating vm: %w", err) } @@ -434,7 +509,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, er return nil, fmt.Errorf("error while getting fxFactory: %w", err) } - fx, err := fxFactory.New(ctx.Context) + fx, err := fxFactory.New(chainLog) if err != nil { return nil, fmt.Errorf("error while creating fx: %w", err) } @@ -446,18 +521,12 @@ func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, er } } - consensusParams := m.ConsensusParams - if sbConfigs, ok := m.SubnetConfigs[chainParams.SubnetID]; ok && chainParams.SubnetID != constants.PrimaryNetworkID { - consensusParams = sbConfigs.ConsensusParameters - } - - // The validators of this blockchain var vdrs validators.Set // Validators validating this blockchain var ok bool if m.StakingEnabled { - vdrs, ok = m.Validators.GetValidators(chainParams.SubnetID) + vdrs, ok = m.Validators.Get(chainParams.SubnetID) } else { // Staking is disabled. Every peer validates every subnet. - vdrs, ok = m.Validators.GetValidators(constants.PrimaryNetworkID) + vdrs, ok = m.Validators.Get(constants.PrimaryNetworkID) } if !ok { return nil, fmt.Errorf("couldn't get validator set of subnet with ID %s. 
The subnet may not exist", chainParams.SubnetID) @@ -472,7 +541,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, er var chain *chain switch vm := vm.(type) { - case vertex.DAGVM: + case vertex.LinearizableVMWithEngine: chain, err = m.createAvalancheChain( ctx, chainParams.GenesisData, @@ -480,7 +549,6 @@ func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, er beacons, vm, fxs, - consensusParams, bootstrapWeight, sb, ) @@ -495,7 +563,6 @@ func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, er beacons, vm, fxs, - consensusParams.Parameters, bootstrapWeight, sb, ) @@ -514,15 +581,8 @@ func (m *manager) buildChain(chainParams ChainParameters, sb Subnet) (*chain, er return chain, nil } -func (m *manager) AddRegistrant(r Registrant) { m.registrants = append(m.registrants, r) } - -func (m *manager) unblockChains() { - m.unblocked = true - blocked := m.blockedChains - m.blockedChains = nil - for _, chainParams := range blocked { - m.ForceCreateChain(chainParams) - } +func (m *manager) AddRegistrant(r Registrant) { + m.registrants = append(m.registrants, r) } // Create a DAG-based blockchain that uses Avalanche @@ -531,32 +591,41 @@ func (m *manager) createAvalancheChain( genesisData []byte, vdrs, beacons validators.Set, - vm vertex.DAGVM, + vm vertex.LinearizableVMWithEngine, fxs []*common.Fx, - consensusParams avcon.Parameters, bootstrapWeight uint64, - sb Subnet, + sb subnets.Subnet, ) (*chain, error) { ctx.Lock.Lock() defer ctx.Lock.Unlock() + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.Initializing, + }) + meterDBManager, err := m.DBManager.NewMeterDBManager("db", ctx.Registerer) if err != nil { return nil, err } prefixDBManager := meterDBManager.NewPrefixDBManager(ctx.ChainID[:]) - vmDBManager := prefixDBManager.NewPrefixDBManager([]byte("vm")) + vmDBManager := prefixDBManager.NewPrefixDBManager(vmDBPrefix) db := 
prefixDBManager.Current() - vertexDB := prefixdb.New([]byte("vertex"), db.Database) - vertexBootstrappingDB := prefixdb.New([]byte("vertex_bs"), db.Database) - txBootstrappingDB := prefixdb.New([]byte("tx_bs"), db.Database) + vertexDB := prefixdb.New(vertexDBPrefix, db.Database) + vertexBootstrappingDB := prefixdb.New(vertexBootstrappingDBPrefix, db.Database) + txBootstrappingDB := prefixdb.New(txBootstrappingDBPrefix, db.Database) + blockBootstrappingDB := prefixdb.New(blockBootstrappingDBPrefix, db.Database) - vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.Registerer) + vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer) + if err != nil { + return nil, err + } + txBlocker, err := queue.New(txBootstrappingDB, "tx", ctx.AvalancheRegisterer) if err != nil { return nil, err } - txBlocker, err := queue.New(txBootstrappingDB, "tx", ctx.Registerer) + blockBlocker, err := queue.NewWithMissing(blockBootstrappingDB, "block", ctx.Registerer) if err != nil { return nil, err } @@ -565,27 +634,59 @@ func (m *manager) createAvalancheChain( // VM uses this channel to notify engine that a block is ready to be made msgChan := make(chan common.Message, defaultChannelSize) - gossipConfig := m.GossipConfig - if sbConfigs, ok := m.SubnetConfigs[ctx.SubnetID]; ok && ctx.SubnetID != constants.PrimaryNetworkID { - gossipConfig = sbConfigs.GossipConfig + // Passes messages from the avalanche engines to the network + avalancheMessageSender, err := sender.New( + ctx, + m.MsgCreator, + m.Net, + m.ManagerConfig.Router, + m.TimeoutManager, + p2p.EngineType_ENGINE_TYPE_AVALANCHE, + sb, + ) + if err != nil { + return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) } - // Passes messages from the consensus engine to the network - sender, err := sender.New( + if m.TracingEnabled { + avalancheMessageSender = sender.Trace(avalancheMessageSender, m.Tracer) + } + + err = m.VertexAcceptorGroup.RegisterAcceptor( + 
ctx.ChainID, + "gossip", + avalancheMessageSender, + false, + ) + if err != nil { // Set up the event dispatcher + return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) + } + + // Passes messages from the snowman engines to the network + snowmanMessageSender, err := sender.New( ctx, m.MsgCreator, - m.MsgCreatorWithProto, - m.BanffTime, m.Net, m.ManagerConfig.Router, m.TimeoutManager, - gossipConfig, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + sb, ) if err != nil { - return nil, fmt.Errorf("couldn't initialize sender: %w", err) + return nil, fmt.Errorf("couldn't initialize avalanche sender: %w", err) } - if err := m.ConsensusAcceptorGroup.RegisterAcceptor(ctx.ChainID, "gossip", sender, false); err != nil { // Set up the event dipatcher + if m.TracingEnabled { + snowmanMessageSender = sender.Trace(snowmanMessageSender, m.Tracer) + } + + err = m.BlockAcceptorGroup.RegisterAcceptor( + ctx.ChainID, + "gossip", + snowmanMessageSender, + false, + ) + if err != nil { // Set up the event dispatcher return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) } @@ -597,19 +698,44 @@ func (m *manager) createAvalancheChain( if m.MeterVMEnabled { vm = metervm.NewVertexVM(vm) } + if m.TracingEnabled { + vm = tracedvm.NewVertexVM(vm, m.Tracer) + } // Handles serialization/deserialization of vertices and also the // persistence of vertices vtxManager := state.NewSerializer( state.SerializerConfig{ - ChainID: ctx.ChainID, - VM: vm, - DB: vertexDB, - Log: ctx.Log, - XChainMigrationTime: version.GetXChainMigrationTime(ctx.NetworkID), + ChainID: ctx.ChainID, + VM: vm, + DB: vertexDB, + Log: ctx.Log, + CortinaTime: version.GetCortinaTime(ctx.NetworkID), }, ) - if err := vm.Initialize( + + avalancheRegisterer := metrics.NewOptionalGatherer() + snowmanRegisterer := metrics.NewOptionalGatherer() + + registerer := metrics.NewMultiGatherer() + if err := registerer.Register("avalanche", avalancheRegisterer); err != nil { + return nil, err + } + if err := 
registerer.Register("", snowmanRegisterer); err != nil { + return nil, err + } + if err := ctx.Context.Metrics.Register(registerer); err != nil { + return nil, err + } + + ctx.Context.Metrics = avalancheRegisterer + + // The only difference between using avalancheMessageSender and + // snowmanMessageSender here is where the metrics will be placed. Because we + // end up using this sender after the linearization, we pass in + // snowmanMessageSender here. + err = vm.Initialize( + context.TODO(), ctx.Context, vmDBManager, genesisData, @@ -617,25 +743,86 @@ func (m *manager) createAvalancheChain( chainConfig.Config, msgChan, fxs, - sender, - ); err != nil { + snowmanMessageSender, + ) + if err != nil { return nil, fmt.Errorf("error during vm's Initialize: %w", err) } + // Initialize the ProposerVM and the vm wrapped inside it + minBlockDelay := proposervm.DefaultMinBlockDelay + if subnetCfg, ok := m.SubnetConfigs[ctx.SubnetID]; ok { + minBlockDelay = subnetCfg.ProposerMinBlockDelay + } + m.Log.Info("creating proposervm wrapper", + zap.Time("activationTime", m.ApricotPhase4Time), + zap.Uint64("minPChainHeight", m.ApricotPhase4MinPChainHeight), + zap.Duration("minBlockDelay", minBlockDelay), + ) + + chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) + + untracedVMWrappedInsideProposerVM := &linearizeOnInitializeVM{ + LinearizableVMWithEngine: vm, + } + + var vmWrappedInsideProposerVM block.ChainVM = untracedVMWrappedInsideProposerVM + if m.TracingEnabled { + vmWrappedInsideProposerVM = tracedvm.NewBlockVM(vmWrappedInsideProposerVM, chainAlias, m.Tracer) + } + + // Note: vmWrappingProposerVM is the VM that the Snowman engines should be + // using. 
+ var vmWrappingProposerVM block.ChainVM = proposervm.New( + vmWrappedInsideProposerVM, + m.ApricotPhase4Time, + m.ApricotPhase4MinPChainHeight, + minBlockDelay, + m.StakingCert.PrivateKey.(crypto.Signer), + m.StakingCert.Leaf, + ) + + if m.MeterVMEnabled { + vmWrappingProposerVM = metervm.NewBlockVM(vmWrappingProposerVM) + } + if m.TracingEnabled { + vmWrappingProposerVM = tracedvm.NewBlockVM(vmWrappingProposerVM, "proposervm", m.Tracer) + } + + // Note: linearizableVM is the VM that the Avalanche engines should be + // using. + linearizableVM := &initializeOnLinearizeVM{ + DAGVM: vm, + vmToInitialize: vmWrappingProposerVM, + vmToLinearize: untracedVMWrappedInsideProposerVM, + + registerer: snowmanRegisterer, + ctx: ctx.Context, + dbManager: vmDBManager, + genesisBytes: genesisData, + upgradeBytes: chainConfig.Upgrade, + configBytes: chainConfig.Config, + toEngine: msgChan, + fxs: fxs, + appSender: snowmanMessageSender, + } + + consensusParams := sb.Config().ConsensusParameters sampleK := consensusParams.K if uint64(sampleK) > bootstrapWeight { sampleK = int(bootstrapWeight) } // Asynchronously passes messages from the network to the consensus engine - handler, err := handler.New( - m.MsgCreator, + h, err := handler.New( ctx, vdrs, msgChan, - sb.afterBootstrapped(), m.ConsensusGossipFrequency, + m.ConsensusAppConcurrency, m.ResourceTracker, + validators.UnhandledSubnetConnector, // avalanche chains don't use subnet connector + sb, ) if err != nil { return nil, fmt.Errorf("error initializing network handler: %w", err) @@ -645,16 +832,81 @@ func (m *manager) createAvalancheChain( startupTracker := tracker.NewStartup(connectedPeers, (3*bootstrapWeight+3)/4) beacons.RegisterCallbackListener(startupTracker) - commonCfg := common.Config{ + snowmanCommonCfg := common.Config{ + Ctx: ctx, + Beacons: beacons, + SampleK: sampleK, + Alpha: bootstrapWeight/2 + 1, // must be > 50% + StartupTracker: startupTracker, + Sender: snowmanMessageSender, + BootstrapTracker: sb, + Timer: 
h, + RetryBootstrap: m.RetryBootstrap, + RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, + MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, + AncestorsMaxContainersSent: m.BootstrapAncestorsMaxContainersSent, + AncestorsMaxContainersReceived: m.BootstrapAncestorsMaxContainersReceived, + SharedCfg: &common.SharedConfig{}, + } + snowGetHandler, err := snowgetter.New(vmWrappingProposerVM, snowmanCommonCfg) + if err != nil { + return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) + } + + var snowmanConsensus smcon.Consensus = &smcon.Topological{} + if m.TracingEnabled { + snowmanConsensus = smcon.Trace(snowmanConsensus, m.Tracer) + } + + // Create engine, bootstrapper and state-syncer in this order, + // to make sure start callbacks are duly initialized + snowmanEngineConfig := smeng.Config{ + Ctx: snowmanCommonCfg.Ctx, + AllGetsServer: snowGetHandler, + VM: vmWrappingProposerVM, + Sender: snowmanCommonCfg.Sender, + Validators: vdrs, + Params: consensusParams.Parameters, + Consensus: snowmanConsensus, + } + snowmanEngine, err := smeng.New(snowmanEngineConfig) + if err != nil { + return nil, fmt.Errorf("error initializing snowman engine: %w", err) + } + + if m.TracingEnabled { + snowmanEngine = smeng.TraceEngine(snowmanEngine, m.Tracer) + } + + // create bootstrap gear + bootstrapCfg := smbootstrap.Config{ + Config: snowmanCommonCfg, + AllGetsServer: snowGetHandler, + Blocked: blockBlocker, + VM: vmWrappingProposerVM, + } + snowmanBootstrapper, err := smbootstrap.New( + context.TODO(), + bootstrapCfg, + snowmanEngine.Start, + ) + if err != nil { + return nil, fmt.Errorf("error initializing snowman bootstrapper: %w", err) + } + + if m.TracingEnabled { + snowmanBootstrapper = common.TraceBootstrapableEngine(snowmanBootstrapper, m.Tracer) + } + + avalancheCommonCfg := common.Config{ Ctx: ctx, - Validators: vdrs, Beacons: beacons, SampleK: sampleK, StartupTracker: startupTracker, Alpha: bootstrapWeight/2 + 1, // must be > 50% - 
Sender: sender, - Subnet: sb, - Timer: handler, + Sender: avalancheMessageSender, + BootstrapTracker: sb, + Timer: h, RetryBootstrap: m.RetryBootstrap, RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, @@ -663,59 +915,86 @@ func (m *manager) createAvalancheChain( SharedCfg: &common.SharedConfig{}, } - avaGetHandler, err := avagetter.New(vtxManager, commonCfg) + avaGetHandler, err := avagetter.New(vtxManager, avalancheCommonCfg) if err != nil { return nil, fmt.Errorf("couldn't initialize avalanche base message handler: %w", err) } // create bootstrap gear - bootstrapperConfig := avbootstrap.Config{ - Config: commonCfg, + avalancheBootstrapperConfig := avbootstrap.Config{ + Config: avalancheCommonCfg, AllGetsServer: avaGetHandler, VtxBlocked: vtxBlocker, TxBlocked: txBlocker, Manager: vtxManager, - VM: vm, + VM: linearizableVM, } - bootstrapper, err := avbootstrap.New( - bootstrapperConfig, - func(lastReqID uint32) error { - return handler.Consensus().Start(lastReqID + 1) - }, - ) - if err != nil { - return nil, fmt.Errorf("error initializing avalanche bootstrapper: %w", err) + + var avalancheConsensus avcon.Consensus = &avcon.Topological{} + if m.TracingEnabled { + avalancheConsensus = avcon.Trace(avalancheConsensus, m.Tracer) } - handler.SetBootstrapper(bootstrapper) // create engine gear - engineConfig := aveng.Config{ - Ctx: bootstrapperConfig.Ctx, + avalancheEngineConfig := aveng.Config{ + Ctx: ctx, AllGetsServer: avaGetHandler, - VM: bootstrapperConfig.VM, + VM: linearizableVM, Manager: vtxManager, - Sender: bootstrapperConfig.Sender, + Sender: avalancheMessageSender, Validators: vdrs, Params: consensusParams, - Consensus: &avcon.Topological{}, + Consensus: avalancheConsensus, } - engine, err := aveng.New(engineConfig) + avalancheEngine, err := aveng.New( + avalancheEngineConfig, + snowmanEngine.Start, + ) if err != nil { return nil, fmt.Errorf("error initializing avalanche engine: %w", err) } - 
handler.SetConsensus(engine) - // Register health check for this chain - chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) + if m.TracingEnabled { + avalancheEngine = aveng.TraceEngine(avalancheEngine, m.Tracer) + } + + avalancheBootstrapper, err := avbootstrap.New( + context.TODO(), + avalancheBootstrapperConfig, + avalancheEngine.Start, + snowmanBootstrapper.Start, + ) + if err != nil { + return nil, fmt.Errorf("error initializing avalanche bootstrapper: %w", err) + } + + if m.TracingEnabled { + avalancheBootstrapper = common.TraceBootstrapableEngine(avalancheBootstrapper, m.Tracer) + } + + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: avalancheBootstrapper, + Consensus: avalancheEngine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: snowmanBootstrapper, + Consensus: snowmanEngine, + }, + }) - if err := m.Health.RegisterHealthCheck(chainAlias, handler); err != nil { + // Register health check for this chain + if err := m.Health.RegisterHealthCheck(chainAlias, h, ctx.SubnetID.String()); err != nil { return nil, fmt.Errorf("couldn't add health check for chain %s: %w", chainAlias, err) } return &chain{ Name: chainAlias, - Engine: engine, - Handler: handler, + Context: ctx, + VM: vm, + Handler: h, }, nil } @@ -727,22 +1006,26 @@ func (m *manager) createSnowmanChain( beacons validators.Set, vm block.ChainVM, fxs []*common.Fx, - consensusParams snowball.Parameters, bootstrapWeight uint64, - sb Subnet, + sb subnets.Subnet, ) (*chain, error) { ctx.Lock.Lock() defer ctx.Lock.Unlock() + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Initializing, + }) + meterDBManager, err := m.DBManager.NewMeterDBManager("db", ctx.Registerer) if err != nil { return nil, err } prefixDBManager := meterDBManager.NewPrefixDBManager(ctx.ChainID[:]) - vmDBManager := prefixDBManager.NewPrefixDBManager([]byte("vm")) + vmDBManager := 
prefixDBManager.NewPrefixDBManager(vmDBPrefix) db := prefixDBManager.Current() - bootstrappingDB := prefixdb.New([]byte("bs"), db.Database) + bootstrappingDB := prefixdb.New(bootstrappingDB, db.Database) blocked, err := queue.NewWithMissing(bootstrappingDB, "block", ctx.Registerer) if err != nil { @@ -753,51 +1036,80 @@ func (m *manager) createSnowmanChain( // VM uses this channel to notify engine that a block is ready to be made msgChan := make(chan common.Message, defaultChannelSize) - gossipConfig := m.GossipConfig - if sbConfigs, ok := m.SubnetConfigs[ctx.SubnetID]; ok && ctx.SubnetID != constants.PrimaryNetworkID { - gossipConfig = sbConfigs.GossipConfig - } - // Passes messages from the consensus engine to the network - sender, err := sender.New( + messageSender, err := sender.New( ctx, m.MsgCreator, - m.MsgCreatorWithProto, - m.BanffTime, m.Net, m.ManagerConfig.Router, m.TimeoutManager, - gossipConfig, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + sb, ) if err != nil { return nil, fmt.Errorf("couldn't initialize sender: %w", err) } - if err := m.ConsensusAcceptorGroup.RegisterAcceptor(ctx.ChainID, "gossip", sender, false); err != nil { // Set up the event dipatcher + if m.TracingEnabled { + messageSender = sender.Trace(messageSender, m.Tracer) + } + + err = m.BlockAcceptorGroup.RegisterAcceptor( + ctx.ChainID, + "gossip", + messageSender, + false, + ) + if err != nil { // Set up the event dispatcher return nil, fmt.Errorf("problem initializing event dispatcher: %w", err) } - // first vm to be init is P-Chain once, which provides validator interface to all ProposerVMs + var ( + bootstrapFunc func() + subnetConnector = validators.UnhandledSubnetConnector + ) + // If [m.validatorState] is nil then we are creating the P-Chain. 
Since the + // P-Chain is the first chain to be created, we can use it to initialize + // required interfaces for the other chains if m.validatorState == nil { valState, ok := vm.(validators.State) if !ok { return nil, fmt.Errorf("expected validators.State but got %T", vm) } - lockedValState := validators.NewLockedState(&ctx.Lock, valState) - - // Initialize the validator state for future chains. - m.validatorState = lockedValState + if m.TracingEnabled { + valState = validators.Trace(valState, "platformvm", m.Tracer) + } // Notice that this context is left unlocked. This is because the // lock will already be held when accessing these values on the // P-chain. ctx.ValidatorState = valState + // Initialize the validator state for future chains. + m.validatorState = validators.NewLockedState(&ctx.Lock, valState) + if m.TracingEnabled { + m.validatorState = validators.Trace(m.validatorState, "lockedState", m.Tracer) + } + if !m.ManagerConfig.StakingEnabled { m.validatorState = validators.NewNoValidatorsState(m.validatorState) ctx.ValidatorState = validators.NewNoValidatorsState(ctx.ValidatorState) } + + // Set this func only for platform + // + // The snowman bootstrapper ensures this function is only executed once, so + // we don't need to be concerned about closing this channel multiple times. 
+ bootstrapFunc = func() { + close(m.unblockChainCreatorCh) + } + + // Set up the subnet connector for the P-Chain + subnetConnector, ok = vm.(validators.SubnetConnector) + if !ok { + return nil, fmt.Errorf("expected validators.SubnetConnector but got %T", vm) + } } // Initialize the ProposerVM and the vm wrapped inside it @@ -806,17 +1118,39 @@ func (m *manager) createSnowmanChain( return nil, fmt.Errorf("error while fetching chain config: %w", err) } + minBlockDelay := proposervm.DefaultMinBlockDelay + if subnetCfg, ok := m.SubnetConfigs[ctx.SubnetID]; ok { + minBlockDelay = subnetCfg.ProposerMinBlockDelay + } + m.Log.Info("creating proposervm wrapper", + zap.Time("activationTime", m.ApricotPhase4Time), + zap.Uint64("minPChainHeight", m.ApricotPhase4MinPChainHeight), + zap.Duration("minBlockDelay", minBlockDelay), + ) + + chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) + if m.TracingEnabled { + vm = tracedvm.NewBlockVM(vm, chainAlias, m.Tracer) + } + vm = proposervm.New( vm, m.ApricotPhase4Time, m.ApricotPhase4MinPChainHeight, - m.BanffTime, + minBlockDelay, + m.StakingCert.PrivateKey.(crypto.Signer), + m.StakingCert.Leaf, ) if m.MeterVMEnabled { vm = metervm.NewBlockVM(vm) } + if m.TracingEnabled { + vm = tracedvm.NewBlockVM(vm, "proposervm", m.Tracer) + } + if err := vm.Initialize( + context.TODO(), ctx.Context, vmDBManager, genesisData, @@ -824,25 +1158,27 @@ func (m *manager) createSnowmanChain( chainConfig.Config, msgChan, fxs, - sender, + messageSender, ); err != nil { return nil, err } + consensusParams := sb.Config().ConsensusParameters sampleK := consensusParams.K if uint64(sampleK) > bootstrapWeight { sampleK = int(bootstrapWeight) } // Asynchronously passes messages from the network to the consensus engine - handler, err := handler.New( - m.MsgCreator, + h, err := handler.New( ctx, vdrs, msgChan, - sb.afterBootstrapped(), m.ConsensusGossipFrequency, + m.ConsensusAppConcurrency, m.ResourceTracker, + subnetConnector, + sb, ) if err != nil { return 
nil, fmt.Errorf("couldn't initialize message handler: %w", err) @@ -854,14 +1190,13 @@ func (m *manager) createSnowmanChain( commonCfg := common.Config{ Ctx: ctx, - Validators: vdrs, Beacons: beacons, SampleK: sampleK, StartupTracker: startupTracker, Alpha: bootstrapWeight/2 + 1, // must be > 50% - Sender: sender, - Subnet: sb, - Timer: handler, + Sender: messageSender, + BootstrapTracker: sb, + Timer: h, RetryBootstrap: m.RetryBootstrap, RetryBootstrapWarnFrequency: m.RetryBootstrapWarnFrequency, MaxTimeGetAncestors: m.BootstrapMaxTimeGetAncestors, @@ -875,6 +1210,11 @@ func (m *manager) createSnowmanChain( return nil, fmt.Errorf("couldn't initialize snow base message handler: %w", err) } + var consensus smcon.Consensus = &smcon.Topological{} + if m.TracingEnabled { + consensus = smcon.Trace(consensus, m.Tracer) + } + // Create engine, bootstrapper and state-syncer in this order, // to make sure start callbacks are duly initialized engineConfig := smeng.Config{ @@ -883,14 +1223,17 @@ func (m *manager) createSnowmanChain( VM: vm, Sender: commonCfg.Sender, Validators: vdrs, - Params: consensusParams, - Consensus: &smcon.Topological{}, + Params: consensusParams.Parameters, + Consensus: consensus, } engine, err := smeng.New(engineConfig) if err != nil { return nil, fmt.Errorf("error initializing snowman engine: %w", err) } - handler.SetConsensus(engine) + + if m.TracingEnabled { + engine = smeng.TraceEngine(engine, m.Tracer) + } // create bootstrap gear bootstrapCfg := smbootstrap.Config{ @@ -898,16 +1241,20 @@ func (m *manager) createSnowmanChain( AllGetsServer: snowGetHandler, Blocked: blocked, VM: vm, - Bootstrapped: m.unblockChains, + Bootstrapped: bootstrapFunc, } bootstrapper, err := smbootstrap.New( + context.TODO(), bootstrapCfg, engine.Start, ) if err != nil { return nil, fmt.Errorf("error initializing snowman bootstrapper: %w", err) } - handler.SetBootstrapper(bootstrapper) + + if m.TracingEnabled { + bootstrapper = 
common.TraceBootstrapableEngine(bootstrapper, m.Tracer) + } // create state sync gear stateSyncCfg, err := syncer.NewConfig( @@ -923,33 +1270,33 @@ func (m *manager) createSnowmanChain( stateSyncCfg, bootstrapper.Start, ) - handler.SetStateSyncer(stateSyncer) - // Register health checks - chainAlias := m.PrimaryAliasOrDefault(ctx.ChainID) + if m.TracingEnabled { + stateSyncer = common.TraceStateSyncer(stateSyncer, m.Tracer) + } - if err := m.Health.RegisterHealthCheck(chainAlias, handler); err != nil { + h.SetEngineManager(&handler.EngineManager{ + Avalanche: nil, + Snowman: &handler.Engine{ + StateSyncer: stateSyncer, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) + + // Register health checks + if err := m.Health.RegisterHealthCheck(chainAlias, h, ctx.SubnetID.String()); err != nil { return nil, fmt.Errorf("couldn't add health check for chain %s: %w", chainAlias, err) } return &chain{ Name: chainAlias, - Engine: engine, - Handler: handler, + Context: ctx, + VM: vm, + Handler: h, }, nil } -func (m *manager) SubnetID(chainID ids.ID) (ids.ID, error) { - m.chainsLock.Lock() - defer m.chainsLock.Unlock() - - chain, exists := m.chains[chainID] - if !exists { - return ids.ID{}, errUnknownChainID - } - return chain.Context().SubnetID, nil -} - func (m *manager) IsBootstrapped(id ids.ID) bool { m.chainsLock.Lock() chain, exists := m.chains[id] @@ -958,35 +1305,29 @@ func (m *manager) IsBootstrapped(id ids.ID) bool { return false } - return chain.Context().GetState() == snow.NormalOp + return chain.Context().State.Get().State == snow.NormalOp } -func (m *manager) chainsNotBootstrapped() []ids.ID { - m.chainsLock.Lock() - defer m.chainsLock.Unlock() +func (m *manager) subnetsNotBootstrapped() []ids.ID { + m.subnetsLock.Lock() + defer m.subnetsLock.Unlock() - chainsBootstrapping := make([]ids.ID, 0, len(m.chains)) - for chainID, chain := range m.chains { - if chain.Context().GetState() == snow.NormalOp { - continue + subnetsBootstrapping := make([]ids.ID, 0, 
len(m.subnets)) + for subnetID, subnet := range m.subnets { + if !subnet.IsBootstrapped() { + subnetsBootstrapping = append(subnetsBootstrapping, subnetID) } - chainsBootstrapping = append(chainsBootstrapping, chainID) } - return chainsBootstrapping + return subnetsBootstrapping } func (m *manager) registerBootstrappedHealthChecks() error { - bootstrappedCheck := health.CheckerFunc(func() (interface{}, error) { - chains := m.chainsNotBootstrapped() - aliases := make([]string, len(chains)) - for i, chain := range chains { - aliases[i] = m.PrimaryAliasOrDefault(chain) - } - - if len(aliases) != 0 { - return aliases, errNotBootstrapped + bootstrappedCheck := health.CheckerFunc(func(context.Context) (interface{}, error) { + subnetIDs := m.subnetsNotBootstrapped() + if len(subnetIDs) != 0 { + return subnetIDs, errNotBootstrapped } - return aliases, nil + return []ids.ID{}, nil }) if err := m.Health.RegisterReadinessCheck("bootstrapped", bootstrappedCheck); err != nil { return fmt.Errorf("couldn't register bootstrapped readiness check: %w", err) @@ -997,33 +1338,78 @@ func (m *manager) registerBootstrappedHealthChecks() error { return nil } +// Starts chain creation loop to process queued chains +func (m *manager) StartChainCreator(platformParams ChainParameters) error { + // Get the Primary Network's subnet config. If it wasn't registered, then we + // throw a fatal error. + sbConfig, ok := m.SubnetConfigs[constants.PrimaryNetworkID] + if !ok { + return errNoPlatformSubnetConfig + } + + m.subnetsLock.Lock() + sb := subnets.New(m.NodeID, sbConfig) + m.subnets[platformParams.SubnetID] = sb + sb.AddChain(platformParams.ID) + m.subnetsLock.Unlock() + + // The P-chain is created synchronously to ensure that `VM.Initialize` has + // finished before returning from this function. This is required because + // the P-chain initializes state that the rest of the node initialization + // depends on. 
+ m.createChain(platformParams) + + m.Log.Info("starting chain creator") + go m.dispatchChainCreator() + return nil +} + +func (m *manager) dispatchChainCreator() { + select { + // This channel will be closed when Shutdown is called on the manager. + case <-m.chainCreatorShutdownCh: + return + case <-m.unblockChainCreatorCh: + } + + // Handle chain creations + for { + // Get the next chain we should create. + // Dequeue waits until an element is pushed, so this is not + // busy-looping. + chainParams, ok := m.chainsQueue.PopLeft() + if !ok { // queue is closed, return directly + return + } + m.createChain(chainParams) + } +} + +// Shutdown stops all the chains +func (m *manager) closeChainCreator() { + m.Log.Info("stopping chain creator") + m.chainsQueue.Close() + close(m.chainCreatorShutdownCh) +} + // Shutdown stops all the chains func (m *manager) Shutdown() { m.Log.Info("shutting down chain manager") - m.ManagerConfig.Router.Shutdown() + m.closeChainCreator() + m.ManagerConfig.Router.Shutdown(context.TODO()) } // LookupVM returns the ID of the VM associated with an alias -func (m *manager) LookupVM(alias string) (ids.ID, error) { return m.VMManager.Lookup(alias) } +func (m *manager) LookupVM(alias string) (ids.ID, error) { + return m.VMManager.Lookup(alias) +} // Notify registrants [those who want to know about the creation of chains] // that the specified chain has been created -func (m *manager) notifyRegistrants(name string, engine common.Engine) { +func (m *manager) notifyRegistrants(name string, ctx *snow.ConsensusContext, vm common.VM) { for _, registrant := range m.registrants { - registrant.RegisterChain(name, engine) - } -} - -// Returns: -// 1) the alias that already exists, or the empty string if there is none -// 2) true iff there exists a chain such that the chain has an alias in [aliases] -func (m *manager) isChainWithAlias(aliases ...string) (string, bool) { - for _, alias := range aliases { - if _, err := m.Lookup(alias); err == nil { - return 
alias, true - } + registrant.RegisterChain(name, ctx, vm) } - return "", false } // getChainConfig returns value of a entry by looking at ID key and alias key diff --git a/avalanchego/chains/mock_manager.go b/avalanchego/chains/mock_manager.go deleted file mode 100644 index ea4295db..00000000 --- a/avalanchego/chains/mock_manager.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package chains - -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/networking/router" -) - -var _ Manager = MockManager{} - -// MockManager implements Manager but does nothing. Always returns nil error. -// To be used only in tests -type MockManager struct{} - -func (mm MockManager) Router() router.Router { return nil } -func (mm MockManager) CreateChain(ChainParameters) {} -func (mm MockManager) ForceCreateChain(ChainParameters) {} -func (mm MockManager) AddRegistrant(Registrant) {} -func (mm MockManager) Aliases(ids.ID) ([]string, error) { return nil, nil } -func (mm MockManager) PrimaryAlias(ids.ID) (string, error) { return "", nil } -func (mm MockManager) PrimaryAliasOrDefault(ids.ID) string { return "" } -func (mm MockManager) Alias(ids.ID, string) error { return nil } -func (mm MockManager) RemoveAliases(ids.ID) {} -func (mm MockManager) Shutdown() {} -func (mm MockManager) SubnetID(ids.ID) (ids.ID, error) { return ids.ID{}, nil } -func (mm MockManager) IsBootstrapped(ids.ID) bool { return false } - -func (mm MockManager) Lookup(s string) (ids.ID, error) { - id, err := ids.FromString(s) - if err == nil { - return id, nil - } - return ids.ID{}, nil -} - -func (mm MockManager) LookupVM(s string) (ids.ID, error) { - id, err := ids.FromString(s) - if err == nil { - return id, nil - } - return ids.ID{}, nil -} diff --git a/avalanchego/chains/registrant.go b/avalanchego/chains/registrant.go index 1c9fc0a5..3a213704 100644 --- 
a/avalanchego/chains/registrant.go +++ b/avalanchego/chains/registrant.go @@ -1,16 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chains import ( + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" ) // Registrant can register the existence of a chain type Registrant interface { - // Called when the chain described by [engine] is created + // Called when a chain is created // This function is called before the chain starts processing messages - // [engine] should be an avalanche.Engine or snowman.Engine - RegisterChain(name string, engine common.Engine) + // [vm] should be a vertex.DAGVM or block.ChainVM + RegisterChain(chainName string, ctx *snow.ConsensusContext, vm common.VM) } diff --git a/avalanchego/chains/subnet.go b/avalanchego/chains/subnet.go deleted file mode 100644 index 2299448f..00000000 --- a/avalanchego/chains/subnet.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package chains - -import ( - "sync" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/networking/sender" -) - -var _ Subnet = &subnet{} - -// Subnet keeps track of the currently bootstrapping chains in a subnet. If no -// chains in the subnet are currently bootstrapping, the subnet is considered -// bootstrapped. -type Subnet interface { - common.Subnet - - afterBootstrapped() chan struct{} - - addChain(chainID ids.ID) - removeChain(chainID ids.ID) -} - -type SubnetConfig struct { - sender.GossipConfig - - // ValidatorOnly indicates that this Subnet's Chains are available to only subnet validators. 
- ValidatorOnly bool `json:"validatorOnly" yaml:"validatorOnly"` - ConsensusParameters avalanche.Parameters `json:"consensusParameters" yaml:"consensusParameters"` -} - -type subnet struct { - lock sync.RWMutex - bootstrapping ids.Set - once sync.Once - bootstrappedSema chan struct{} -} - -func newSubnet() Subnet { - return &subnet{ - bootstrappedSema: make(chan struct{}), - } -} - -func (s *subnet) IsBootstrapped() bool { - s.lock.RLock() - defer s.lock.RUnlock() - - return s.bootstrapping.Len() == 0 -} - -func (s *subnet) Bootstrapped(chainID ids.ID) { - s.lock.Lock() - defer s.lock.Unlock() - - s.bootstrapping.Remove(chainID) - if s.bootstrapping.Len() > 0 { - return - } - - s.once.Do(func() { - close(s.bootstrappedSema) - }) -} - -func (s *subnet) afterBootstrapped() chan struct{} { - return s.bootstrappedSema -} - -func (s *subnet) addChain(chainID ids.ID) { - s.lock.Lock() - defer s.lock.Unlock() - - s.bootstrapping.Add(chainID) -} - -func (s *subnet) removeChain(chainID ids.ID) { - s.lock.Lock() - defer s.lock.Unlock() - - s.bootstrapping.Remove(chainID) -} diff --git a/avalanchego/chains/subnet_test.go b/avalanchego/chains/subnet_test.go deleted file mode 100644 index 1e1574a1..00000000 --- a/avalanchego/chains/subnet_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package chains - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" -) - -func TestSubnet(t *testing.T) { - require := require.New(t) - - chainID0 := ids.GenerateTestID() - chainID1 := ids.GenerateTestID() - chainID2 := ids.GenerateTestID() - - s := newSubnet() - s.addChain(chainID0) - require.False(s.IsBootstrapped(), "A subnet with one chain in bootstrapping shouldn't be considered bootstrapped") - - s.Bootstrapped(chainID0) - require.True(s.IsBootstrapped(), "A subnet with only bootstrapped chains should be considered bootstrapped") - - s.addChain(chainID1) - require.False(s.IsBootstrapped(), "A subnet with one chain in bootstrapping shouldn't be considered bootstrapped") - - s.addChain(chainID2) - require.False(s.IsBootstrapped(), "A subnet with one chain in bootstrapping shouldn't be considered bootstrapped") - - s.Bootstrapped(chainID1) - require.False(s.IsBootstrapped(), "A subnet with one chain in bootstrapping shouldn't be considered bootstrapped") - - s.Bootstrapped(chainID2) - require.True(s.IsBootstrapped(), "A subnet with only bootstrapped chains should be considered bootstrapped") -} diff --git a/avalanchego/chains/test_manager.go b/avalanchego/chains/test_manager.go new file mode 100644 index 00000000..e4dabea4 --- /dev/null +++ b/avalanchego/chains/test_manager.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package chains + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/networking/router" +) + +// TestManager implements Manager but does nothing. Always returns nil error. 
+// To be used only in tests +var TestManager Manager = testManager{} + +type testManager struct{} + +func (testManager) Router() router.Router { + return nil +} + +func (testManager) QueueChainCreation(ChainParameters) {} + +func (testManager) ForceCreateChain(ChainParameters) {} + +func (testManager) AddRegistrant(Registrant) {} + +func (testManager) Aliases(ids.ID) ([]string, error) { + return nil, nil +} + +func (testManager) PrimaryAlias(ids.ID) (string, error) { + return "", nil +} + +func (testManager) PrimaryAliasOrDefault(ids.ID) string { + return "" +} + +func (testManager) Alias(ids.ID, string) error { + return nil +} + +func (testManager) RemoveAliases(ids.ID) {} + +func (testManager) Shutdown() {} + +func (testManager) StartChainCreator(ChainParameters) error { + return nil +} + +func (testManager) SubnetID(ids.ID) (ids.ID, error) { + return ids.ID{}, nil +} + +func (testManager) IsBootstrapped(ids.ID) bool { + return false +} + +func (testManager) Lookup(s string) (ids.ID, error) { + return ids.FromString(s) +} + +func (testManager) LookupVM(s string) (ids.ID, error) { + return ids.FromString(s) +} diff --git a/avalanchego/codec/codec.go b/avalanchego/codec/codec.go index 2343e345..004b2553 100644 --- a/avalanchego/codec/codec.go +++ b/avalanchego/codec/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package codec @@ -9,4 +9,7 @@ import "github.com/ava-labs/avalanchego/utils/wrappers" type Codec interface { MarshalInto(interface{}, *wrappers.Packer) error Unmarshal([]byte, interface{}) error + + // Returns the size, in bytes, of [value] when it's marshaled + Size(value interface{}) (int, error) } diff --git a/avalanchego/codec/general_codec.go b/avalanchego/codec/general_codec.go index 8281b15e..ac32b84e 100644 --- a/avalanchego/codec/general_codec.go +++ b/avalanchego/codec/general_codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package codec diff --git a/avalanchego/codec/hierarchycodec/codec.go b/avalanchego/codec/hierarchycodec/codec.go index 481cd6bb..af66bf24 100644 --- a/avalanchego/codec/hierarchycodec/codec.go +++ b/avalanchego/codec/hierarchycodec/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package hierarchycodec @@ -19,10 +19,10 @@ const ( ) var ( - _ Codec = &hierarchyCodec{} - _ codec.Codec = &hierarchyCodec{} - _ codec.Registry = &hierarchyCodec{} - _ codec.GeneralCodec = &hierarchyCodec{} + _ Codec = (*hierarchyCodec)(nil) + _ codec.Codec = (*hierarchyCodec)(nil) + _ codec.Registry = (*hierarchyCodec)(nil) + _ codec.GeneralCodec = (*hierarchyCodec)(nil) ) // Codec marshals and unmarshals @@ -62,7 +62,9 @@ func New(tagNames []string, maxSliceLen uint32) Codec { } // NewDefault returns a new codec with reasonable default values -func NewDefault() Codec { return New([]string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) } +func NewDefault() Codec { + return New([]string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) +} // SkipRegistrations some number of type IDs func (c *hierarchyCodec) SkipRegistrations(num int) { @@ -101,6 +103,11 @@ func (c *hierarchyCodec) RegisterType(val interface{}) error { return nil } +func (*hierarchyCodec) PrefixSize(reflect.Type) int { + // see PackPrefix implementation + return wrappers.ShortLen + wrappers.ShortLen +} + func (c *hierarchyCodec) PackPrefix(p *wrappers.Packer, valueType reflect.Type) error { c.lock.RLock() defer c.lock.RUnlock() diff --git a/avalanchego/codec/hierarchycodec/codec_test.go b/avalanchego/codec/hierarchycodec/codec_test.go index 7fcbee6b..64a3cf8b 100644 --- a/avalanchego/codec/hierarchycodec/codec_test.go +++ b/avalanchego/codec/hierarchycodec/codec_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package hierarchycodec diff --git a/avalanchego/codec/linearcodec/codec.go b/avalanchego/codec/linearcodec/codec.go index 1a94c78b..30b317c8 100644 --- a/avalanchego/codec/linearcodec/codec.go +++ b/avalanchego/codec/linearcodec/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linearcodec @@ -19,10 +19,10 @@ const ( ) var ( - _ Codec = &linearCodec{} - _ codec.Codec = &linearCodec{} - _ codec.Registry = &linearCodec{} - _ codec.GeneralCodec = &linearCodec{} + _ Codec = (*linearCodec)(nil) + _ codec.Codec = (*linearCodec)(nil) + _ codec.Registry = (*linearCodec)(nil) + _ codec.GeneralCodec = (*linearCodec)(nil) ) // Codec marshals and unmarshals @@ -55,7 +55,9 @@ func New(tagNames []string, maxSliceLen uint32) Codec { } // NewDefault is a convenience constructor; it returns a new codec with reasonable default values -func NewDefault() Codec { return New([]string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) } +func NewDefault() Codec { + return New([]string{reflectcodec.DefaultTagName}, defaultMaxSliceLength) +} // NewCustomMaxLength is a convenience constructor; it returns a new codec with custom max length and default tags func NewCustomMaxLength(maxSliceLen uint32) Codec { @@ -86,6 +88,11 @@ func (c *linearCodec) RegisterType(val interface{}) error { return nil } +func (*linearCodec) PrefixSize(reflect.Type) int { + // see PackPrefix implementation + return wrappers.IntLen +} + func (c *linearCodec) PackPrefix(p *wrappers.Packer, valueType reflect.Type) error { c.lock.RLock() defer c.lock.RUnlock() diff --git a/avalanchego/codec/linearcodec/codec_test.go b/avalanchego/codec/linearcodec/codec_test.go index feed6b44..1e6b836a 100644 --- a/avalanchego/codec/linearcodec/codec_test.go +++ b/avalanchego/codec/linearcodec/codec_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package linearcodec diff --git a/avalanchego/codec/manager.go b/avalanchego/codec/manager.go index e9a91323..8cfefc6b 100644 --- a/avalanchego/codec/manager.go +++ b/avalanchego/codec/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package codec @@ -31,7 +31,7 @@ var ( errDuplicatedVersion = errors.New("duplicated codec version") ) -var _ Manager = &manager{} +var _ Manager = (*manager)(nil) // Manager describes the functionality for managing codec versions. type Manager interface { @@ -42,6 +42,12 @@ type Manager interface { // by this codec manager SetMaxSize(int) + // Size returns the size, in bytes, of [value] when it's marshaled + // using the codec with the given version. + // RegisterCodec must have been called with that version. + // If [value] is nil, returns [errMarshalNil] + Size(version uint16, value interface{}) (int, error) + // Marshal the given value using the codec with the given version. // RegisterCodec must have been called with that version. Marshal(version uint16, source interface{}) (destination []byte, err error) @@ -61,7 +67,9 @@ func NewManager(maxSize int) Manager { } // NewDefaultManager returns a new codec manager. 
-func NewDefaultManager() Manager { return NewManager(defaultMaxSize) } +func NewDefaultManager() Manager { + return NewManager(defaultMaxSize) +} type manager struct { lock sync.RWMutex @@ -89,6 +97,25 @@ func (m *manager) SetMaxSize(size int) { m.lock.Unlock() } +func (m *manager) Size(version uint16, value interface{}) (int, error) { + if value == nil { + return 0, errMarshalNil // can't marshal nil + } + + m.lock.RLock() + c, exists := m.codecs[version] + m.lock.RUnlock() + + if !exists { + return 0, errUnknownVersion + } + + res, err := c.Size(value) + + // Add [wrappers.ShortLen] for the codec version + return wrappers.ShortLen + res, err +} + // To marshal an interface, [value] must be a pointer to the interface. func (m *manager) Marshal(version uint16, value interface{}) ([]byte, error) { if value == nil { diff --git a/avalanchego/codec/mock_manager.go b/avalanchego/codec/mock_manager.go new file mode 100644 index 00000000..0dc2c439 --- /dev/null +++ b/avalanchego/codec/mock_manager.go @@ -0,0 +1,108 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/codec (interfaces: Manager) + +// Package codec is a generated GoMock package. +package codec + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockManager is a mock of Manager interface. +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager. +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance. +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// Marshal mocks base method. +func (m *MockManager) Marshal(arg0 uint16, arg1 interface{}) ([]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Marshal", arg0, arg1) + ret0, _ := ret[0].([]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Marshal indicates an expected call of Marshal. +func (mr *MockManagerMockRecorder) Marshal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Marshal", reflect.TypeOf((*MockManager)(nil).Marshal), arg0, arg1) +} + +// RegisterCodec mocks base method. +func (m *MockManager) RegisterCodec(arg0 uint16, arg1 Codec) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterCodec", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RegisterCodec indicates an expected call of RegisterCodec. +func (mr *MockManagerMockRecorder) RegisterCodec(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCodec", reflect.TypeOf((*MockManager)(nil).RegisterCodec), arg0, arg1) +} + +// SetMaxSize mocks base method. +func (m *MockManager) SetMaxSize(arg0 int) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetMaxSize", arg0) +} + +// SetMaxSize indicates an expected call of SetMaxSize. +func (mr *MockManagerMockRecorder) SetMaxSize(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetMaxSize", reflect.TypeOf((*MockManager)(nil).SetMaxSize), arg0) +} + +// Size mocks base method. +func (m *MockManager) Size(arg0 uint16, arg1 interface{}) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Size", arg0, arg1) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Size indicates an expected call of Size. 
+func (mr *MockManagerMockRecorder) Size(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Size", reflect.TypeOf((*MockManager)(nil).Size), arg0, arg1) +} + +// Unmarshal mocks base method. +func (m *MockManager) Unmarshal(arg0 []byte, arg1 interface{}) (uint16, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Unmarshal", arg0, arg1) + ret0, _ := ret[0].(uint16) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Unmarshal indicates an expected call of Unmarshal. +func (mr *MockManagerMockRecorder) Unmarshal(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unmarshal", reflect.TypeOf((*MockManager)(nil).Unmarshal), arg0, arg1) +} diff --git a/avalanchego/codec/reflectcodec/struct_fielder.go b/avalanchego/codec/reflectcodec/struct_fielder.go index 49e5d860..7a54182c 100644 --- a/avalanchego/codec/reflectcodec/struct_fielder.go +++ b/avalanchego/codec/reflectcodec/struct_fielder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reflectcodec @@ -18,7 +18,7 @@ const ( TagValue = "true" ) -var _ StructFielder = &structFielder{} +var _ StructFielder = (*structFielder)(nil) type FieldDesc struct { Index int diff --git a/avalanchego/codec/reflectcodec/type_codec.go b/avalanchego/codec/reflectcodec/type_codec.go index 710060aa..a92ade18 100644 --- a/avalanchego/codec/reflectcodec/type_codec.go +++ b/avalanchego/codec/reflectcodec/type_codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package reflectcodec @@ -27,7 +27,7 @@ var ( errExtraSpace = errors.New("trailing buffer space") ) -var _ codec.Codec = &genericCodec{} +var _ codec.Codec = (*genericCodec)(nil) type TypeCodec interface { // UnpackPrefix unpacks the prefix of an interface from the given packer. @@ -42,22 +42,31 @@ type TypeCodec interface { // When deserializing the bytes, the prefix specifies which concrete type // to deserialize into. PackPrefix(*wrappers.Packer, reflect.Type) error + + // PrefixSize returns prefix length for the given type into the given + // packer. + PrefixSize(reflect.Type) int } // genericCodec handles marshaling and unmarshaling of structs with a generic // implementation for interface encoding. // // A few notes: -// 1) We use "marshal" and "serialize" interchangeably, and "unmarshal" and "deserialize" interchangeably -// 2) To include a field of a struct in the serialized form, add the tag `{tagName}:"true"` to it. `{tagName}` defaults to `serialize`. -// 3) These typed members of a struct may be serialized: -// bool, string, uint[8,16,32,64], int[8,16,32,64], -// structs, slices, arrays, interface. -// structs, slices and arrays can only be serialized if their constituent values can be. -// 4) To marshal an interface, you must pass a pointer to the value -// 5) To unmarshal an interface, you must call codec.RegisterType([instance of the type that fulfills the interface]). -// 6) Serialized fields must be exported -// 7) nil slices are marshaled as empty slices +// +// 1. We use "marshal" and "serialize" interchangeably, and "unmarshal" and +// "deserialize" interchangeably +// 2. To include a field of a struct in the serialized form, add the tag +// `{tagName}:"true"` to it. `{tagName}` defaults to `serialize`. +// 3. These typed members of a struct may be serialized: +// bool, string, uint[8,16,32,64], int[8,16,32,64], +// structs, slices, arrays, interface. +// structs, slices and arrays can only be serialized if their constituent +// values can be. 
+// 4. To marshal an interface, you must pass a pointer to the value +// 5. To unmarshal an interface, you must call +// codec.RegisterType([instance of the type that fulfills the interface]). +// 6. Serialized fields must be exported +// 7. nil slices are marshaled as empty slices type genericCodec struct { typer TypeCodec maxSliceLen uint32 @@ -73,6 +82,136 @@ func New(typer TypeCodec, tagNames []string, maxSliceLen uint32) codec.Codec { } } +func (c *genericCodec) Size(value interface{}) (int, error) { + if value == nil { + return 0, errMarshalNil // can't marshal nil + } + + size, _, err := c.size(reflect.ValueOf(value)) + return size, err +} + +// size returns the size of the value along with whether the value is constant sized. +func (c *genericCodec) size(value reflect.Value) (int, bool, error) { + switch valueKind := value.Kind(); valueKind { + case reflect.Uint8: + return wrappers.ByteLen, true, nil + case reflect.Int8: + return wrappers.ByteLen, true, nil + case reflect.Uint16: + return wrappers.ShortLen, true, nil + case reflect.Int16: + return wrappers.ShortLen, true, nil + case reflect.Uint32: + return wrappers.IntLen, true, nil + case reflect.Int32: + return wrappers.IntLen, true, nil + case reflect.Uint64: + return wrappers.LongLen, true, nil + case reflect.Int64: + return wrappers.LongLen, true, nil + case reflect.Bool: + return wrappers.BoolLen, true, nil + case reflect.String: + return wrappers.StringLen(value.String()), false, nil + case reflect.Ptr: + if value.IsNil() { + // Can't marshal nil pointers (but nil slices are fine) + return 0, false, errMarshalNil + } + return c.size(value.Elem()) + + case reflect.Interface: + if value.IsNil() { + // Can't marshal nil interfaces (but nil slices are fine) + return 0, false, errMarshalNil + } + underlyingValue := value.Interface() + underlyingType := reflect.TypeOf(underlyingValue) + prefixSize := c.typer.PrefixSize(underlyingType) + valueSize, _, err := c.size(value.Elem()) + if err != nil { + return 
0, false, err + } + return prefixSize + valueSize, false, nil + + case reflect.Slice: + numElts := value.Len() + if numElts == 0 { + return wrappers.IntLen, false, nil + } + + size, constSize, err := c.size(value.Index(0)) + if err != nil { + return 0, false, err + } + + // For fixed-size types we manually calculate lengths rather than + // processing each element separately to improve performance. + if constSize { + return wrappers.IntLen + numElts*size, false, nil + } + + for i := 1; i < numElts; i++ { + innerSize, _, err := c.size(value.Index(i)) + if err != nil { + return 0, false, err + } + size += innerSize + } + return wrappers.IntLen + size, false, nil + + case reflect.Array: + numElts := value.Len() + if numElts == 0 { + return 0, true, nil + } + + size, constSize, err := c.size(value.Index(0)) + if err != nil { + return 0, false, err + } + + // For fixed-size types we manually calculate lengths rather than + // processing each element separately to improve performance. + if constSize { + return numElts * size, true, nil + } + + for i := 1; i < numElts; i++ { + innerSize, _, err := c.size(value.Index(i)) + if err != nil { + return 0, false, err + } + size += innerSize + } + return size, false, nil + + case reflect.Struct: + serializedFields, err := c.fielder.GetSerializedFields(value.Type()) + if err != nil { + return 0, false, err + } + + var ( + size int + constSize = true + ) + for _, fieldDesc := range serializedFields { + innerSize, innerConstSize, err := c.size(value.Field(fieldDesc.Index)) + if err != nil { + return 0, false, err + } + size += innerSize + constSize = constSize && innerConstSize + } + return size, constSize, nil + + default: + return 0, false, fmt.Errorf("can't evaluate marshal length of unknown kind %s", valueKind) + } +} + // To marshal an interface, [value] must be a pointer to the interface func (c *genericCodec) MarshalInto(value interface{}, p *wrappers.Packer) error { if value == nil { @@ -86,15 +225,7 @@ func (c 
*genericCodec) MarshalInto(value interface{}, p *wrappers.Packer) error // [value]'s underlying value must not be a nil pointer or interface // c.lock should be held for the duration of this function func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSliceLen uint32) error { - valueKind := value.Kind() - switch valueKind { - case reflect.Interface, reflect.Ptr, reflect.Invalid: - if value.IsNil() { // Can't marshal nil (except nil slices) - return errMarshalNil - } - } - - switch valueKind { + switch valueKind := value.Kind(); valueKind { case reflect.Uint8: p.PackByte(uint8(value.Uint())) return p.Err @@ -125,9 +256,15 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice case reflect.Bool: p.PackBool(value.Bool()) return p.Err - case reflect.Uintptr, reflect.Ptr: + case reflect.Ptr: + if value.IsNil() { // Can't marshal nil (except nil slices) + return errMarshalNil + } return c.marshal(value.Elem(), p, c.maxSliceLen) case reflect.Interface: + if value.IsNil() { // Can't marshal nil (except nil slices) + return errMarshalNil + } underlyingValue := value.Interface() underlyingType := reflect.TypeOf(underlyingValue) if err := c.typer.PackPrefix(p, underlyingType); err != nil { @@ -149,6 +286,12 @@ func (c *genericCodec) marshal(value reflect.Value, p *wrappers.Packer, maxSlice if p.Err != nil { return p.Err } + if numElts == 0 { + // Returning here prevents execution of the (expensive) reflect + // calls below which check if the slice is []byte and, if it is, + // the call of value.Bytes() + return nil + } // If this is a slice of bytes, manually pack the bytes rather // than calling marshal on each byte. This improves performance. 
if elemKind := value.Type().Elem().Kind(); elemKind == reflect.Uint8 { @@ -369,8 +512,6 @@ func (c *genericCodec) unmarshal(p *wrappers.Packer, value reflect.Value, maxSli // Assign to the top-level struct's member value.Set(v) return nil - case reflect.Invalid: - return errUnmarshalNil default: return fmt.Errorf("can't unmarshal unknown type %s", value.Kind().String()) } diff --git a/avalanchego/codec/registry.go b/avalanchego/codec/registry.go index b61acf2b..9031c94f 100644 --- a/avalanchego/codec/registry.go +++ b/avalanchego/codec/registry.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package codec diff --git a/avalanchego/codec/test_codec.go b/avalanchego/codec/test_codec.go index f0a40c63..8da7a1be 100644 --- a/avalanchego/codec/test_codec.go +++ b/avalanchego/codec/test_codec.go @@ -1,23 +1,20 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package codec import ( - "bytes" "math" - "reflect" "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/wrappers" ) var Tests = []func(c GeneralCodec, t testing.TB){ TestStruct, TestRegisterStructTwice, TestUInt32, + TestUIntPtr, TestSlice, TestMaxSizeSlice, TestBool, @@ -36,6 +33,11 @@ var Tests = []func(c GeneralCodec, t testing.TB){ TestNilSliceSerialization, TestEmptySliceSerialization, TestSliceWithEmptySerialization, + TestSliceWithEmptySerializationOutOfMemory, + TestSliceTooLarge, + TestNegativeNumbers, + TestTooLargeUnmarshal, + TestUnmarshalInvalidInterface, TestRestrictedSlice, TestExtraSpace, TestSliceLengthOverflow, @@ -49,8 +51,8 @@ var MultipleTagsTests = []func(c GeneralCodec, t testing.TB){ // for the sake of testing var ( - _ Foo = &MyInnerStruct{} - _ Foo = &MyInnerStruct2{} + _ Foo = (*MyInnerStruct)(nil) + _ Foo = (*MyInnerStruct2)(nil) ) type Foo interface { @@ -61,7 +63,7 @@ type MyInnerStruct struct { Str string `serialize:"true"` } -func (m *MyInnerStruct) Foo() int { +func (*MyInnerStruct) Foo() int { return 1 } @@ -69,7 +71,7 @@ type MyInnerStruct2 struct { Bool bool `serialize:"true"` } -func (m *MyInnerStruct2) Foo() int { +func (*MyInnerStruct2) Foo() int { return 2 } @@ -102,6 +104,8 @@ type myStruct struct { // Test marshaling/unmarshaling a complicated struct func TestStruct(codec GeneralCodec, t testing.TB) { + require := require.New(t) + temp := Foo(&MyInnerStruct{}) myStructInstance := myStruct{ InnerStruct: MyInnerStruct{"hello"}, @@ -110,7 +114,7 @@ func TestStruct(codec GeneralCodec, t testing.TB) { Member2: 2, MySlice: []byte{1, 2, 3, 4}, MySlice2: []string{"one", "two", "three"}, - MySlice3: []MyInnerStruct{{"a"}, {"b"}, {"c"}}, + MySlice3: []MyInnerStruct{{"abc"}, {"ab"}, {"c"}}, MySlice4: []*MyInnerStruct2{{true}, {}}, MySlice5: []Foo{&MyInnerStruct2{true}, &MyInnerStruct2{}}, MyArray: [4]byte{5, 6, 7, 8}, @@ -129,249 +133,212 @@ func TestStruct(codec GeneralCodec, t testing.TB) { } 
manager := NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - codec.RegisterType(&MyInnerStruct{}), // Register the types that may be unmarshaled into interfaces - codec.RegisterType(&MyInnerStruct2{}), - manager.RegisterCodec(0, codec), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + // Register the types that may be unmarshaled into interfaces + require.NoError(codec.RegisterType(&MyInnerStruct{})) + require.NoError(codec.RegisterType(&MyInnerStruct2{})) + require.NoError(manager.RegisterCodec(0, codec)) myStructBytes, err := manager.Marshal(0, myStructInstance) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myStructInstance) + require.NoError(err) + require.Equal(len(myStructBytes), bytesLen) myStructUnmarshaled := &myStruct{} version, err := manager.Unmarshal(myStructBytes, myStructUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. Expected %d ; Returned %d", 0, version) - } + require.NoError(err) - if !reflect.DeepEqual(*myStructUnmarshaled, myStructInstance) { - t.Fatal("should be same") - } + require.Equal(uint16(0), version) + require.Equal(myStructInstance, *myStructUnmarshaled) } func TestRegisterStructTwice(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec - - errs := wrappers.Errs{} - errs.Add( - codec.RegisterType(&MyInnerStruct{}), - codec.RegisterType(&MyInnerStruct{}), // Register the same struct twice - ) - if !errs.Errored() { - t.Fatal("Registering the same struct twice should have caused an error") - } + require := require.New(t) + + require.NoError(codec.RegisterType(&MyInnerStruct{})) + require.Error(codec.RegisterType(&MyInnerStruct{})) } func TestUInt32(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) number := uint32(500) manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, 
codec) + require.NoError(err) bytes, err := manager.Marshal(0, number) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, number) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var numberUnmarshaled uint32 version, err := manager.Unmarshal(bytes, &numberUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. Expected %d ; Returned %d", 0, version) - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(number, numberUnmarshaled) +} - if number != numberUnmarshaled { - t.Fatal("expected marshaled and unmarshaled values to match") - } +func TestUIntPtr(codec GeneralCodec, t testing.TB) { + require := require.New(t) + + manager := NewDefaultManager() + + err := manager.RegisterCodec(0, codec) + require.NoError(err) + + number := uintptr(500) + _, err = manager.Marshal(0, number) + require.Error(err) } func TestSlice(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) mySlice := []bool{true, false, true, true} manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) bytes, err := manager.Marshal(0, mySlice) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, mySlice) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var sliceUnmarshaled []bool version, err := manager.Unmarshal(bytes, &sliceUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(mySlice, sliceUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(mySlice, sliceUnmarshaled) } // Test marshalling/unmarshalling largest possible slice func TestMaxSizeSlice(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) mySlice := make([]string, math.MaxUint16) mySlice[0] = "first!" mySlice[math.MaxUint16-1] = "last!" manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) + bytes, err := manager.Marshal(0, mySlice) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, mySlice) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var sliceUnmarshaled []string version, err := manager.Unmarshal(bytes, &sliceUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(mySlice, sliceUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(mySlice, sliceUnmarshaled) } // Test marshalling a bool func TestBool(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) myBool := true manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) + bytes, err := manager.Marshal(0, myBool) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myBool) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var boolUnmarshaled bool version, err := manager.Unmarshal(bytes, &boolUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(myBool, boolUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(myBool, boolUnmarshaled) } // Test marshalling an array func TestArray(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) myArr := [5]uint64{5, 6, 7, 8, 9} manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) + bytes, err := manager.Marshal(0, myArr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myArr) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var myArrUnmarshaled [5]uint64 version, err := manager.Unmarshal(bytes, &myArrUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(myArr, myArrUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(myArr, myArrUnmarshaled) } // Test marshalling a really big array func TestBigArray(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) myArr := [30000]uint64{5, 6, 7, 8, 9} manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) + bytes, err := manager.Marshal(0, myArr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myArr) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var myArrUnmarshaled [30000]uint64 version, err := manager.Unmarshal(bytes, &myArrUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(myArr, myArrUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(myArr, myArrUnmarshaled) } // Test marshalling a pointer to a struct func TestPointerToStruct(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) myPtr := &MyInnerStruct{Str: "Hello!"} manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) + bytes, err := manager.Marshal(0, myPtr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myPtr) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var myPtrUnmarshaled *MyInnerStruct version, err := manager.Unmarshal(bytes, &myPtrUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(myPtr, myPtrUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(myPtr, myPtrUnmarshaled) } // Test marshalling a slice of structs func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { + require := require.New(t) + mySlice := []MyInnerStruct3{ { Str: "One", @@ -384,64 +351,57 @@ func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { F: &MyInnerStruct{"Six"}, }, } - if err := codec.RegisterType(&MyInnerStruct{}); err != nil { - t.Fatal(err) - } + err := codec.RegisterType(&MyInnerStruct{}) + require.NoError(err) + manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err = manager.RegisterCodec(0, codec) + require.NoError(err) + bytes, err := manager.Marshal(0, mySlice) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, mySlice) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var mySliceUnmarshaled []MyInnerStruct3 version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(mySlice, mySliceUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(mySlice, mySliceUnmarshaled) } // Test marshalling an interface func TestInterface(codec GeneralCodec, t testing.TB) { - if err := codec.RegisterType(&MyInnerStruct2{}); err != nil { - t.Fatal(err) - } + require := require.New(t) + + err := codec.RegisterType(&MyInnerStruct2{}) + require.NoError(err) + manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err = manager.RegisterCodec(0, codec) + require.NoError(err) var f Foo = &MyInnerStruct2{true} bytes, err := manager.Marshal(0, &f) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, &f) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var unmarshaledFoo Foo version, err := manager.Unmarshal(bytes, &unmarshaledFoo) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(f, unmarshaledFoo) { - t.Fatal("expected unmarshaled value to match original") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(f, unmarshaledFoo) } // Test marshalling a slice of interfaces func TestSliceOfInterface(codec GeneralCodec, t testing.TB) { + require := require.New(t) + mySlice := []Foo{ &MyInnerStruct{ Str: "Hello", @@ -450,34 +410,31 @@ func TestSliceOfInterface(codec GeneralCodec, t testing.TB) { Str: ", World!", }, } - if err := codec.RegisterType(&MyInnerStruct{}); err != nil { - t.Fatal(err) - } + err := codec.RegisterType(&MyInnerStruct{}) + require.NoError(err) + manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err = manager.RegisterCodec(0, codec) + require.NoError(err) + bytes, err := manager.Marshal(0, mySlice) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, mySlice) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var mySliceUnmarshaled []Foo version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(mySlice, mySliceUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(mySlice, mySliceUnmarshaled) } // Test marshalling an array of interfaces func TestArrayOfInterface(codec GeneralCodec, t testing.TB) { + require := require.New(t) + myArray := [2]Foo{ &MyInnerStruct{ Str: "Hello", @@ -486,96 +443,81 @@ func TestArrayOfInterface(codec GeneralCodec, t testing.TB) { Str: ", World!", }, } - if err := codec.RegisterType(&MyInnerStruct{}); err != nil { - t.Fatal(err) - } + err := codec.RegisterType(&MyInnerStruct{}) + require.NoError(err) + manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err = manager.RegisterCodec(0, codec) + require.NoError(err) + bytes, err := manager.Marshal(0, myArray) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myArray) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var myArrayUnmarshaled [2]Foo version, err := manager.Unmarshal(bytes, &myArrayUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(myArray, myArrayUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(myArray, myArrayUnmarshaled) } // Test marshalling a pointer to an interface func TestPointerToInterface(codec GeneralCodec, t testing.TB) { + require := require.New(t) + var myinnerStruct Foo = &MyInnerStruct{Str: "Hello!"} myPtr := &myinnerStruct - if err := codec.RegisterType(&MyInnerStruct{}); err != nil { - t.Fatal(err) - } + err := codec.RegisterType(&MyInnerStruct{}) + require.NoError(err) + manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err = manager.RegisterCodec(0, codec) + require.NoError(err) bytes, err := manager.Marshal(0, &myPtr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, &myPtr) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var myPtrUnmarshaled *Foo version, err := manager.Unmarshal(bytes, &myPtrUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(myPtr, myPtrUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(myPtr, myPtrUnmarshaled) } // Test marshalling a string func TestString(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) myString := "Ayy" manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) bytes, err := manager.Marshal(0, myString) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myString) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var stringUnmarshaled string version, err := manager.Unmarshal(bytes, &stringUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(myString, stringUnmarshaled) { - t.Fatal("expected marshaled and unmarshaled values to match") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(myString, stringUnmarshaled) } // Ensure a nil slice is unmarshaled to slice with length 0 func TestNilSlice(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type structWithSlice struct { Slice []byte `serialize:"true"` @@ -583,37 +525,31 @@ func TestNilSlice(codec GeneralCodec, t testing.TB) { myStruct := structWithSlice{Slice: nil} manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) bytes, err := manager.Marshal(0, myStruct) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myStruct) + require.NoError(err) + require.Equal(len(bytes), bytesLen) var 
structUnmarshaled structWithSlice version, err := manager.Unmarshal(bytes, &structUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. Expected %d ; Returned %d", 0, version) - } - - if len(structUnmarshaled.Slice) != 0 { - t.Fatal("expected slice to have length 0") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(0, len(structUnmarshaled.Slice)) } // Ensure that trying to serialize a struct with an unexported member // that has `serialize:"true"` returns error func TestSerializeUnexportedField(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type s struct { ExportedField string `serialize:"true"` - unexportedField string `serialize:"true"` + unexportedField string `serialize:"true"` //nolint:revive } myS := s{ @@ -622,17 +558,18 @@ func TestSerializeUnexportedField(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) - if _, err := manager.Marshal(0, myS); err == nil { - t.Fatalf("expected err but got none") - } + _, err = manager.Marshal(0, myS) + require.Error(err) + + _, err = manager.Size(0, myS) + require.Error(err) } func TestSerializeOfNoSerializeField(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type s struct { SerializedField string `serialize:"true"` @@ -645,108 +582,86 @@ func TestSerializeOfNoSerializeField(codec GeneralCodec, t testing.TB) { UnmarkedField: "No declared serialize", } manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) marshalled, err := manager.Marshal(0, myS) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myS) + require.NoError(err) + 
require.Equal(len(marshalled), bytesLen) unmarshalled := s{} version, err := manager.Unmarshal(marshalled, &unmarshalled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. Expected %d ; Returned %d", 0, version) - } + require.NoError(err) + require.Equal(uint16(0), version) expectedUnmarshalled := s{SerializedField: "Serialize me"} - if !reflect.DeepEqual(unmarshalled, expectedUnmarshalled) { - t.Fatalf("Got %#v, expected %#v", unmarshalled, expectedUnmarshalled) - } + require.Equal(expectedUnmarshalled, unmarshalled) } // Test marshalling of nil slice func TestNilSliceSerialization(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type simpleSliceStruct struct { Arr []uint32 `serialize:"true"` } manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) val := &simpleSliceStruct{} expected := []byte{0, 0, 0, 0, 0, 0} // 0 for codec version, then nil slice marshaled as 0 length slice result, err := manager.Marshal(0, val) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } + require.NoError(err) + require.Equal(expected, result) + + bytesLen, err := manager.Size(0, val) + require.NoError(err) + require.Equal(len(result), bytesLen) valUnmarshaled := &simpleSliceStruct{} version, err := manager.Unmarshal(result, &valUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if len(valUnmarshaled.Arr) != 0 { - t.Fatal("should be 0 length") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(0, len(valUnmarshaled.Arr)) } // Test marshaling a slice that has 0 elements (but isn't nil) func TestEmptySliceSerialization(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type simpleSliceStruct struct { Arr []uint32 `serialize:"true"` } manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) val := &simpleSliceStruct{Arr: make([]uint32, 0, 1)} expected := []byte{0, 0, 0, 0, 0, 0} // 0 for codec version (uint16) and 0 for size (uint32) result, err := manager.Marshal(0, val) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + require.Equal(expected, result) - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } + bytesLen, err := manager.Size(0, val) + require.NoError(err) + require.Equal(len(result), bytesLen) valUnmarshaled := &simpleSliceStruct{} version, err := manager.Unmarshal(result, &valUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(valUnmarshaled, val) { - t.Fatal("should be same") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(val, valUnmarshaled) } // Test marshaling slice that is not nil and not empty func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type emptyStruct struct{} @@ -755,38 +670,30 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) val := &nestedSliceStruct{ Arr: make([]emptyStruct, 1000), } expected := []byte{0x00, 0x00, 0x00, 0x00, 0x03, 0xE8} // codec version (0x00, 0x00) then 1000 for numElts result, err := manager.Marshal(0, val) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } + require.NoError(err) + require.Equal(expected, result) + + bytesLen, err := manager.Size(0, val) + require.NoError(err) + require.Equal(len(result), bytesLen) unmarshaled := nestedSliceStruct{} version, err := manager.Unmarshal(expected, &unmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. 
Expected %d ; Returned %d", 0, version) - } - - if len(unmarshaled.Arr) != 1000 { - t.Fatalf("Should have created a slice of length %d", 1000) - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(1000, len(unmarshaled.Arr)) } func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type emptyStruct struct{} @@ -795,42 +702,36 @@ func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB } manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) val := &nestedSliceStruct{ Arr: make([]emptyStruct, math.MaxInt32), } - bytes, err := manager.Marshal(0, val) - if err == nil { - t.Fatal("should have failed due to slice length too large") - } + _, err = manager.Marshal(0, val) + require.Error(err) - unmarshaled := nestedSliceStruct{} - if _, err := manager.Unmarshal(bytes, &unmarshaled); err == nil { - t.Fatalf("Should have errored due to excess memory requested") - } + bytesLen, err := manager.Size(0, val) + require.NoError(err) + require.Equal(6, bytesLen) // 2 byte codec version + 4 byte length prefix } func TestSliceTooLarge(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) val := []struct{}{} b := []byte{0x00, 0x00, 0xff, 0xff, 0xff, 0xff} - if _, err := manager.Unmarshal(b, &val); err == nil { - t.Fatalf("Should have errored due to memory usage") - } + _, err = manager.Unmarshal(b, &val) + require.Error(err) } // Ensure serializing structs with negative number members works func TestNegativeNumbers(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type s struct { MyInt8 
int8 `serialize:"true"` @@ -840,35 +741,27 @@ func TestNegativeNumbers(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) myS := s{-1, -2, -3, -4} bytes, err := manager.Marshal(0, myS) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + bytesLen, err := manager.Size(0, myS) + require.NoError(err) + require.Equal(len(bytes), bytesLen) mySUnmarshaled := s{} version, err := manager.Unmarshal(bytes, &mySUnmarshaled) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. Expected %d ; Returned %d", 0, version) - } - - if !reflect.DeepEqual(myS, mySUnmarshaled) { - t.Log(mySUnmarshaled) - t.Log(myS) - t.Fatal("expected marshaled and unmarshaled structs to be the same") - } + require.NoError(err) + require.Equal(uint16(0), version) + require.Equal(myS, mySUnmarshaled) } // Ensure deserializing structs with too many bytes errors correctly func TestTooLargeUnmarshal(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type inner struct { B uint16 `serialize:"true"` @@ -876,15 +769,12 @@ func TestTooLargeUnmarshal(codec GeneralCodec, t testing.TB) { bytes := []byte{0, 0, 0, 0} manager := NewManager(3) - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) s := inner{} - _, err := manager.Unmarshal(bytes, &s) - if err == nil { - t.Fatalf("Should have errored due to too many bytes provided") - } + _, err = manager.Unmarshal(bytes, &s) + require.Error(err) } type outerInterface interface { @@ -897,48 +787,39 @@ type outer struct { type innerInterface struct{} -func (it *innerInterface) ToInt() int { return 0 } +func (*innerInterface) ToInt() int { + return 0 +} type innerNoInterface struct{} // Ensure deserializing structs into the wrong interface 
errors gracefully func TestUnmarshalInvalidInterface(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) manager := NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - codec.RegisterType(&innerInterface{}), - codec.RegisterType(&innerNoInterface{}), - manager.RegisterCodec(0, codec), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(codec.RegisterType(&innerInterface{})) + require.NoError(codec.RegisterType(&innerNoInterface{})) + require.NoError(manager.RegisterCodec(0, codec)) { bytes := []byte{0, 0, 0, 0, 0, 0} s := outer{} version, err := manager.Unmarshal(bytes, &s) - if err != nil { - t.Fatal(err) - } - if version != 0 { - t.Fatalf("wrong version returned. Expected %d ; Returned %d", 0, version) - } + require.NoError(err) + require.Equal(uint16(0), version) } { bytes := []byte{0, 0, 0, 0, 0, 1} s := outer{} - if _, err := manager.Unmarshal(bytes, &s); err == nil { - t.Fatalf("should have errored") - } + _, err := manager.Unmarshal(bytes, &s) + require.Error(err) } } // Ensure deserializing slices that have been length restricted errors correctly func TestRestrictedSlice(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type inner struct { Bytes []byte `serialize:"true" len:"2"` @@ -946,41 +827,36 @@ func TestRestrictedSlice(codec GeneralCodec, t testing.TB) { bytes := []byte{0, 0, 0, 0, 0, 3, 0, 1, 2} manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) s := inner{} - if _, err := manager.Unmarshal(bytes, &s); err == nil { - t.Fatalf("Should have errored due to large of a slice") - } + _, err = manager.Unmarshal(bytes, &s) + require.Error(err) s.Bytes = []byte{0, 1, 2} - if _, err := manager.Marshal(0, s); err == nil { - t.Fatalf("Should have errored due to large of a slice") - } + _, err = manager.Marshal(0, s) + require.Error(err) } 
// Test unmarshaling something with extra data func TestExtraSpace(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) - byteSlice := []byte{0x00, 0x00, 0x01, 0x02} // codec version 0x0000 then 0x01 for b then 0x02 as extra data. + // codec version 0x0000 then 0x01 for b then 0x02 as extra data. + byteSlice := []byte{0x00, 0x00, 0x01, 0x02} var b byte - _, err := manager.Unmarshal(byteSlice, &b) - if err == nil { - t.Fatalf("Should have errored due to too many bytes being passed in") - } + _, err = manager.Unmarshal(byteSlice, &b) + require.Error(err) } // Ensure deserializing slices that have been length restricted errors correctly func TestSliceLengthOverflow(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec + require := require.New(t) type inner struct { Vals []uint32 `serialize:"true" len:"2"` @@ -993,14 +869,12 @@ func TestSliceLengthOverflow(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - if err := manager.RegisterCodec(0, codec); err != nil { - t.Fatal(err) - } + err := manager.RegisterCodec(0, codec) + require.NoError(err) s := inner{} - if _, err := manager.Unmarshal(bytes, &s); err == nil { - t.Fatalf("Should have errored due to large of a slice") - } + _, err = manager.Unmarshal(bytes, &s) + require.Error(err) } type MultipleVersionsStruct struct { @@ -1013,8 +887,6 @@ type MultipleVersionsStruct struct { } func TestMultipleTags(codec GeneralCodec, t testing.TB) { - var _ GeneralCodec = codec - // received codec is expected to have both v1 and v2 registered as tags inputs := MultipleVersionsStruct{ BothTags: "both Tags", @@ -1028,6 +900,7 @@ func TestMultipleTags(codec GeneralCodec, t testing.TB) { manager := NewDefaultManager() for _, codecVersion := range []uint16{0, 1, 2022} { require := require.New(t) + 
err := manager.RegisterCodec(codecVersion, codec) require.NoError(err) diff --git a/avalanchego/config/config.go b/avalanchego/config/config.go index 5ea87ea5..ea71bf6e 100644 --- a/avalanchego/config/config.go +++ b/avalanchego/config/config.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config import ( + "context" "crypto/tls" "encoding/base64" "encoding/json" @@ -19,7 +20,7 @@ import ( "github.com/spf13/viper" - "github.com/ava-labs/avalanchego/app/runner" + "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" @@ -33,9 +34,11 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/networking/router" - "github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/dynamicip" @@ -44,23 +47,25 @@ import ( "github.com/ava-labs/avalanchego/utils/password" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/storage" "github.com/ava-labs/avalanchego/utils/timer" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/proposervm" ) const ( - pluginsDirName = "plugins" 
chainConfigFileName = "config" chainUpgradeFileName = "upgrade" subnetConfigFileExt = ".json" + ipResolutionTimeout = 30 * time.Second ) var ( + // Deprecated key --> deprecation message (i.e. which key replaces it) deprecatedKeys = map[string]string{ - DynamicUpdateDurationKey: fmt.Sprintf("replaced by %q", PublicIPResolutionFreqKey), - DynamicPublicIPResolverKey: fmt.Sprintf("replaced by %q", PublicIPResolutionServiceKey), + NetworkCompressionEnabledKey: fmt.Sprintf("use --%s instead", NetworkCompressionTypeKey), } errInvalidStakerWeights = errors.New("staking weights must be positive") @@ -74,62 +79,27 @@ var ( errStakeMaxConsumptionTooLarge = fmt.Errorf("max stake consumption must be less than or equal to %d", reward.PercentDenominator) errStakeMaxConsumptionBelowMin = errors.New("stake max consumption can't be less than min stake consumption") errStakeMintingPeriodBelowMin = errors.New("stake minting period can't be less than max stake duration") - errCannotWhitelistPrimaryNetwork = errors.New("cannot whitelist primary network") + errCannotTrackPrimaryNetwork = errors.New("cannot track primary network") errStakingKeyContentUnset = fmt.Errorf("%s key not set but %s set", StakingTLSKeyContentKey, StakingCertContentKey) errStakingCertContentUnset = fmt.Errorf("%s key set but %s not set", StakingTLSKeyContentKey, StakingCertContentKey) + errMissingStakingSigningKeyFile = errors.New("missing staking signing key file") + errTracingEndpointEmpty = fmt.Errorf("%s cannot be empty", TracingEndpointKey) + errPluginDirNotADirectory = errors.New("plugin dir is not a directory") + errZstdNotSupported = errors.New("zstd compression not supported until v1.10") ) -func GetRunnerConfig(v *viper.Viper) (runner.Config, error) { - config := runner.Config{ - DisplayVersionAndExit: v.GetBool(VersionKey), - BuildDir: GetExpandedArg(v, BuildDirKey), - PluginMode: v.GetBool(PluginModeKey), - } - - // Build directory should have this structure: - // - // build - // ├── avalanchego (the 
binary from compiling the app directory) - // └── plugins - // └── evm - validBuildDir := func(dir string) bool { - info, err := os.Stat(dir) - if err != nil || !info.IsDir() { - return false - } - - // make sure the expected subdirectory exists - _, err = os.Stat(filepath.Join(dir, pluginsDirName)) - return err == nil - } - if validBuildDir(config.BuildDir) { - return config, nil - } - - foundBuildDir := false - for _, dir := range defaultBuildDirs { - dir = GetExpandedString(v, dir) - if validBuildDir(dir) { - config.BuildDir = dir - foundBuildDir = true - break - } - } - if !foundBuildDir { - return runner.Config{}, fmt.Errorf( - "couldn't find valid build directory in any of the default locations: %s", - defaultBuildDirs, - ) - } - return config, nil -} - func getConsensusConfig(v *viper.Viper) avalanche.Parameters { return avalanche.Parameters{ Parameters: snowball.Parameters{ - K: v.GetInt(SnowSampleSizeKey), - Alpha: v.GetInt(SnowQuorumSizeKey), - BetaVirtuous: v.GetInt(SnowVirtuousCommitThresholdKey), + K: v.GetInt(SnowSampleSizeKey), + Alpha: v.GetInt(SnowQuorumSizeKey), + // During the X-chain linearization we require BetaVirtuous and + // BetaRogue to be equal. Therefore we use the more conservative + // BetaRogue value for both BetaVirtuous and BetaRogue. + // + // TODO: After the X-chain linearization use the + // SnowVirtuousCommitThresholdKey as before. 
+ BetaVirtuous: v.GetInt(SnowRogueCommitThresholdKey), BetaRogue: v.GetInt(SnowRogueCommitThresholdKey), ConcurrentRepolls: v.GetInt(SnowConcurrentRepollsKey), OptimalProcessing: v.GetInt(SnowOptimalProcessingKey), @@ -223,7 +193,8 @@ func getHTTPConfig(v *viper.Viper) (node.HTTPConfig, error) { } case v.IsSet(HTTPSKeyFileKey): httpsKeyFilepath := GetExpandedArg(v, HTTPSKeyFileKey) - if httpsKey, err = os.ReadFile(filepath.Clean(httpsKeyFilepath)); err != nil { + httpsKey, err = os.ReadFile(filepath.Clean(httpsKeyFilepath)) + if err != nil { return node.HTTPConfig{}, err } } @@ -237,12 +208,19 @@ func getHTTPConfig(v *viper.Viper) (node.HTTPConfig, error) { } case v.IsSet(HTTPSCertFileKey): httpsCertFilepath := GetExpandedArg(v, HTTPSCertFileKey) - if httpsCert, err = os.ReadFile(filepath.Clean(httpsCertFilepath)); err != nil { + httpsCert, err = os.ReadFile(filepath.Clean(httpsCertFilepath)) + if err != nil { return node.HTTPConfig{}, err } } config := node.HTTPConfig{ + HTTPConfig: server.HTTPConfig{ + ReadTimeout: v.GetDuration(HTTPReadTimeoutKey), + ReadHeaderTimeout: v.GetDuration(HTTPReadHeaderTimeoutKey), + WriteTimeout: v.GetDuration(HTTPWriteTimeoutKey), + IdleTimeout: v.GetDuration(HTTPIdleTimeoutKey), + }, APIConfig: node.APIConfig{ APIIndexerConfig: node.APIIndexerConfig{ IndexAPIEnabled: v.GetBool(IndexEnabledKey), @@ -260,9 +238,8 @@ func getHTTPConfig(v *viper.Viper) (node.HTTPConfig, error) { HTTPSKey: httpsKey, HTTPSCert: httpsCert, APIAllowedOrigins: v.GetStringSlice(HTTPAllowedOrigins), - - ShutdownTimeout: v.GetDuration(HTTPShutdownTimeoutKey), - ShutdownWait: v.GetDuration(HTTPShutdownWaitKey), + ShutdownTimeout: v.GetDuration(HTTPShutdownTimeoutKey), + ShutdownWait: v.GetDuration(HTTPShutdownWaitKey), } config.APIAuthConfig, err = getAPIAuthConfig(v) @@ -316,8 +293,8 @@ func getAdaptiveTimeoutConfig(v *viper.Viper) (timer.AdaptiveTimeoutConfig, erro return config, nil } -func getGossipConfig(v *viper.Viper) sender.GossipConfig { - return 
sender.GossipConfig{ +func getGossipConfig(v *viper.Viper) subnets.GossipConfig { + return subnets.GossipConfig{ AcceptedFrontierValidatorSize: uint(v.GetUint32(ConsensusGossipAcceptedFrontierValidatorSizeKey)), AcceptedFrontierNonValidatorSize: uint(v.GetUint32(ConsensusGossipAcceptedFrontierNonValidatorSizeKey)), AcceptedFrontierPeerSize: uint(v.GetUint32(ConsensusGossipAcceptedFrontierPeerSizeKey)), @@ -330,13 +307,45 @@ func getGossipConfig(v *viper.Viper) sender.GossipConfig { } } -func getNetworkConfig(v *viper.Viper, halflife time.Duration) (network.Config, error) { +func getNetworkConfig( + v *viper.Viper, + stakingEnabled bool, + halflife time.Duration, + networkID uint32, // TODO remove after cortina upgrade +) (network.Config, error) { // Set the max number of recent inbound connections upgraded to be // equal to the max number of inbound connections per second. maxInboundConnsPerSec := v.GetFloat64(InboundThrottlerMaxConnsPerSecKey) upgradeCooldown := v.GetDuration(InboundConnUpgradeThrottlerCooldownKey) upgradeCooldownInSeconds := upgradeCooldown.Seconds() maxRecentConnsUpgraded := int(math.Ceil(maxInboundConnsPerSec * upgradeCooldownInSeconds)) + + var ( + compressionType compression.Type + err error + ) + if v.IsSet(NetworkCompressionTypeKey) { + if v.IsSet(NetworkCompressionEnabledKey) { + return network.Config{}, fmt.Errorf("cannot set both %q and %q", NetworkCompressionTypeKey, NetworkCompressionEnabledKey) + } + + compressionType, err = compression.TypeFromString(v.GetString(NetworkCompressionTypeKey)) + if err != nil { + return network.Config{}, err + } + } else { + if v.GetBool(NetworkCompressionEnabledKey) { + compressionType = constants.DefaultNetworkCompressionType + } else { + compressionType = compression.TypeNone + } + } + + cortinaTime := version.GetCortinaTime(networkID) + if compressionType == compression.TypeZstd && !time.Now().After(cortinaTime) { + // TODO remove after cortina upgrade + return network.Config{}, errZstdNotSupported + 
} config := network.Config{ // Throttling ThrottlerConfig: network.ThrottlerConfig{ @@ -373,6 +382,7 @@ func getNetworkConfig(v *viper.Viper, halflife time.Duration) (network.Config, e }, HealthConfig: network.HealthConfig{ + Enabled: stakingEnabled, MaxTimeSinceMsgSent: v.GetDuration(NetworkHealthMaxTimeSinceMsgSentKey), MaxTimeSinceMsgReceived: v.GetDuration(NetworkHealthMaxTimeSinceMsgReceivedKey), MaxPortionSendQueueBytesFull: v.GetFloat64(NetworkHealthMaxPortionSendQueueFillKey), @@ -381,9 +391,12 @@ func getNetworkConfig(v *viper.Viper, halflife time.Duration) (network.Config, e SendFailRateHalflife: halflife, }, + ProxyEnabled: v.GetBool(NetworkTCPProxyEnabledKey), + ProxyReadHeaderTimeout: v.GetDuration(NetworkTCPProxyReadTimeoutKey), + DialerConfig: dialer.Config{ - ThrottleRps: v.GetUint32(OutboundConnectionThrottlingRps), - ConnectionTimeout: v.GetDuration(OutboundConnectionTimeout), + ThrottleRps: v.GetUint32(OutboundConnectionThrottlingRpsKey), + ConnectionTimeout: v.GetDuration(OutboundConnectionTimeoutKey), }, TLSKeyLogFile: v.GetString(NetworkTLSKeyLogFileKey), @@ -407,7 +420,7 @@ func getNetworkConfig(v *viper.Viper, halflife time.Duration) (network.Config, e }, MaxClockDifference: v.GetDuration(NetworkMaxClockDifferenceKey), - CompressionEnabled: v.GetBool(NetworkCompressionEnabledKey), + CompressionType: compressionType, PingFrequency: v.GetDuration(NetworkPingFrequencyKey), AllowPrivateIPs: v.GetBool(NetworkAllowPrivateIPsKey), UptimeMetricFreq: v.GetDuration(UptimeMetricFreqKey), @@ -428,7 +441,7 @@ func getNetworkConfig(v *viper.Viper, halflife time.Duration) (network.Config, e case config.HealthConfig.MaxPortionSendQueueBytesFull < 0 || config.HealthConfig.MaxPortionSendQueueBytesFull > 1: return network.Config{}, fmt.Errorf("%s must be in [0,1]", NetworkHealthMaxPortionSendQueueFillKey) case config.DialerConfig.ConnectionTimeout < 0: - return network.Config{}, fmt.Errorf("%q must be >= 0", OutboundConnectionTimeout) + return 
network.Config{}, fmt.Errorf("%q must be >= 0", OutboundConnectionTimeoutKey) case config.PeerListGossipFreq < 0: return network.Config{}, fmt.Errorf("%s must be >= 0", NetworkPeerListGossipFreqKey) case config.MaxReconnectDelay < 0: @@ -451,7 +464,9 @@ func getNetworkConfig(v *viper.Viper, halflife time.Duration) (network.Config, e return config, nil } -func getBenchlistConfig(v *viper.Viper, alpha, k int) (benchlist.Config, error) { +func getBenchlistConfig(v *viper.Viper, consensusParameters avalanche.Parameters) (benchlist.Config, error) { + alpha := consensusParameters.Alpha + k := consensusParameters.K config := benchlist.Config{ Threshold: v.GetInt(BenchlistFailThresholdKey), Duration: v.GetDuration(BenchlistDurationKey), @@ -563,26 +578,16 @@ func getBootstrapConfig(v *viper.Viper, networkID uint32) (node.BootstrapConfig, } func getIPConfig(v *viper.Viper) (node.IPConfig, error) { - // If both deprecated and current flag are given, - // override deprecated flag value with new flag value. 
- ipResolutionService := v.GetString(DynamicPublicIPResolverKey) - if v.IsSet(PublicIPResolutionServiceKey) { - ipResolutionService = v.GetString(PublicIPResolutionServiceKey) - } - - ipResolutionFreq := v.GetDuration(DynamicUpdateDurationKey) - if v.IsSet(PublicIPResolutionFreqKey) { - ipResolutionFreq = v.GetDuration(PublicIPResolutionFreqKey) - } + ipResolutionService := v.GetString(PublicIPResolutionServiceKey) + ipResolutionFreq := v.GetDuration(PublicIPResolutionFreqKey) if ipResolutionFreq <= 0 { return node.IPConfig{}, fmt.Errorf("%q must be > 0", PublicIPResolutionFreqKey) } stakingPort := uint16(v.GetUint(StakingPortKey)) publicIP := v.GetString(PublicIPKey) - if publicIP != "" && ipResolutionService != "" { - return node.IPConfig{}, fmt.Errorf("only one of --%s and --%s/--%s can be given", PublicIPKey, DynamicPublicIPResolverKey, PublicIPResolutionServiceKey) + return node.IPConfig{}, fmt.Errorf("only one of --%s and --%s can be given", PublicIPKey, PublicIPResolutionServiceKey) } if publicIP != "" { @@ -606,7 +611,9 @@ func getIPConfig(v *viper.Viper) (node.IPConfig, error) { } // Use that to resolve our public IP. 
- ip, err := resolver.Resolve() + ctx, cancel := context.WithTimeout(context.Background(), ipResolutionTimeout) + defer cancel() + ip, err := resolver.Resolve(ctx) if err != nil { return node.IPConfig{}, fmt.Errorf("couldn't resolve public IP: %w", err) } @@ -760,7 +767,7 @@ func getStakingSigner(v *viper.Viper) (*bls.SecretKey, error) { } if v.IsSet(StakingSignerKeyPathKey) { - return nil, errors.New("missing staking signing key file") + return nil, errMissingStakingSigningKeyFile } key, err := bls.NewSecretKey() @@ -860,17 +867,17 @@ func getTxFeeConfig(v *viper.Viper, networkID uint32) genesis.TxFeeConfig { return genesis.GetTxFeeConfig(networkID) } -func getGenesisData(v *viper.Viper, networkID uint32) ([]byte, ids.ID, error) { +func getGenesisData(v *viper.Viper, networkID uint32, stakingCfg *genesis.StakingConfig) ([]byte, ids.ID, error) { // try first loading genesis content directly from flag/env-var if v.IsSet(GenesisConfigContentKey) { genesisData := v.GetString(GenesisConfigContentKey) - return genesis.FromFlag(networkID, genesisData) + return genesis.FromFlag(networkID, genesisData, stakingCfg) } // if content is not specified go for the file if v.IsSet(GenesisConfigFileKey) { genesisFileName := GetExpandedArg(v, GenesisConfigFileKey) - return genesis.FromFile(networkID, genesisFileName) + return genesis.FromFile(networkID, genesisFileName, stakingCfg) } // finally if file is not specified/readable go for the predefined config @@ -878,9 +885,11 @@ func getGenesisData(v *viper.Viper, networkID uint32) ([]byte, ids.ID, error) { return genesis.FromConfig(config) } -func getWhitelistedSubnets(v *viper.Viper) (ids.Set, error) { - whitelistedSubnetIDs := ids.Set{} - for _, subnet := range strings.Split(v.GetString(WhitelistedSubnetsKey), ",") { +func getTrackedSubnets(v *viper.Viper) (set.Set[ids.ID], error) { + trackSubnetsStr := v.GetString(TrackSubnetsKey) + trackSubnetsStrs := strings.Split(trackSubnetsStr, ",") + trackedSubnetIDs := 
set.NewSet[ids.ID](len(trackSubnetsStrs)) + for _, subnet := range trackSubnetsStrs { if subnet == "" { continue } @@ -889,11 +898,11 @@ func getWhitelistedSubnets(v *viper.Viper) (ids.Set, error) { return nil, fmt.Errorf("couldn't parse subnetID %q: %w", subnet, err) } if subnetID == constants.PrimaryNetworkID { - return nil, errCannotWhitelistPrimaryNetwork + return nil, errCannotTrackPrimaryNetwork } - whitelistedSubnetIDs.Add(subnetID) + trackedSubnetIDs.Add(subnetID) } - return whitelistedSubnetIDs, nil + return trackedSubnetIDs, nil } func getDatabaseConfig(v *viper.Viper, networkID uint32) (node.DatabaseConfig, error) { @@ -925,25 +934,25 @@ func getDatabaseConfig(v *viper.Viper, networkID uint32) (node.DatabaseConfig, e }, nil } -func getVMAliases(v *viper.Viper) (map[ids.ID][]string, error) { +func getAliases(v *viper.Viper, name string, contentKey string, fileKey string) (map[ids.ID][]string, error) { var fileBytes []byte - if v.IsSet(VMAliasesContentKey) { + if v.IsSet(contentKey) { var err error - aliasFlagContent := v.GetString(VMAliasesContentKey) + aliasFlagContent := v.GetString(contentKey) fileBytes, err = base64.StdEncoding.DecodeString(aliasFlagContent) if err != nil { - return nil, fmt.Errorf("unable to decode base64 content: %w", err) + return nil, fmt.Errorf("unable to decode base64 content for %s: %w", name, err) } } else { - aliasFilePath := filepath.Clean(v.GetString(VMAliasesFileKey)) + aliasFilePath := filepath.Clean(GetExpandedArg(v, fileKey)) exists, err := storage.FileExists(aliasFilePath) if err != nil { return nil, err } if !exists { - if v.IsSet(VMAliasesFileKey) { - return nil, fmt.Errorf("vm alias file does not exist in %v", aliasFilePath) + if v.IsSet(fileKey) { + return nil, fmt.Errorf("%s file does not exist in %v", name, aliasFilePath) } return nil, nil } @@ -954,28 +963,36 @@ func getVMAliases(v *viper.Viper) (map[ids.ID][]string, error) { } } - vmAliasMap := make(map[ids.ID][]string) - if err := json.Unmarshal(fileBytes, 
&vmAliasMap); err != nil { - return nil, fmt.Errorf("problem unmarshaling vmAliases: %w", err) + aliasMap := make(map[ids.ID][]string) + if err := json.Unmarshal(fileBytes, &aliasMap); err != nil { + return nil, fmt.Errorf("problem unmarshaling %s: %w", name, err) } - return vmAliasMap, nil + return aliasMap, nil +} + +func getVMAliases(v *viper.Viper) (map[ids.ID][]string, error) { + return getAliases(v, "vm aliases", VMAliasesContentKey, VMAliasesFileKey) +} + +func getChainAliases(v *viper.Viper) (map[ids.ID][]string, error) { + return getAliases(v, "chain aliases", ChainAliasesContentKey, ChainAliasesFileKey) } -func getVMManager(v *viper.Viper) (vms.Manager, error) { +func getVMAliaser(v *viper.Viper) (ids.Aliaser, error) { vmAliases, err := getVMAliases(v) if err != nil { return nil, err } - manager := vms.NewManager() + aliser := ids.NewAliaser() for vmID, aliases := range vmAliases { for _, alias := range aliases { - if err := manager.Alias(vmID, alias); err != nil { + if err := aliser.Alias(vmID, alias); err != nil { return nil, err } } } - return manager, nil + return aliser, nil } // getPathFromDirKey reads flag value from viper instance and then checks the folder existence @@ -1020,11 +1037,7 @@ func getChainConfigsFromDir(v *viper.Viper) (map[string]chains.ChainConfig, erro return make(map[string]chains.ChainConfig), nil } - chainConfigs, err := readChainConfigPath(chainConfigPath) - if err != nil { - return nil, fmt.Errorf("couldn't read chain configs: %w", err) - } - return chainConfigs, nil + return readChainConfigPath(chainConfigPath) } // getChainConfigs reads & puts chainConfigs to node config @@ -1073,7 +1086,16 @@ func readChainConfigPath(chainConfigPath string) (map[string]chains.ChainConfig, return chainConfigMap, nil } -func getSubnetConfigsFromFlags(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]chains.SubnetConfig, error) { +// getSubnetConfigsFromFlags reads subnet configs from the correct place +// (flag or file) and returns a non-nil 
map. +func getSubnetConfigs(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]subnets.Config, error) { + if v.IsSet(SubnetConfigContentKey) { + return getSubnetConfigsFromFlags(v, subnetIDs) + } + return getSubnetConfigsFromDir(v, subnetIDs) +} + +func getSubnetConfigsFromFlags(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]subnets.Config, error) { subnetConfigContentB64 := v.GetString(SubnetConfigContentKey) subnetConfigContent, err := base64.StdEncoding.DecodeString(subnetConfigContentB64) if err != nil { @@ -1086,50 +1108,38 @@ func getSubnetConfigsFromFlags(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]c return nil, fmt.Errorf("could not unmarshal JSON: %w", err) } - res := make(map[ids.ID]chains.SubnetConfig) + res := make(map[ids.ID]subnets.Config) for _, subnetID := range subnetIDs { if rawSubnetConfigBytes, ok := subnetConfigs[subnetID]; ok { - subnetConfig := defaultSubnetConfig(v) - if err := json.Unmarshal(rawSubnetConfigBytes, &subnetConfig); err != nil { + config := getDefaultSubnetConfig(v) + if err := json.Unmarshal(rawSubnetConfigBytes, &config); err != nil { return nil, err } - if err := subnetConfig.ConsensusParameters.Valid(); err != nil { + + if err := config.Valid(); err != nil { return nil, err } - res[subnetID] = subnetConfig + + res[subnetID] = config } } return res, nil } // getSubnetConfigs reads SubnetConfigs to node config map -func getSubnetConfigsFromDir(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]chains.SubnetConfig, error) { +func getSubnetConfigsFromDir(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]subnets.Config, error) { subnetConfigPath, err := getPathFromDirKey(v, SubnetConfigDirKey) if err != nil { return nil, err } + + subnetConfigs := make(map[ids.ID]subnets.Config) if len(subnetConfigPath) == 0 { // subnet config path does not exist but not explicitly specified, so ignore it - return make(map[ids.ID]chains.SubnetConfig), nil + return subnetConfigs, nil } - subnetConfigs, err := readSubnetConfigs(subnetConfigPath, 
subnetIDs, defaultSubnetConfig(v)) - if err != nil { - return nil, fmt.Errorf("couldn't read subnet configs: %w", err) - } - return subnetConfigs, nil -} - -func getSubnetConfigs(v *viper.Viper, subnetIDs []ids.ID) (map[ids.ID]chains.SubnetConfig, error) { - if v.IsSet(SubnetConfigContentKey) { - return getSubnetConfigsFromFlags(v, subnetIDs) - } - return getSubnetConfigsFromDir(v, subnetIDs) -} - -// readSubnetConfigs reads subnet config files from a path and given subnetIDs and returns a map. -func readSubnetConfigs(subnetConfigPath string, subnetIDs []ids.ID, defaultSubnetConfig chains.SubnetConfig) (map[ids.ID]chains.SubnetConfig, error) { - subnetConfigs := make(map[ids.ID]chains.SubnetConfig) + // reads subnet config files from a path and given subnetIDs and returns a map. for _, subnetID := range subnetIDs { filePath := filepath.Join(subnetConfigPath, subnetID.String()+subnetConfigFileExt) fileInfo, err := os.Stat(filePath) @@ -1149,24 +1159,27 @@ func readSubnetConfigs(subnetConfigPath string, subnetIDs []ids.ID, defaultSubne return nil, err } - configData := defaultSubnetConfig - if err := json.Unmarshal(file, &configData); err != nil { + config := getDefaultSubnetConfig(v) + if err := json.Unmarshal(file, &config); err != nil { return nil, err } - if err := configData.ConsensusParameters.Valid(); err != nil { + + if err := config.Valid(); err != nil { return nil, err } - subnetConfigs[subnetID] = configData + + subnetConfigs[subnetID] = config } return subnetConfigs, nil } -func defaultSubnetConfig(v *viper.Viper) chains.SubnetConfig { - return chains.SubnetConfig{ - ConsensusParameters: getConsensusConfig(v), - ValidatorOnly: false, - GossipConfig: getGossipConfig(v), +func getDefaultSubnetConfig(v *viper.Viper) subnets.Config { + return subnets.Config{ + ConsensusParameters: getConsensusConfig(v), + ValidatorOnly: false, + GossipConfig: getGossipConfig(v), + ProposerMinBlockDelay: proposervm.DefaultMinBlockDelay, } } @@ -1221,17 +1234,71 @@ func 
getDiskTargeterConfig(v *viper.Viper) (tracker.TargeterConfig, error) { } } -func GetNodeConfig(v *viper.Viper, buildDir string) (node.Config, error) { - nodeConfig := node.Config{} +func getTraceConfig(v *viper.Viper) (trace.Config, error) { + enabled := v.GetBool(TracingEnabledKey) + if !enabled { + return trace.Config{ + Enabled: false, + }, nil + } + + exporterTypeStr := v.GetString(TracingExporterTypeKey) + exporterType, err := trace.ExporterTypeFromString(exporterTypeStr) + if err != nil { + return trace.Config{}, err + } + + endpoint := v.GetString(TracingEndpointKey) + if endpoint == "" { + return trace.Config{}, errTracingEndpointEmpty + } + + return trace.Config{ + ExporterConfig: trace.ExporterConfig{ + Type: exporterType, + Endpoint: endpoint, + Insecure: v.GetBool(TracingInsecureKey), + // TODO add support for headers + }, + Enabled: true, + TraceSampleRate: v.GetFloat64(TracingSampleRateKey), + }, nil +} + +// Returns the path to the directory that contains VM binaries. +func getPluginDir(v *viper.Viper) (string, error) { + pluginDir := GetExpandedString(v, v.GetString(PluginDirKey)) + + if v.IsSet(PluginDirKey) { + // If the flag was given, assert it exists and is a directory + info, err := os.Stat(pluginDir) + if err != nil { + return "", fmt.Errorf("plugin dir %q not found: %w", pluginDir, err) + } + if !info.IsDir() { + return "", fmt.Errorf("%w: %q", errPluginDirNotADirectory, pluginDir) + } + } else { + // If the flag wasn't given, make sure the default location exists. 
+ if err := os.MkdirAll(pluginDir, perms.ReadWriteExecute); err != nil { + return "", fmt.Errorf("failed to create plugin dir at %s: %w", pluginDir, err) + } + } + + return pluginDir, nil +} - // Plugin directory defaults to [buildDir]/[pluginsDirName] - nodeConfig.PluginDir = filepath.Join(buildDir, pluginsDirName) +func GetNodeConfig(v *viper.Viper) (node.Config, error) { + var ( + nodeConfig node.Config + err error + ) - // Consensus Parameters - nodeConfig.ConsensusParams = getConsensusConfig(v) - if err := nodeConfig.ConsensusParams.Valid(); err != nil { + nodeConfig.PluginDir, err = getPluginDir(v) + if err != nil { return node.Config{}, err } + nodeConfig.ConsensusShutdownTimeout = v.GetDuration(ConsensusShutdownTimeoutKey) if nodeConfig.ConsensusShutdownTimeout < 0 { return node.Config{}, fmt.Errorf("%q must be >= 0", ConsensusShutdownTimeoutKey) @@ -1243,7 +1310,14 @@ func GetNodeConfig(v *viper.Viper, buildDir string) (node.Config, error) { return node.Config{}, fmt.Errorf("%s must be >= 0", ConsensusGossipFrequencyKey) } - var err error + // App handling + nodeConfig.ConsensusAppConcurrency = int(v.GetUint(ConsensusAppConcurrencyKey)) + if nodeConfig.ConsensusAppConcurrency <= 0 { + return node.Config{}, fmt.Errorf("%s must be > 0", ConsensusAppConcurrencyKey) + } + + nodeConfig.UseCurrentHeight = v.GetBool(ProposerVMUseCurrentHeightKey) + // Logging nodeConfig.LoggingConfig, err = getLoggingConfig(v) if err != nil { @@ -1274,8 +1348,8 @@ func GetNodeConfig(v *viper.Viper, buildDir string) (node.Config, error) { return node.Config{}, err } - // Whitelisted Subnets - nodeConfig.WhitelistedSubnets, err = getWhitelistedSubnets(v) + // Tracked Subnets + nodeConfig.TrackedSubnets, err = getTrackedSubnets(v) if err != nil { return node.Config{}, err } @@ -1314,15 +1388,27 @@ func GetNodeConfig(v *viper.Viper, buildDir string) (node.Config, error) { } // Network Config - nodeConfig.NetworkConfig, err = getNetworkConfig(v, healthCheckAveragerHalflife) + 
nodeConfig.NetworkConfig, err = getNetworkConfig(v, nodeConfig.EnableStaking, healthCheckAveragerHalflife, nodeConfig.NetworkID) if err != nil { return node.Config{}, err } - nodeConfig.GossipConfig = getGossipConfig(v) + // Subnet Configs + subnetConfigs, err := getSubnetConfigs(v, nodeConfig.TrackedSubnets.List()) + if err != nil { + return node.Config{}, fmt.Errorf("couldn't read subnet configs: %w", err) + } + + primaryNetworkConfig := getDefaultSubnetConfig(v) + if err := primaryNetworkConfig.Valid(); err != nil { + return node.Config{}, fmt.Errorf("invalid consensus parameters: %w", err) + } + subnetConfigs[constants.PrimaryNetworkID] = primaryNetworkConfig + + nodeConfig.SubnetConfigs = subnetConfigs // Benchlist - nodeConfig.BenchlistConfig, err = getBenchlistConfig(v, nodeConfig.ConsensusParams.Alpha, nodeConfig.ConsensusParams.K) + nodeConfig.BenchlistConfig, err = getBenchlistConfig(v, primaryNetworkConfig.ConsensusParameters) if err != nil { return node.Config{}, err } @@ -1334,7 +1420,8 @@ func GetNodeConfig(v *viper.Viper, buildDir string) (node.Config, error) { nodeConfig.TxFeeConfig = getTxFeeConfig(v, nodeConfig.NetworkID) // Genesis Data - nodeConfig.GenesisBytes, nodeConfig.AvaxAssetID, err = getGenesisData(v, nodeConfig.NetworkID) + genesisStakingCfg := nodeConfig.StakingConfig.StakingConfig + nodeConfig.GenesisBytes, nodeConfig.AvaxAssetID, err = getGenesisData(v, nodeConfig.NetworkID, &genesisStakingCfg) if err != nil { return node.Config{}, fmt.Errorf("unable to load genesis file: %w", err) } @@ -1351,17 +1438,19 @@ func GetNodeConfig(v *viper.Viper, buildDir string) (node.Config, error) { return node.Config{}, err } - // Subnet Configs - subnetConfigs, err := getSubnetConfigs(v, nodeConfig.WhitelistedSubnets.List()) - if err != nil { - return node.Config{}, err + // Node health + nodeConfig.MinPercentConnectedStakeHealthy = map[ids.ID]float64{ + constants.PrimaryNetworkID: 
calcMinConnectedStake(primaryNetworkConfig.ConsensusParameters.Parameters), + } + + for subnetID, config := range subnetConfigs { + nodeConfig.MinPercentConnectedStakeHealthy[subnetID] = calcMinConnectedStake(config.ConsensusParameters.Parameters) } - nodeConfig.SubnetConfigs = subnetConfigs // Chain Configs nodeConfig.ChainConfigs, err = getChainConfigs(v) if err != nil { - return node.Config{}, err + return node.Config{}, fmt.Errorf("couldn't read chain configs: %w", err) } // Profiler @@ -1371,7 +1460,12 @@ func GetNodeConfig(v *viper.Viper, buildDir string) (node.Config, error) { } // VM Aliases - nodeConfig.VMManager, err = getVMManager(v) + nodeConfig.VMAliaser, err = getVMAliaser(v) + if err != nil { + return node.Config{}, err + } + // Chain aliases + nodeConfig.ChainAliases, err = getChainAliases(v) if err != nil { return node.Config{}, err } @@ -1392,5 +1486,37 @@ func GetNodeConfig(v *viper.Viper, buildDir string) (node.Config, error) { } nodeConfig.DiskTargeterConfig, err = getDiskTargeterConfig(v) - return nodeConfig, err + if err != nil { + return node.Config{}, err + } + + nodeConfig.TraceConfig, err = getTraceConfig(v) + if err != nil { + return node.Config{}, err + } + + nodeConfig.ChainDataDir = GetExpandedArg(v, ChainDataDirKey) + + nodeConfig.ProvidedFlags = providedFlags(v) + return nodeConfig, nil +} + +// calcMinConnectedStake takes [consensusParams] as input and calculates the +// expected min connected stake percentage according to alpha and k. 
+func calcMinConnectedStake(consensusParams snowball.Parameters) float64 { + alpha := consensusParams.Alpha + k := consensusParams.K + r := float64(alpha) / float64(k) + return r*(1-constants.MinConnectedStakeBuffer) + constants.MinConnectedStakeBuffer +} + +func providedFlags(v *viper.Viper) map[string]interface{} { + settings := v.AllSettings() + customSettings := make(map[string]interface{}, len(settings)) + for key, val := range settings { + if v.IsSet(key) { + customSettings[key] = val + } + } + return customSettings } diff --git a/avalanchego/config/config_test.go b/avalanchego/config/config_test.go index 098da2e1..de15d8fe 100644 --- a/avalanchego/config/config_test.go +++ b/avalanchego/config/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config @@ -19,6 +19,7 @@ import ( "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/subnets" ) func TestGetChainConfigsFromFiles(t *testing.T) { @@ -265,7 +266,7 @@ func TestGetVMAliasesFromFile(t *testing.T) { "wrong vm id": { givenJSON: `{"wrongVmId": ["vm1","vm2"]}`, expected: nil, - errMessage: "problem unmarshaling vmAliases", + errMessage: "problem unmarshaling vm aliases", }, "vm id": { givenJSON: `{"2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i": ["vm1","vm2"], @@ -312,7 +313,7 @@ func TestGetVMAliasesFromFlag(t *testing.T) { "wrong vm id": { givenJSON: `{"wrongVmId": ["vm1","vm2"]}`, expected: nil, - errMessage: "problem unmarshaling vmAliases", + errMessage: "problem unmarshaling vm aliases", }, "vm id": { givenJSON: `{"2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i": ["vm1","vm2"], @@ -381,7 +382,7 @@ func TestGetVMAliasesDirNotExists(t *testing.T) { vmAliases, err := getVMAliases(v) require.Nil(vmAliases) require.Error(err) - require.Contains(err.Error(), "vm alias file does 
not exist") + require.Contains(err.Error(), "vm aliases file does not exist") // do not set it explicitly configJSON = "{}" @@ -395,36 +396,36 @@ func TestGetVMAliasesDirNotExists(t *testing.T) { func TestGetSubnetConfigsFromFile(t *testing.T) { tests := map[string]struct { givenJSON string - testF func(*require.Assertions, map[ids.ID]chains.SubnetConfig) + testF func(*require.Assertions, map[ids.ID]subnets.Config) errMessage string fileName string }{ "wrong config": { fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.json", givenJSON: `thisisnotjson`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Nil(given) }, - errMessage: "couldn't read subnet configs", + errMessage: "invalid character", }, - "subnet is not whitelisted": { + "subnet is not tracked": { fileName: "Gmt4fuNsGJAd2PX86LBvycGaBpgCYKbuULdCLZs3SEs1Jx1LU.json", givenJSON: `{"validatorOnly": true}`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, }, "wrong extension": { fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.yaml", givenJSON: `{"validatorOnly": true}`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, }, "invalid consensus parameters": { fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.json", givenJSON: `{"consensusParameters":{"k": 111, "alpha":1234} }`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Nil(given) }, errMessage: "fails the condition that: alpha <= k", @@ -432,7 +433,7 @@ func TestGetSubnetConfigsFromFile(t *testing.T) { "correct config": 
{ fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.json", givenJSON: `{"validatorOnly": true, "consensusParameters":{"parents": 111, "alpha":16} }`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { id, _ := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") config, ok := given[id] require.True(ok) @@ -448,14 +449,14 @@ func TestGetSubnetConfigsFromFile(t *testing.T) { "gossip config": { fileName: "2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i.json", givenJSON: `{"appGossipNonValidatorSize": 100 }`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { id, _ := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") config, ok := given[id] require.True(ok) - require.Equal(uint(100), config.AppGossipNonValidatorSize) + require.Equal(uint(100), config.GossipConfig.AppGossipNonValidatorSize) // must still respect defaults require.Equal(20, config.ConsensusParameters.K) - require.Equal(uint(10), config.AppGossipValidatorSize) + require.Equal(uint(10), config.GossipConfig.AppGossipValidatorSize) }, errMessage: "", }, @@ -487,19 +488,19 @@ func TestGetSubnetConfigsFromFile(t *testing.T) { func TestGetSubnetConfigsFromFlags(t *testing.T) { tests := map[string]struct { givenJSON string - testF func(*require.Assertions, map[ids.ID]chains.SubnetConfig) + testF func(*require.Assertions, map[ids.ID]subnets.Config) errMessage string }{ "no configs": { givenJSON: `{}`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, errMessage: "", }, "entry with no config": { givenJSON: `{"2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i":{}}`, - testF: func(require *require.Assertions, given 
map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.True(len(given) == 1) id, _ := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") config, ok := given[id] @@ -508,9 +509,9 @@ func TestGetSubnetConfigsFromFlags(t *testing.T) { require.Equal(20, config.ConsensusParameters.K) }, }, - "subnet is not whitelisted": { + "subnet is not tracked": { givenJSON: `{"Gmt4fuNsGJAd2PX86LBvycGaBpgCYKbuULdCLZs3SEs1Jx1LU":{"validatorOnly":true}}`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, }, @@ -523,7 +524,7 @@ func TestGetSubnetConfigsFromFlags(t *testing.T) { } } }`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { require.Empty(given) }, errMessage: "fails the condition that: alpha <= k", @@ -539,7 +540,7 @@ func TestGetSubnetConfigsFromFlags(t *testing.T) { "validatorOnly": true } }`, - testF: func(require *require.Assertions, given map[ids.ID]chains.SubnetConfig) { + testF: func(require *require.Assertions, given map[ids.ID]subnets.Config) { id, _ := ids.FromString("2Ctt6eGAeo4MLqTmGa7AdRecuVMPGWEX9wSsCLBYrLhX4a394i") config, ok := given[id] require.True(ok) @@ -548,8 +549,8 @@ func TestGetSubnetConfigsFromFlags(t *testing.T) { require.Equal(20, config.ConsensusParameters.Alpha) require.Equal(30, config.ConsensusParameters.K) // must still respect defaults - require.Equal(uint(10), config.AppGossipValidatorSize) - require.Equal(1024, config.ConsensusParameters.MaxOutstandingItems) + require.Equal(uint(10), config.GossipConfig.AppGossipValidatorSize) + require.Equal(256, config.ConsensusParameters.MaxOutstandingItems) }, errMessage: "", }, @@ -578,6 +579,14 @@ func TestGetSubnetConfigsFromFlags(t *testing.T) { } } +func 
TestCalcMinConnectedStake(t *testing.T) { + v := setupViperFlags() + defaultParams := getConsensusConfig(v) + defaultExpectedMinStake := 0.8 + minStake := calcMinConnectedStake(defaultParams.Parameters) + require.Equal(t, defaultExpectedMinStake, minStake) +} + // setups config json file and writes content func setupConfigJSON(t *testing.T, rootPath string, value string) string { configFilePath := filepath.Join(rootPath, "config.json") @@ -595,10 +604,8 @@ func setupFile(t *testing.T, path string, fileName string, value string) { func setupViperFlags() *viper.Viper { v := viper.New() fs := BuildFlagSet() - pflag.CommandLine = pflag.NewFlagSet(os.Args[0], pflag.PanicOnError) // flags are now reset - pflag.CommandLine.AddGoFlagSet(fs) pflag.Parse() - if err := v.BindPFlags(pflag.CommandLine); err != nil { + if err := v.BindPFlags(fs); err != nil { log.Fatal(err) } return v @@ -606,17 +613,8 @@ func setupViperFlags() *viper.Viper { func setupViper(configFilePath string) *viper.Viper { v := setupViperFlags() - // need to set it since in tests executable dir is somewhere /var/tmp/ (or wherever is designated by go) - // thus it searches buildDir in /var/tmp/ - // but actual buildDir resides under project_root/build - currentPath, err := os.Getwd() - if err != nil { - log.Fatal(err) - } - v.Set(BuildDirKey, filepath.Join(currentPath, "..", "build")) v.SetConfigFile(configFilePath) - err = v.ReadInConfig() - if err != nil { + if err := v.ReadInConfig(); err != nil { log.Fatal(err) } return v diff --git a/avalanchego/config/flags.go b/avalanchego/config/flags.go index 39a8dcdb..c30f6d27 100644 --- a/avalanchego/config/flags.go +++ b/avalanchego/config/flags.go @@ -1,24 +1,23 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package config import ( - "flag" "fmt" - "log" "os" "path/filepath" "runtime" "time" - "github.com/kardianos/osext" - + "github.com/spf13/pflag" "github.com/spf13/viper" "github.com/ava-labs/avalanchego/database/leveldb" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ulimit" "github.com/ava-labs/avalanchego/utils/units" @@ -46,46 +45,35 @@ var ( defaultChainConfigDir = filepath.Join(defaultConfigDir, "chains") defaultVMConfigDir = filepath.Join(defaultConfigDir, "vms") defaultVMAliasFilePath = filepath.Join(defaultVMConfigDir, "aliases.json") + defaultChainAliasFilePath = filepath.Join(defaultChainConfigDir, "aliases.json") defaultSubnetConfigDir = filepath.Join(defaultConfigDir, "subnets") - - // Places to look for the build directory - defaultBuildDirs = []string{} + defaultPluginDir = filepath.Join(defaultUnexpandedDataDir, "plugins") + defaultChainDataDir = filepath.Join(defaultUnexpandedDataDir, "chainData") ) -func init() { - folderPath, err := osext.ExecutableFolder() - if err == nil { - defaultBuildDirs = append(defaultBuildDirs, folderPath) - defaultBuildDirs = append(defaultBuildDirs, filepath.Dir(folderPath)) +func deprecateFlags(fs *pflag.FlagSet) error { + for key, message := range deprecatedKeys { + if err := fs.MarkDeprecated(key, message); err != nil { + return err + } } - wd, err := os.Getwd() - if err != nil { - log.Fatal(err) - } - defaultBuildDirs = append(defaultBuildDirs, - wd, - filepath.Join("/", "usr", "local", "lib", constants.AppName), - defaultUnexpandedDataDir, - ) + return nil } -func addProcessFlags(fs *flag.FlagSet) { +func addProcessFlags(fs *pflag.FlagSet) { // If true, print the version and quit. 
fs.Bool(VersionKey, false, "If true, print version and quit") - - // Build directory - fs.String(BuildDirKey, defaultBuildDirs[0], "Path to the build directory") - - // Plugin - fs.Bool(PluginModeKey, false, "Whether the app should run as a plugin") } -func addNodeFlags(fs *flag.FlagSet) { +func addNodeFlags(fs *pflag.FlagSet) { // Home directory fs.String(DataDirKey, defaultDataDir, "Sets the base data directory where default sub-directories will be placed unless otherwise specified.") // System fs.Uint64(FdLimitKey, ulimit.DefaultFDLimit, "Attempts to raise the process file descriptor limit to at least this value and error if the value is above the system max") + // Plugin directory + fs.String(PluginDirKey, defaultPluginDir, "Path to the plugin directory") + // Config File fs.String(ConfigFileKey, "", fmt.Sprintf("Specifies a config file. Ignored if %s is specified", ConfigContentKey)) fs.String(ConfigContentKey, "", "Specifies base64 encoded config content") @@ -136,77 +124,86 @@ func addNodeFlags(fs *flag.FlagSet) { NetworkPeerListPeersGossipSizeKey, NetworkPeerListGossipFreqKey, ) - fs.Uint(NetworkPeerListNumValidatorIPsKey, 15, gossipHelpMsg) - fs.Uint(NetworkPeerListValidatorGossipSizeKey, 20, gossipHelpMsg) - fs.Uint(NetworkPeerListNonValidatorGossipSizeKey, 0, gossipHelpMsg) - fs.Uint(NetworkPeerListPeersGossipSizeKey, 10, gossipHelpMsg) - fs.Duration(NetworkPeerListGossipFreqKey, time.Minute, gossipHelpMsg) + fs.Uint(NetworkPeerListNumValidatorIPsKey, constants.DefaultNetworkPeerListNumValidatorIPs, gossipHelpMsg) + fs.Uint(NetworkPeerListValidatorGossipSizeKey, constants.DefaultNetworkPeerListValidatorGossipSize, gossipHelpMsg) + fs.Uint(NetworkPeerListNonValidatorGossipSizeKey, constants.DefaultNetworkPeerListNonValidatorGossipSize, gossipHelpMsg) + fs.Uint(NetworkPeerListPeersGossipSizeKey, constants.DefaultNetworkPeerListPeersGossipSize, gossipHelpMsg) + fs.Duration(NetworkPeerListGossipFreqKey, constants.DefaultNetworkPeerListGossipFreq, 
gossipHelpMsg) // Public IP Resolution - fs.String(PublicIPKey, "", "Public IP of this node for P2P communication. If empty, try to discover with NAT. Ignored if dynamic-public-ip is non-empty") - fs.Duration(DynamicUpdateDurationKey, 5*time.Minute, "Dynamic IP and NAT traversal update duration") // Deprecated - fs.String(DynamicPublicIPResolverKey, "", "'ifconfigco' (alias 'ifconfig') or 'opendns' or 'ifconfigme'. By default does not do dynamic public IP updates") // Deprecated + fs.String(PublicIPKey, "", "Public IP of this node for P2P communication. If empty, try to discover with NAT") fs.Duration(PublicIPResolutionFreqKey, 5*time.Minute, "Frequency at which this node resolves/updates its public IP and renew NAT mappings, if applicable") - fs.String(PublicIPResolutionServiceKey, "", "Only acceptable values are 'ifconfigco', 'opendns' or 'ifconfigme'. When provided, the node will use that service to periodically resolve/update its public IP") + fs.String(PublicIPResolutionServiceKey, "", fmt.Sprintf("Only acceptable values are 'ifconfigco', 'opendns' or 'ifconfigme'. When provided, the node will use that service to periodically resolve/update its public IP. Ignored if %s is set", PublicIPKey)) // Inbound Connection Throttling - fs.Duration(InboundConnUpgradeThrottlerCooldownKey, 10*time.Second, "Upgrade an inbound connection from a given IP at most once per this duration. If 0, don't rate-limit inbound connection upgrades") - fs.Float64(InboundThrottlerMaxConnsPerSecKey, 256, "Max number of inbound connections to accept (from all peers) per second") + fs.Duration(InboundConnUpgradeThrottlerCooldownKey, constants.DefaultInboundConnUpgradeThrottlerCooldown, "Upgrade an inbound connection from a given IP at most once per this duration. 
If 0, don't rate-limit inbound connection upgrades") + fs.Float64(InboundThrottlerMaxConnsPerSecKey, constants.DefaultInboundThrottlerMaxConnsPerSec, "Max number of inbound connections to accept (from all peers) per second") // Outbound Connection Throttling - fs.Uint(OutboundConnectionThrottlingRps, 50, "Make at most this number of outgoing peer connection attempts per second") - fs.Duration(OutboundConnectionTimeout, 30*time.Second, "Timeout when dialing a peer") + fs.Uint(OutboundConnectionThrottlingRpsKey, constants.DefaultOutboundConnectionThrottlingRps, "Make at most this number of outgoing peer connection attempts per second") + fs.Duration(OutboundConnectionTimeoutKey, constants.DefaultOutboundConnectionTimeout, "Timeout when dialing a peer") // Timeouts - fs.Duration(NetworkInitialTimeoutKey, 5*time.Second, "Initial timeout value of the adaptive timeout manager") - fs.Duration(NetworkMinimumTimeoutKey, 2*time.Second, "Minimum timeout value of the adaptive timeout manager") - fs.Duration(NetworkMaximumTimeoutKey, 10*time.Second, "Maximum timeout value of the adaptive timeout manager") - fs.Duration(NetworkMaximumInboundTimeoutKey, 10*time.Second, "Maximum timeout value of an inbound message. Defines duration within which an incoming message must be fulfilled. Incoming messages containing deadline higher than this value will be overridden with this value.") - fs.Duration(NetworkTimeoutHalflifeKey, 5*time.Minute, "Halflife of average network response time. Higher value --> network timeout is less volatile. Can't be 0") - fs.Float64(NetworkTimeoutCoefficientKey, 2, "Multiplied by average network response time to get the network timeout. 
Must be >= 1") - fs.Duration(NetworkReadHandshakeTimeoutKey, 15*time.Second, "Timeout value for reading handshake messages") + fs.Duration(NetworkInitialTimeoutKey, constants.DefaultNetworkInitialTimeout, "Initial timeout value of the adaptive timeout manager") + fs.Duration(NetworkMinimumTimeoutKey, constants.DefaultNetworkMinimumTimeout, "Minimum timeout value of the adaptive timeout manager") + fs.Duration(NetworkMaximumTimeoutKey, constants.DefaultNetworkMaximumTimeout, "Maximum timeout value of the adaptive timeout manager") + fs.Duration(NetworkMaximumInboundTimeoutKey, constants.DefaultNetworkMaximumInboundTimeout, "Maximum timeout value of an inbound message. Defines duration within which an incoming message must be fulfilled. Incoming messages containing deadline higher than this value will be overridden with this value.") + fs.Duration(NetworkTimeoutHalflifeKey, constants.DefaultNetworkTimeoutHalflife, "Halflife of average network response time. Higher value --> network timeout is less volatile. Can't be 0") + fs.Float64(NetworkTimeoutCoefficientKey, constants.DefaultNetworkTimeoutCoefficient, "Multiplied by average network response time to get the network timeout. Must be >= 1") + fs.Duration(NetworkReadHandshakeTimeoutKey, constants.DefaultNetworkReadHandshakeTimeout, "Timeout value for reading handshake messages") fs.Duration(NetworkPingTimeoutKey, constants.DefaultPingPongTimeout, "Timeout value for Ping-Pong with a peer") fs.Duration(NetworkPingFrequencyKey, constants.DefaultPingFrequency, "Frequency of pinging other peers") - fs.Bool(NetworkCompressionEnabledKey, true, "If true, compress certain outbound messages. 
This node will be able to parse compressed inbound messages regardless of this flag's value") - fs.Duration(NetworkMaxClockDifferenceKey, time.Minute, "Max allowed clock difference value between this node and peers") - fs.Bool(NetworkAllowPrivateIPsKey, true, "Allows the node to initiate outbound connection attempts to peers with private IPs") - fs.Bool(NetworkRequireValidatorToConnectKey, false, "If true, this node will only maintain a connection with another node if this node is a validator, the other node is a validator, or the other node is a beacon") - fs.Uint(NetworkPeerReadBufferSizeKey, 8*units.KiB, "Size, in bytes, of the buffer that we read peer messages into (there is one buffer per peer)") - fs.Uint(NetworkPeerWriteBufferSizeKey, 8*units.KiB, "Size, in bytes, of the buffer that we write peer messages into (there is one buffer per peer)") + fs.Bool(NetworkCompressionEnabledKey, constants.DefaultNetworkCompressionEnabled, "If true, compress certain outbound messages. This node will be able to parse compressed inbound messages regardless of this flag's value") + fs.String(NetworkCompressionTypeKey, constants.DefaultNetworkCompressionType.String(), fmt.Sprintf("Compression type for outbound messages. 
Must be one of [%s, %s, %s]", compression.TypeGzip, compression.TypeZstd, compression.TypeNone)) + + fs.Duration(NetworkMaxClockDifferenceKey, constants.DefaultNetworkMaxClockDifference, "Max allowed clock difference value between this node and peers") + fs.Bool(NetworkAllowPrivateIPsKey, constants.DefaultNetworkAllowPrivateIPs, "Allows the node to initiate outbound connection attempts to peers with private IPs") + fs.Bool(NetworkRequireValidatorToConnectKey, constants.DefaultNetworkRequireValidatorToConnect, "If true, this node will only maintain a connection with another node if this node is a validator, the other node is a validator, or the other node is a beacon") + fs.Uint(NetworkPeerReadBufferSizeKey, constants.DefaultNetworkPeerReadBufferSize, "Size, in bytes, of the buffer that we read peer messages into (there is one buffer per peer)") + fs.Uint(NetworkPeerWriteBufferSizeKey, constants.DefaultNetworkPeerWriteBufferSize, "Size, in bytes, of the buffer that we write peer messages into (there is one buffer per peer)") + + fs.Bool(NetworkTCPProxyEnabledKey, constants.DefaultNetworkTCPProxyEnabled, "Require all P2P connections to be initiated with a TCP proxy header") + // The PROXY protocol specification recommends setting this value to be at + // least 3 seconds to cover a TCP retransmit. + // Ref: https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt + // Specifying a timeout of 0 will actually result in a timeout of 200ms, but + // a timeout of 0 should generally not be provided. + fs.Duration(NetworkTCPProxyReadTimeoutKey, constants.DefaultNetworkTCPProxyReadTimeout, "Maximum duration to wait for a TCP proxy header") fs.String(NetworkTLSKeyLogFileKey, "", "TLS key log file path. 
Should only be specified for debugging") // Benchlist - fs.Int(BenchlistFailThresholdKey, 10, "Number of consecutive failed queries before benchlisting a node") - fs.Duration(BenchlistDurationKey, 15*time.Minute, "Max amount of time a peer is benchlisted after surpassing the threshold") - fs.Duration(BenchlistMinFailingDurationKey, 2*time.Minute+30*time.Second, "Minimum amount of time messages to a peer must be failing before the peer is benched") + fs.Int(BenchlistFailThresholdKey, constants.DefaultBenchlistFailThreshold, "Number of consecutive failed queries before benchlisting a node") + fs.Duration(BenchlistDurationKey, constants.DefaultBenchlistDuration, "Max amount of time a peer is benchlisted after surpassing the threshold") + fs.Duration(BenchlistMinFailingDurationKey, constants.DefaultBenchlistMinFailingDuration, "Minimum amount of time messages to a peer must be failing before the peer is benched") // Router - fs.Duration(ConsensusGossipFrequencyKey, 10*time.Second, "Frequency of gossiping accepted frontiers") - fs.Duration(ConsensusShutdownTimeoutKey, 30*time.Second, "Timeout before killing an unresponsive chain") - fs.Uint(ConsensusGossipAcceptedFrontierValidatorSizeKey, 0, "Number of validators to gossip to when gossiping accepted frontier") - fs.Uint(ConsensusGossipAcceptedFrontierNonValidatorSizeKey, 0, "Number of non-validators to gossip to when gossiping accepted frontier") - fs.Uint(ConsensusGossipAcceptedFrontierPeerSizeKey, 15, "Number of peers to gossip to when gossiping accepted frontier") - fs.Uint(ConsensusGossipOnAcceptValidatorSizeKey, 0, "Number of validators to gossip to each accepted container to") - fs.Uint(ConsensusGossipOnAcceptNonValidatorSizeKey, 0, "Number of non-validators to gossip to each accepted container to") - fs.Uint(ConsensusGossipOnAcceptPeerSizeKey, 10, "Number of peers to gossip to each accepted container to") - fs.Uint(AppGossipValidatorSizeKey, 10, "Number of validators to gossip an AppGossip message to") - 
fs.Uint(AppGossipNonValidatorSizeKey, 0, "Number of non-validators to gossip an AppGossip message to") - fs.Uint(AppGossipPeerSizeKey, 0, "Number of peers (which may be validators or non-validators) to gossip an AppGossip message to") + fs.Duration(ConsensusGossipFrequencyKey, constants.DefaultConsensusGossipFrequency, "Frequency of gossiping accepted frontiers") + fs.Uint(ConsensusAppConcurrencyKey, constants.DefaultConsensusAppConcurrency, "Maximum number of goroutines to use when handling App messages on a chain") + fs.Duration(ConsensusShutdownTimeoutKey, constants.DefaultConsensusShutdownTimeout, "Timeout before killing an unresponsive chain") + fs.Uint(ConsensusGossipAcceptedFrontierValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierValidatorSize, "Number of validators to gossip to when gossiping accepted frontier") + fs.Uint(ConsensusGossipAcceptedFrontierNonValidatorSizeKey, constants.DefaultConsensusGossipAcceptedFrontierNonValidatorSize, "Number of non-validators to gossip to when gossiping accepted frontier") + fs.Uint(ConsensusGossipAcceptedFrontierPeerSizeKey, constants.DefaultConsensusGossipAcceptedFrontierPeerSize, "Number of peers to gossip to when gossiping accepted frontier") + fs.Uint(ConsensusGossipOnAcceptValidatorSizeKey, constants.DefaultConsensusGossipOnAcceptValidatorSize, "Number of validators to gossip to each accepted container to") + fs.Uint(ConsensusGossipOnAcceptNonValidatorSizeKey, constants.DefaultConsensusGossipOnAcceptNonValidatorSize, "Number of non-validators to gossip to each accepted container to") + fs.Uint(ConsensusGossipOnAcceptPeerSizeKey, constants.DefaultConsensusGossipOnAcceptPeerSize, "Number of peers to gossip to each accepted container to") + fs.Uint(AppGossipValidatorSizeKey, constants.DefaultAppGossipValidatorSize, "Number of validators to gossip an AppGossip message to") + fs.Uint(AppGossipNonValidatorSizeKey, constants.DefaultAppGossipNonValidatorSize, "Number of non-validators to gossip an 
AppGossip message to") + fs.Uint(AppGossipPeerSizeKey, constants.DefaultAppGossipPeerSize, "Number of peers (which may be validators or non-validators) to gossip an AppGossip message to") // Inbound Throttling - fs.Uint64(InboundThrottlerAtLargeAllocSizeKey, 6*units.MiB, "Size, in bytes, of at-large byte allocation in inbound message throttler") - fs.Uint64(InboundThrottlerVdrAllocSizeKey, 32*units.MiB, "Size, in bytes, of validator byte allocation in inbound message throttler") - fs.Uint64(InboundThrottlerNodeMaxAtLargeBytesKey, constants.DefaultMaxMessageSize, "Max number of bytes a node can take from the inbound message throttler's at-large allocation. Must be at least the max message size") - fs.Uint64(InboundThrottlerMaxProcessingMsgsPerNodeKey, 1024, "Max number of messages currently processing from a given node") - fs.Uint64(InboundThrottlerBandwidthRefillRateKey, 512*units.KiB, "Max average inbound bandwidth usage of a peer, in bytes per second. See BandwidthThrottler") - fs.Uint64(InboundThrottlerBandwidthMaxBurstSizeKey, constants.DefaultMaxMessageSize, "Max inbound bandwidth a node can use at once. Must be at least the max message size. 
See BandwidthThrottler") - fs.Duration(InboundThrottlerCPUMaxRecheckDelayKey, 5*time.Second, "In the CPU-based network throttler, check at least this often whether the node's CPU usage has fallen to an acceptable level") - fs.Duration(InboundThrottlerDiskMaxRecheckDelayKey, 5*time.Second, "In the disk-based network throttler, check at least this often whether the node's disk usage has fallen to an acceptable level") + fs.Uint64(InboundThrottlerAtLargeAllocSizeKey, constants.DefaultInboundThrottlerAtLargeAllocSize, "Size, in bytes, of at-large byte allocation in inbound message throttler") + fs.Uint64(InboundThrottlerVdrAllocSizeKey, constants.DefaultInboundThrottlerVdrAllocSize, "Size, in bytes, of validator byte allocation in inbound message throttler") + fs.Uint64(InboundThrottlerNodeMaxAtLargeBytesKey, constants.DefaultInboundThrottlerNodeMaxAtLargeBytes, "Max number of bytes a node can take from the inbound message throttler's at-large allocation. Must be at least the max message size") + fs.Uint64(InboundThrottlerMaxProcessingMsgsPerNodeKey, constants.DefaultInboundThrottlerMaxProcessingMsgsPerNode, "Max number of messages currently processing from a given node") + fs.Uint64(InboundThrottlerBandwidthRefillRateKey, constants.DefaultInboundThrottlerBandwidthRefillRate, "Max average inbound bandwidth usage of a peer, in bytes per second. See BandwidthThrottler") + fs.Uint64(InboundThrottlerBandwidthMaxBurstSizeKey, constants.DefaultInboundThrottlerBandwidthMaxBurstSize, "Max inbound bandwidth a node can use at once. Must be at least the max message size. 
See BandwidthThrottler") + fs.Duration(InboundThrottlerCPUMaxRecheckDelayKey, constants.DefaultInboundThrottlerCPUMaxRecheckDelay, "In the CPU-based network throttler, check at least this often whether the node's CPU usage has fallen to an acceptable level") + fs.Duration(InboundThrottlerDiskMaxRecheckDelayKey, constants.DefaultInboundThrottlerDiskMaxRecheckDelay, "In the disk-based network throttler, check at least this often whether the node's disk usage has fallen to an acceptable level") // Outbound Throttling - fs.Uint64(OutboundThrottlerAtLargeAllocSizeKey, 32*units.MiB, "Size, in bytes, of at-large byte allocation in outbound message throttler") - fs.Uint64(OutboundThrottlerVdrAllocSizeKey, 32*units.MiB, "Size, in bytes, of validator byte allocation in outbound message throttler") - fs.Uint64(OutboundThrottlerNodeMaxAtLargeBytesKey, constants.DefaultMaxMessageSize, "Max number of bytes a node can take from the outbound message throttler's at-large allocation. Must be at least the max message size") + fs.Uint64(OutboundThrottlerAtLargeAllocSizeKey, constants.DefaultOutboundThrottlerAtLargeAllocSize, "Size, in bytes, of at-large byte allocation in outbound message throttler") + fs.Uint64(OutboundThrottlerVdrAllocSizeKey, constants.DefaultOutboundThrottlerVdrAllocSize, "Size, in bytes, of validator byte allocation in outbound message throttler") + fs.Uint64(OutboundThrottlerNodeMaxAtLargeBytesKey, constants.DefaultOutboundThrottlerNodeMaxAtLargeBytes, "Max number of bytes a node can take from the outbound message throttler's at-large allocation. Must be at least the max message size") // HTTP APIs fs.String(HTTPHostKey, "127.0.0.1", "Address of the HTTP server") @@ -219,6 +216,10 @@ func addNodeFlags(fs *flag.FlagSet) { fs.String(HTTPAllowedOrigins, "*", "Origins to allow on the HTTP port. Defaults to * which allows all origins. 
Example: https://*.avax.network https://*.avax-test.network") fs.Duration(HTTPShutdownWaitKey, 0, "Duration to wait after receiving SIGTERM or SIGINT before initiating shutdown. The /health endpoint will return unhealthy during this duration") fs.Duration(HTTPShutdownTimeoutKey, 10*time.Second, "Maximum duration to wait for existing connections to complete during node shutdown") + fs.Duration(HTTPReadTimeoutKey, 30*time.Second, "Maximum duration for reading the entire request, including the body. A zero or negative value means there will be no timeout") + fs.Duration(HTTPReadHeaderTimeoutKey, 30*time.Second, fmt.Sprintf("Maximum duration to read request headers. The connection's read deadline is reset after reading the headers. If %s is zero, the value of %s is used. If both are zero, there is no timeout.", HTTPReadHeaderTimeoutKey, HTTPReadTimeoutKey)) + fs.Duration(HTTPWriteTimeoutKey, 30*time.Second, "Maximum duration before timing out writes of the response. It is reset whenever a new request's header is read. A zero or negative value means there will be no timeout.") + fs.Duration(HTTPIdleTimeoutKey, 120*time.Second, fmt.Sprintf("Maximum duration to wait for the next request when keep-alives are enabled. If %s is zero, the value of %s is used. If both are zero, there is no timeout.", HTTPIdleTimeoutKey, HTTPReadTimeoutKey)) fs.Bool(APIAuthRequiredKey, false, "Require authorization token to call HTTP APIs") fs.String(APIAuthPasswordFileKey, "", fmt.Sprintf("Password file used to initially create/validate API authorization tokens. Ignored if %s is specified. Leading and trailing whitespace is removed from the password. 
Can be changed via API call", @@ -228,20 +229,20 @@ func addNodeFlags(fs *flag.FlagSet) { // Enable/Disable APIs fs.Bool(AdminAPIEnabledKey, false, "If true, this node exposes the Admin API") fs.Bool(InfoAPIEnabledKey, true, "If true, this node exposes the Info API") - fs.Bool(KeystoreAPIEnabledKey, true, "If true, this node exposes the Keystore API") + fs.Bool(KeystoreAPIEnabledKey, false, "If true, this node exposes the Keystore API") fs.Bool(MetricsAPIEnabledKey, true, "If true, this node exposes the Metrics API") fs.Bool(HealthAPIEnabledKey, true, "If true, this node exposes the Health API") fs.Bool(IpcAPIEnabledKey, false, "If true, IPCs can be opened") // Health Checks fs.Duration(HealthCheckFreqKey, 30*time.Second, "Time between health checks") - fs.Duration(HealthCheckAveragerHalflifeKey, 10*time.Second, "Halflife of averager when calculating a running average in a health check") + fs.Duration(HealthCheckAveragerHalflifeKey, constants.DefaultHealthCheckAveragerHalflife, "Halflife of averager when calculating a running average in a health check") // Network Layer Health - fs.Duration(NetworkHealthMaxTimeSinceMsgSentKey, time.Minute, "Network layer returns unhealthy if haven't sent a message for at least this much time") - fs.Duration(NetworkHealthMaxTimeSinceMsgReceivedKey, time.Minute, "Network layer returns unhealthy if haven't received a message for at least this much time") - fs.Float64(NetworkHealthMaxPortionSendQueueFillKey, 0.9, "Network layer returns unhealthy if more than this portion of the pending send queue is full") - fs.Uint(NetworkHealthMinPeersKey, 1, "Network layer returns unhealthy if connected to less than this many peers") - fs.Float64(NetworkHealthMaxSendFailRateKey, .9, "Network layer reports unhealthy if more than this portion of attempted message sends fail") + fs.Duration(NetworkHealthMaxTimeSinceMsgSentKey, constants.DefaultNetworkHealthMaxTimeSinceMsgSent, "Network layer returns unhealthy if haven't sent a message for at least this 
much time") + fs.Duration(NetworkHealthMaxTimeSinceMsgReceivedKey, constants.DefaultNetworkHealthMaxTimeSinceMsgReceived, "Network layer returns unhealthy if haven't received a message for at least this much time") + fs.Float64(NetworkHealthMaxPortionSendQueueFillKey, constants.DefaultNetworkHealthMaxPortionSendQueueFill, "Network layer returns unhealthy if more than this portion of the pending send queue is full") + fs.Uint(NetworkHealthMinPeersKey, constants.DefaultNetworkHealthMinPeers, "Network layer returns unhealthy if connected to less than this many peers") + fs.Float64(NetworkHealthMaxSendFailRateKey, constants.DefaultNetworkHealthMaxSendFailRate, "Network layer reports unhealthy if more than this portion of attempted message sends fail") // Router Health fs.Float64(RouterHealthMaxDropRateKey, 1, "Node reports unhealthy if the router drops more than this portion of messages") fs.Uint(RouterHealthMaxOutstandingRequestsKey, 1024, "Node reports unhealthy if there are more than this many outstanding consensus requests (Get, PullQuery, etc.) over all chains") @@ -279,7 +280,7 @@ func addNodeFlags(fs *flag.FlagSet) { fs.Duration(StakeMintingPeriodKey, genesis.LocalParams.RewardConfig.MintingPeriod, "Consumption period of the staking function") fs.Uint64(StakeSupplyCapKey, genesis.LocalParams.RewardConfig.SupplyCap, "Supply cap of the staking function") // Subnets - fs.String(WhitelistedSubnetsKey, "", "Whitelist of subnets to validate") + fs.String(TrackSubnetsKey, "", "List of subnets for the node to track. A node tracking a subnet will track the uptimes of the subnet validators and attempt to sync all the chains in the subnet. Before validating a subnet, a node should be tracking the subnet to avoid impacting their subnet validation uptime") // State syncing fs.String(StateSyncIPsKey, "", "Comma separated list of state sync peer ips to connect to. 
Example: 127.0.0.1:9630,127.0.0.1:9631") @@ -298,17 +299,22 @@ func addNodeFlags(fs *flag.FlagSet) { // Consensus fs.Int(SnowSampleSizeKey, 20, "Number of nodes to query for each network poll") fs.Int(SnowQuorumSizeKey, 15, "Alpha value to use for required number positive results") - fs.Int(SnowVirtuousCommitThresholdKey, 15, "Beta value to use for virtuous transactions") + // TODO: Replace this temporary flag description after the X-chain + // linearization with "Beta value to use for virtuous transactions" + fs.Int(SnowVirtuousCommitThresholdKey, 15, "This flag is temporarily ignored due to the X-chain linearization") fs.Int(SnowRogueCommitThresholdKey, 20, "Beta value to use for rogue transactions") fs.Int(SnowAvalancheNumParentsKey, 5, "Number of vertexes for reference from each new vertex") fs.Int(SnowAvalancheBatchSizeKey, 30, "Number of operations to batch in each new vertex") fs.Int(SnowConcurrentRepollsKey, 4, "Minimum number of concurrent polls for finalizing consensus") - fs.Int(SnowOptimalProcessingKey, 50, "Optimal number of processing containers in consensus") - fs.Int(SnowMaxProcessingKey, 1024, "Maximum number of processing items to be considered healthy") - fs.Duration(SnowMaxTimeProcessingKey, 2*time.Minute, "Maximum amount of time an item should be processing and still be healthy") + fs.Int(SnowOptimalProcessingKey, 10, "Optimal number of processing containers in consensus") + fs.Int(SnowMaxProcessingKey, 256, "Maximum number of processing items to be considered healthy") + fs.Duration(SnowMaxTimeProcessingKey, 30*time.Second, "Maximum amount of time an item should be processing and still be healthy") fs.Uint(SnowMixedQueryNumPushVdrKey, 10, fmt.Sprintf("If this node is a validator, when a container is inserted into consensus, send a Push Query to %s validators and a Pull Query to the others. 
Must be <= k.", SnowMixedQueryNumPushVdrKey)) fs.Uint(SnowMixedQueryNumPushNonVdrKey, 0, fmt.Sprintf("If this node is not a validator, when a container is inserted into consensus, send a Push Query to %s validators and a Pull Query to the others. Must be <= k.", SnowMixedQueryNumPushNonVdrKey)) + // ProposerVM + fs.Bool(ProposerVMUseCurrentHeightKey, false, "Have the ProposerVM always report the last accepted P-chain block height") + // Metrics fs.Bool(MeterVMsEnabledKey, true, "Enable Meter VMs to track VM performance with more granularity") fs.Duration(UptimeMetricFreqKey, 30*time.Second, "Frequency of renewing this node's average uptime metric") @@ -327,17 +333,24 @@ func addNodeFlags(fs *flag.FlagSet) { fs.String(SubnetConfigDirKey, defaultSubnetConfigDir, fmt.Sprintf("Subnet specific configurations parent directory. Ignored if %s is specified", SubnetConfigContentKey)) fs.String(SubnetConfigContentKey, "", "Specifies base64 encoded subnets configurations") + // Chain Data Directory + fs.String(ChainDataDirKey, defaultChainDataDir, "Chain specific data directory") + // Profiles fs.String(ProfileDirKey, defaultProfileDir, "Path to the profile directory") fs.Bool(ProfileContinuousEnabledKey, false, "Whether the app should continuously produce performance profiles") fs.Duration(ProfileContinuousFreqKey, 15*time.Minute, "How frequently to rotate performance profiles") fs.Int(ProfileContinuousMaxFilesKey, 5, "Maximum number of historical profiles to keep") + + // Aliasing fs.String(VMAliasesFileKey, defaultVMAliasFilePath, fmt.Sprintf("Specifies a JSON file that maps vmIDs with custom aliases. Ignored if %s is specified", VMAliasesContentKey)) fs.String(VMAliasesContentKey, "", "Specifies base64 encoded maps vmIDs with custom aliases") + fs.String(ChainAliasesFileKey, defaultChainAliasFilePath, fmt.Sprintf("Specifies a JSON file that maps blockchainIDs with custom aliases. 
Ignored if %s is specified", ChainConfigContentKey)) + fs.String(ChainAliasesContentKey, "", "Specifies base64 encoded map from blockchainID to custom aliases") // Delays - fs.Duration(NetworkInitialReconnectDelayKey, time.Second, "Initial delay duration must be waited before attempting to reconnect a peer") - fs.Duration(NetworkMaxReconnectDelayKey, time.Hour, "Maximum delay duration must be waited before attempting to reconnect a peer") + fs.Duration(NetworkInitialReconnectDelayKey, constants.DefaultNetworkInitialReconnectDelay, "Initial delay duration must be waited before attempting to reconnect a peer") + fs.Duration(NetworkMaxReconnectDelayKey, constants.DefaultNetworkMaxReconnectDelay, "Maximum delay duration must be waited before attempting to reconnect a peer") // System resource trackers fs.Duration(SystemTrackerFrequencyKey, 500*time.Millisecond, "Frequency to check the real system usage of tracked processes. More frequent checks --> usage metrics are more accurate, but more expensive to track") @@ -356,13 +369,19 @@ func addNodeFlags(fs *flag.FlagSet) { fs.Float64(DiskVdrAllocKey, 1000*units.GiB, "Maximum number of disk reads/writes per second to allocate for use by validators. Must be > 0") fs.Float64(DiskMaxNonVdrUsageKey, 1000*units.GiB, "Number of disk reads/writes per second that, if fully utilized, will rate limit all non-validators. Must be >= 0") fs.Float64(DiskMaxNonVdrNodeUsageKey, 1000*units.GiB, "Maximum number of disk reads/writes per second that a non-validator can utilize. Must be >= 0") + + // Opentelemetry tracing + fs.Bool(TracingEnabledKey, false, "If true, enable opentelemetry tracing") + fs.String(TracingExporterTypeKey, trace.GRPC.String(), fmt.Sprintf("Type of exporter to use for tracing. 
Options are [%s, %s]", trace.GRPC, trace.HTTP)) + fs.String(TracingEndpointKey, "localhost:4317", "The endpoint to send trace data to") + fs.Bool(TracingInsecureKey, true, "If true, don't use TLS when sending trace data") + fs.Float64(TracingSampleRateKey, 0.1, "The fraction of traces to sample. If >= 1, always sample. If <= 0, never sample") + // TODO add flag to take in headers to send from exporter } // BuildFlagSet returns a complete set of flags for avalanchego -func BuildFlagSet() *flag.FlagSet { - // TODO parse directly into a *pflag.FlagSet instead of into a *flag.FlagSet - // and then putting those into a *plag.FlagSet - fs := flag.NewFlagSet(constants.AppName, flag.ContinueOnError) +func BuildFlagSet() *pflag.FlagSet { + fs := pflag.NewFlagSet(constants.AppName, pflag.ContinueOnError) addProcessFlags(fs) addNodeFlags(fs) return fs diff --git a/avalanchego/config/keys.go b/avalanchego/config/keys.go index 745c78ba..01c33643 100644 --- a/avalanchego/config/keys.go +++ b/avalanchego/config/keys.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package config @@ -38,14 +38,12 @@ const ( DBConfigFileKey = "db-config-file" DBConfigContentKey = "db-config-file-content" PublicIPKey = "public-ip" - DynamicUpdateDurationKey = "dynamic-update-duration" - DynamicPublicIPResolverKey = "dynamic-public-ip" PublicIPResolutionFreqKey = "public-ip-resolution-frequency" PublicIPResolutionServiceKey = "public-ip-resolution-service" InboundConnUpgradeThrottlerCooldownKey = "inbound-connection-throttling-cooldown" InboundThrottlerMaxConnsPerSecKey = "inbound-connection-throttling-max-conns-per-sec" - OutboundConnectionThrottlingRps = "outbound-connection-throttling-rps" - OutboundConnectionTimeout = "outbound-connection-timeout" + OutboundConnectionThrottlingRpsKey = "outbound-connection-throttling-rps" + OutboundConnectionTimeoutKey = "outbound-connection-timeout" HTTPHostKey = "http-host" HTTPPortKey = "http-port" HTTPSEnabledKey = "http-tls-enabled" @@ -56,6 +54,10 @@ const ( HTTPAllowedOrigins = "http-allowed-origins" HTTPShutdownTimeoutKey = "http-shutdown-timeout" HTTPShutdownWaitKey = "http-shutdown-wait" + HTTPReadTimeoutKey = "http-read-timeout" + HTTPReadHeaderTimeoutKey = "http-read-header-timeout" + HTTPWriteTimeoutKey = "http-write-timeout" + HTTPIdleTimeoutKey = "http-idle-timeout" APIAuthRequiredKey = "api-auth-required" APIAuthPasswordKey = "api-auth-password" APIAuthPasswordFileKey = "api-auth-password-file" @@ -96,17 +98,19 @@ const ( NetworkPingTimeoutKey = "network-ping-timeout" NetworkPingFrequencyKey = "network-ping-frequency" NetworkMaxReconnectDelayKey = "network-max-reconnect-delay" - NetworkCompressionEnabledKey = "network-compression-enabled" + NetworkCompressionEnabledKey = "network-compression-enabled" // TODO this is deprecated. 
Eventually remove it and constants.DefaultNetworkCompressionEnabled + NetworkCompressionTypeKey = "network-compression-type" NetworkMaxClockDifferenceKey = "network-max-clock-difference" NetworkAllowPrivateIPsKey = "network-allow-private-ips" NetworkRequireValidatorToConnectKey = "network-require-validator-to-connect" NetworkPeerReadBufferSizeKey = "network-peer-read-buffer-size" NetworkPeerWriteBufferSizeKey = "network-peer-write-buffer-size" + NetworkTCPProxyEnabledKey = "network-tcp-proxy-enabled" + NetworkTCPProxyReadTimeoutKey = "network-tcp-proxy-read-timeout" NetworkTLSKeyLogFileKey = "network-tls-key-log-file-unsafe" BenchlistFailThresholdKey = "benchlist-fail-threshold" BenchlistDurationKey = "benchlist-duration" BenchlistMinFailingDurationKey = "benchlist-min-failing-duration" - BuildDirKey = "build-dir" LogsDirKey = "log-dir" LogLevelKey = "log-level" LogDisplayLevelKey = "log-display-level" @@ -128,7 +132,7 @@ const ( SnowMaxTimeProcessingKey = "snow-max-time-processing" SnowMixedQueryNumPushVdrKey = "snow-mixed-query-num-push-vdr" SnowMixedQueryNumPushNonVdrKey = "snow-mixed-query-num-push-non-vdr" - WhitelistedSubnetsKey = "whitelisted-subnets" + TrackSubnetsKey = "track-subnets" AdminAPIEnabledKey = "api-admin-enabled" InfoAPIEnabledKey = "api-info-enabled" KeystoreAPIEnabledKey = "api-keystore-enabled" @@ -139,6 +143,7 @@ const ( IpcsPathKey = "ipcs-path" MeterVMsEnabledKey = "meter-vms-enabled" ConsensusGossipFrequencyKey = "consensus-gossip-frequency" + ConsensusAppConcurrencyKey = "consensus-app-concurrency" ConsensusGossipAcceptedFrontierValidatorSizeKey = "consensus-accepted-frontier-gossip-validator-size" ConsensusGossipAcceptedFrontierNonValidatorSizeKey = "consensus-accepted-frontier-gossip-non-validator-size" ConsensusGossipAcceptedFrontierPeerSizeKey = "consensus-accepted-frontier-gossip-peer-size" @@ -149,6 +154,7 @@ const ( AppGossipNonValidatorSizeKey = "consensus-app-gossip-non-validator-size" AppGossipPeerSizeKey = 
"consensus-app-gossip-peer-size" ConsensusShutdownTimeoutKey = "consensus-shutdown-timeout" + ProposerVMUseCurrentHeightKey = "proposervm-use-current-height" FdLimitKey = "fd-limit" IndexEnabledKey = "index-enabled" IndexAllowIncompleteKey = "index-allow-incomplete" @@ -158,11 +164,12 @@ const ( HealthCheckAveragerHalflifeKey = "health-check-averager-halflife" RetryBootstrapKey = "bootstrap-retry-enabled" RetryBootstrapWarnFrequencyKey = "bootstrap-retry-warn-frequency" - PluginModeKey = "plugin-mode-enabled" + PluginDirKey = "plugin-dir" BootstrapBeaconConnectionTimeoutKey = "bootstrap-beacon-connection-timeout" BootstrapMaxTimeGetAncestorsKey = "bootstrap-max-time-get-ancestors" BootstrapAncestorsMaxContainersSentKey = "bootstrap-ancestors-max-containers-sent" BootstrapAncestorsMaxContainersReceivedKey = "bootstrap-ancestors-max-containers-received" + ChainDataDirKey = "chain-data-dir" ChainConfigDirKey = "chain-config-dir" ChainConfigContentKey = "chain-config-content" SubnetConfigDirKey = "subnet-config-dir" @@ -197,4 +204,11 @@ const ( UptimeMetricFreqKey = "uptime-metric-freq" VMAliasesFileKey = "vm-aliases-file" VMAliasesContentKey = "vm-aliases-file-content" + ChainAliasesFileKey = "chain-aliases-file" + ChainAliasesContentKey = "chain-aliases-file-content" + TracingEnabledKey = "tracing-enabled" + TracingEndpointKey = "tracing-endpoint" + TracingInsecureKey = "tracing-insecure" + TracingSampleRateKey = "tracing-sample-rate" + TracingExporterTypeKey = "tracing-exporter-type" ) diff --git a/avalanchego/config/pflags.go b/avalanchego/config/pflags.go deleted file mode 100644 index 791c9d68..00000000 --- a/avalanchego/config/pflags.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package config - -import ( - "flag" - - "github.com/spf13/pflag" -) - -func deprecateFlags(fs *pflag.FlagSet) error { - for key, message := range deprecatedKeys { - if err := fs.MarkDeprecated(key, message); err != nil { - return err - } - } - return nil -} - -// buildFlagSet converts a flag set into a pflag set -func buildPFlagSet(fs *flag.FlagSet) (*pflag.FlagSet, error) { - pfs := pflag.NewFlagSet(fs.Name(), pflag.ContinueOnError) - pfs.AddGoFlagSet(fs) - - // Flag deprecations must be before parse - if err := deprecateFlags(pfs); err != nil { - return nil, err - } - return pfs, nil -} diff --git a/avalanchego/config/viper.go b/avalanchego/config/viper.go index f198e73b..1e236ea3 100644 --- a/avalanchego/config/viper.go +++ b/avalanchego/config/viper.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package config @@ -6,22 +6,22 @@ package config import ( "bytes" "encoding/base64" - "flag" "fmt" "io" + "os" "strings" + "github.com/spf13/pflag" "github.com/spf13/viper" ) // BuildViper returns the viper environment from parsing config file from // default search paths and any parsed command line flags -func BuildViper(fs *flag.FlagSet, args []string) (*viper.Viper, error) { - pfs, err := buildPFlagSet(fs) - if err != nil { +func BuildViper(fs *pflag.FlagSet, args []string) (*viper.Viper, error) { + if err := deprecateFlags(fs); err != nil { return nil, err } - if err := pfs.Parse(args); err != nil { + if err := fs.Parse(args); err != nil { return nil, err } @@ -29,7 +29,7 @@ func BuildViper(fs *flag.FlagSet, args []string) (*viper.Viper, error) { v.AutomaticEnv() v.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) v.SetEnvPrefix("avago") - if err := v.BindPFlags(pfs); err != nil { + if err := v.BindPFlags(fs); err != nil { return nil, err } @@ -56,7 +56,7 @@ func BuildViper(fs *flag.FlagSet, args []string) (*viper.Viper, 
error) { } // Config deprecations must be after v.ReadInConfig - deprecateConfigs(v, fs.Output()) + deprecateConfigs(v, os.Stdout) return v, nil } diff --git a/avalanchego/database/batch.go b/avalanchego/database/batch.go index 573446e8..b097dc60 100644 --- a/avalanchego/database/batch.go +++ b/avalanchego/database/batch.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // For ease of implementation, our database's interface matches Ethereum's @@ -7,6 +7,8 @@ package database +import "golang.org/x/exp/slices" + // Batch is a write-only database that commits changes to its host database // when Write is called. A batch cannot be used concurrently. type Batch interface { @@ -38,3 +40,54 @@ type Batcher interface { // until a final write is called. NewBatch() Batch } + +type BatchOp struct { + Key []byte + Value []byte + Delete bool +} + +type BatchOps struct { + Ops []BatchOp + size int +} + +func (b *BatchOps) Put(key, value []byte) error { + b.Ops = append(b.Ops, BatchOp{ + Key: slices.Clone(key), + Value: slices.Clone(value), + }) + b.size += len(key) + len(value) + return nil +} + +func (b *BatchOps) Delete(key []byte) error { + b.Ops = append(b.Ops, BatchOp{ + Key: slices.Clone(key), + Delete: true, + }) + b.size += len(key) + return nil +} + +func (b *BatchOps) Size() int { + return b.size +} + +func (b *BatchOps) Reset() { + b.Ops = b.Ops[:0] + b.size = 0 +} + +func (b *BatchOps) Replay(w KeyValueWriterDeleter) error { + for _, op := range b.Ops { + if op.Delete { + if err := w.Delete(op.Key); err != nil { + return err + } + } else if err := w.Put(op.Key, op.Value); err != nil { + return err + } + } + return nil +} diff --git a/avalanchego/database/benchmark_database.go b/avalanchego/database/benchmark_database.go index 48745168..9f4ae21c 100644 --- a/avalanchego/database/benchmark_database.go +++ 
b/avalanchego/database/benchmark_database.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database @@ -99,7 +99,6 @@ func BenchmarkPut(b *testing.B, db Database, name string, keys, values [][]byte) } // BenchmarkDelete measures the time it takes to delete a (k, v) from a database. -//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkDelete(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { @@ -127,7 +126,6 @@ func BenchmarkDelete(b *testing.B, db Database, name string, keys, values [][]by } // BenchmarkBatchPut measures the time it takes to batch put. -//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkBatchPut(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { @@ -145,7 +143,6 @@ func BenchmarkBatchPut(b *testing.B, db Database, name string, keys, values [][] } // BenchmarkBatchDelete measures the time it takes to batch delete. -//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkBatchDelete(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { @@ -163,7 +160,6 @@ func BenchmarkBatchDelete(b *testing.B, db Database, name string, keys, values [ } // BenchmarkBatchWrite measures the time it takes to batch write. -//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkBatchWrite(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { @@ -237,7 +233,6 @@ func BenchmarkParallelPut(b *testing.B, db Database, name string, keys, values [ } // BenchmarkParallelDelete measures the time it takes to delete a (k, v) from the db. 
-//nolint:interfacer // This function takes in a database to be the expected type. func BenchmarkParallelDelete(b *testing.B, db Database, name string, keys, values [][]byte) { count := len(keys) if count == 0 { diff --git a/avalanchego/database/common.go b/avalanchego/database/common.go index 4de692d7..a27b0d27 100644 --- a/avalanchego/database/common.go +++ b/avalanchego/database/common.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database diff --git a/avalanchego/database/corruptabledb/db.go b/avalanchego/database/corruptabledb/db.go index d8f04024..a9a945ff 100644 --- a/avalanchego/database/corruptabledb/db.go +++ b/avalanchego/database/corruptabledb/db.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package corruptabledb import ( + "context" "fmt" "sync" @@ -11,8 +12,8 @@ import ( ) var ( - _ database.Database = &Database{} - _ database.Batch = &batch{} + _ database.Database = (*Database)(nil) + _ database.Batch = (*batch)(nil) ) // CorruptableDB is a wrapper around Database @@ -70,13 +71,15 @@ func (db *Database) Compact(start []byte, limit []byte) error { return db.handleError(db.Database.Compact(start, limit)) } -func (db *Database) Close() error { return db.handleError(db.Database.Close()) } +func (db *Database) Close() error { + return db.handleError(db.Database.Close()) +} -func (db *Database) HealthCheck() (interface{}, error) { +func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { if err := db.corrupted(); err != nil { return nil, err } - return db.Database.HealthCheck() + return db.Database.HealthCheck(ctx) } func (db *Database) NewBatch() database.Batch { diff --git a/avalanchego/database/corruptabledb/db_test.go b/avalanchego/database/corruptabledb/db_test.go index b9143cbd..6c05036e 100644 --- a/avalanchego/database/corruptabledb/db_test.go +++ b/avalanchego/database/corruptabledb/db_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package corruptabledb import ( + "context" "errors" "testing" @@ -13,6 +14,8 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" ) +var errTest = errors.New("non-nil error") + func TestInterface(t *testing.T) { for _, test := range database.Tests { baseDB := memdb.New() @@ -21,6 +24,14 @@ func TestInterface(t *testing.T) { } } +func FuzzInterface(f *testing.F) { + for _, test := range database.FuzzTests { + baseDB := memdb.New() + db := New(baseDB) + test(f, db) + } +} + // TestCorruption tests to make sure corruptabledb wrapper works as expected. 
func TestCorruption(t *testing.T) { key := []byte("hello") @@ -50,19 +61,18 @@ func TestCorruption(t *testing.T) { return corruptableBatch.Write() }, "corrupted healthcheck": func(db database.Database) error { - _, err := db.HealthCheck() + _, err := db.HealthCheck(context.Background()) return err }, } baseDB := memdb.New() // wrap this db corruptableDB := New(baseDB) - initError := errors.New("corruption error") - _ = corruptableDB.handleError(initError) + _ = corruptableDB.handleError(errTest) for name, testFn := range tests { t.Run(name, func(tt *testing.T) { err := testFn(corruptableDB) - require.ErrorIsf(tt, err, initError, "not received the corruption error") + require.ErrorIsf(tt, err, errTest, "not received the corruption error") }) } } diff --git a/avalanchego/database/database.go b/avalanchego/database/database.go index c762bc3d..89993a81 100644 --- a/avalanchego/database/database.go +++ b/avalanchego/database/database.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // For ease of implementation, our database's interface matches Ethereum's @@ -32,6 +32,11 @@ type KeyValueWriter interface { // Put inserts the given value into the key-value data store. // // Note: [key] and [value] are safe to modify and read after calling Put. + // + // If [value] is nil or an empty slice, then when it's retrieved + // it may be nil or an empty slice. + // + // Similarly, a nil [key] is treated the same as an empty slice. Put(key []byte, value []byte) error } diff --git a/avalanchego/database/encdb/db.go b/avalanchego/database/encdb/db.go index d0db1ae1..42518bef 100644 --- a/avalanchego/database/encdb/db.go +++ b/avalanchego/database/encdb/db.go @@ -1,20 +1,21 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package encdb import ( + "context" "crypto/cipher" "crypto/rand" "sync" "golang.org/x/crypto/chacha20poly1305" + "golang.org/x/exp/slices" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/nodb" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" ) @@ -23,9 +24,9 @@ const ( ) var ( - _ database.Database = &Database{} - _ database.Batch = &batch{} - _ database.Iterator = &iterator{} + _ database.Database = (*Database)(nil) + _ database.Batch = (*batch)(nil) + _ database.Iterator = (*iterator)(nil) ) // Database encrypts all values that are provided @@ -34,6 +35,7 @@ type Database struct { codec codec.Manager cipher cipher.AEAD db database.Database + closed bool } // New returns a new encrypted database @@ -56,7 +58,7 @@ func (db *Database) Has(key []byte) (bool, error) { db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { + if db.closed { return false, database.ErrClosed } return db.db.Has(key) @@ -66,7 +68,7 @@ func (db *Database) Get(key []byte) ([]byte, error) { db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { + if db.closed { return nil, database.ErrClosed } encVal, err := db.db.Get(key) @@ -80,7 +82,7 @@ func (db *Database) Put(key, value []byte) error { db.lock.Lock() defer db.lock.Unlock() - if db.db == nil { + if db.closed { return database.ErrClosed } @@ -95,7 +97,7 @@ func (db *Database) Delete(key []byte) error { db.lock.Lock() defer db.lock.Unlock() - if db.db == nil { + if db.closed { return database.ErrClosed } return db.db.Delete(key) @@ -124,8 +126,10 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { - return &nodb.Iterator{Err: database.ErrClosed} + if db.closed { + return &database.IteratorError{ + Err: database.ErrClosed, + } 
} return &iterator{ Iterator: db.db.NewIteratorWithStartAndPrefix(start, prefix), @@ -137,7 +141,7 @@ func (db *Database) Compact(start, limit []byte) error { db.lock.Lock() defer db.lock.Unlock() - if db.db == nil { + if db.closed { return database.ErrClosed } return db.db.Compact(start, limit) @@ -147,10 +151,10 @@ func (db *Database) Close() error { db.lock.Lock() defer db.lock.Unlock() - if db.db == nil { + if db.closed { return database.ErrClosed } - db.db = nil + db.closed = true return nil } @@ -158,34 +162,31 @@ func (db *Database) isClosed() bool { db.lock.RLock() defer db.lock.RUnlock() - return db.db == nil + return db.closed } -func (db *Database) HealthCheck() (interface{}, error) { +func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { + if db.closed { return nil, database.ErrClosed } - return db.db.HealthCheck() -} - -type keyValue struct { - key []byte - value []byte - delete bool + return db.db.HealthCheck(ctx) } type batch struct { database.Batch - db *Database - writes []keyValue + db *Database + ops []database.BatchOp } func (b *batch) Put(key, value []byte) error { - b.writes = append(b.writes, keyValue{utils.CopyBytes(key), utils.CopyBytes(value), false}) + b.ops = append(b.ops, database.BatchOp{ + Key: slices.Clone(key), + Value: slices.Clone(value), + }) encValue, err := b.db.encrypt(value) if err != nil { return err @@ -194,7 +195,10 @@ func (b *batch) Put(key, value []byte) error { } func (b *batch) Delete(key []byte) error { - b.writes = append(b.writes, keyValue{utils.CopyBytes(key), nil, true}) + b.ops = append(b.ops, database.BatchOp{ + Key: slices.Clone(key), + Delete: true, + }) return b.Batch.Delete(key) } @@ -202,7 +206,7 @@ func (b *batch) Write() error { b.db.lock.Lock() defer b.db.lock.Unlock() - if b.db.db == nil { + if b.db.closed { return database.ErrClosed } @@ -211,22 +215,22 @@ func (b *batch) Write() error { // Reset resets the batch for 
reuse. func (b *batch) Reset() { - if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { - b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) + if cap(b.ops) > len(b.ops)*database.MaxExcessCapacityFactor { + b.ops = make([]database.BatchOp, 0, cap(b.ops)/database.CapacityReductionFactor) } else { - b.writes = b.writes[:0] + b.ops = b.ops[:0] } b.Batch.Reset() } // Replay replays the batch contents. func (b *batch) Replay(w database.KeyValueWriterDeleter) error { - for _, keyvalue := range b.writes { - if keyvalue.delete { - if err := w.Delete(keyvalue.key); err != nil { + for _, op := range b.ops { + if op.Delete { + if err := w.Delete(op.Key); err != nil { return err } - } else if err := w.Put(keyvalue.key, keyvalue.value); err != nil { + } else if err := w.Put(op.Key, op.Value); err != nil { return err } } @@ -274,9 +278,13 @@ func (it *iterator) Error() error { return it.Iterator.Error() } -func (it *iterator) Key() []byte { return it.key } +func (it *iterator) Key() []byte { + return it.key +} -func (it *iterator) Value() []byte { return it.val } +func (it *iterator) Value() []byte { + return it.val +} type encryptedValue struct { Ciphertext []byte `serialize:"true"` diff --git a/avalanchego/database/encdb/db_test.go b/avalanchego/database/encdb/db_test.go index 571a79df..fe64ecc1 100644 --- a/avalanchego/database/encdb/db_test.go +++ b/avalanchego/database/encdb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package encdb @@ -6,15 +6,18 @@ package encdb import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" ) +const testPassword = "lol totally a secure password" //nolint:gosec + func TestInterface(t *testing.T) { - pw := "lol totally a secure password" // #nosec G101 for _, test := range database.Tests { unencryptedDB := memdb.New() - db, err := New([]byte(pw), unencryptedDB) + db, err := New([]byte(testPassword), unencryptedDB) if err != nil { t.Fatal(err) } @@ -23,13 +26,23 @@ func TestInterface(t *testing.T) { } } +func FuzzInterface(f *testing.F) { + for _, test := range database.FuzzTests { + unencryptedDB := memdb.New() + db, err := New([]byte(testPassword), unencryptedDB) + if err != nil { + require.NoError(f, err) + } + test(f, db) + } +} + func BenchmarkInterface(b *testing.B) { - pw := "lol totally a secure password" // #nosec G101 for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) for _, bench := range database.Benchmarks { unencryptedDB := memdb.New() - db, err := New([]byte(pw), unencryptedDB) + db, err := New([]byte(testPassword), unencryptedDB) if err != nil { b.Fatal(err) } diff --git a/avalanchego/database/errors.go b/avalanchego/database/errors.go index ee77dbbb..ee46521b 100644 --- a/avalanchego/database/errors.go +++ b/avalanchego/database/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package database diff --git a/avalanchego/database/helpers.go b/avalanchego/database/helpers.go index e1281b53..e56245b1 100644 --- a/avalanchego/database/helpers.go +++ b/avalanchego/database/helpers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package database @@ -13,6 +13,7 @@ import ( ) const ( + Uint64Size = 8 // bytes // kvPairOverhead is an estimated overhead for a kv pair in a database. kvPairOverhead = 8 // bytes ) @@ -49,13 +50,13 @@ func GetUInt64(db KeyValueReader, key []byte) (uint64, error) { } func PackUInt64(val uint64) []byte { - bytes := make([]byte, 8) + bytes := make([]byte, Uint64Size) binary.BigEndian.PutUint64(bytes, val) return bytes } func ParseUInt64(b []byte) (uint64, error) { - if len(b) != 8 { + if len(b) != Uint64Size { return 0, errWrongSize } return binary.BigEndian.Uint64(b), nil diff --git a/avalanchego/database/iterator.go b/avalanchego/database/iterator.go index 04402023..dab02b4f 100644 --- a/avalanchego/database/iterator.go +++ b/avalanchego/database/iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // For ease of implementation, our database's interface matches Ethereum's @@ -7,6 +7,8 @@ package database +var _ Iterator = (*IteratorError)(nil) + // Iterator iterates over a database's key/value pairs. // // When it encounters an error any seek will return false and will yield no key/ @@ -60,3 +62,26 @@ type Iteratee interface { // key. 
NewIteratorWithStartAndPrefix(start, prefix []byte) Iterator } + +// IteratorError does nothing and returns the provided error +type IteratorError struct { + Err error +} + +func (*IteratorError) Next() bool { + return false +} + +func (i *IteratorError) Error() error { + return i.Err +} + +func (*IteratorError) Key() []byte { + return nil +} + +func (*IteratorError) Value() []byte { + return nil +} + +func (*IteratorError) Release() {} diff --git a/avalanchego/database/leveldb/db.go b/avalanchego/database/leveldb/db.go index 91681a66..4a8b6518 100644 --- a/avalanchego/database/leveldb/db.go +++ b/avalanchego/database/leveldb/db.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package leveldb import ( "bytes" + "context" "encoding/json" "fmt" "math" @@ -22,6 +23,8 @@ import ( "go.uber.org/zap" + "golang.org/x/exp/slices" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" @@ -63,9 +66,9 @@ const ( ) var ( - _ database.Database = &Database{} - _ database.Batch = &batch{} - _ database.Iterator = &iter{} + _ database.Database = (*Database)(nil) + _ database.Batch = (*batch)(nil) + _ database.Iterator = (*iter)(nil) ) // Database is a persistent key-value store. Apart from basic data storage @@ -76,7 +79,7 @@ type Database struct { // metrics is only initialized and used when [MetricUpdateFrequency] is >= 0 // in the config metrics metrics - closed utils.AtomicBool + closed utils.Atomic[bool] closeOnce sync.Once // closeCh is closed when Close() is called. 
closeCh chan struct{} @@ -199,7 +202,7 @@ func New(file string, configBytes []byte, log logging.Logger, namespace string, } } - log.Info("creating new leveldb", + log.Info("creating leveldb", zap.Reflect("config", parsedConfig), ) @@ -290,7 +293,9 @@ func (db *Database) Delete(key []byte) error { // NewBatch creates a write/delete-only buffer that is atomically committed to // the database when write is called -func (db *Database) NewBatch() database.Batch { return &batch{db: db} } +func (db *Database) NewBatch() database.Batch { + return &batch{db: db} +} // NewIterator creates a lexicographically ordered iterator over the database func (db *Database) NewIterator() database.Iterator { @@ -348,7 +353,7 @@ func (db *Database) Compact(start []byte, limit []byte) error { } func (db *Database) Close() error { - db.closed.SetValue(true) + db.closed.Set(true) db.closeOnce.Do(func() { close(db.closeCh) }) @@ -356,8 +361,8 @@ func (db *Database) Close() error { return updateError(db.DB.Close()) } -func (db *Database) HealthCheck() (interface{}, error) { - if db.closed.GetValue() { +func (db *Database) HealthCheck(context.Context) (interface{}, error) { + if db.closed.Get() { return nil, database.ErrClosed } return nil, nil @@ -385,7 +390,9 @@ func (b *batch) Delete(key []byte) error { } // Size retrieves the amount of data queued up for writing. -func (b *batch) Size() int { return b.size } +func (b *batch) Size() int { + return b.size +} // Write flushes any accumulated data to disk. func (b *batch) Write() error { @@ -409,7 +416,9 @@ func (b *batch) Replay(w database.KeyValueWriterDeleter) error { } // Inner returns itself -func (b *batch) Inner() database.Batch { return b } +func (b *batch) Inner() database.Batch { + return b +} type replayer struct { writerDeleter database.KeyValueWriterDeleter @@ -440,7 +449,7 @@ type iter struct { func (it *iter) Next() bool { // Short-circuit and set an error if the underlying database has been closed. 
- if it.db.closed.GetValue() { + if it.db.closed.Get() { it.key = nil it.val = nil it.err = database.ErrClosed @@ -449,8 +458,8 @@ func (it *iter) Next() bool { hasNext := it.Iterator.Next() if hasNext { - it.key = utils.CopyBytes(it.Iterator.Key()) - it.val = utils.CopyBytes(it.Iterator.Value()) + it.key = slices.Clone(it.Iterator.Key()) + it.val = slices.Clone(it.Iterator.Value()) } else { it.key = nil it.val = nil @@ -465,9 +474,13 @@ func (it *iter) Error() error { return updateError(it.Iterator.Error()) } -func (it *iter) Key() []byte { return it.key } +func (it *iter) Key() []byte { + return it.key +} -func (it *iter) Value() []byte { return it.val } +func (it *iter) Value() []byte { + return it.val +} func updateError(err error) error { switch err { diff --git a/avalanchego/database/leveldb/db_test.go b/avalanchego/database/leveldb/db_test.go index 764ad806..703bc412 100644 --- a/avalanchego/database/leveldb/db_test.go +++ b/avalanchego/database/leveldb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package leveldb @@ -8,6 +8,8 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -30,6 +32,24 @@ func TestInterface(t *testing.T) { } } +func FuzzInterface(f *testing.F) { + for _, test := range database.FuzzTests { + folder := f.TempDir() + db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry()) + if err != nil { + require.NoError(f, err) + } + + defer db.Close() + + test(f, db) + + // The database may have been closed by the test, so we don't care if it + // errors here. 
+ _ = db.Close() + } +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/avalanchego/database/leveldb/metrics.go b/avalanchego/database/leveldb/metrics.go index 6d19c661..8b2971a3 100644 --- a/avalanchego/database/leveldb/metrics.go +++ b/avalanchego/database/leveldb/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package leveldb diff --git a/avalanchego/database/linkeddb/codec.go b/avalanchego/database/linkeddb/codec.go index 2c92d1a6..7780690b 100644 --- a/avalanchego/database/linkeddb/codec.go +++ b/avalanchego/database/linkeddb/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkeddb diff --git a/avalanchego/database/linkeddb/linkeddb.go b/avalanchego/database/linkeddb/linkeddb.go index 2879527b..597e1258 100644 --- a/avalanchego/database/linkeddb/linkeddb.go +++ b/avalanchego/database/linkeddb/linkeddb.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkeddb @@ -6,9 +6,11 @@ package linkeddb import ( "sync" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" + "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/utils" ) const ( @@ -18,8 +20,8 @@ const ( var ( headKey = []byte{0x01} - _ LinkedDB = &linkedDB{} - _ database.Iterator = &iterator{} + _ LinkedDB = (*linkedDB)(nil) + _ database.Iterator = (*iterator)(nil) ) // LinkedDB provides a key value interface while allowing iteration. 
@@ -43,7 +45,7 @@ type linkedDB struct { headKeyIsSynced, headKeyExists, headKeyIsUpdated, updatedHeadKeyExists bool headKey, updatedHeadKey []byte // these variables provide caching for the nodes. - nodeCache cache.Cacher // key -> *node + nodeCache cache.Cacher[string, *node] // key -> *node updatedNodes map[string]*node // db is the underlying database that this list is stored in. @@ -62,14 +64,16 @@ type node struct { func New(db database.Database, cacheSize int) LinkedDB { return &linkedDB{ - nodeCache: &cache.LRU{Size: cacheSize}, + nodeCache: &cache.LRU[string, *node]{Size: cacheSize}, updatedNodes: make(map[string]*node), db: db, batch: db.NewBatch(), } } -func NewDefault(db database.Database) LinkedDB { return New(db, defaultCacheSize) } +func NewDefault(db database.Database) LinkedDB { + return New(db, defaultCacheSize) +} func (ldb *linkedDB) Has(key []byte) (bool, error) { ldb.lock.RLock() @@ -95,7 +99,7 @@ func (ldb *linkedDB) Put(key, value []byte) error { // If the key already has a node in the list, update that node. existingNode, err := ldb.getNode(key) if err == nil { - existingNode.Value = utils.CopyBytes(value) + existingNode.Value = slices.Clone(value) if err := ldb.putNode(key, existingNode); err != nil { return err } @@ -106,7 +110,7 @@ func (ldb *linkedDB) Put(key, value []byte) error { } // The key isn't currently in the list, so we should add it as the head. - newHead := node{Value: utils.CopyBytes(value)} + newHead := node{Value: slices.Clone(value)} if headKey, err := ldb.getHeadKey(); err == nil { // The list currently has a head, so we need to update the old head. oldHead, err := ldb.getNode(headKey) @@ -228,7 +232,9 @@ func (ldb *linkedDB) Head() ([]byte, []byte, error) { // This iterator does not guarantee that keys are returned in lexicographic // order. 
-func (ldb *linkedDB) NewIterator() database.Iterator { return &iterator{ldb: ldb} } +func (ldb *linkedDB) NewIterator() database.Iterator { + return &iterator{ldb: ldb} +} // NewIteratorWithStart returns an iterator that starts at [start]. // This iterator does not guarantee that keys are returned in lexicographic @@ -294,8 +300,7 @@ func (ldb *linkedDB) getNode(key []byte) (node, error) { defer ldb.cacheLock.Unlock() keyStr := string(key) - if nodeIntf, exists := ldb.nodeCache.Get(keyStr); exists { - n := nodeIntf.(*node) + if n, exists := ldb.nodeCache.Get(keyStr); exists { if n == nil { return node{}, database.ErrNotFound } @@ -304,9 +309,7 @@ func (ldb *linkedDB) getNode(key []byte) (node, error) { nodeBytes, err := ldb.db.Get(nodeKey(key)) if err == database.ErrNotFound { - // Passing [nil] without the pointer cast would result in a panic when - // performing the type assertion in the above cache check. - ldb.nodeCache.Put(keyStr, (*node)(nil)) + ldb.nodeCache.Put(keyStr, nil) return node{}, err } if err != nil { @@ -336,9 +339,7 @@ func (ldb *linkedDB) deleteNode(key []byte) error { func (ldb *linkedDB) resetBatch() { ldb.headKeyIsUpdated = false - for key := range ldb.updatedNodes { - delete(ldb.updatedNodes, key) - } + maps.Clear(ldb.updatedNodes) ldb.batch.Reset() } @@ -416,10 +417,19 @@ func (it *iterator) Next() bool { return true } -func (it *iterator) Error() error { return it.err } -func (it *iterator) Key() []byte { return it.key } -func (it *iterator) Value() []byte { return it.value } -func (it *iterator) Release() {} +func (it *iterator) Error() error { + return it.err +} + +func (it *iterator) Key() []byte { + return it.key +} + +func (it *iterator) Value() []byte { + return it.value +} + +func (*iterator) Release() {} func nodeKey(key []byte) []byte { newKey := make([]byte, len(key)+1) diff --git a/avalanchego/database/linkeddb/linkeddb_test.go b/avalanchego/database/linkeddb/linkeddb_test.go index d120d693..9ee698dc 100644 --- 
a/avalanchego/database/linkeddb/linkeddb_test.go +++ b/avalanchego/database/linkeddb/linkeddb_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkeddb diff --git a/avalanchego/database/manager/manager.go b/avalanchego/database/manager/manager.go index c2459052..45d3cb08 100644 --- a/avalanchego/database/manager/manager.go +++ b/avalanchego/database/manager/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package manager @@ -29,7 +29,7 @@ var ( errNoDBs = errors.New("no dbs given") ) -var _ Manager = &manager{} +var _ Manager = (*manager)(nil) type Manager interface { // Current returns the database with the current database version. @@ -180,7 +180,7 @@ func new( return filepath.SkipDir }) - SortDescending(manager.databases) + utils.Sort(manager.databases) // If an error occurred walking [dbDirPath] close the // database manager and return the original error here. 
@@ -210,8 +210,8 @@ func NewManagerFromDBs(dbs []*VersionedDatabase) (Manager, error) { if len(dbs) == 0 { return nil, errNoDBs } - SortDescending(dbs) - sortedAndUnique := utils.IsSortedAndUnique(innerSortDescendingVersionedDBs(dbs)) + utils.Sort(dbs) + sortedAndUnique := utils.IsSortedAndUniqueSortable(dbs) if !sortedAndUnique { return nil, errNonSortedAndUniqueDBs } @@ -220,7 +220,9 @@ func NewManagerFromDBs(dbs []*VersionedDatabase) (Manager, error) { }, nil } -func (m *manager) Current() *VersionedDatabase { return m.databases[0] } +func (m *manager) Current() *VersionedDatabase { + return m.databases[0] +} func (m *manager) Previous() (*VersionedDatabase, bool) { if len(m.databases) < 2 { @@ -229,7 +231,9 @@ func (m *manager) Previous() (*VersionedDatabase, bool) { return m.databases[1], true } -func (m *manager) GetDatabases() []*VersionedDatabase { return m.databases } +func (m *manager) GetDatabases() []*VersionedDatabase { + return m.databases +} func (m *manager) Close() error { errs := wrappers.Errs{} diff --git a/avalanchego/database/manager/manager_test.go b/avalanchego/database/manager/manager_test.go index e4f3843e..ffa82bb9 100644 --- a/avalanchego/database/manager/manager_test.go +++ b/avalanchego/database/manager/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package manager @@ -21,65 +21,62 @@ import ( ) func TestNewSingleLevelDB(t *testing.T) { + require := require.New(t) dir := t.TempDir() v1 := version.Semantic1_0_0 dbPath := filepath.Join(dir, v1.String()) db, err := leveldb.New(dbPath, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) err = db.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(err) manager, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) semDB := manager.Current() cmp := semDB.Version.Compare(v1) - require.Equal(t, 0, cmp, "incorrect version on current database") + require.Equal(0, cmp, "incorrect version on current database") _, exists := manager.Previous() - require.False(t, exists, "there should be no previous database") + require.False(exists, "there should be no previous database") dbs := manager.GetDatabases() - require.Len(t, dbs, 1) + require.Len(dbs, 1) err = manager.Close() - require.NoError(t, err) + require.NoError(err) } func TestNewCreatesSingleDB(t *testing.T) { + require := require.New(t) + dir := t.TempDir() v1 := version.Semantic1_0_0 manager, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) semDB := manager.Current() cmp := semDB.Version.Compare(v1) - require.Equal(t, 0, cmp, "incorrect version on current database") + require.Equal(0, cmp, "incorrect version on current database") _, exists := manager.Previous() - require.False(t, exists, "there should be no previous database") + require.False(exists, "there should be no previous database") dbs := manager.GetDatabases() - require.Len(t, dbs, 1) + require.Len(dbs, 1) err = manager.Close() - require.NoError(t, err) + require.NoError(err) } func TestNewInvalidMemberPresent(t *testing.T) { + require := require.New(t) + dir := t.TempDir() v1 := &version.Semantic{ @@ -95,41 +92,37 @@ 
func TestNewInvalidMemberPresent(t *testing.T) { dbPath1 := filepath.Join(dir, v1.String()) db1, err := leveldb.New(dbPath1, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) dbPath2 := filepath.Join(dir, v2.String()) db2, err := leveldb.New(dbPath2, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) err = db2.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, err = NewLevelDB(dir, nil, logging.NoLog{}, v2, "", prometheus.NewRegistry()) - require.Error(t, err, "expected to error creating the manager due to an open db") + require.Error(err, "expected to error creating the manager due to an open db") err = db1.Close() - require.NoError(t, err) + require.NoError(err) f, err := os.Create(filepath.Join(dir, "dummy")) - require.NoError(t, err) + require.NoError(err) err = f.Close() - require.NoError(t, err) + require.NoError(err) db, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) - require.NoError(t, err, "expected not to error with a non-directory file being present") + require.NoError(err, "expected not to error with a non-directory file being present") err = db.Close() - require.NoError(t, err) + require.NoError(err) } func TestNewSortsDatabases(t *testing.T) { + require := require.New(t) + dir := t.TempDir() vers := []*version.Semantic{ @@ -163,49 +156,41 @@ func TestNewSortsDatabases(t *testing.T) { for _, version := range vers { dbPath := filepath.Join(dir, version.String()) db, err := leveldb.New(dbPath, nil, logging.NoLog{}, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) err = db.Close() - if err != nil { - t.Fatal(err) - } + require.NoError(err) } manager, err := NewLevelDB(dir, nil, logging.NoLog{}, vers[0], "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) defer func() { err = manager.Close() - 
require.NoError(t, err, "problem closing database manager") + require.NoError(err, "problem closing database manager") }() semDB := manager.Current() cmp := semDB.Version.Compare(vers[0]) - require.Equal(t, 0, cmp, "incorrect version on current database") + require.Equal(0, cmp, "incorrect version on current database") prev, exists := manager.Previous() - if !exists { - t.Fatal("expected to find a previous database") - } + require.True(exists, "expected to find a previous database") cmp = prev.Version.Compare(vers[1]) - require.Equal(t, 0, cmp, "incorrect version on previous database") + require.Equal(0, cmp, "incorrect version on previous database") dbs := manager.GetDatabases() - if len(dbs) != len(vers) { - t.Fatalf("Expected to find %d databases, but found %d", len(vers), len(dbs)) - } + require.Equal(len(vers), len(dbs)) for i, db := range dbs { cmp = db.Version.Compare(vers[i]) - require.Equal(t, 0, cmp, "expected to find database version %s, but found %s", vers[i], db.Version.String()) + require.Equal(0, cmp, "expected to find database version %s, but found %s", vers[i], db.Version.String()) } } func TestPrefixDBManager(t *testing.T) { + require := require.New(t) + db := memdb.New() prefix0 := []byte{0} @@ -219,10 +204,10 @@ func TestPrefixDBManager(t *testing.T) { k1 := []byte{'c', 'u', 'r', 'r', 'y'} v1 := []byte{'w', 'u', 'r', 's', 't'} - require.NoError(t, db0.Put(k0, v0)) - require.NoError(t, db1.Put(k1, v1)) - require.NoError(t, db0.Close()) - require.NoError(t, db1.Close()) + require.NoError(db0.Put(k0, v0)) + require.NoError(db1.Put(k1, v1)) + require.NoError(db0.Close()) + require.NoError(db1.Close()) m := &manager{databases: []*VersionedDatabase{ { @@ -235,15 +220,17 @@ func TestPrefixDBManager(t *testing.T) { m1 := m0.NewPrefixDBManager(prefix1) val, err := m0.Current().Database.Get(k0) - require.NoError(t, err) - require.Equal(t, v0, val) + require.NoError(err) + require.Equal(v0, val) val, err = m1.Current().Database.Get(k1) - require.NoError(t, 
err) - require.Equal(t, v1, val) + require.NoError(err) + require.Equal(v1, val) } func TestNestedPrefixDBManager(t *testing.T) { + require := require.New(t) + db := memdb.New() prefix0 := []byte{0} @@ -257,10 +244,10 @@ func TestNestedPrefixDBManager(t *testing.T) { k1 := []byte{'c', 'u', 'r', 'r', 'y'} v1 := []byte{'w', 'u', 'r', 's', 't'} - require.NoError(t, db0.Put(k0, v0)) - require.NoError(t, db1.Put(k1, v1)) - require.NoError(t, db0.Close()) - require.NoError(t, db1.Close()) + require.NoError(db0.Put(k0, v0)) + require.NoError(db1.Put(k1, v1)) + require.NoError(db0.Close()) + require.NoError(db1.Close()) m := &manager{databases: []*VersionedDatabase{ { @@ -273,15 +260,17 @@ func TestNestedPrefixDBManager(t *testing.T) { m1 := m0.NewNestedPrefixDBManager(prefix1) val, err := m0.Current().Database.Get(k0) - require.NoError(t, err) - require.Equal(t, v0, val) + require.NoError(err) + require.Equal(v0, val) val, err = m1.Current().Database.Get(k1) - require.NoError(t, err) - require.Equal(t, v1, val) + require.NoError(err) + require.Equal(v1, val) } func TestMeterDBManager(t *testing.T) { + require := require.New(t) + registry := prometheus.NewRegistry() m := &manager{databases: []*VersionedDatabase{ @@ -311,24 +300,26 @@ func TestMeterDBManager(t *testing.T) { // that there are no errors registering metrics for multiple // versioned databases. 
manager, err := m.NewMeterDBManager("", registry) - require.NoError(t, err) + require.NoError(err) dbs := manager.GetDatabases() - require.Len(t, dbs, 3) + require.Len(dbs, 3) _, ok := dbs[0].Database.(*meterdb.Database) - require.True(t, ok) + require.True(ok) _, ok = dbs[1].Database.(*meterdb.Database) - require.False(t, ok) + require.False(ok) _, ok = dbs[2].Database.(*meterdb.Database) - require.False(t, ok) + require.False(ok) // Confirm that the error from a name conflict is handled correctly _, err = m.NewMeterDBManager("", registry) - require.Error(t, err) + require.Error(err) } func TestCompleteMeterDBManager(t *testing.T) { + require := require.New(t) + registry := prometheus.NewRegistry() m := &manager{databases: []*VersionedDatabase{ @@ -358,24 +349,26 @@ func TestCompleteMeterDBManager(t *testing.T) { // that there are no errors registering metrics for multiple // versioned databases. manager, err := m.NewCompleteMeterDBManager("", registry) - require.NoError(t, err) + require.NoError(err) dbs := manager.GetDatabases() - require.Len(t, dbs, 3) + require.Len(dbs, 3) _, ok := dbs[0].Database.(*meterdb.Database) - require.True(t, ok) + require.True(ok) _, ok = dbs[1].Database.(*meterdb.Database) - require.True(t, ok) + require.True(ok) _, ok = dbs[2].Database.(*meterdb.Database) - require.True(t, ok) + require.True(ok) // Confirm that the error from a name conflict is handled correctly _, err = m.NewCompleteMeterDBManager("", registry) - require.Error(t, err) + require.Error(err) } func TestNewManagerFromDBs(t *testing.T) { + require := require.New(t) + versions := []*version.Semantic{ { Major: 3, @@ -408,22 +401,25 @@ func TestNewManagerFromDBs(t *testing.T) { Version: versions[0], }, }) - require.NoError(t, err) + require.NoError(err) dbs := m.GetDatabases() - require.Len(t, dbs, len(versions)) + require.Len(dbs, len(versions)) for i, db := range dbs { - require.Equal(t, 0, db.Version.Compare(versions[i])) + require.Equal(0, 
db.Version.Compare(versions[i])) } } func TestNewManagerFromNoDBs(t *testing.T) { + require := require.New(t) // Should error if no dbs are given _, err := NewManagerFromDBs(nil) - require.Error(t, err) + require.Error(err) } func TestNewManagerFromNonUniqueDBs(t *testing.T) { + require := require.New(t) + _, err := NewManagerFromDBs( []*VersionedDatabase{ { @@ -451,5 +447,5 @@ func TestNewManagerFromNonUniqueDBs(t *testing.T) { }, }, }) - require.Error(t, err) + require.Error(err) } diff --git a/avalanchego/database/manager/mocks/manager.go b/avalanchego/database/manager/mocks/manager.go deleted file mode 100644 index 475c2e38..00000000 --- a/avalanchego/database/manager/mocks/manager.go +++ /dev/null @@ -1,197 +0,0 @@ -// Code generated by mockery v0.0.0-dev. DO NOT EDIT. - -package mocks - -import ( - manager "github.com/ava-labs/avalanchego/database/manager" - mock "github.com/stretchr/testify/mock" - - prometheus "github.com/prometheus/client_golang/prometheus" -) - -// Manager is an autogenerated mock type for the Manager type -type Manager struct { - mock.Mock -} - -// Close provides a mock function with given fields: -func (_m *Manager) Close() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Current provides a mock function with given fields: -func (_m *Manager) Current() *manager.VersionedDatabase { - ret := _m.Called() - - var r0 *manager.VersionedDatabase - if rf, ok := ret.Get(0).(func() *manager.VersionedDatabase); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*manager.VersionedDatabase) - } - } - - return r0 -} - -// CurrentDBBootstrapped provides a mock function with given fields: -func (_m *Manager) CurrentDBBootstrapped() (bool, error) { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := 
ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetDatabases provides a mock function with given fields: -func (_m *Manager) GetDatabases() []*manager.VersionedDatabase { - ret := _m.Called() - - var r0 []*manager.VersionedDatabase - if rf, ok := ret.Get(0).(func() []*manager.VersionedDatabase); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*manager.VersionedDatabase) - } - } - - return r0 -} - -// MarkCurrentDBBootstrapped provides a mock function with given fields: -func (_m *Manager) MarkCurrentDBBootstrapped() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewCompleteMeterDBManager provides a mock function with given fields: namespace, registerer -func (_m *Manager) NewCompleteMeterDBManager(namespace string, registerer prometheus.Registerer) (manager.Manager, error) { - ret := _m.Called(namespace, registerer) - - var r0 manager.Manager - if rf, ok := ret.Get(0).(func(string, prometheus.Registerer) manager.Manager); ok { - r0 = rf(namespace, registerer) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(manager.Manager) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, prometheus.Registerer) error); ok { - r1 = rf(namespace, registerer) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewMeterDBManager provides a mock function with given fields: namespace, registerer -func (_m *Manager) NewMeterDBManager(namespace string, registerer prometheus.Registerer) (manager.Manager, error) { - ret := _m.Called(namespace, registerer) - - var r0 manager.Manager - if rf, ok := ret.Get(0).(func(string, prometheus.Registerer) manager.Manager); ok { - r0 = rf(namespace, registerer) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(manager.Manager) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, prometheus.Registerer) error); 
ok { - r1 = rf(namespace, registerer) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NewNestedPrefixDBManager provides a mock function with given fields: prefix -func (_m *Manager) NewNestedPrefixDBManager(prefix []byte) manager.Manager { - ret := _m.Called(prefix) - - var r0 manager.Manager - if rf, ok := ret.Get(0).(func([]byte) manager.Manager); ok { - r0 = rf(prefix) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(manager.Manager) - } - } - - return r0 -} - -// NewPrefixDBManager provides a mock function with given fields: prefix -func (_m *Manager) NewPrefixDBManager(prefix []byte) manager.Manager { - ret := _m.Called(prefix) - - var r0 manager.Manager - if rf, ok := ret.Get(0).(func([]byte) manager.Manager); ok { - r0 = rf(prefix) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(manager.Manager) - } - } - - return r0 -} - -// Previous provides a mock function with given fields: -func (_m *Manager) Previous() (*manager.VersionedDatabase, bool) { - ret := _m.Called() - - var r0 *manager.VersionedDatabase - if rf, ok := ret.Get(0).(func() *manager.VersionedDatabase); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*manager.VersionedDatabase) - } - } - - var r1 bool - if rf, ok := ret.Get(1).(func() bool); ok { - r1 = rf() - } else { - r1 = ret.Get(1).(bool) - } - - return r0, r1 -} diff --git a/avalanchego/database/manager/versioned_database.go b/avalanchego/database/manager/versioned_database.go index 23a36a5b..6ff983a9 100644 --- a/avalanchego/database/manager/versioned_database.go +++ b/avalanchego/database/manager/versioned_database.go @@ -1,15 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package manager import ( - "sort" - "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/version" ) +var _ utils.Sortable[*VersionedDatabase] = (*VersionedDatabase)(nil) + type VersionedDatabase struct { Database database.Database Version *version.Semantic @@ -20,15 +21,7 @@ func (db *VersionedDatabase) Close() error { return db.Database.Close() } -type innerSortDescendingVersionedDBs []*VersionedDatabase - -// Less returns true if the version at index i is greater than the version at index j -// such that it will sort in descending order (newest version --> oldest version) -func (dbs innerSortDescendingVersionedDBs) Less(i, j int) bool { - return dbs[i].Version.Compare(dbs[j].Version) > 0 +// Note this sorts in descending order (newest version --> oldest version) +func (db *VersionedDatabase) Less(other *VersionedDatabase) bool { + return db.Version.Compare(other.Version) > 0 } - -func (dbs innerSortDescendingVersionedDBs) Len() int { return len(dbs) } -func (dbs innerSortDescendingVersionedDBs) Swap(i, j int) { dbs[j], dbs[i] = dbs[i], dbs[j] } - -func SortDescending(dbs []*VersionedDatabase) { sort.Sort(innerSortDescendingVersionedDBs(dbs)) } diff --git a/avalanchego/database/memdb/db.go b/avalanchego/database/memdb/db.go index 62ed1441..92b687af 100644 --- a/avalanchego/database/memdb/db.go +++ b/avalanchego/database/memdb/db.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package memdb import ( - "sort" + "context" "strings" "sync" + "golang.org/x/exp/slices" + "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/nodb" - "github.com/ava-labs/avalanchego/utils" ) const ( @@ -22,9 +22,9 @@ const ( ) var ( - _ database.Database = &Database{} - _ database.Batch = &batch{} - _ database.Iterator = &iterator{} + _ database.Database = (*Database)(nil) + _ database.Batch = (*batch)(nil) + _ database.Iterator = (*iterator)(nil) ) // Database is an ephemeral key-value store that implements the Database @@ -35,11 +35,15 @@ type Database struct { } // New returns a map with the Database interface methods implemented. -func New() *Database { return NewWithSize(DefaultSize) } +func New() *Database { + return NewWithSize(DefaultSize) +} // NewWithSize returns a map pre-allocated to the provided size with the // Database interface methods implemented. -func NewWithSize(size int) *Database { return &Database{db: make(map[string][]byte, size)} } +func NewWithSize(size int) *Database { + return &Database{db: make(map[string][]byte, size)} +} func (db *Database) Close() error { db.lock.Lock() @@ -78,7 +82,7 @@ func (db *Database) Get(key []byte) ([]byte, error) { return nil, database.ErrClosed } if entry, ok := db.db[string(key)]; ok { - return utils.CopyBytes(entry), nil + return slices.Clone(entry), nil } return nil, database.ErrNotFound } @@ -90,7 +94,7 @@ func (db *Database) Put(key []byte, value []byte) error { if db.db == nil { return database.ErrClosed } - db.db[string(key)] = utils.CopyBytes(value) + db.db[string(key)] = slices.Clone(value) return nil } @@ -105,7 +109,9 @@ func (db *Database) Delete(key []byte) error { return nil } -func (db *Database) NewBatch() database.Batch { return &batch{db: db} } +func (db *Database) NewBatch() database.Batch { + return &batch{db: db} +} func (db *Database) NewIterator() database.Iterator { return db.NewIteratorWithStartAndPrefix(nil, nil) @@ -124,7 +130,9 @@ func (db 
*Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database defer db.lock.RUnlock() if db.db == nil { - return &nodb.Iterator{Err: database.ErrClosed} + return &database.IteratorError{ + Err: database.ErrClosed, + } } startString := string(start) @@ -135,7 +143,7 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database keys = append(keys, key) } } - sort.Strings(keys) // Keys need to be in sorted order + slices.Sort(keys) // Keys need to be in sorted order values := make([][]byte, 0, len(keys)) for _, key := range keys { values = append(values, db.db[key]) @@ -147,7 +155,7 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database } } -func (db *Database) Compact(start []byte, limit []byte) error { +func (db *Database) Compact(_, _ []byte) error { db.lock.RLock() defer db.lock.RUnlock() @@ -157,39 +165,19 @@ func (db *Database) Compact(start []byte, limit []byte) error { return nil } -func (db *Database) HealthCheck() (interface{}, error) { +func (db *Database) HealthCheck(context.Context) (interface{}, error) { if db.isClosed() { return nil, database.ErrClosed } return nil, nil } -type keyValue struct { - key []byte - value []byte - delete bool -} - type batch struct { - db *Database - writes []keyValue - size int -} - -func (b *batch) Put(key, value []byte) error { - b.writes = append(b.writes, keyValue{utils.CopyBytes(key), utils.CopyBytes(value), false}) - b.size += len(key) + len(value) - return nil -} + database.BatchOps -func (b *batch) Delete(key []byte) error { - b.writes = append(b.writes, keyValue{utils.CopyBytes(key), nil, true}) - b.size += len(key) - return nil + db *Database } -func (b *batch) Size() int { return b.size } - func (b *batch) Write() error { b.db.lock.Lock() defer b.db.lock.Unlock() @@ -198,42 +186,20 @@ func (b *batch) Write() error { return database.ErrClosed } - for _, kv := range b.writes { - key := string(kv.key) - if kv.delete { - delete(b.db.db, key) + for _, op := 
range b.Ops { + if op.Delete { + delete(b.db.db, string(op.Key)) } else { - b.db.db[key] = kv.value + b.db.db[string(op.Key)] = op.Value } } return nil } -func (b *batch) Reset() { - if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { - b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) - } else { - b.writes = b.writes[:0] - } - b.size = 0 +func (b *batch) Inner() database.Batch { + return b } -func (b *batch) Replay(w database.KeyValueWriterDeleter) error { - for _, keyvalue := range b.writes { - if keyvalue.delete { - if err := w.Delete(keyvalue.key); err != nil { - return err - } - } else if err := w.Put(keyvalue.key, keyvalue.value); err != nil { - return err - } - } - return nil -} - -// Inner returns itself -func (b *batch) Inner() database.Batch { return b } - type iterator struct { db *Database initialized bool @@ -266,7 +232,9 @@ func (it *iterator) Next() bool { return len(it.keys) > 0 } -func (it *iterator) Error() error { return it.err } +func (it *iterator) Error() error { + return it.err +} func (it *iterator) Key() []byte { if len(it.keys) > 0 { @@ -282,4 +250,7 @@ func (it *iterator) Value() []byte { return nil } -func (it *iterator) Release() { it.keys = nil; it.values = nil } +func (it *iterator) Release() { + it.keys = nil + it.values = nil +} diff --git a/avalanchego/database/memdb/db_test.go b/avalanchego/database/memdb/db_test.go index ed3279ce..c7518978 100644 --- a/avalanchego/database/memdb/db_test.go +++ b/avalanchego/database/memdb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package memdb @@ -15,6 +15,12 @@ func TestInterface(t *testing.T) { } } +func FuzzInterface(f *testing.F) { + for _, test := range database.FuzzTests { + test(f, New()) + } +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/avalanchego/database/meterdb/db.go b/avalanchego/database/meterdb/db.go index f3d7d74e..a2640ca2 100644 --- a/avalanchego/database/meterdb/db.go +++ b/avalanchego/database/meterdb/db.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meterdb import ( + "context" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/database" @@ -11,9 +13,9 @@ import ( ) var ( - _ database.Database = &Database{} - _ database.Batch = &batch{} - _ database.Iterator = &iterator{} + _ database.Database = (*Database)(nil) + _ database.Batch = (*batch)(nil) + _ database.Iterator = (*iterator)(nil) ) // Database tracks the amount of time each operation takes and how many bytes @@ -130,9 +132,9 @@ func (db *Database) Close() error { return err } -func (db *Database) HealthCheck() (interface{}, error) { +func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { start := db.clock.Time() - result, err := db.db.HealthCheck() + result, err := db.db.HealthCheck(ctx) end := db.clock.Time() db.healthCheck.Observe(float64(end.Sub(start))) return result, err diff --git a/avalanchego/database/meterdb/db_test.go b/avalanchego/database/meterdb/db_test.go index fc03fe19..7cc60257 100644 --- a/avalanchego/database/meterdb/db_test.go +++ b/avalanchego/database/meterdb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package meterdb @@ -8,6 +8,8 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" ) @@ -24,6 +26,17 @@ func TestInterface(t *testing.T) { } } +func FuzzInterface(f *testing.F) { + for _, test := range database.FuzzTests { + baseDB := memdb.New() + db, err := New("", prometheus.NewRegistry(), baseDB) + if err != nil { + require.NoError(f, err) + } + test(f, db) + } +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/avalanchego/database/meterdb/metrics.go b/avalanchego/database/meterdb/metrics.go index 814396c3..a0a20e9d 100644 --- a/avalanchego/database/meterdb/metrics.go +++ b/avalanchego/database/meterdb/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meterdb diff --git a/avalanchego/database/mock_batch.go b/avalanchego/database/mock_batch.go index 3a26ec69..778bee90 100644 --- a/avalanchego/database/mock_batch.go +++ b/avalanchego/database/mock_batch.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/database (interfaces: Batch) diff --git a/avalanchego/database/mockdb/db.go b/avalanchego/database/mockdb/db.go deleted file mode 100644 index 17ce1bb7..00000000 --- a/avalanchego/database/mockdb/db.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package mockdb - -import ( - "errors" - - "github.com/ava-labs/avalanchego/database" -) - -var ( - errNoFunction = errors.New("user didn't specify what value(s) return") - - _ database.Database = &Database{} -) - -// Database is a mock database meant to be used in tests. -// You specify the database's return value(s) for a given method call by -// assign value to the corresponding member. -// For example, to specify what should happen when Has is called, -// assign a value to OnHas. -// If no value is assigned to the corresponding member, the method returns an error or nil -// If you -type Database struct { - // Executed when Has is called - OnHas func([]byte) (bool, error) - OnGet func([]byte) ([]byte, error) - OnPut func([]byte, []byte) error - OnDelete func([]byte) error - OnNewBatch func() database.Batch - OnNewIterator func() database.Iterator - OnNewIteratorWithStart func([]byte) database.Iterator - OnNewIteratorWithPrefix func([]byte) database.Iterator - OnNewIteratorWithStartAndPrefix func([]byte, []byte) database.Iterator - OnCompact func([]byte, []byte) error - OnClose func() error - OnHealthCheck func() (interface{}, error) -} - -// New returns a new mock database -func New() *Database { return &Database{} } - -func (db *Database) Has(k []byte) (bool, error) { - if db.OnHas == nil { - return false, errNoFunction - } - return db.OnHas(k) -} - -func (db *Database) Get(k []byte) ([]byte, error) { - if db.OnGet == nil { - return nil, errNoFunction - } - return db.OnGet(k) -} - -func (db *Database) Put(k, v []byte) error { - if db.OnPut == nil { - return errNoFunction - } - return db.OnPut(k, v) -} - -func (db *Database) Delete(k []byte) error { - if db.OnDelete == nil { - return errNoFunction - } - return db.OnDelete(k) -} - -func (db *Database) NewBatch() database.Batch { - if db.OnNewBatch == nil { - return nil - } - return db.OnNewBatch() -} - -func (db *Database) NewIterator() database.Iterator { - if db.OnNewIterator == nil { - return nil - } - 
return db.OnNewIterator() -} - -func (db *Database) NewIteratorWithStart(start []byte) database.Iterator { - if db.OnNewIteratorWithStart == nil { - return nil - } - return db.OnNewIteratorWithStart(start) -} - -func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator { - if db.OnNewIteratorWithPrefix == nil { - return nil - } - return db.OnNewIteratorWithPrefix(prefix) -} - -func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { - if db.OnNewIteratorWithStartAndPrefix == nil { - return nil - } - return db.OnNewIteratorWithStartAndPrefix(start, prefix) -} - -func (db *Database) Compact(start []byte, limit []byte) error { - if db.OnCompact == nil { - return errNoFunction - } - return db.OnCompact(start, limit) -} - -func (db *Database) Close() error { - if db.OnClose == nil { - return errNoFunction - } - return db.OnClose() -} - -func (db *Database) HealthCheck() (interface{}, error) { - if db.OnHealthCheck == nil { - return nil, errNoFunction - } - return db.OnHealthCheck() -} diff --git a/avalanchego/database/mockdb/db_test.go b/avalanchego/database/mockdb/db_test.go deleted file mode 100644 index 9388a4e7..00000000 --- a/avalanchego/database/mockdb/db_test.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package mockdb - -import ( - "bytes" - "errors" - "testing" -) - -// Assert that when no members are assigned values, every method returns nil/error -func TestDefaultError(t *testing.T) { - db := New() - - if err := db.Close(); err == nil { - t.Fatal("should have errored") - } - if _, err := db.Has([]byte{}); err == nil { - t.Fatal("should have errored") - } - if _, err := db.Get([]byte{}); err == nil { - t.Fatal("should have errored") - } - if err := db.Put([]byte{}, []byte{}); err == nil { - t.Fatal("should have errored") - } - if err := db.Delete([]byte{}); err == nil { - t.Fatal("should have errored") - } - if batch := db.NewBatch(); batch != nil { - t.Fatal("should have been nil") - } - if iterator := db.NewIterator(); iterator != nil { - t.Fatal("should have errored") - } - if iterator := db.NewIteratorWithPrefix([]byte{}); iterator != nil { - t.Fatal("should have errored") - } - if iterator := db.NewIteratorWithStart([]byte{}); iterator != nil { - t.Fatal("should have errored") - } - if iterator := db.NewIteratorWithStartAndPrefix([]byte{}, []byte{}); iterator != nil { - t.Fatal("should have errored") - } - if err := db.Compact([]byte{}, []byte{}); err == nil { - t.Fatal("should have errored") - } - if _, err := db.HealthCheck(); err == nil { - t.Fatal("should have errored") - } -} - -// Assert that mocking works for Get -func TestGet(t *testing.T) { - db := New() - - // Mock Has() - db.OnHas = func(b []byte) (bool, error) { - if bytes.Equal(b, []byte{1, 2, 3}) { - return true, nil - } - return false, errors.New("") - } - - if has, err := db.Has([]byte{1, 2, 3}); err != nil { - t.Fatal("should not have errored") - } else if has != true { - t.Fatal("has should be true") - } - - if _, err := db.Has([]byte{1, 2}); err == nil { - t.Fatal("should have have errored") - } -} diff --git a/avalanchego/database/nodb/db.go b/avalanchego/database/nodb/db.go deleted file mode 100644 index 24d71e83..00000000 --- a/avalanchego/database/nodb/db.go +++ /dev/null @@ -1,97 
+0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package nodb - -import ( - "github.com/ava-labs/avalanchego/database" -) - -var ( - _ database.Database = &Database{} - _ database.Batch = &Batch{} - _ database.Iterator = &Iterator{} -) - -// Database is a lightning fast key value store with probabilistic operations. -type Database struct{} - -// Has returns false, nil -func (*Database) Has([]byte) (bool, error) { return false, database.ErrClosed } - -// Get returns nil, error -func (*Database) Get([]byte) ([]byte, error) { return nil, database.ErrClosed } - -// Put returns nil -func (*Database) Put(_, _ []byte) error { return database.ErrClosed } - -// Delete returns nil -func (*Database) Delete([]byte) error { return database.ErrClosed } - -// NewBatch returns a new batch -func (*Database) NewBatch() database.Batch { return &Batch{} } - -// NewIterator returns a new empty iterator -func (*Database) NewIterator() database.Iterator { return &Iterator{} } - -// NewIteratorWithStart returns a new empty iterator -func (*Database) NewIteratorWithStart([]byte) database.Iterator { return &Iterator{} } - -// NewIteratorWithPrefix returns a new empty iterator -func (*Database) NewIteratorWithPrefix([]byte) database.Iterator { return &Iterator{} } - -// NewIteratorWithStartAndPrefix returns a new empty iterator -func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { - return &Iterator{} -} - -// Compact returns nil -func (*Database) Compact(_, _ []byte) error { return database.ErrClosed } - -// Close returns nil -func (*Database) Close() error { return database.ErrClosed } - -// HealthCheck returns error -func (*Database) HealthCheck() (interface{}, error) { return nil, database.ErrClosed } - -// Batch does nothing -type Batch struct{} - -// Put returns nil -func (*Batch) Put(_, _ []byte) error { return database.ErrClosed } - -// Delete returns nil -func (*Batch) 
Delete([]byte) error { return database.ErrClosed } - -// Size returns 0 -func (*Batch) Size() int { return 0 } - -// Write returns nil -func (*Batch) Write() error { return database.ErrClosed } - -// Reset does nothing -func (*Batch) Reset() {} - -// Replay does nothing -func (*Batch) Replay(database.KeyValueWriterDeleter) error { return database.ErrClosed } - -// Inner returns itself -func (b *Batch) Inner() database.Batch { return b } - -// Iterator does nothing -type Iterator struct{ Err error } - -// Next returns false -func (*Iterator) Next() bool { return false } - -// Error returns any errors -func (it *Iterator) Error() error { return it.Err } - -// Key returns nil -func (*Iterator) Key() []byte { return nil } - -// Value returns nil -func (*Iterator) Value() []byte { return nil } - -// Release does nothing -func (*Iterator) Release() {} diff --git a/avalanchego/database/prefixdb/db.go b/avalanchego/database/prefixdb/db.go index 65f9fa8c..f4ba04e3 100644 --- a/avalanchego/database/prefixdb/db.go +++ b/avalanchego/database/prefixdb/db.go @@ -1,14 +1,15 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package prefixdb import ( + "context" "sync" + "golang.org/x/exp/slices" + "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/nodb" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" ) @@ -17,9 +18,9 @@ const ( ) var ( - _ database.Database = &Database{} - _ database.Batch = &batch{} - _ database.Iterator = &iterator{} + _ database.Database = (*Database)(nil) + _ database.Batch = (*batch)(nil) + _ database.Iterator = (*iterator)(nil) ) // Database partitions a database into a sub-database by prefixing all keys with @@ -34,7 +35,8 @@ type Database struct { // concurrently with another operation. All other operations can hold RLock. 
lock sync.RWMutex // The underlying storage - db database.Database + db database.Database + closed bool } // New returns a new prefixed database @@ -69,7 +71,7 @@ func (db *Database) Has(key []byte) (bool, error) { db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { + if db.closed { return false, database.ErrClosed } prefixedKey := db.prefix(key) @@ -85,7 +87,7 @@ func (db *Database) Get(key []byte) ([]byte, error) { db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { + if db.closed { return nil, database.ErrClosed } prefixedKey := db.prefix(key) @@ -102,7 +104,7 @@ func (db *Database) Put(key, value []byte) error { db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { + if db.closed { return database.ErrClosed } prefixedKey := db.prefix(key) @@ -118,7 +120,7 @@ func (db *Database) Delete(key []byte) error { db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { + if db.closed { return database.ErrClosed } prefixedKey := db.prefix(key) @@ -152,8 +154,10 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { - return &nodb.Iterator{Err: database.ErrClosed} + if db.closed { + return &database.IteratorError{ + Err: database.ErrClosed, + } } prefixedStart := db.prefix(start) prefixedPrefix := db.prefix(prefix) @@ -170,7 +174,7 @@ func (db *Database) Compact(start, limit []byte) error { db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { + if db.closed { return database.ErrClosed } return db.db.Compact(db.prefix(start), db.prefix(limit)) @@ -180,10 +184,10 @@ func (db *Database) Close() error { db.lock.Lock() defer db.lock.Unlock() - if db.db == nil { + if db.closed { return database.ErrClosed } - db.db = nil + db.closed = true return nil } @@ -191,17 +195,17 @@ func (db *Database) isClosed() bool { db.lock.RLock() defer db.lock.RUnlock() - return db.db == nil + return db.closed } -func (db *Database) HealthCheck() (interface{}, error) { +func (db 
*Database) HealthCheck(ctx context.Context) (interface{}, error) { db.lock.RLock() defer db.lock.RUnlock() - if db.db == nil { + if db.closed { return nil, database.ErrClosed } - return db.db.HealthCheck() + return db.db.HealthCheck(ctx) } // Return a copy of [key], prepended with this db's prefix. @@ -225,12 +229,6 @@ func (db *Database) prefix(key []byte) []byte { return prefixedKey } -type keyValue struct { - key []byte - value []byte - delete bool -} - // Batch of database operations type batch struct { database.Batch @@ -239,7 +237,7 @@ type batch struct { // Each key is prepended with the database's prefix. // Each byte slice underlying a key should be returned to the pool // when this batch is reset. - writes []keyValue + ops []database.BatchOp } // Assumes that it is OK for the argument to b.Batch.Put @@ -248,8 +246,11 @@ type batch struct { // [value] may be modified after this method returns. func (b *batch) Put(key, value []byte) error { prefixedKey := b.db.prefix(key) - copiedValue := utils.CopyBytes(value) - b.writes = append(b.writes, keyValue{prefixedKey, copiedValue, false}) + copiedValue := slices.Clone(value) + b.ops = append(b.ops, database.BatchOp{ + Key: prefixedKey, + Value: copiedValue, + }) return b.Batch.Put(prefixedKey, copiedValue) } @@ -258,7 +259,10 @@ func (b *batch) Put(key, value []byte) error { // [key] may be modified after this method returns. 
func (b *batch) Delete(key []byte) error { prefixedKey := b.db.prefix(key) - b.writes = append(b.writes, keyValue{prefixedKey, nil, true}) + b.ops = append(b.ops, database.BatchOp{ + Key: prefixedKey, + Delete: true, + }) return b.Batch.Delete(prefixedKey) } @@ -267,7 +271,7 @@ func (b *batch) Write() error { b.db.lock.RLock() defer b.db.lock.RUnlock() - if b.db.db == nil { + if b.db.closed { return database.ErrClosed } return b.Batch.Write() @@ -279,15 +283,15 @@ func (b *batch) Reset() { // Don't return the byte buffers underneath each value back to the pool // because we assume in batch.Replay that it's not safe to modify the // value argument to w.Put. - for _, kv := range b.writes { - b.db.bufferPool.Put(kv.key) + for _, op := range b.ops { + b.db.bufferPool.Put(op.Key) } // Clear b.writes - if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { - b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) + if cap(b.ops) > len(b.ops)*database.MaxExcessCapacityFactor { + b.ops = make([]database.BatchOp, 0, cap(b.ops)/database.CapacityReductionFactor) } else { - b.writes = b.writes[:0] + b.ops = b.ops[:0] } b.Batch.Reset() } @@ -296,14 +300,14 @@ func (b *batch) Reset() { // Assumes it's safe to modify the key argument to w.Delete and w.Put // after those methods return. 
func (b *batch) Replay(w database.KeyValueWriterDeleter) error { - for _, keyvalue := range b.writes { - keyWithoutPrefix := keyvalue.key[len(b.db.dbPrefix):] - if keyvalue.delete { + for _, op := range b.ops { + keyWithoutPrefix := op.Key[len(b.db.dbPrefix):] + if op.Delete { if err := w.Delete(keyWithoutPrefix); err != nil { return err } } else { - if err := w.Put(keyWithoutPrefix, keyvalue.value); err != nil { + if err := w.Put(keyWithoutPrefix, op.Value); err != nil { return err } } @@ -344,9 +348,13 @@ func (it *iterator) Next() bool { return hasNext } -func (it *iterator) Key() []byte { return it.key } +func (it *iterator) Key() []byte { + return it.key +} -func (it *iterator) Value() []byte { return it.val } +func (it *iterator) Value() []byte { + return it.val +} // Error returns [database.ErrClosed] if the underlying db was closed // otherwise it returns the normal iterator error. diff --git a/avalanchego/database/prefixdb/db_test.go b/avalanchego/database/prefixdb/db_test.go index 39c73d87..4ea308a3 100644 --- a/avalanchego/database/prefixdb/db_test.go +++ b/avalanchego/database/prefixdb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package prefixdb @@ -22,6 +22,12 @@ func TestInterface(t *testing.T) { } } +func FuzzInterface(f *testing.F) { + for _, test := range database.FuzzTests { + test(f, New([]byte(""), memdb.New())) + } +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) diff --git a/avalanchego/database/rpcdb/db_client.go b/avalanchego/database/rpcdb/db_client.go index f49380ac..8a6f004c 100644 --- a/avalanchego/database/rpcdb/db_client.go +++ b/avalanchego/database/rpcdb/db_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcdb @@ -6,39 +6,28 @@ package rpcdb import ( "context" "encoding/json" - "sync/atomic" "google.golang.org/protobuf/types/known/emptypb" "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/nodb" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) -const ( - maxBatchSize = 128 * units.KiB - - // baseElementSize is an approximation of the protobuf encoding overhead per - // element - baseElementSize = 8 // bytes -) - var ( - _ database.Database = &DatabaseClient{} - _ database.Batch = &batch{} - _ database.Iterator = &iterator{} + _ database.Database = (*DatabaseClient)(nil) + _ database.Batch = (*batch)(nil) + _ database.Iterator = (*iterator)(nil) ) // DatabaseClient is an implementation of database that talks over RPC. 
type DatabaseClient struct { client rpcdbpb.DatabaseClient - closed utils.AtomicBool - batchIndex int64 + closed utils.Atomic[bool] } // NewClient returns a database instance connected to a remote database instance @@ -54,7 +43,7 @@ func (db *DatabaseClient) Has(key []byte) (bool, error) { if err != nil { return false, err } - return resp.Has, errCodeToError[resp.Err] + return resp.Has, errEnumToError[resp.Err] } // Get attempts to return the value that was mapped to the key that was provided @@ -65,7 +54,7 @@ func (db *DatabaseClient) Get(key []byte) ([]byte, error) { if err != nil { return nil, err } - return resp.Value, errCodeToError[resp.Err] + return resp.Value, errEnumToError[resp.Err] } // Put attempts to set the value this key maps to @@ -77,7 +66,7 @@ func (db *DatabaseClient) Put(key, value []byte) error { if err != nil { return err } - return errCodeToError[resp.Err] + return errEnumToError[resp.Err] } // Delete attempts to remove any mapping from the key @@ -88,11 +77,13 @@ func (db *DatabaseClient) Delete(key []byte) error { if err != nil { return err } - return errCodeToError[resp.Err] + return errEnumToError[resp.Err] } // NewBatch returns a new batch -func (db *DatabaseClient) NewBatch() database.Batch { return &batch{db: db} } +func (db *DatabaseClient) NewBatch() database.Batch { + return &batch{db: db} +} func (db *DatabaseClient) NewIterator() database.Iterator { return db.NewIteratorWithStartAndPrefix(nil, nil) @@ -113,7 +104,9 @@ func (db *DatabaseClient) NewIteratorWithStartAndPrefix(start, prefix []byte) da Prefix: prefix, }) if err != nil { - return &nodb.Iterator{Err: err} + return &database.IteratorError{ + Err: err, + } } return &iterator{ db: db, @@ -130,21 +123,21 @@ func (db *DatabaseClient) Compact(start, limit []byte) error { if err != nil { return err } - return errCodeToError[resp.Err] + return errEnumToError[resp.Err] } // Close attempts to close the database func (db *DatabaseClient) Close() error { - db.closed.SetValue(true) + 
db.closed.Set(true) resp, err := db.client.Close(context.Background(), &rpcdbpb.CloseRequest{}) if err != nil { return err } - return errCodeToError[resp.Err] + return errEnumToError[resp.Err] } -func (db *DatabaseClient) HealthCheck() (interface{}, error) { - health, err := db.client.HealthCheck(context.Background(), &emptypb.Empty{}) +func (db *DatabaseClient) HealthCheck(ctx context.Context) (interface{}, error) { + health, err := db.client.HealthCheck(ctx, &emptypb.Empty{}) if err != nil { return nil, err } @@ -152,106 +145,46 @@ func (db *DatabaseClient) HealthCheck() (interface{}, error) { return json.RawMessage(health.Details), nil } -type keyValue struct { - key []byte - value []byte - delete bool -} - type batch struct { - db *DatabaseClient - writes []keyValue - size int -} + database.BatchOps -func (b *batch) Put(key, value []byte) error { - b.writes = append(b.writes, keyValue{utils.CopyBytes(key), utils.CopyBytes(value), false}) - b.size += len(key) + len(value) - return nil -} - -func (b *batch) Delete(key []byte) error { - b.writes = append(b.writes, keyValue{utils.CopyBytes(key), nil, true}) - b.size += len(key) - return nil + db *DatabaseClient } -func (b *batch) Size() int { return b.size } - func (b *batch) Write() error { - request := &rpcdbpb.WriteBatchRequest{ - Id: atomic.AddInt64(&b.db.batchIndex, 1), - Continues: true, - } - currentSize := 0 - keySet := make(map[string]struct{}, len(b.writes)) - for i := len(b.writes) - 1; i >= 0; i-- { - kv := b.writes[i] - key := string(kv.key) - if _, overwritten := keySet[key]; overwritten { + request := &rpcdbpb.WriteBatchRequest{} + keySet := set.NewSet[string](len(b.Ops)) + for i := len(b.Ops) - 1; i >= 0; i-- { + op := b.Ops[i] + key := string(op.Key) + if keySet.Contains(key) { continue } - keySet[key] = struct{}{} - - sizeChange := baseElementSize + len(kv.key) + len(kv.value) - if newSize := currentSize + sizeChange; newSize > maxBatchSize { - resp, err := 
b.db.client.WriteBatch(context.Background(), request) - if err != nil { - return err - } - if err := errCodeToError[resp.Err]; err != nil { - return err - } - currentSize = 0 - request.Deletes = request.Deletes[:0] - request.Puts = request.Puts[:0] - } - currentSize += sizeChange + keySet.Add(key) - if kv.delete { + if op.Delete { request.Deletes = append(request.Deletes, &rpcdbpb.DeleteRequest{ - Key: kv.key, + Key: op.Key, }) } else { request.Puts = append(request.Puts, &rpcdbpb.PutRequest{ - Key: kv.key, - Value: kv.value, + Key: op.Key, + Value: op.Value, }) } } - request.Continues = false resp, err := b.db.client.WriteBatch(context.Background(), request) if err != nil { return err } - return errCodeToError[resp.Err] -} - -func (b *batch) Reset() { - if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { - b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) - } else { - b.writes = b.writes[:0] - } - b.size = 0 + return errEnumToError[resp.Err] } -func (b *batch) Replay(w database.KeyValueWriterDeleter) error { - for _, keyvalue := range b.writes { - if keyvalue.delete { - if err := w.Delete(keyvalue.key); err != nil { - return err - } - } else if err := w.Put(keyvalue.key, keyvalue.value); err != nil { - return err - } - } - return nil +func (b *batch) Inner() database.Batch { + return b } -func (b *batch) Inner() database.Batch { return b } - type iterator struct { db *DatabaseClient id uint64 @@ -263,7 +196,7 @@ type iterator struct { // Next attempts to move the iterator to the next element and returns if this // succeeded func (it *iterator) Next() bool { - if it.db.closed.GetValue() { + if it.db.closed.Get() { it.data = nil it.errs.Add(database.ErrClosed) return false @@ -297,7 +230,7 @@ func (it *iterator) Error() error { if err != nil { it.errs.Add(err) } else { - it.errs.Add(errCodeToError[resp.Err]) + it.errs.Add(errEnumToError[resp.Err]) } return it.errs.Err } @@ -326,6 +259,6 @@ func (it *iterator) Release() { 
if err != nil { it.errs.Add(err) } else { - it.errs.Add(errCodeToError[resp.Err]) + it.errs.Add(errEnumToError[resp.Err]) } } diff --git a/avalanchego/database/rpcdb/db_server.go b/avalanchego/database/rpcdb/db_server.go index 8eb1e99a..e9e13573 100644 --- a/avalanchego/database/rpcdb/db_server.go +++ b/avalanchego/database/rpcdb/db_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcdb @@ -12,10 +12,13 @@ import ( "google.golang.org/protobuf/types/known/emptypb" "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/units" rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) +const iterationBatchSize = 128 * units.KiB + var errUnknownIterator = errors.New("unknown iterator") // DatabaseServer is a database that is managed over RPC. @@ -24,13 +27,6 @@ type DatabaseServer struct { db database.Database - // batchLock protects [batches] from concurrent modifications. Note that - // [batchLock] does not protect the actual Batch. Batches are documented as - // not being safe for concurrent use. Therefore, it is up to the client to - // respect this invariant. - batchLock sync.Mutex - batches map[int64]database.Batch - // iteratorLock protects [nextIteratorID] and [iterators] from concurrent // modifications. Similarly to [batchLock], [iteratorLock] does not protect // the actual Iterator. 
Iterators are documented as not being safe for @@ -45,7 +41,6 @@ type DatabaseServer struct { func NewServer(db database.Database) *DatabaseServer { return &DatabaseServer{ db: db, - batches: make(map[int64]database.Batch), iterators: make(map[uint64]database.Iterator), } } @@ -55,7 +50,7 @@ func (db *DatabaseServer) Has(_ context.Context, req *rpcdbpb.HasRequest) (*rpcd has, err := db.db.Has(req.Key) return &rpcdbpb.HasResponse{ Has: has, - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } @@ -64,39 +59,39 @@ func (db *DatabaseServer) Get(_ context.Context, req *rpcdbpb.GetRequest) (*rpcd value, err := db.db.Get(req.Key) return &rpcdbpb.GetResponse{ Value: value, - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } // Put delegates the Put call to the managed database and returns the result func (db *DatabaseServer) Put(_ context.Context, req *rpcdbpb.PutRequest) (*rpcdbpb.PutResponse, error) { err := db.db.Put(req.Key, req.Value) - return &rpcdbpb.PutResponse{Err: errorToErrCode[err]}, errorToRPCError(err) + return &rpcdbpb.PutResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) } // Delete delegates the Delete call to the managed database and returns the // result func (db *DatabaseServer) Delete(_ context.Context, req *rpcdbpb.DeleteRequest) (*rpcdbpb.DeleteResponse, error) { err := db.db.Delete(req.Key) - return &rpcdbpb.DeleteResponse{Err: errorToErrCode[err]}, errorToRPCError(err) + return &rpcdbpb.DeleteResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) } // Compact delegates the Compact call to the managed database and returns the // result func (db *DatabaseServer) Compact(_ context.Context, req *rpcdbpb.CompactRequest) (*rpcdbpb.CompactResponse, error) { err := db.db.Compact(req.Start, req.Limit) - return &rpcdbpb.CompactResponse{Err: errorToErrCode[err]}, errorToRPCError(err) + return &rpcdbpb.CompactResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) } // Close delegates the 
Close call to the managed database and returns the result func (db *DatabaseServer) Close(context.Context, *rpcdbpb.CloseRequest) (*rpcdbpb.CloseResponse, error) { err := db.db.Close() - return &rpcdbpb.CloseResponse{Err: errorToErrCode[err]}, errorToRPCError(err) + return &rpcdbpb.CloseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) } // HealthCheck performs a heath check against the underlying database. -func (db *DatabaseServer) HealthCheck(context.Context, *emptypb.Empty) (*rpcdbpb.HealthCheckResponse, error) { - health, err := db.db.HealthCheck() +func (db *DatabaseServer) HealthCheck(ctx context.Context, _ *emptypb.Empty) (*rpcdbpb.HealthCheckResponse, error) { + health, err := db.db.HealthCheck(ctx) if err != nil { return &rpcdbpb.HealthCheckResponse{}, err } @@ -110,42 +105,26 @@ func (db *DatabaseServer) HealthCheck(context.Context, *emptypb.Empty) (*rpcdbpb // WriteBatch takes in a set of key-value pairs and atomically writes them to // the internal database func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbpb.WriteBatchRequest) (*rpcdbpb.WriteBatchResponse, error) { - db.batchLock.Lock() - batch, exists := db.batches[req.Id] - if !exists { - batch = db.db.NewBatch() - db.batches[req.Id] = batch - } - db.batchLock.Unlock() - + batch := db.db.NewBatch() for _, put := range req.Puts { if err := batch.Put(put.Key, put.Value); err != nil { - // Because we are reporting an error, we free the allocated batch. - delete(db.batches, req.Id) - - return &rpcdbpb.WriteBatchResponse{Err: errorToErrCode[err]}, errorToRPCError(err) + return &rpcdbpb.WriteBatchResponse{ + Err: errorToErrEnum[err], + }, errorToRPCError(err) } } - for _, del := range req.Deletes { if err := batch.Delete(del.Key); err != nil { - // Because we are reporting an error, we free the allocated batch. 
- delete(db.batches, req.Id) - - return &rpcdbpb.WriteBatchResponse{Err: errorToErrCode[err]}, errorToRPCError(err) + return &rpcdbpb.WriteBatchResponse{ + Err: errorToErrEnum[err], + }, errorToRPCError(err) } } - if req.Continues { - return &rpcdbpb.WriteBatchResponse{}, nil - } - - db.batchLock.Lock() - delete(db.batches, req.Id) - db.batchLock.Unlock() - err := batch.Write() - return &rpcdbpb.WriteBatchResponse{Err: errorToErrCode[err]}, errorToRPCError(err) + return &rpcdbpb.WriteBatchResponse{ + Err: errorToErrEnum[err], + }, errorToRPCError(err) } // NewIteratorWithStartAndPrefix allocates an iterator and returns the iterator @@ -171,9 +150,11 @@ func (db *DatabaseServer) IteratorNext(_ context.Context, req *rpcdbpb.IteratorN return nil, errUnknownIterator } - size := 0 - data := []*rpcdbpb.PutRequest(nil) - for size < maxBatchSize && it.Next() { + var ( + size int + data []*rpcdbpb.PutRequest + ) + for size < iterationBatchSize && it.Next() { key := it.Key() value := it.Value() size += len(key) + len(value) @@ -196,7 +177,7 @@ func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbpb.Iterator return nil, errUnknownIterator } err := it.Error() - return &rpcdbpb.IteratorErrorResponse{Err: errorToErrCode[err]}, errorToRPCError(err) + return &rpcdbpb.IteratorErrorResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) } // IteratorRelease attempts to release the resources allocated to an iterator @@ -205,12 +186,12 @@ func (db *DatabaseServer) IteratorRelease(_ context.Context, req *rpcdbpb.Iterat it, exists := db.iterators[req.Id] if !exists { db.iteratorLock.Unlock() - return &rpcdbpb.IteratorReleaseResponse{Err: 0}, nil + return &rpcdbpb.IteratorReleaseResponse{}, nil } delete(db.iterators, req.Id) db.iteratorLock.Unlock() err := it.Error() it.Release() - return &rpcdbpb.IteratorReleaseResponse{Err: errorToErrCode[err]}, errorToRPCError(err) + return &rpcdbpb.IteratorReleaseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) } diff --git 
a/avalanchego/database/rpcdb/db_test.go b/avalanchego/database/rpcdb/db_test.go index b16b13db..eec557ed 100644 --- a/avalanchego/database/rpcdb/db_test.go +++ b/avalanchego/database/rpcdb/db_test.go @@ -1,18 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcdb import ( "context" - "net" "testing" "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/grpc/test/bufconn" - "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/corruptabledb" "github.com/ava-labs/avalanchego/database/memdb" @@ -21,10 +17,6 @@ import ( rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) -const ( - bufSize = 1024 * 1024 -) - type testDatabase struct { client *DatabaseClient server *memdb.Database @@ -36,27 +28,19 @@ func setupDB(t testing.TB) *testDatabase { server: memdb.New(), } - listener := bufconn.Listen(bufSize) + listener, err := grpcutils.NewListener() + if err != nil { + t.Fatalf("Failed to create listener: %s", err) + } serverCloser := grpcutils.ServerCloser{} - serverFunc := func(opts []grpc.ServerOption) *grpc.Server { - server := grpc.NewServer(opts...) - rpcdbpb.RegisterDatabaseServer(server, NewServer(db.server)) - serverCloser.Add(server) - return server - } + server := grpcutils.NewServer() + rpcdbpb.RegisterDatabaseServer(server, NewServer(db.server)) + serverCloser.Add(server) - go grpcutils.Serve(listener, serverFunc) + go grpcutils.Serve(listener, server) - dialer := grpc.WithContextDialer( - func(context.Context, string) (net.Conn, error) { - return listener.Dial() - }, - ) - - dopts := grpcutils.DefaultDialOptions - dopts = append(dopts, dialer) - conn, err := grpcutils.Dial("", dopts...) 
+ conn, err := grpcutils.Dial(listener.Addr().String()) if err != nil { t.Fatalf("Failed to dial: %s", err) } @@ -79,6 +63,15 @@ func TestInterface(t *testing.T) { } } +func FuzzInterface(f *testing.F) { + for _, test := range database.FuzzTests { + db := setupDB(f) + test(f, db.client) + + db.closeFn() + } +} + func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) @@ -125,7 +118,7 @@ func TestHealthCheck(t *testing.T) { require.NoError(scenario.testFn(db)) // check db HealthCheck - _, err := db.HealthCheck() + _, err := db.HealthCheck(context.Background()) if err == nil && scenario.wantErr { t.Fatalf("wanted error got nil") return @@ -134,10 +127,10 @@ func TestHealthCheck(t *testing.T) { require.Containsf(err.Error(), scenario.wantErrMsg, "expected error containing %q, got %s", scenario.wantErrMsg, err) return } - require.Nil(err) + require.NoError(err) // check rpc HealthCheck - _, err = baseDB.client.HealthCheck() + _, err = baseDB.client.HealthCheck(context.Background()) if err == nil && scenario.wantErr { t.Fatalf("wanted error got nil") return @@ -146,7 +139,7 @@ func TestHealthCheck(t *testing.T) { require.Containsf(err.Error(), scenario.wantErrMsg, "expected error containing %q, got %s", scenario.wantErrMsg, err) return } - require.Nil(err) + require.NoError(err) }) } } diff --git a/avalanchego/database/rpcdb/errors.go b/avalanchego/database/rpcdb/errors.go index b7d1bd52..8a1fae2f 100644 --- a/avalanchego/database/rpcdb/errors.go +++ b/avalanchego/database/rpcdb/errors.go @@ -1,25 +1,27 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcdb import ( "github.com/ava-labs/avalanchego/database" + + rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" ) var ( - errCodeToError = map[uint32]error{ - 1: database.ErrClosed, - 2: database.ErrNotFound, + errEnumToError = map[rpcdbpb.Error]error{ + rpcdbpb.Error_ERROR_CLOSED: database.ErrClosed, + rpcdbpb.Error_ERROR_NOT_FOUND: database.ErrNotFound, } - errorToErrCode = map[error]uint32{ - database.ErrClosed: 1, - database.ErrNotFound: 2, + errorToErrEnum = map[error]rpcdbpb.Error{ + database.ErrClosed: rpcdbpb.Error_ERROR_CLOSED, + database.ErrNotFound: rpcdbpb.Error_ERROR_NOT_FOUND, } ) func errorToRPCError(err error) error { - if _, ok := errorToErrCode[err]; ok { + if _, ok := errorToErrEnum[err]; ok { return nil } return err diff --git a/avalanchego/database/test_database.go b/avalanchego/database/test_database.go index ee57bc1e..51039171 100644 --- a/avalanchego/database/test_database.go +++ b/avalanchego/database/test_database.go @@ -1,15 +1,19 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package database import ( "bytes" - "crypto/rand" + "io" "testing" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" "github.com/ava-labs/avalanchego/utils" @@ -19,14 +23,17 @@ import ( // Tests is a list of all database tests var Tests = []func(t *testing.T, db Database){ TestSimpleKeyValue, + TestEmptyKey, TestKeyEmptyValue, TestSimpleKeyValueClosed, + TestNewBatchClosed, TestBatchPut, TestBatchDelete, TestBatchReset, TestBatchReuse, TestBatchRewrite, TestBatchReplay, + TestBatchReplayPropagateError, TestBatchInner, TestBatchLargeSize, TestIteratorSnapshot, @@ -48,51 +55,49 @@ var Tests = []func(t *testing.T, db Database){ TestModifyValueAfterBatchPutReplay, TestConcurrentBatches, TestManySmallConcurrentKVPairBatches, + TestPutGetEmpty, +} + +var FuzzTests = []func(*testing.F, Database){ + FuzzKeyValue, } // TestSimpleKeyValue tests to make sure that simple Put + Get + Delete + Has // calls return the expected values. func TestSimpleKeyValue(t *testing.T, db Database) { + require := require.New(t) + key := []byte("hello") value := []byte("world") - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has unexpectedly returned true on key %s", key) - } else if v, err := db.Get(key); err != ErrNotFound { - t.Fatalf("Expected %s on db.Get for missing key %s. 
Returned 0x%x", ErrNotFound, key, v) - } else if err := db.Delete(key); err != nil { - t.Fatalf("Unexpected error on db.Delete: %s", err) - } + has, err := db.Has(key) + require.NoError(err) + require.False(has) - if err := db.Put(key, value); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } + _, err = db.Get(key) + require.Equal(ErrNotFound, err) - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key) - } else if v, err := db.Get(key); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) - } + require.NoError(db.Delete(key)) + require.NoError(db.Put(key, value)) - if err := db.Delete(key); err != nil { - t.Fatalf("Unexpected error on db.Delete: %s", err) - } + has, err = db.Has(key) + require.NoError(err) + require.True(has) - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has unexpectedly returned true on key %s", key) - } else if v, err := db.Get(key); err != ErrNotFound { - t.Fatalf("Expected %s on db.Get for missing key %s. 
Returned 0x%x", ErrNotFound, key, v) - } else if err := db.Delete(key); err != nil { - t.Fatalf("Unexpected error on db.Delete: %s", err) - } + v, err := db.Get(key) + require.NoError(err) + require.Equal(value, v) + + require.NoError(db.Delete(key)) + + has, err = db.Has(key) + require.NoError(err) + require.False(has) + + _, err = db.Get(key) + require.Equal(ErrNotFound, err) + + require.NoError(db.Delete(key)) } func TestKeyEmptyValue(t *testing.T, db Database) { @@ -104,263 +109,259 @@ func TestKeyEmptyValue(t *testing.T, db Database) { _, err := db.Get(key) require.Equal(ErrNotFound, err) - err = db.Put(key, val) - require.NoError(err) + require.NoError(db.Put(key, val)) value, err := db.Get(key) require.NoError(err) - require.Len(value, len(val)) + require.Empty(value) +} + +func TestEmptyKey(t *testing.T, db Database) { + require := require.New(t) + + var ( + nilKey = []byte(nil) + emptyKey = []byte{} + val1 = []byte("hi") + val2 = []byte("hello") + ) + + // Test that nil key can be retrieved by empty key + _, err := db.Get(nilKey) + require.Equal(ErrNotFound, err) + + require.NoError(db.Put(nilKey, val1)) + + value, err := db.Get(emptyKey) + require.NoError(err) + require.Equal(value, val1) + + // Test that empty key can be retrieved by nil key + require.NoError(db.Put(emptyKey, val2)) + + value, err = db.Get(nilKey) + require.NoError(err) + require.Equal(value, val2) } // TestSimpleKeyValueClosed tests to make sure that Put + Get + Delete + Has // calls return the correct error when the database has been closed. func TestSimpleKeyValueClosed(t *testing.T, db Database) { + require := require.New(t) + key := []byte("hello") value := []byte("world") - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has unexpectedly returned true on key %s", key) - } else if v, err := db.Get(key); err != ErrNotFound { - t.Fatalf("Expected %s on db.Get for missing key %s. 
Returned 0x%x", ErrNotFound, key, v) - } else if err := db.Delete(key); err != nil { - t.Fatalf("Unexpected error on db.Delete: %s", err) - } + has, err := db.Has(key) + require.NoError(err) + require.False(has) - if err := db.Put(key, value); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } + _, err = db.Get(key) + require.Equal(ErrNotFound, err) - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key) - } else if v, err := db.Get(key); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) - } + require.NoError(db.Delete(key)) + require.NoError(db.Put(key, value)) - if err := db.Close(); err != nil { - t.Fatalf("Unexpected error on db.Close: %s", err) - } + has, err = db.Has(key) + require.NoError(err) + require.True(has) - if _, err := db.Has(key); err != ErrClosed { - t.Fatalf("Expected %s on db.Has after close", ErrClosed) - } else if _, err := db.Get(key); err != ErrClosed { - t.Fatalf("Expected %s on db.Get after close", ErrClosed) - } else if err := db.Put(key, value); err != ErrClosed { - t.Fatalf("Expected %s on db.Put after close", ErrClosed) - } else if err := db.Delete(key); err != ErrClosed { - t.Fatalf("Expected %s on db.Delete after close", ErrClosed) - } else if err := db.Close(); err != ErrClosed { - t.Fatalf("Expected %s on db.Close after close", ErrClosed) - } + v, err := db.Get(key) + require.NoError(err) + require.Equal(value, v) + + require.NoError(db.Close()) + + _, err = db.Has(key) + require.Equal(ErrClosed, err) + + _, err = db.Get(key) + require.Equal(ErrClosed, err) + + require.Equal(ErrClosed, db.Put(key, value)) + require.Equal(ErrClosed, db.Delete(key)) + require.Equal(ErrClosed, db.Close()) } // TestMemorySafetyDatabase ensures it is safe to modify a key after passing it // to Database.Put 
and Database.Get. func TestMemorySafetyDatabase(t *testing.T, db Database) { - key := []byte("key") + require := require.New(t) + + key := []byte("1key") + keyCopy := slices.Clone(key) value := []byte("value") - key2 := []byte("key2") + key2 := []byte("2key") value2 := []byte("value2") // Put both K/V pairs in the database - if err := db.Put(key, value); err != nil { - t.Fatal(err) - } else if err := db.Put(key2, value2); err != nil { - t.Fatal(err) - } + require.NoError(db.Put(key, value)) + require.NoError(db.Put(key2, value2)) + // Get the value for [key] gotVal, err := db.Get(key) - if err != nil { - t.Fatalf("should have been able to get value but got %s", err) - } else if !bytes.Equal(gotVal, value) { - t.Fatal("got the wrong value") - } + require.NoError(err) + require.Equal(value, gotVal) + // Modify [key]; make sure the value we got before hasn't changed - key = key2 + key[0] = key2[0] gotVal2, err := db.Get(key) - switch { - case err != nil: - t.Fatal(err) - case !bytes.Equal(gotVal2, value2): - t.Fatal("got wrong value") - case !bytes.Equal(gotVal, value): - t.Fatal("value changed") - } + require.NoError(err) + require.Equal(value2, gotVal2) + require.Equal(value, gotVal) + // Reset [key] to its original value and make sure it's correct - key = []byte("key") + key[0] = keyCopy[0] gotVal, err = db.Get(key) - if err != nil { - t.Fatalf("should have been able to get value but got %s", err) - } else if !bytes.Equal(gotVal, value) { - t.Fatal("got the wrong value") - } + require.NoError(err) + require.Equal(value, gotVal) +} + +// TestNewBatchClosed tests to make sure that calling NewBatch on a closed +// database returns a batch that errors correctly. 
+func TestNewBatchClosed(t *testing.T, db Database) { + require := require.New(t) + + require.NoError(db.Close()) + + batch := db.NewBatch() + require.NotNil(batch) + + key := []byte("hello") + value := []byte("world") + + require.NoError(batch.Put(key, value)) + require.Positive(batch.Size()) + require.Equal(ErrClosed, batch.Write()) } // TestBatchPut tests to make sure that batched writes work as expected. func TestBatchPut(t *testing.T, db Database) { + require := require.New(t) + key := []byte("hello") value := []byte("world") batch := db.NewBatch() - if batch == nil { - t.Fatalf("db.NewBatch returned nil") - } + require.NotNil(batch) - if err := batch.Put(key, value); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if size := batch.Size(); size <= 0 { - t.Fatalf("batch.Size: Returned: %d ; Expected: > 0", size) - } + require.NoError(batch.Put(key, value)) + require.Positive(batch.Size()) + require.NoError(batch.Write()) - if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + has, err := db.Has(key) + require.NoError(err) + require.True(has) - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key) - } else if v, err := db.Get(key); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) - } else if err := db.Delete(key); err != nil { - t.Fatalf("Unexpected error on db.Delete: %s", err) - } + v, err := db.Get(key) + require.NoError(err) + require.Equal(value, v) - if batch = db.NewBatch(); batch == nil { - t.Fatalf("db.NewBatch returned nil") - } else if err := batch.Put(key, value); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Close(); err != nil { - t.Fatalf("Error while closing the database: %s", err) - } else if err := 
batch.Write(); err != ErrClosed { - t.Fatalf("Expected %s on batch.Write", ErrClosed) - } + require.NoError(db.Delete(key)) + + batch = db.NewBatch() + require.NotNil(batch) + + require.NoError(batch.Put(key, value)) + require.NoError(db.Close()) + require.Equal(ErrClosed, batch.Write()) } // TestBatchDelete tests to make sure that batched deletes work as expected. func TestBatchDelete(t *testing.T, db Database) { + require := require.New(t) + key := []byte("hello") value := []byte("world") - if err := db.Put(key, value); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } + require.NoError(db.Put(key, value)) batch := db.NewBatch() - if batch == nil { - t.Fatalf("db.NewBatch returned nil") - } + require.NotNil(batch) - if err := batch.Delete(key); err != nil { - t.Fatalf("Unexpected error on batch.Delete: %s", err) - } + require.NoError(batch.Delete(key)) + require.NoError(batch.Write()) - if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + has, err := db.Has(key) + require.NoError(err) + require.False(has) - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has unexpectedly returned true on key %s", key) - } else if v, err := db.Get(key); err != ErrNotFound { - t.Fatalf("Expected %s on db.Get for missing key %s. Returned 0x%x", ErrNotFound, key, v) - } else if err := db.Delete(key); err != nil { - t.Fatalf("Unexpected error on db.Delete: %s", err) - } + _, err = db.Get(key) + require.Equal(ErrNotFound, err) + + require.NoError(db.Delete(key)) } // TestMemorySafetyDatabase ensures it is safe to modify a key after passing it // to Batch.Put. 
func TestMemorySafetyBatch(t *testing.T, db Database) { + require := require.New(t) + key := []byte("hello") + keyCopy := slices.Clone(key) value := []byte("world") - valueCopy := []byte("world") + valueCopy := slices.Clone(value) batch := db.NewBatch() - if batch == nil { - t.Fatalf("db.NewBatch returned nil") - } + require.NotNil(batch) // Put a key in the batch - if err := batch.Put(key, value); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if size := batch.Size(); size <= 0 { - t.Fatalf("batch.Size: Returned: %d ; Expected: > 0", size) - } + require.NoError(batch.Put(key, value)) + require.Positive(batch.Size()) // Modify the key - keyCopy := key - key = []byte("jello") - if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + key[0] = 'j' + require.NoError(batch.Write()) // Make sure the original key was written to the database - if has, err := db.Has(keyCopy); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key) - } else if v, err := db.Get(keyCopy); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(valueCopy, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) - } + has, err := db.Has(keyCopy) + require.NoError(err) + require.True(has) + + v, err := db.Get(keyCopy) + require.NoError(err) + require.Equal(valueCopy, v) // Make sure the new key wasn't written to the database - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatal("database shouldn't have the new key") - } + has, err = db.Has(key) + require.NoError(err) + require.False(has) } // TestBatchReset tests to make sure that a batch drops un-written operations // when it is reset. 
func TestBatchReset(t *testing.T, db Database) { + require := require.New(t) + key := []byte("hello") value := []byte("world") - if err := db.Put(key, value); err != nil { - t.Fatalf("Unexpected error on db.Put: %s", err) - } + require.NoError(db.Put(key, value)) batch := db.NewBatch() - if batch == nil { - t.Fatalf("db.NewBatch returned nil") - } + require.NotNil(batch) - if err := batch.Delete(key); err != nil { - t.Fatalf("Unexpected error on batch.Delete: %s", err) - } + require.NoError(batch.Delete(key)) batch.Reset() - if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + require.Zero(batch.Size()) + require.NoError(batch.Write()) - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key) - } else if v, err := db.Get(key); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) - } + has, err := db.Has(key) + require.NoError(err) + require.True(has) + + v, err := db.Get(key) + require.NoError(err) + require.Equal(value, v) } // TestBatchReuse tests to make sure that a batch can be reused once it is // reset. 
func TestBatchReuse(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") @@ -368,100 +369,73 @@ func TestBatchReuse(t *testing.T, db Database) { value2 := []byte("world2") batch := db.NewBatch() - if batch == nil { - t.Fatalf("db.NewBatch returned nil") - } - - if err := batch.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } - - if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + require.NotNil(batch) - if err := db.Delete(key1); err != nil { - t.Fatalf("Unexpected error on database.Delete: %s", err) - } + require.NoError(batch.Put(key1, value1)) + require.NoError(batch.Write()) + require.NoError(db.Delete(key1)) - if has, err := db.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has unexpectedly returned true on key %s", key1) - } + has, err := db.Has(key1) + require.NoError(err) + require.False(has) batch.Reset() - if err := batch.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.Zero(batch.Size()) + require.NoError(batch.Put(key2, value2)) + require.NoError(batch.Write()) - if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + has, err = db.Has(key1) + require.NoError(err) + require.False(has) - if has, err := db.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has unexpectedly returned true on key %s", key1) - } else if has, err := db.Has(key2); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key2) - } else if v, err := db.Get(key2); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value2, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value2) - } + has, err = 
db.Has(key2) + require.NoError(err) + require.True(has) + + v, err := db.Get(key2) + require.NoError(err) + require.Equal(value2, v) } // TestBatchRewrite tests to make sure that write can be called multiple times // on a batch and the values will be updated correctly. func TestBatchRewrite(t *testing.T, db Database) { + require := require.New(t) + key := []byte("hello1") value := []byte("world1") batch := db.NewBatch() - if batch == nil { - t.Fatalf("db.NewBatch returned nil") - } + require.NotNil(batch) - if err := batch.Put(key, value); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } - - if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + require.NoError(batch.Put(key, value)) + require.NoError(batch.Write()) + require.NoError(db.Delete(key)) - if err := db.Delete(key); err != nil { - t.Fatalf("Unexpected error on database.Delete: %s", err) - } + has, err := db.Has(key) + require.NoError(err) + require.False(has) - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if has { - t.Fatalf("db.Has unexpectedly returned true on key %s", key) - } + require.NoError(batch.Write()) - if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + has, err = db.Has(key) + require.NoError(err) + require.True(has) - if has, err := db.Has(key); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key) - } else if v, err := db.Get(key); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) - } + v, err := db.Get(key) + require.NoError(err) + require.Equal(value, v) } // TestBatchReplay tests to make sure that batches will correctly replay their // contents. 
func TestBatchReplay(t *testing.T, db Database) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") @@ -469,64 +443,66 @@ func TestBatchReplay(t *testing.T, db Database) { value2 := []byte("world2") batch := db.NewBatch() - if batch == nil { - t.Fatalf("db.NewBatch returned nil") - } - - if err := batch.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := batch.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } - - secondBatch := db.NewBatch() - if secondBatch == nil { - t.Fatalf("db.NewBatch returned nil") - } - - if err := batch.Replay(secondBatch); err != nil { - t.Fatalf("Unexpected error on batch.Replay: %s", err) - } - - if err := secondBatch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) + require.NotNil(batch) + + require.NoError(batch.Put(key1, value1)) + require.NoError(batch.Put(key2, value2)) + require.NoError(batch.Delete(key1)) + require.NoError(batch.Delete(key2)) + require.NoError(batch.Put(key1, value2)) + + for i := 0; i < 2; i++ { + mockBatch := NewMockBatch(ctrl) + gomock.InOrder( + mockBatch.EXPECT().Put(key1, value1).Times(1), + mockBatch.EXPECT().Put(key2, value2).Times(1), + mockBatch.EXPECT().Delete(key1).Times(1), + mockBatch.EXPECT().Delete(key2).Times(1), + mockBatch.EXPECT().Put(key1, value2).Times(1), + ) + + require.NoError(batch.Replay(mockBatch)) } +} - if has, err := db.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key1) - } else if v, err := db.Get(key1); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value1, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value1) - } +// TestBatchReplayPropagateError tests to make sure that batches will correctly +// propagate any 
returned error during Replay. +func TestBatchReplayPropagateError(t *testing.T, db Database) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - thirdBatch := db.NewBatch() - if thirdBatch == nil { - t.Fatalf("db.NewBatch returned nil") - } + require := require.New(t) - if err := thirdBatch.Delete(key1); err != nil { - t.Fatalf("Unexpected error on batch.Delete: %s", err) - } else if err := thirdBatch.Delete(key2); err != nil { - t.Fatalf("Unexpected error on batch.Delete: %s", err) - } + key1 := []byte("hello1") + value1 := []byte("world1") - if err := db.Close(); err != nil { - t.Fatalf("Unexpected error on db.Close: %s", err) - } + key2 := []byte("hello2") + value2 := []byte("world2") - if err := batch.Replay(db); err != ErrClosed { - t.Fatalf("Expected %s on batch.Replay", ErrClosed) - } else if err := thirdBatch.Replay(db); err != ErrClosed { - t.Fatalf("Expected %s on batch.Replay", ErrClosed) - } + batch := db.NewBatch() + require.NotNil(batch) + + require.NoError(batch.Put(key1, value1)) + require.NoError(batch.Put(key2, value2)) + + mockBatch := NewMockBatch(ctrl) + gomock.InOrder( + mockBatch.EXPECT().Put(key1, value1).Return(ErrClosed).Times(1), + ) + require.Equal(ErrClosed, batch.Replay(mockBatch)) + + mockBatch = NewMockBatch(ctrl) + gomock.InOrder( + mockBatch.EXPECT().Put(key1, value1).Return(io.ErrClosedPipe).Times(1), + ) + require.Equal(io.ErrClosedPipe, batch.Replay(mockBatch)) } // TestBatchInner tests to make sure that inner can be used to write to the // database. 
func TestBatchInner(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") @@ -534,70 +510,54 @@ func TestBatchInner(t *testing.T, db Database) { value2 := []byte("world2") firstBatch := db.NewBatch() - if firstBatch == nil { - t.Fatalf("db.NewBatch returned nil") - } + require.NotNil(firstBatch) - if err := firstBatch.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(firstBatch.Put(key1, value1)) secondBatch := db.NewBatch() - if secondBatch == nil { - t.Fatalf("db.NewBatch returned nil") - } + require.NotNil(firstBatch) - if err := secondBatch.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(secondBatch.Put(key2, value2)) innerFirstBatch := firstBatch.Inner() + require.NotNil(innerFirstBatch) + innerSecondBatch := secondBatch.Inner() + require.NotNil(innerSecondBatch) - if err := innerFirstBatch.Replay(innerSecondBatch); err != nil { - t.Fatalf("Unexpected error on batch.Replay: %s", err) - } + require.NoError(innerFirstBatch.Replay(innerSecondBatch)) + require.NoError(innerSecondBatch.Write()) - if err := innerSecondBatch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + has, err := db.Has(key1) + require.NoError(err) + require.True(has) - if has, err := db.Has(key1); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key1) - } else if v, err := db.Get(key1); err != nil { - t.Fatalf("Unexpected error on db.Get: %s", err) - } else if !bytes.Equal(value1, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value1) - } else if has, err := db.Has(key2); err != nil { - t.Fatalf("Unexpected error on db.Has: %s", err) - } else if !has { - t.Fatalf("db.Has unexpectedly returned false on key %s", key2) - } else if v, err := db.Get(key2); err != nil { - t.Fatalf("Unexpected 
error on db.Get: %s", err) - } else if !bytes.Equal(value2, v) { - t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value2) - } + v, err := db.Get(key1) + require.NoError(err) + require.Equal(value1, v) + + has, err = db.Has(key2) + require.NoError(err) + require.True(has) + + v, err = db.Get(key2) + require.NoError(err) + require.Equal(value2, v) } // TestBatchLargeSize tests to make sure that the batch can support a large // amount of entries. func TestBatchLargeSize(t *testing.T, db Database) { - totalSize := 8 * units.MiB // 8 MiB - elementSize := 4 * units.KiB // 4 KiB - pairSize := 2 * elementSize // 8 KiB - - bytes := make([]byte, totalSize) - _, err := rand.Read(bytes) - if err != nil { - t.Fatal(err) - } + require := require.New(t) + + totalSize := 8 * units.MiB + elementSize := 4 * units.KiB + pairSize := 2 * elementSize // 8 KiB + + bytes := utils.RandomBytes(totalSize) batch := db.NewBatch() - if batch == nil { - t.Fatalf("db.NewBatch returned nil") - } + require.NotNil(batch) for len(bytes) > pairSize { key := bytes[:elementSize] @@ -606,141 +566,108 @@ func TestBatchLargeSize(t *testing.T, db Database) { value := bytes[:elementSize] bytes = bytes[elementSize:] - if err := batch.Put(key, value); err != nil { - t.Fatal(err) - } + require.NoError(batch.Put(key, value)) } - if err := batch.Write(); err != nil { - t.Fatalf("Unexpected error on batch.Write: %s", err) - } + require.NoError(batch.Write()) } // TestIteratorSnapshot tests to make sure the database iterates over a snapshot // of the database at the time of the iterator creation. 
func TestIteratorSnapshot(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") key2 := []byte("hello2") value2 := []byte("world2") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(db.Put(key1, value1)) iterator := db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) + defer iterator.Release() - if err := db.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(db.Put(key2, value2)) + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key1) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) - } else if value := iterator.Value(); !bytes.Equal(value, value1) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.NoError(iterator.Error()) } // TestIterator tests to make sure the database iterates over the database // contents lexicographically. 
func TestIterator(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") key2 := []byte("hello2") value2 := []byte("world2") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) iterator := db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) + defer iterator.Release() - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key1) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) - } else if value := iterator.Value(); !bytes.Equal(value, value1) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key2) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) - } else if value := iterator.Value(); !bytes.Equal(value, value2) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) + + require.True(iterator.Next()) + require.Equal(key2, 
iterator.Key()) + require.Equal(value2, iterator.Value()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.NoError(iterator.Error()) } // TestIteratorStart tests to make sure the the iterator can be configured to // start mid way through the database. func TestIteratorStart(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") key2 := []byte("hello2") value2 := []byte("world2") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) iterator := db.NewIteratorWithStart(key2) - if iterator == nil { - t.Fatalf("db.NewIteratorWithStart returned nil") - } + require.NotNil(iterator) + defer iterator.Release() - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key2) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key2) - } else if value := iterator.Value(); !bytes.Equal(value, value2) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value2) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.True(iterator.Next()) + require.Equal(key2, iterator.Key()) + require.Equal(value2, iterator.Value()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + 
require.NoError(iterator.Error()) } // TestIteratorPrefix tests to make sure the iterator can be configured to skip // keys missing the provided prefix. func TestIteratorPrefix(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello") value1 := []byte("world1") @@ -750,40 +677,30 @@ func TestIteratorPrefix(t *testing.T, db Database) { key3 := []byte("joy") value3 := []byte("world3") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key3, value3); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + require.NoError(db.Put(key3, value3)) iterator := db.NewIteratorWithPrefix([]byte("h")) - if iterator == nil { - t.Fatalf("db.NewIteratorWithPrefix returned nil") - } + require.NotNil(iterator) + defer iterator.Release() - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key1) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) - } else if value := iterator.Value(); !bytes.Equal(value, value1) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) + + require.False(iterator.Next()) + 
require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.NoError(iterator.Error()) } // TestIteratorStartPrefix tests to make sure that the iterator can start mid // way through the database while skipping a prefix. func TestIteratorStartPrefix(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") @@ -793,46 +710,34 @@ func TestIteratorStartPrefix(t *testing.T, db Database) { key3 := []byte("hello3") value3 := []byte("world3") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key3, value3); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + require.NoError(db.Put(key3, value3)) iterator := db.NewIteratorWithStartAndPrefix(key1, []byte("h")) - if iterator == nil { - t.Fatalf("db.NewIteratorWithStartAndPrefix returned nil") - } + require.NotNil(iterator) + defer iterator.Release() - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key1) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key1) - } else if value := iterator.Value(); !bytes.Equal(value, value1) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value1) - } else if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } else if key := iterator.Key(); !bytes.Equal(key, key3) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", key, key3) - } else if value := iterator.Value(); !bytes.Equal(value, value3) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", value, value3) - } else if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if 
key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != nil { - t.Fatalf("iterator.Error Returned: %s ; Expected: nil", err) - } + require.True(iterator.Next()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) + + require.True(iterator.Next()) + require.Equal(key3, iterator.Key()) + require.Equal(value3, iterator.Value()) + + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.NoError(iterator.Error()) } // TestIteratorMemorySafety tests to make sure that keys can values are able to // be modified from the returned iterator. func TestIteratorMemorySafety(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") @@ -842,18 +747,13 @@ func TestIteratorMemorySafety(t *testing.T, db Database) { key3 := []byte("hello3") value3 := []byte("world3") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key3, value3); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + require.NoError(db.Put(key3, value3)) iterator := db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) + defer iterator.Release() keys := [][]byte{} @@ -879,99 +779,68 @@ func TestIteratorMemorySafety(t *testing.T, db Database) { expectedKey := expectedKeys[i] expectedValue := expectedValues[i] - if !bytes.Equal(key, expectedKey) { - t.Fatalf("Wrong key") - } - if !bytes.Equal(value, expectedValue) { - t.Fatalf("Wrong key") - } + 
require.Equal(expectedKey, key) + require.Equal(expectedValue, value) } } // TestIteratorClosed tests to make sure that an iterator that was created with // a closed database will report a closed error correctly. func TestIteratorClosed(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } - - if err := db.Close(); err != nil { - t.Fatalf("Unexpected error on db.Close: %s", err) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Close()) { iterator := db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) + defer iterator.Release() - if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != ErrClosed { - t.Fatalf("Expected %s on iterator.Error", ErrClosed) - } + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.Equal(ErrClosed, iterator.Error()) } { iterator := db.NewIteratorWithPrefix(nil) - if iterator == nil { - t.Fatalf("db.NewIteratorWithPrefix returned nil") - } + require.NotNil(iterator) + defer iterator.Release() - if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != ErrClosed { - t.Fatalf("Expected %s on iterator.Error", ErrClosed) - } + 
require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.Equal(ErrClosed, iterator.Error()) } { iterator := db.NewIteratorWithStart(nil) - if iterator == nil { - t.Fatalf("db.NewIteratorWithStart returned nil") - } + require.NotNil(iterator) + defer iterator.Release() - if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != ErrClosed { - t.Fatalf("Expected %s on iterator.Error", ErrClosed) - } + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.Equal(ErrClosed, iterator.Error()) } { iterator := db.NewIteratorWithStartAndPrefix(nil, nil) - if iterator == nil { - t.Fatalf("db.NewIteratorWithStartAndPrefix returned nil") - } + require.NotNil(iterator) + defer iterator.Release() - if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } else if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } else if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } else if err := iterator.Error(); err != ErrClosed { - t.Fatalf("Expected %s on iterator.Error", ErrClosed) - } + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.Equal(ErrClosed, iterator.Error()) } } @@ -981,86 +850,62 @@ func TestIteratorClosed(t *testing.T, db Database) { // Additionally tests that an iterator that has already called Next() can still serve // its current value after the underlying DB was closed. 
func TestIteratorError(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") + key2 := []byte("hello2") value2 := []byte("world2") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } - if err := db.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) iterator := db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) + defer iterator.Release() // Call Next() and ensure that if the database is closed, the iterator // can still report the current contents. - if !iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } - if err := db.Close(); err != nil { - t.Fatalf("Unexpected error on db.Close: %s", err) - } - - if itKey := iterator.Key(); !bytes.Equal(itKey, key1) { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: 0x%x", itKey, key1) - } - if itValue := iterator.Value(); !bytes.Equal(itValue, value1) { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: 0x%x", itValue, value1) - } + require.True(iterator.Next()) + require.NoError(db.Close()) + require.Equal(key1, iterator.Key()) + require.Equal(value1, iterator.Value()) // Subsequent calls to the iterator should return false and report an error - if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", true, false) - } - if err := iterator.Error(); err != ErrClosed { - t.Fatalf("iterator.Error Returned: %v ; Expected: %v", err, ErrClosed) - } + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.Equal(ErrClosed, iterator.Error()) } // TestIteratorErrorAfterRelease tests to make sure that an iterator that was // released still reports the error correctly. 
func TestIteratorErrorAfterRelease(t *testing.T, db Database) { + require := require.New(t) + key := []byte("hello1") value := []byte("world1") - if err := db.Put(key, value); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } - - if err := db.Close(); err != nil { - t.Fatalf("Unexpected error on db.Close: %s", err) - } + require.NoError(db.Put(key, value)) + require.NoError(db.Close()) iterator := db.NewIterator() - if iterator == nil { - t.Fatalf("db.NewIterator returned nil") - } + require.NotNil(iterator) iterator.Release() - if iterator.Next() { - t.Fatalf("iterator.Next Returned: %v ; Expected: %v", false, true) - } - if key := iterator.Key(); key != nil { - t.Fatalf("iterator.Key Returned: 0x%x ; Expected: nil", key) - } - if value := iterator.Value(); value != nil { - t.Fatalf("iterator.Value Returned: 0x%x ; Expected: nil", value) - } - if err := iterator.Error(); err != ErrClosed { - t.Fatalf("Expected %s on iterator.Error", ErrClosed) - } + require.False(iterator.Next()) + require.Nil(iterator.Key()) + require.Nil(iterator.Value()) + require.Equal(ErrClosed, iterator.Error()) } // TestCompactNoPanic tests to make sure compact never panics. 
func TestCompactNoPanic(t *testing.T, db Database) { + require := require.New(t) + key1 := []byte("hello1") value1 := []byte("world1") @@ -1070,25 +915,13 @@ func TestCompactNoPanic(t *testing.T, db Database) { key3 := []byte("hello3") value3 := []byte("world3") - if err := db.Put(key1, value1); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key2, value2); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } else if err := db.Put(key3, value3); err != nil { - t.Fatalf("Unexpected error on batch.Put: %s", err) - } - - if err := db.Compact(nil, nil); err != nil { - t.Fatalf("Unexpected error on db.Compact") - } - - if err := db.Close(); err != nil { - t.Fatalf("Unexpected error on db.Close: %s", err) - } + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + require.NoError(db.Put(key3, value3)) - if err := db.Compact(nil, nil); err != ErrClosed { - t.Fatalf("Expected error %s on db.Close but got %s", ErrClosed, err) - } + require.NoError(db.Compact(nil, nil)) + require.NoError(db.Close()) + require.Equal(ErrClosed, db.Compact(nil, nil)) } // TestClear tests to make sure the deletion helper works as expected. @@ -1104,28 +937,21 @@ func TestClear(t *testing.T, db Database) { key3 := []byte("hello3") value3 := []byte("world3") - err := db.Put(key1, value1) - require.NoError(err) - - err = db.Put(key2, value2) - require.NoError(err) - - err = db.Put(key3, value3) - require.NoError(err) + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + require.NoError(db.Put(key3, value3)) count, err := Count(db) require.NoError(err) require.Equal(3, count) - err = Clear(db, db) - require.NoError(err) + require.NoError(Clear(db, db)) count, err = Count(db) require.NoError(err) require.Equal(0, count) - err = db.Close() - require.NoError(err) + require.NoError(db.Close()) } // TestClearPrefix tests to make sure prefix deletion works as expected. 
@@ -1141,21 +967,15 @@ func TestClearPrefix(t *testing.T, db Database) { key3 := []byte("hello3") value3 := []byte("world3") - err := db.Put(key1, value1) - require.NoError(err) - - err = db.Put(key2, value2) - require.NoError(err) - - err = db.Put(key3, value3) - require.NoError(err) + require.NoError(db.Put(key1, value1)) + require.NoError(db.Put(key2, value2)) + require.NoError(db.Put(key3, value3)) count, err := Count(db) require.NoError(err) require.Equal(3, count) - err = ClearPrefix(db, db, []byte("hello")) - require.NoError(err) + require.NoError(ClearPrefix(db, db, []byte("hello"))) count, err = Count(db) require.NoError(err) @@ -1173,8 +993,7 @@ func TestClearPrefix(t *testing.T, db Database) { require.NoError(err) require.False(has) - err = db.Close() - require.NoError(err) + require.NoError(db.Close()) } func TestModifyValueAfterPut(t *testing.T, db Database) { @@ -1182,10 +1001,9 @@ func TestModifyValueAfterPut(t *testing.T, db Database) { key := []byte{1} value := []byte{1, 2} - originalValue := utils.CopyBytes(value) + originalValue := slices.Clone(value) - err := db.Put(key, value) - require.NoError(err) + require.NoError(db.Put(key, value)) // Modify the value that was Put into the database // to see if the database copied the value correctly. @@ -1200,17 +1018,15 @@ func TestModifyValueAfterBatchPut(t *testing.T, db Database) { key := []byte{1} value := []byte{1, 2} - originalValue := utils.CopyBytes(value) + originalValue := slices.Clone(value) batch := db.NewBatch() - err := batch.Put(key, value) - require.NoError(err) + require.NoError(batch.Put(key, value)) // Modify the value that was Put into the Batch and then Write the // batch to the database. value[0] = 2 - err = batch.Write() - require.NoError(err) + require.NoError(batch.Write()) // Verify that the value written to the database contains matches the original // value of the byte slice when Put was called. 
@@ -1224,11 +1040,10 @@ func TestModifyValueAfterBatchPutReplay(t *testing.T, db Database) { key := []byte{1} value := []byte{1, 2} - originalValue := utils.CopyBytes(value) + originalValue := slices.Clone(value) batch := db.NewBatch() - err := batch.Put(key, value) - require.NoError(err) + require.NoError(batch.Put(key, value)) // Modify the value that was Put into the Batch and then Write the // batch to the database. @@ -1236,10 +1051,8 @@ func TestModifyValueAfterBatchPutReplay(t *testing.T, db Database) { // Create a new batch and replay the batch onto this one before writing it to the DB. replayBatch := db.NewBatch() - err = batch.Replay(replayBatch) - require.NoError(err) - err = replayBatch.Write() - require.NoError(err) + require.NoError(batch.Replay(replayBatch)) + require.NoError(replayBatch.Write()) // Verify that the value written to the database contains matches the original // value of the byte slice when Put was called. @@ -1306,3 +1119,46 @@ func runConcurrentBatches( } return eg.Wait() } + +func TestPutGetEmpty(t *testing.T, db Database) { + require := require.New(t) + + key := []byte("hello") + + require.NoError(db.Put(key, nil)) + + value, err := db.Get(key) + require.NoError(err) + require.Empty(value) // May be nil or empty byte slice. + + require.NoError(db.Put(key, []byte{})) + + value, err = db.Get(key) + require.NoError(err) + require.Empty(value) // May be nil or empty byte slice. 
+} + +func FuzzKeyValue(f *testing.F, db Database) { + f.Fuzz(func(t *testing.T, key []byte, value []byte) { + require := require.New(t) + + require.NoError(db.Put(key, value)) + + exists, err := db.Has(key) + require.NoError(err) + require.True(exists) + + gotVal, err := db.Get(key) + require.NoError(err) + require.True(bytes.Equal(value, gotVal)) + + require.NoError(db.Delete(key)) + + exists, err = db.Has(key) + require.NoError(err) + require.False(exists) + + _, err = db.Get(key) + require.Equal(ErrNotFound, err) + }) +} diff --git a/avalanchego/database/versiondb/db.go b/avalanchego/database/versiondb/db.go index a9f5a3a3..d65dca94 100644 --- a/avalanchego/database/versiondb/db.go +++ b/avalanchego/database/versiondb/db.go @@ -1,28 +1,25 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package versiondb import ( - "sort" + "context" "strings" "sync" + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/nodb" - "github.com/ava-labs/avalanchego/utils" -) - -const ( - iterativeDeleteThreshold = 512 ) var ( - _ database.Database = &Database{} - _ Commitable = &Database{} - _ database.Batch = &batch{} - _ database.Iterator = &iterator{} + _ database.Database = (*Database)(nil) + _ Commitable = (*Database)(nil) + _ database.Batch = (*batch)(nil) + _ database.Iterator = (*iterator)(nil) ) // Commitable defines the interface that specifies that something may be @@ -80,7 +77,7 @@ func (db *Database) Get(key []byte) ([]byte, error) { if val.delete { return nil, database.ErrNotFound } - return utils.CopyBytes(val.value), nil + return slices.Clone(val.value), nil } return db.db.Get(key) } @@ -92,7 +89,7 @@ func (db *Database) Put(key, value []byte) error { if db.mem == nil { return database.ErrClosed } - 
db.mem[string(key)] = valueDelete{value: utils.CopyBytes(value)} + db.mem[string(key)] = valueDelete{value: slices.Clone(value)} return nil } @@ -107,7 +104,9 @@ func (db *Database) Delete(key []byte) error { return nil } -func (db *Database) NewBatch() database.Batch { return &batch{db: db} } +func (db *Database) NewBatch() database.Batch { + return &batch{db: db} +} func (db *Database) NewIterator() database.Iterator { return db.NewIteratorWithStartAndPrefix(nil, nil) @@ -126,7 +125,9 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database defer db.lock.RUnlock() if db.mem == nil { - return &nodb.Iterator{Err: database.ErrClosed} + return &database.IteratorError{ + Err: database.ErrClosed, + } } startString := string(start) @@ -137,7 +138,7 @@ func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database keys = append(keys, key) } } - sort.Strings(keys) // Keys need to be in sorted order + slices.Sort(keys) // Keys need to be in sorted order values := make([]valueDelete, len(keys)) for i, key := range keys { values[i] = db.mem[key] @@ -209,15 +210,7 @@ func (db *Database) Abort() { } func (db *Database) abort() { - // If there are a lot of keys, clear the map by just allocating a new one - if len(db.mem) > iterativeDeleteThreshold { - db.mem = make(map[string]valueDelete, memdb.DefaultSize) - return - } - // If there aren't many keys, clear the map iteratively - for key := range db.mem { - delete(db.mem, key) - } + maps.Clear(db.mem) } // CommitBatch returns a batch that contains all uncommitted puts/deletes. 
@@ -272,42 +265,22 @@ func (db *Database) isClosed() bool { return db.db == nil } -func (db *Database) HealthCheck() (interface{}, error) { +func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { db.lock.RLock() defer db.lock.RUnlock() if db.mem == nil { return nil, database.ErrClosed } - return db.db.HealthCheck() -} - -type keyValue struct { - key []byte - value []byte - delete bool + return db.db.HealthCheck(ctx) } type batch struct { - db *Database - writes []keyValue - size int -} + database.BatchOps -func (b *batch) Put(key, value []byte) error { - b.writes = append(b.writes, keyValue{utils.CopyBytes(key), utils.CopyBytes(value), false}) - b.size += len(key) + len(value) - return nil -} - -func (b *batch) Delete(key []byte) error { - b.writes = append(b.writes, keyValue{utils.CopyBytes(key), nil, true}) - b.size += len(key) - return nil + db *Database } -func (b *batch) Size() int { return b.size } - func (b *batch) Write() error { b.db.lock.Lock() defer b.db.lock.Unlock() @@ -316,40 +289,19 @@ func (b *batch) Write() error { return database.ErrClosed } - for _, kv := range b.writes { - b.db.mem[string(kv.key)] = valueDelete{ - value: kv.value, - delete: kv.delete, + for _, op := range b.Ops { + b.db.mem[string(op.Key)] = valueDelete{ + value: op.Value, + delete: op.Delete, } } return nil } -func (b *batch) Reset() { - if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { - b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) - } else { - b.writes = b.writes[:0] - } - b.size = 0 -} - -func (b *batch) Replay(w database.KeyValueWriterDeleter) error { - for _, kv := range b.writes { - if kv.delete { - if err := w.Delete(kv.key); err != nil { - return err - } - } else if err := w.Put(kv.key, kv.value); err != nil { - return err - } - } - return nil +func (b *batch) Inner() database.Batch { + return b } -// Inner returns itself -func (b *batch) Inner() database.Batch { return b } - // iterator walks over 
both the in memory database and the underlying database // at the same time. type iterator struct { @@ -455,9 +407,13 @@ func (it *iterator) Error() error { return it.Iterator.Error() } -func (it *iterator) Key() []byte { return it.key } +func (it *iterator) Key() []byte { + return it.key +} -func (it *iterator) Value() []byte { return it.value } +func (it *iterator) Value() []byte { + return it.value +} func (it *iterator) Release() { it.key = nil diff --git a/avalanchego/database/versiondb/db_test.go b/avalanchego/database/versiondb/db_test.go index 558b1f5b..417fc1ec 100644 --- a/avalanchego/database/versiondb/db_test.go +++ b/avalanchego/database/versiondb/db_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package versiondb @@ -18,6 +18,13 @@ func TestInterface(t *testing.T) { } } +func FuzzInterface(f *testing.F) { + for _, test := range database.FuzzTests { + baseDB := memdb.New() + test(f, New(baseDB)) + } +} + func TestIterate(t *testing.T) { baseDB := memdb.New() db := New(baseDB) @@ -379,7 +386,7 @@ func BenchmarkInterface(b *testing.B) { baseDB := memdb.New() db := New(baseDB) bench(b, db, "versiondb", keys, values) - db.Close() + _ = db.Close() } } } diff --git a/avalanchego/genesis/aliases.go b/avalanchego/genesis/aliases.go index bdcca296..b12e50d6 100644 --- a/avalanchego/genesis/aliases.go +++ b/avalanchego/genesis/aliases.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/avalanchego/genesis/beacons.go b/avalanchego/genesis/beacons.go index e613b19c..191dc067 100644 --- a/avalanchego/genesis/beacons.go +++ b/avalanchego/genesis/beacons.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/avalanchego/genesis/config.go b/avalanchego/genesis/config.go index 12610e2d..5dfe8984 100644 --- a/avalanchego/genesis/config.go +++ b/avalanchego/genesis/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -12,13 +12,15 @@ import ( "path/filepath" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/wrappers" - - safemath "github.com/ava-labs/avalanchego/utils/math" ) +var _ utils.Sortable[Allocation] = Allocation{} + type LockedAmount struct { Amount uint64 `json:"amount"` Locktime uint64 `json:"locktime"` @@ -46,6 +48,11 @@ func (a Allocation) Unparse(networkID uint32) (UnparsedAllocation, error) { return ua, err } +func (a Allocation) Less(other Allocation) bool { + return a.InitialAmount < other.InitialAmount || + (a.InitialAmount == other.InitialAmount && a.AVAXAddr.Less(other.AVAXAddr)) +} + type Staker struct { NodeID ids.NodeID `json:"nodeID"` RewardAddress ids.ShortID `json:"rewardAddress"` @@ -131,12 +138,12 @@ func (c *Config) InitialSupply() (uint64, error) { initialSupply := uint64(0) for _, allocation := range c.Allocations { - newInitialSupply, err := safemath.Add64(initialSupply, allocation.InitialAmount) + newInitialSupply, err := math.Add64(initialSupply, allocation.InitialAmount) if err != nil { return 0, err } for _, unlock := range allocation.UnlockSchedule { - newInitialSupply, err = safemath.Add64(newInitialSupply, unlock.Amount) + newInitialSupply, err = math.Add64(newInitialSupply, unlock.Amount) if err != nil { return 
0, err } diff --git a/avalanchego/genesis/config_test.go b/avalanchego/genesis/config_test.go new file mode 100644 index 00000000..c7fea58c --- /dev/null +++ b/avalanchego/genesis/config_test.go @@ -0,0 +1,67 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package genesis + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestAllocationLess(t *testing.T) { + type test struct { + name string + alloc1 Allocation + alloc2 Allocation + expected bool + } + tests := []test{ + { + name: "equal", + alloc1: Allocation{}, + alloc2: Allocation{}, + expected: false, + }, + { + name: "first initial amount smaller", + alloc1: Allocation{}, + alloc2: Allocation{ + InitialAmount: 1, + }, + expected: true, + }, + { + name: "first initial amount larger", + alloc1: Allocation{ + InitialAmount: 1, + }, + alloc2: Allocation{}, + expected: false, + }, + { + name: "first bytes smaller", + alloc1: Allocation{}, + alloc2: Allocation{ + AVAXAddr: ids.ShortID{1}, + }, + expected: true, + }, + { + name: "first bytes larger", + alloc1: Allocation{ + AVAXAddr: ids.ShortID{1}, + }, + alloc2: Allocation{}, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + require.Equal(tt.expected, tt.alloc1.Less(tt.alloc2)) + }) + } +} diff --git a/avalanchego/genesis/genesis.go b/avalanchego/genesis/genesis.go index 593282d4..083d4c4e 100644 --- a/avalanchego/genesis/genesis.go +++ b/avalanchego/genesis/genesis.go @@ -1,20 +1,20 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package genesis import ( - "bytes" "errors" "fmt" - "sort" "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/nftfx" @@ -33,6 +33,7 @@ const ( ) var ( + errStakeDurationTooHigh = errors.New("initial stake duration larger than maximum configured") errNoInitiallyStakedFunds = errors.New("initial staked funds cannot be empty") errNoSupply = errors.New("initial supply must be > 0") errNoStakeDuration = errors.New("initial stake duration must be > 0") @@ -52,8 +53,8 @@ func validateInitialStakedFunds(config *Config) error { return errNoInitiallyStakedFunds } - allocationSet := ids.ShortSet{} - initialStakedFundsSet := ids.ShortSet{} + allocationSet := set.Set[ids.ShortID]{} + initialStakedFundsSet := set.Set[ids.ShortID]{} for _, allocation := range config.Allocations { // It is ok to have duplicates as different // ethAddrs could claim to the same avaxAddr. @@ -106,7 +107,7 @@ func validateInitialStakedFunds(config *Config) error { // validateConfig returns an error if the provided // *Config is not considered valid. -func validateConfig(networkID uint32, config *Config) error { +func validateConfig(networkID uint32, config *Config, stakingCfg *StakingConfig) error { if networkID != config.NetworkID { return fmt.Errorf( "networkID %d specified but genesis config contains networkID %d", @@ -139,6 +140,12 @@ func validateConfig(networkID uint32, config *Config) error { return errNoStakeDuration } + // Initial stake duration of genesis validators must be + // not larger than maximal stake duration specified for any validator. 
+ if config.InitialStakeDuration > uint64(stakingCfg.MaxStakeDuration.Seconds()) { + return errStakeDurationTooHigh + } + if len(config.InitialStakers) == 0 { return errNoStakers } @@ -180,10 +187,11 @@ func validateConfig(networkID uint32, config *Config) error { // loads the network genesis data from the config at [filepath]. // // FromFile returns: +// // 1. The byte representation of the genesis state of the platform chain // (ie the genesis state of the network) // 2. The asset ID of AVAX -func FromFile(networkID uint32, filepath string) ([]byte, ids.ID, error) { +func FromFile(networkID uint32, filepath string, stakingCfg *StakingConfig) ([]byte, ids.ID, error) { switch networkID { case constants.FlareID, constants.SongbirdID, constants.CostwoID, constants.CostonID, constants.LocalFlareID, constants.LocalID: return nil, ids.ID{}, fmt.Errorf( @@ -198,7 +206,7 @@ func FromFile(networkID uint32, filepath string) ([]byte, ids.ID, error) { return nil, ids.ID{}, fmt.Errorf("unable to load provided genesis config at %s: %w", filepath, err) } - if err := validateConfig(networkID, config); err != nil { + if err := validateConfig(networkID, config, stakingCfg); err != nil { return nil, ids.ID{}, fmt.Errorf("genesis config validation failed: %w", err) } @@ -221,10 +229,11 @@ func FromFile(networkID uint32, filepath string) ([]byte, ids.ID, error) { // loads the network genesis data from [genesisContent]. // // FromFlag returns: +// // 1. The byte representation of the genesis state of the platform chain // (ie the genesis state of the network) // 2. 
The asset ID of AVAX -func FromFlag(networkID uint32, genesisContent string) ([]byte, ids.ID, error) { +func FromFlag(networkID uint32, genesisContent string, stakingCfg *StakingConfig) ([]byte, ids.ID, error) { switch networkID { case constants.FlareID, constants.SongbirdID, constants.CostwoID, constants.CostonID, constants.LocalFlareID, constants.LocalID: return nil, ids.ID{}, fmt.Errorf( @@ -239,7 +248,7 @@ func FromFlag(networkID uint32, genesisContent string) ([]byte, ids.ID, error) { return nil, ids.ID{}, fmt.Errorf("unable to load genesis content from flag: %w", err) } - if err := validateConfig(networkID, customConfig); err != nil { + if err := validateConfig(networkID, customConfig, stakingCfg); err != nil { return nil, ids.ID{}, fmt.Errorf("genesis config validation failed: %w", err) } @@ -247,6 +256,7 @@ func FromFlag(networkID uint32, genesisContent string) ([]byte, ids.ID, error) { } // FromConfig returns: +// // 1. The byte representation of the genesis state of the platform chain // (ie the genesis state of the network) // 2. The asset ID of AVAX @@ -274,7 +284,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { xAllocations = append(xAllocations, allocation) } } - sortXAllocation(xAllocations) + utils.Sort(xAllocations) for _, allocation := range xAllocations { addr, err := address.FormatBech32(hrp, allocation.AVAXAddr.Bytes()) @@ -322,7 +332,7 @@ func FromConfig(config *Config) ([]byte, ids.ID, error) { return nil, ids.ID{}, fmt.Errorf("couldn't calculate the initial supply: %w", err) } - initiallyStaked := ids.ShortSet{} + initiallyStaked := set.Set[ids.ShortID]{} initiallyStaked.Add(config.InitialStakedFunds...) 
skippedAllocations := []Allocation(nil) @@ -561,16 +571,3 @@ func AVAXAssetID(avmGenesisBytes []byte) (ids.ID, error) { } return tx.ID(), nil } - -type innerSortXAllocation []Allocation - -func (xa innerSortXAllocation) Less(i, j int) bool { - return xa[i].InitialAmount < xa[j].InitialAmount || - (xa[i].InitialAmount == xa[j].InitialAmount && - bytes.Compare(xa[i].AVAXAddr.Bytes(), xa[j].AVAXAddr.Bytes()) == -1) -} - -func (xa innerSortXAllocation) Len() int { return len(xa) } -func (xa innerSortXAllocation) Swap(i, j int) { xa[j], xa[i] = xa[i], xa[j] } - -func sortXAllocation(a []Allocation) { sort.Sort(innerSortXAllocation(a)) } diff --git a/avalanchego/genesis/genesis_local.go b/avalanchego/genesis/genesis_local.go index ae66c0c4..03578e6a 100644 --- a/avalanchego/genesis/genesis_local.go +++ b/avalanchego/genesis/genesis_local.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -6,15 +6,13 @@ package genesis import ( "time" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/reward" - // "github.com/flare-foundation/flare/utils/units" - // "github.com/flare-foundation/flare/vms/platformvm/reward" ) var ( - EWOQKey *crypto.PrivateKeySECP256K1R + EWOQKey *secp256k1.PrivateKey ) var ( diff --git a/avalanchego/genesis/genesis_mainnet.go b/avalanchego/genesis/genesis_mainnet.go index 236d68c5..4e55c891 100644 --- a/avalanchego/genesis/genesis_mainnet.go +++ b/avalanchego/genesis/genesis_mainnet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package genesis diff --git a/avalanchego/genesis/genesis_test.go b/avalanchego/genesis/genesis_test.go index ebfb0439..d11e2436 100644 --- a/avalanchego/genesis/genesis_test.go +++ b/avalanchego/genesis/genesis_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -9,6 +9,7 @@ import ( "fmt" "path/filepath" "testing" + "time" _ "embed" @@ -27,6 +28,10 @@ var ( invalidGenesisConfigJSON = []byte(`{ "networkID": 9999}}}} }`) + + genesisStakingCfg = &StakingConfig{ + MaxStakeDuration: 365 * 24 * time.Hour, + } ) func TestValidateConfig(t *testing.T) { @@ -64,7 +69,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.Allocations = []Allocation{} return &thisConfig }(), - err: "initial supply must be > 0", + err: errNoSupply.Error(), }, "no initial stakers": { networkID: 162, @@ -73,7 +78,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.InitialStakers = []Staker{} return &thisConfig }(), - err: "initial stakers must be > 0", + err: errNoStakers.Error(), }, "invalid initial stake duration": { networkID: 162, @@ -82,7 +87,16 @@ func TestValidateConfig(t *testing.T) { thisConfig.InitialStakeDuration = 0 return &thisConfig }(), - err: "initial stake duration must be > 0", + err: errNoStakeDuration.Error(), + }, + "too large initial stake duration": { + networkID: 12345, + config: func() *Config { + thisConfig := LocalConfig + thisConfig.InitialStakeDuration = uint64(genesisStakingCfg.MaxStakeDuration+time.Second) / uint64(time.Second) + return &thisConfig + }(), + err: errStakeDurationTooHigh.Error(), }, "invalid stake offset": { networkID: 14, @@ -100,7 +114,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.InitialStakedFunds = []ids.ShortID(nil) return &thisConfig }(), - err: "initial staked funds cannot be empty", + err: errNoInitiallyStakedFunds.Error(), }, "duplicate initial staked funds": { 
networkID: 162, @@ -118,7 +132,7 @@ func TestValidateConfig(t *testing.T) { thisConfig.CChainGenesis = "" return &thisConfig }(), - err: "C-Chain genesis cannot be empty", + err: errNoCChainGenesis.Error(), }, "empty message": { networkID: 162, @@ -134,7 +148,7 @@ func TestValidateConfig(t *testing.T) { t.Run(name, func(t *testing.T) { require := require.New(t) - err := validateConfig(test.networkID, test.config) + err := validateConfig(test.networkID, test.config, genesisStakingCfg) if len(test.err) > 0 { require.Error(err) require.Contains(err.Error(), test.err) @@ -215,7 +229,7 @@ func TestGenesisFromFile(t *testing.T) { customFile = test.missingFilepath } - genesisBytes, _, err := FromFile(test.networkID, customFile) + genesisBytes, _, err := FromFile(test.networkID, customFile, genesisStakingCfg) if len(test.err) > 0 { require.Error(err) require.Contains(err.Error(), test.err) @@ -304,7 +318,7 @@ func TestGenesisFromFlag(t *testing.T) { } content := base64.StdEncoding.EncodeToString(genBytes) - genesisBytes, _, err := FromFlag(test.networkID, content) + genesisBytes, _, err := FromFlag(test.networkID, content, genesisStakingCfg) if len(test.err) > 0 { require.Error(err) require.Contains(err.Error(), test.err) diff --git a/avalanchego/genesis/params.go b/avalanchego/genesis/params.go index dcc49ee5..7336c0b3 100644 --- a/avalanchego/genesis/params.go +++ b/avalanchego/genesis/params.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/avalanchego/genesis/unparsed_config.go b/avalanchego/genesis/unparsed_config.go index 91241163..9831d835 100644 --- a/avalanchego/genesis/unparsed_config.go +++ b/avalanchego/genesis/unparsed_config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package genesis diff --git a/avalanchego/go.mod b/avalanchego/go.mod index f9b4a26e..6276845a 100644 --- a/avalanchego/go.mod +++ b/avalanchego/go.mod @@ -9,118 +9,122 @@ module github.com/ava-labs/avalanchego go 1.21 require ( + github.com/DataDog/zstd v1.5.2 github.com/Microsoft/go-winio v0.5.2 github.com/NYTimes/gziphandler v1.1.1 - github.com/ava-labs/avalanche-network-runner-sdk v0.2.0 - github.com/ava-labs/coreth v0.11.0-rc.4 - github.com/btcsuite/btcd v0.23.1 - github.com/btcsuite/btcd/btcutil v1.1.1 - github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837 - github.com/golang-jwt/jwt v3.2.1+incompatible + github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 + github.com/ava-labs/coreth v0.12.0-rc.2 + github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 + github.com/btcsuite/btcd/btcutil v1.1.3 + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 + github.com/golang-jwt/jwt/v4 v4.3.0 github.com/golang/mock v1.6.0 github.com/google/btree v1.1.2 github.com/gorilla/mux v1.8.0 github.com/gorilla/rpc v1.2.0 github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/go-hclog v1.2.2 - github.com/hashicorp/go-plugin v1.4.4 github.com/holiman/bloomfilter/v2 v2.0.3 github.com/huin/goupnp v1.0.3 github.com/jackpal/gateway v1.0.6 github.com/jackpal/go-nat-pmp v1.0.2 - github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 github.com/mr-tron/base58 v1.2.0 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d - github.com/onsi/ginkgo/v2 v2.1.4 - github.com/onsi/gomega v1.19.0 - github.com/prometheus/client_golang v1.12.2 + github.com/onsi/ginkgo/v2 v2.4.0 + github.com/onsi/gomega v1.24.0 + github.com/pires/go-proxyproto v0.6.2 + github.com/prometheus/client_golang v1.13.0 github.com/prometheus/client_model v0.2.0 github.com/rs/cors v1.7.0 github.com/shirou/gopsutil v3.21.11+incompatible github.com/spaolacci/murmur3 
v1.1.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.12.0 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.8.1 github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295 github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a - go.uber.org/zap v1.21.0 + go.opentelemetry.io/otel v1.11.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0 + go.opentelemetry.io/otel/sdk v1.11.0 + go.opentelemetry.io/otel/trace v1.11.0 + go.uber.org/zap v1.24.0 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/net v0.0.0-20220708220712-1185a9018129 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 + golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 + golang.org/x/sync v0.1.0 + golang.org/x/term v0.5.0 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac gonum.org/v1/gonum v0.11.0 - google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d - google.golang.org/grpc v1.49.0 - google.golang.org/protobuf v1.28.0 + google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c + google.golang.org/grpc v1.50.1 + google.golang.org/protobuf v1.28.1 gopkg.in/natefinch/lumberjack.v2 v2.0.0 ) require ( github.com/VictoriaMetrics/fastcache v1.10.0 // indirect - github.com/aead/siphash v1.0.1 // indirect + github.com/benbjohnson/clock v1.3.0 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect - github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect - github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect - github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect - github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect - github.com/btcsuite/winsvc v1.0.0 // indirect + 
github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deckarep/golang-set v1.8.0 // indirect - github.com/decred/dcrd/crypto/blake256 v1.0.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/decred/dcrd/lru v1.1.1 // indirect - github.com/ethereum/go-ethereum v1.10.25 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect + github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf // indirect + github.com/ethereum/go-ethereum v1.10.26 // indirect github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect + github.com/google/go-cmp v0.5.9 // indirect github.com/google/uuid v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect github.com/hashicorp/go-bexpr v0.1.10 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce // indirect + github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e // indirect github.com/holiman/uint256 v1.2.0 // indirect - github.com/jessevdk/go-flags v1.5.0 // indirect - github.com/jrick/logrotate v1.0.0 // indirect - github.com/kkdai/bstream v1.0.0 // indirect 
github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect - github.com/oklog/run v1.1.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.1 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - github.com/rjeczalik/notify v0.9.2 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + github.com/rjeczalik/notify v0.9.3 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 // indirect - github.com/stretchr/objx v0.2.0 // indirect github.com/subosito/gotenv v1.3.0 // indirect github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/tyler-smith/go-bip39 v1.0.2 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - go.uber.org/atomic v1.9.0 // indirect + github.com/zondax/hid v0.9.1 // indirect + github.com/zondax/ledger-go v0.14.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0 // indirect + go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect - golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect - golang.org/x/text v0.3.7 // 
indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/urfave/cli.v1 v1.20.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/avalanchego/go.sum b/avalanchego/go.sum index a6cfd9c6..71de57d5 100644 --- a/avalanchego/go.sum +++ b/avalanchego/go.sum @@ -39,13 +39,15 @@ dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7 github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.5.2 h1:vUG4lAyuPCXO0TLbXvPv7EB7cNK1QV/luu55UHLrrn8= +github.com/DataDog/zstd v1.5.2/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= -github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -55,45 +57,48 @@ 
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5 github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/ava-labs/avalanche-network-runner-sdk v0.2.0 h1:YNvM0oFlb7A825kGe0XwwZuvIXTKF1BsuvxJdRLhIaI= -github.com/ava-labs/avalanche-network-runner-sdk v0.2.0/go.mod h1:bEBRVZnGeRiNdDJAFUj+gA/TPzNDbpY/WzgDAHHwJb8= -github.com/ava-labs/coreth v0.11.0-rc.4 h1:oYZMWZcXYa4dH2hQBIAH/DD0rL2cB3btPGdabpCH5Ug= -github.com/ava-labs/coreth v0.11.0-rc.4/go.mod h1:IhfO9oA8KicFyYZA3nIqjV/TS6xzAqT5ml2QKfNGtGA= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/ava-labs/avalanche-network-runner-sdk v0.3.0 h1:TVi9JEdKNU/RevYZ9PyW4pULbEdS+KQDA9Ki2DUvuAs= +github.com/ava-labs/avalanche-network-runner-sdk v0.3.0/go.mod h1:SgKJvtqvgo/Bl/c8fxEHCLaSxEbzimYfBopcfrajxQk= +github.com/ava-labs/coreth v0.12.0-rc.2 h1:UNyGhuC2HxZ8eCLZiZON8xRiJkNHVZ75zknu/xqkKBA= +github.com/ava-labs/coreth v0.12.0-rc.2/go.mod h1:ZGhoIZTWbIaTmzEbprXu0hLtLdoE2PSTEFnCTYr0BRk= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7 h1:EdxD90j5sClfL5Ngpz2TlnbnkNYdFPDXa0jDOjam65c= +github.com/ava-labs/ledger-avalanche/go v0.0.0-20230105152938-00a24d05a8c7/go.mod h1:XhiXSrh90sHUbkERzaxEftCmUz53eCijshDLZ4fByVM= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod 
h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= -github.com/btcsuite/btcd v0.23.1 h1:IB8cVQcC2X5mHbnfirLG5IZnkWYNTPlLZVrxUYSotbE= -github.com/btcsuite/btcd v0.23.1/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= -github.com/btcsuite/btcd/btcec/v2 v2.1.1/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= -github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= +github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= +github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= -github.com/btcsuite/btcd/btcutil v1.1.1 h1:hDcDaXiP0uEzR8Biqo2weECKqEw0uHDZ9ixIWevVQqY= -github.com/btcsuite/btcd/btcutil v1.1.1/go.mod h1:nbKlBMNm9FGsdvKvu0essceubPiAcI57pYBNnsLAa34= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= +github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ= +github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= 
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod 
h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -105,45 +110,47 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= -github.com/decred/dcrd/chaincfg/chainhash v1.0.2 h1:rt5Vlq/jM3ZawwiacWjPa+smINyLRN07EO0cNBV6DGU= -github.com/decred/dcrd/chaincfg/chainhash v1.0.2/go.mod h1:BpbrGgrPTr3YJYRN3Bm+D9NuaFd+zGyNeIKgrhCXK60= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837 h1:g2cyFTu5FKWhCo7L4hVJ797Q506B4EywA7L9I6OebgA= -github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837/go.mod h1:J70FGZSbzsjecRTiTzER+3f1KZLNaXkuv+yeFTKoxM8= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/decred/dcrd/lru v1.1.1 h1:kWFDaW0OWx6AD6Ki342c+JPmHbiVdE6rK81pT3fuo/Y= -github.com/decred/dcrd/lru v1.1.1/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ= +github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod 
h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= +github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.10.25 h1:5dFrKJDnYf8L6/5o42abCE6a9yJm9cs4EJVRyYMr55s= -github.com/ethereum/go-ethereum v1.10.25/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s= +github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= 
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -153,18 +160,29 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 
h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= -github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog= +github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -211,8 +229,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -244,20 +262,19 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauGGCMg2zAZIiNZ9uIQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= 
-github.com/hashicorp/go-hclog v1.2.2 h1:ihRI7YFwcZdiSD7SIenIhHfQH3OuDvWerAUBZbeQS3M= -github.com/hashicorp/go-hclog v1.2.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ= -github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce h1:7UnVY3T/ZnHUrfviiAgIUjg2PXxsQfs5bphsG8F7Keo= -github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= @@ -274,12 +291,7 @@ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7Bd github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= 
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= -github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -289,17 +301,14 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= -github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/kkdai/bstream v1.0.0 h1:Se5gHwgp2VT2uHfDrkbbgbgEvV9cimLELwrPJctSjg8= -github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -308,10 +317,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= @@ -319,8 +326,6 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -340,8 +345,6 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96d github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -352,19 +355,22 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod 
h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= +github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.0.1 h1:8e3L2cCQzLFi2CR4g7vGFuFxX7Jl1kKX8gW+iV0GUKU= github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= +github.com/pires/go-proxyproto v0.6.2 h1:KAZ7UteSOt6urjme6ZldyFm4wDe/z0ZUP0Yv0Dos0d8= +github.com/pires/go-proxyproto v0.6.2/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -376,8 +382,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang 
v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -386,16 +393,18 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs 
v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8= -github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= +github.com/rjeczalik/notify v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZeY= +github.com/rjeczalik/notify v0.9.3/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= @@ -408,6 +417,7 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= @@ -424,20 +434,23 @@ github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 h1:Oo2KZNP70K 
github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= -github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295 h1:rVKS9JjtqE4/PscoIsP46sRnJhfq8YFbjlk0fUJTRnY= github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295/go.mod 
h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tklauser/go-sysconf v0.3.5 h1:uu3Xl4nkLzQfXNsWn15rPc/HQCJKObbt1dKJeWp3vU4= @@ -453,23 +466,42 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= +github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c= +github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk= +go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0 h1:0dly5et1i/6Th3WHn0M6kYiJfFNzhhxanrJ0bOfnjEo= 
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0/go.mod h1:+Lq4/WkdCkjbGcBMVHHg2apTbv8oMBf29QCnyCCJjNQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0 h1:eyJ6njZmH16h9dOKCi7lMswAnGsSOwgTqWzfxqcuNr8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0/go.mod h1:FnDp7XemjN3oZ3xGunnfOUTVwd2XcvLbtRAuOSU3oc8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 h1:j2RFV0Qdt38XQ2Jvi4WIsQ56w8T7eSirYbMw19VXRDg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0/go.mod h1:pILgiTEtrqvZpoiuGdblDgS5dbIaTgDrkIuKfEFkt+A= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0 h1:v29I/NbVp7LXQYMFZhU6q17D0jSEbYOAVONlrO1oH5s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0/go.mod h1:/RpLsmbQLDO1XCbWAM4S6TSwj8FKwwgyKKyqtvVfAnw= +go.opentelemetry.io/otel/sdk v1.11.0 h1:ZnKIL9V9Ztaq+ME43IUi/eo22mNsb6a7tGfzaOWB5fo= +go.opentelemetry.io/otel/sdk v1.11.0/go.mod h1:REusa8RsyKaq0OlyangWXaw97t2VogoO4SSEeKkSTAk= +go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI= +go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/atomic v1.10.0 
h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -557,10 +589,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0= -golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -571,6 +604,8 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -581,8 +616,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c 
h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -607,7 +643,6 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -634,7 +669,6 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -647,13 +681,14 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -662,8 +697,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -718,7 +754,6 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -789,8 +824,9 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d h1:YbuF5+kdiC516xIP60RvlHeFbY9sRDR73QsAGHpkeVw= -google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -809,9 +845,10 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.40.0/go.mod 
h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -825,13 +862,14 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= @@ -847,7 +885,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/avalanchego/ids/aliases.go b/avalanchego/ids/aliases.go index 483fb4eb..f0f10139 100644 --- a/avalanchego/ids/aliases.go +++ b/avalanchego/ids/aliases.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids @@ -48,8 +48,8 @@ func (a *aliaser) Lookup(alias string) (ID, error) { a.lock.RLock() defer a.lock.RUnlock() - if ID, ok := a.dealias[alias]; ok { - return ID, nil + if id, ok := a.dealias[alias]; ok { + return id, nil } return ID{}, fmt.Errorf("there is no ID with alias %s", alias) } diff --git a/avalanchego/ids/aliases_test.go b/avalanchego/ids/aliases_test.go index e2f09bda..624d40eb 100644 --- a/avalanchego/ids/aliases_test.go +++ b/avalanchego/ids/aliases_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids diff --git a/avalanchego/ids/bag.go b/avalanchego/ids/bag.go deleted file mode 100644 index 778d7e40..00000000 --- a/avalanchego/ids/bag.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ids - -import ( - "fmt" - "strings" -) - -const ( - minBagSize = 16 -) - -// Bag is a multiset of IDs. -// -// A bag has the ability to split and filter on its bits for ease of use for -// binary voting. -type Bag struct { - counts map[ID]int - size int - - mode ID - modeFreq int - - threshold int - metThreshold Set -} - -func (b *Bag) init() { - if b.counts == nil { - b.counts = make(map[ID]int, minBagSize) - } -} - -// SetThreshold sets the number of times an ID must be added to be contained in -// the threshold set. -func (b *Bag) SetThreshold(threshold int) { - if b.threshold == threshold { - return - } - - b.threshold = threshold - b.metThreshold.Clear() - for vote, count := range b.counts { - if count >= threshold { - b.metThreshold.Add(vote) - } - } -} - -// Add increases the number of times each id has been seen by one. -func (b *Bag) Add(ids ...ID) { - for _, id := range ids { - b.AddCount(id, 1) - } -} - -// AddCount increases the number of times the id has been seen by count. -// -// count must be >= 0 -func (b *Bag) AddCount(id ID, count int) { - if count <= 0 { - return - } - - b.init() - - totalCount := b.counts[id] + count - b.counts[id] = totalCount - b.size += count - - if totalCount > b.modeFreq { - b.mode = id - b.modeFreq = totalCount - } - if totalCount >= b.threshold { - b.metThreshold.Add(id) - } -} - -// Count returns the number of times the id has been added. -func (b *Bag) Count(id ID) int { - return b.counts[id] -} - -// Len returns the number of times an id has been added. 
-func (b *Bag) Len() int { return b.size } - -// List returns a list of all ids that have been added. -func (b *Bag) List() []ID { - idList := make([]ID, len(b.counts)) - i := 0 - for id := range b.counts { - idList[i] = id - i++ - } - return idList -} - -// Equals returns true if the bags contain the same elements -func (b *Bag) Equals(oIDs Bag) bool { - if b.Len() != oIDs.Len() { - return false - } - for key, value := range b.counts { - if value != oIDs.counts[key] { - return false - } - } - return true -} - -// Mode returns the id that has been seen the most and the number of times it -// has been seen. Ties are broken by the first id to be seen the reported number -// of times. -func (b *Bag) Mode() (ID, int) { return b.mode, b.modeFreq } - -// Threshold returns the ids that have been seen at least threshold times. -func (b *Bag) Threshold() Set { return b.metThreshold } - -// Filter returns the bag of ids with the same counts as this bag, except all -// the ids in the returned bag must have the same bits in the range [start, end) -// as id. -func (b *Bag) Filter(start, end int, id ID) Bag { - newBag := Bag{} - for vote, count := range b.counts { - if EqualSubset(start, end, id, vote) { - newBag.AddCount(vote, count) - } - } - return newBag -} - -// Split returns the bags of ids with the same counts a this bag, except all ids -// in the 0th index have a 0 at bit [index], and all ids in the 1st index have a -// 1 at bit [index]. 
-func (b *Bag) Split(index uint) [2]Bag { - splitVotes := [2]Bag{} - for vote, count := range b.counts { - bit := vote.Bit(index) - splitVotes[bit].AddCount(vote, count) - } - return splitVotes -} - -func (b *Bag) PrefixedString(prefix string) string { - sb := strings.Builder{} - - sb.WriteString(fmt.Sprintf("Bag: (Size = %d)", b.Len())) - for id, count := range b.counts { - sb.WriteString(fmt.Sprintf("\n%s ID[%s]: Count = %d", prefix, id, count)) - } - - return sb.String() -} - -func (b *Bag) String() string { return b.PrefixedString("") } diff --git a/avalanchego/ids/bag_test.go b/avalanchego/ids/bag_test.go deleted file mode 100644 index f99a1abc..00000000 --- a/avalanchego/ids/bag_test.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ids - -import ( - "testing" -) - -func TestBagAdd(t *testing.T) { - id0 := Empty - id1 := ID{1} - - bag := Bag{} - - if count := bag.Count(id0); count != 0 { - t.Fatalf("Bag.Count returned %d expected %d", count, 0) - } else if count := bag.Count(id1); count != 0 { - t.Fatalf("Bag.Count returned %d expected %d", count, 0) - } else if size := bag.Len(); size != 0 { - t.Fatalf("Bag.Len returned %d elements expected %d", count, 0) - } else if list := bag.List(); len(list) != 0 { - t.Fatalf("Bag.List returned %v expected %v", list, nil) - } else if mode, freq := bag.Mode(); mode != Empty { - t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, ID{}) - } else if freq != 0 { - t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 0) - } else if threshold := bag.Threshold(); threshold.Len() != 0 { - t.Fatalf("Bag.Threshold returned %s expected %s", threshold, Set{}) - } - - bag.Add(id0) - - if count := bag.Count(id0); count != 1 { - t.Fatalf("Bag.Count returned %d expected %d", count, 1) - } else if count := bag.Count(id1); count != 0 { - t.Fatalf("Bag.Count returned %d expected %d", count, 0) - } else if size := bag.Len(); size != 
1 { - t.Fatalf("Bag.Len returned %d expected %d", count, 1) - } else if list := bag.List(); len(list) != 1 { - t.Fatalf("Bag.List returned %d expected %d", len(list), 1) - } else if mode, freq := bag.Mode(); mode != id0 { - t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id0) - } else if freq != 1 { - t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 1) - } else if threshold := bag.Threshold(); threshold.Len() != 1 { - t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 1) - } - - bag.Add(id0) - - if count := bag.Count(id0); count != 2 { - t.Fatalf("Bag.Count returned %d expected %d", count, 2) - } else if count := bag.Count(id1); count != 0 { - t.Fatalf("Bag.Count returned %d expected %d", count, 0) - } else if size := bag.Len(); size != 2 { - t.Fatalf("Bag.Len returned %d expected %d", count, 2) - } else if list := bag.List(); len(list) != 1 { - t.Fatalf("Bag.List returned %d expected %d", len(list), 1) - } else if mode, freq := bag.Mode(); mode != id0 { - t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id0) - } else if freq != 2 { - t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 2) - } else if threshold := bag.Threshold(); threshold.Len() != 1 { - t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 1) - } - - bag.AddCount(id1, 3) - - if count := bag.Count(id0); count != 2 { - t.Fatalf("Bag.Count returned %d expected %d", count, 2) - } else if count := bag.Count(id1); count != 3 { - t.Fatalf("Bag.Count returned %d expected %d", count, 3) - } else if size := bag.Len(); size != 5 { - t.Fatalf("Bag.Len returned %d expected %d", count, 5) - } else if list := bag.List(); len(list) != 2 { - t.Fatalf("Bag.List returned %d expected %d", len(list), 2) - } else if mode, freq := bag.Mode(); mode != id1 { - t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id1) - } else if freq != 3 { - t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 3) - } else if threshold := bag.Threshold(); threshold.Len() != 2 { - 
t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 2) - } -} - -func TestBagSetThreshold(t *testing.T) { - id0 := Empty - id1 := ID{1} - - bag := Bag{} - - bag.AddCount(id0, 2) - bag.AddCount(id1, 3) - - bag.SetThreshold(0) - - if count := bag.Count(id0); count != 2 { - t.Fatalf("Bag.Count returned %d expected %d", count, 2) - } else if count := bag.Count(id1); count != 3 { - t.Fatalf("Bag.Count returned %d expected %d", count, 3) - } else if size := bag.Len(); size != 5 { - t.Fatalf("Bag.Len returned %d expected %d", count, 5) - } else if list := bag.List(); len(list) != 2 { - t.Fatalf("Bag.List returned %d expected %d", len(list), 2) - } else if mode, freq := bag.Mode(); mode != id1 { - t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id1) - } else if freq != 3 { - t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 3) - } else if threshold := bag.Threshold(); threshold.Len() != 2 { - t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 2) - } - - bag.SetThreshold(3) - - if count := bag.Count(id0); count != 2 { - t.Fatalf("Bag.Count returned %d expected %d", count, 2) - } else if count := bag.Count(id1); count != 3 { - t.Fatalf("Bag.Count returned %d expected %d", count, 3) - } else if size := bag.Len(); size != 5 { - t.Fatalf("Bag.Len returned %d expected %d", count, 5) - } else if list := bag.List(); len(list) != 2 { - t.Fatalf("Bag.List returned %d expected %d", len(list), 2) - } else if mode, freq := bag.Mode(); mode != id1 { - t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, id1) - } else if freq != 3 { - t.Fatalf("Bag.Mode[1] returned %d expected %d", freq, 3) - } else if threshold := bag.Threshold(); threshold.Len() != 1 { - t.Fatalf("Bag.Threshold returned %d expected %d", len(threshold), 1) - } else if !threshold.Contains(id1) { - t.Fatalf("Bag.Threshold doesn't contain %s", id1) - } -} - -func TestBagFilter(t *testing.T) { - id0 := Empty - id1 := ID{1} - id2 := ID{2} - - bag := Bag{} - - bag.AddCount(id0, 1) - 
bag.AddCount(id1, 3) - bag.AddCount(id2, 5) - - even := bag.Filter(0, 1, id0) - - if count := even.Count(id0); count != 1 { - t.Fatalf("Bag.Count returned %d expected %d", count, 1) - } else if count := even.Count(id1); count != 0 { - t.Fatalf("Bag.Count returned %d expected %d", count, 0) - } else if count := even.Count(id2); count != 5 { - t.Fatalf("Bag.Count returned %d expected %d", count, 5) - } -} - -func TestBagSplit(t *testing.T) { - id0 := Empty - id1 := ID{1} - id2 := ID{2} - - bag := Bag{} - - bag.AddCount(id0, 1) - bag.AddCount(id1, 3) - bag.AddCount(id2, 5) - - bags := bag.Split(0) - - evens := bags[0] - odds := bags[1] - - if count := evens.Count(id0); count != 1 { - t.Fatalf("Bag.Count returned %d expected %d", count, 1) - } else if count := evens.Count(id1); count != 0 { - t.Fatalf("Bag.Count returned %d expected %d", count, 0) - } else if count := evens.Count(id2); count != 5 { - t.Fatalf("Bag.Count returned %d expected %d", count, 5) - } else if count := odds.Count(id0); count != 0 { - t.Fatalf("Bag.Count returned %d expected %d", count, 0) - } else if count := odds.Count(id1); count != 3 { - t.Fatalf("Bag.Count returned %d expected %d", count, 3) - } else if count := odds.Count(id2); count != 0 { - t.Fatalf("Bag.Count returned %d expected %d", count, 0) - } -} - -func TestBagString(t *testing.T) { - id0 := Empty - - bag := Bag{} - - bag.AddCount(id0, 1337) - - expected := "Bag: (Size = 1337)\n" + - " ID[11111111111111111111111111111111LpoYY]: Count = 1337" - - if bagString := bag.String(); bagString != expected { - t.Fatalf("Bag.String:\nReturned:\n%s\nExpected:\n%s", bagString, expected) - } -} diff --git a/avalanchego/ids/bit_set.go b/avalanchego/ids/bit_set.go deleted file mode 100644 index ee13432c..00000000 --- a/avalanchego/ids/bit_set.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package ids - -import ( - "fmt" - "math/bits" -) - -// BitSet64 is a set that can contain uints in the range [0, 64). All functions -// are O(1). The zero value is the empty set. -type BitSet64 uint64 - -// Add [i] to the set of ints -func (bs *BitSet64) Add(i uint) { *bs |= 1 << i } - -// Union adds all the elements in [s] to this set -func (bs *BitSet64) Union(s BitSet64) { *bs |= s } - -// Intersection takes the intersection of [s] with this set -func (bs *BitSet64) Intersection(s BitSet64) { *bs &= s } - -// Difference removes all the elements in [s] from this set -func (bs *BitSet64) Difference(s BitSet64) { *bs &^= s } - -// Remove [i] from the set of ints -func (bs *BitSet64) Remove(i uint) { *bs &^= 1 << i } - -// Clear removes all elements from this set -func (bs *BitSet64) Clear() { *bs = 0 } - -// Contains returns true if [i] was previously added to this set -func (bs BitSet64) Contains(i uint) bool { return bs&(1<= 0 -func (b *NodeIDBag) AddCount(id NodeID, count int) { - if count <= 0 { - return - } - - b.init() - - totalCount := b.counts[id] + count - b.counts[id] = totalCount - b.size += count -} - -// Count returns the number of times the id has been added. -func (b *NodeIDBag) Count(id NodeID) int { - b.init() - return b.counts[id] -} - -// Remove sets the count of the provided ID to zero. -func (b *NodeIDBag) Remove(id NodeID) { - b.init() - count := b.counts[id] - delete(b.counts, id) - b.size -= count -} - -// Len returns the number of times an id has been added. -func (b *NodeIDBag) Len() int { return b.size } - -// List returns a list of all IDs that have been added, -// without duplicates. -// e.g. 
a bag with {ID1, ID1, ID2} returns ids.ShortID[]{ID1, ID2} -func (b *NodeIDBag) List() []NodeID { - idList := make([]NodeID, len(b.counts)) - i := 0 - for id := range b.counts { - idList[i] = id - i++ - } - return idList -} - -// Equals returns true if the bags contain the same elements -func (b *NodeIDBag) Equals(oIDs NodeIDBag) bool { - if b.Len() != oIDs.Len() { - return false - } - for key, value := range b.counts { - if value != oIDs.counts[key] { - return false - } - } - return true -} - -func (b *NodeIDBag) PrefixedString(prefix string) string { - sb := strings.Builder{} - - sb.WriteString(fmt.Sprintf("Bag: (Size = %d)", b.Len())) - for id, count := range b.counts { - sb.WriteString(fmt.Sprintf("\n%s ID[%s]: Count = %d", prefix, id, count)) - } - - return sb.String() -} - -func (b *NodeIDBag) String() string { return b.PrefixedString("") } diff --git a/avalanchego/ids/node_id_set.go b/avalanchego/ids/node_id_set.go deleted file mode 100644 index bf909735..00000000 --- a/avalanchego/ids/node_id_set.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ids - -import "strings" - -// NodeIDSet is a set of NodeIDs -type NodeIDSet map[NodeID]struct{} - -// Return a new NodeIDSet with initial capacity [size]. -// More or less than [size] elements can be added to this set. -// Using NewNodeIDSet() rather than ids.NodeIDSet{} is just an optimization that can -// be used if you know how many elements will be put in this set. 
-func NewNodeIDSet(size int) NodeIDSet { - if size < 0 { - return NodeIDSet{} - } - return make(map[NodeID]struct{}, size) -} - -func (ids *NodeIDSet) init(size int) { - if *ids == nil { - if minShortSetSize > size { - size = minShortSetSize - } - *ids = make(map[NodeID]struct{}, size) - } -} - -// Add all the ids to this set, if the id is already in the set, nothing happens -func (ids *NodeIDSet) Add(idList ...NodeID) { - ids.init(2 * len(idList)) - for _, id := range idList { - (*ids)[id] = struct{}{} - } -} - -// Union adds all the ids from the provided set to this set. -func (ids *NodeIDSet) Union(idSet NodeIDSet) { - ids.init(2 * idSet.Len()) - for id := range idSet { - (*ids)[id] = struct{}{} - } -} - -// Difference removes all the ids from the provided set to this set. -func (ids *NodeIDSet) Difference(idSet NodeIDSet) { - for id := range idSet { - delete(*ids, id) - } -} - -// Contains returns true if the set contains this id, false otherwise -func (ids *NodeIDSet) Contains(id NodeID) bool { - ids.init(1) - _, contains := (*ids)[id] - return contains -} - -// Len returns the number of ids in this set -func (ids NodeIDSet) Len() int { return len(ids) } - -// Remove all the id from this set, if the id isn't in the set, nothing happens -func (ids *NodeIDSet) Remove(idList ...NodeID) { - ids.init(1) - for _, id := range idList { - delete(*ids, id) - } -} - -// Clear empties this set -func (ids *NodeIDSet) Clear() { *ids = nil } - -// CappedList returns a list of length at most [size]. -// Size should be >= 0. If size < 0, returns nil. 
-func (ids NodeIDSet) CappedList(size int) []NodeID { - if size < 0 { - return nil - } - if l := ids.Len(); l < size { - size = l - } - i := 0 - idList := make([]NodeID, size) - for id := range ids { - if i >= size { - break - } - idList[i] = id - i++ - } - return idList -} - -// List converts this set into a list -func (ids NodeIDSet) List() []NodeID { - idList := make([]NodeID, len(ids)) - i := 0 - for id := range ids { - idList[i] = id - i++ - } - return idList -} - -// SortedList returns this set as a sorted list -func (ids NodeIDSet) SortedList() []NodeID { - lst := ids.List() - SortNodeIDs(lst) - return lst -} - -// Equals returns true if the sets contain the same elements -func (ids NodeIDSet) Equals(oIDs NodeIDSet) bool { - if ids.Len() != oIDs.Len() { - return false - } - for key := range oIDs { - if _, contains := ids[key]; !contains { - return false - } - } - return true -} - -// String returns the string representation of a set -func (ids NodeIDSet) String() string { - sb := strings.Builder{} - sb.WriteString("{") - first := true - for id := range ids { - if !first { - sb.WriteString(", ") - } - first = false - sb.WriteString(id.String()) - } - sb.WriteString("}") - return sb.String() -} - -// Returns an element. If the set is empty, returns false -func (ids *NodeIDSet) Peek() (NodeID, bool) { - for id := range *ids { - return id, true - } - return NodeID{}, false -} - -// Removes and returns an element. If the set is empty, does nothing and returns -// false -func (ids *NodeIDSet) Pop() (NodeID, bool) { - for id := range *ids { - delete(*ids, id) - return id, true - } - return NodeID{}, false -} diff --git a/avalanchego/ids/node_id_test.go b/avalanchego/ids/node_id_test.go index aea225ba..52c90c8e 100644 --- a/avalanchego/ids/node_id_test.go +++ b/avalanchego/ids/node_id_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package ids @@ -6,8 +6,9 @@ package ids import ( "bytes" "encoding/json" - "reflect" "testing" + + "github.com/stretchr/testify/require" ) func TestNodeIDEquality(t *testing.T) { @@ -163,23 +164,6 @@ func TestNodeIDString(t *testing.T) { } } -func TestSortNodeIDs(t *testing.T) { - ids := []NodeID{ - {'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, - {'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}, - {'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, - } - SortNodeIDs(ids) - expected := []NodeID{ - {'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}, - {'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, - {'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}, - } - if !reflect.DeepEqual(ids, expected) { - t.Fatal("[]NodeID was not sorted lexographically") - } -} - func TestNodeIDMapMarshalling(t *testing.T) { originalMap := map[NodeID]int{ {'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}: 1, @@ -205,3 +189,27 @@ func TestNodeIDMapMarshalling(t *testing.T) { } } } + +func TestNodeIDLess(t *testing.T) { + require := require.New(t) + + id1 := NodeID{} + id2 := NodeID{} + require.False(id1.Less(id2)) + require.False(id2.Less(id1)) + + id1 = NodeID{1} + id2 = NodeID{} + require.False(id1.Less(id2)) + require.True(id2.Less(id1)) + + id1 = NodeID{1} + id2 = NodeID{1} + require.False(id1.Less(id2)) + require.False(id2.Less(id1)) + + id1 = NodeID{1} + id2 = NodeID{1, 2} + require.True(id1.Less(id2)) + require.False(id2.Less(id1)) +} diff --git a/avalanchego/ids/request_id.go b/avalanchego/ids/request_id.go new file mode 100644 index 00000000..779f819d --- /dev/null +++ b/avalanchego/ids/request_id.go @@ -0,0 +1,18 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ids + +// RequestID is a unique identifier for an in-flight request pending a response. 
+type RequestID struct { + // The node this request came from + NodeID NodeID + // The chain this request came from + SourceChainID ID + // The chain the expected response should come from + DestinationChainID ID + // The unique identifier for this request + RequestID uint32 + // The message opcode + Op byte +} diff --git a/avalanchego/ids/set.go b/avalanchego/ids/set.go deleted file mode 100644 index 9f9a1d8d..00000000 --- a/avalanchego/ids/set.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ids - -import ( - "encoding/json" - "strings" -) - -const ( - // The minimum capacity of a set - minSetSize = 16 - - // If a set has more than this many keys, it will be cleared by setting the map to nil - // rather than iteratively deleting - clearSizeThreshold = 512 -) - -// Set is a set of IDs -type Set map[ID]struct{} - -// Return a new set with initial capacity [size]. -// More or less than [size] elements can be added to this set. -// Using NewSet() rather than ids.Set{} is just an optimization that can -// be used if you know how many elements will be put in this set. -func NewSet(size int) Set { - if size < 0 { - return Set{} - } - return make(map[ID]struct{}, size) -} - -func (ids *Set) init(size int) { - if *ids == nil { - if minSetSize > size { - size = minSetSize - } - *ids = make(map[ID]struct{}, size) - } -} - -// Add all the ids to this set, if the id is already in the set, nothing happens -func (ids *Set) Add(idList ...ID) { - ids.init(2 * len(idList)) - for _, id := range idList { - (*ids)[id] = struct{}{} - } -} - -// Union adds all the ids from the provided set to this set. -func (ids *Set) Union(set Set) { - ids.init(2 * set.Len()) - for id := range set { - (*ids)[id] = struct{}{} - } -} - -// Difference removes all the ids from the provided set to this set. 
-func (ids *Set) Difference(set Set) { - for id := range set { - delete(*ids, id) - } -} - -// Contains returns true if the set contains this id, false otherwise -func (ids *Set) Contains(id ID) bool { - _, contains := (*ids)[id] - return contains -} - -// Overlaps returns true if the intersection of the set is non-empty -func (ids *Set) Overlaps(big Set) bool { - small := *ids - if small.Len() > big.Len() { - small, big = big, small - } - - for id := range small { - if _, ok := big[id]; ok { - return true - } - } - return false -} - -// Len returns the number of ids in this set -func (ids Set) Len() int { return len(ids) } - -// Remove all the id from this set, if the id isn't in the set, nothing happens -func (ids *Set) Remove(idList ...ID) { - for _, id := range idList { - delete(*ids, id) - } -} - -// Clear empties this set -func (ids *Set) Clear() { - if len(*ids) > clearSizeThreshold { - *ids = nil - return - } - for key := range *ids { - delete(*ids, key) - } -} - -// List converts this set into a list -func (ids Set) List() []ID { - idList := make([]ID, ids.Len()) - i := 0 - for id := range ids { - idList[i] = id - i++ - } - return idList -} - -// SortedList returns this set as a sorted list -func (ids Set) SortedList() []ID { - lst := ids.List() - SortIDs(lst) - return lst -} - -// CappedList returns a list of length at most [size]. -// Size should be >= 0. If size < 0, returns nil. 
-func (ids Set) CappedList(size int) []ID { - if size < 0 { - return nil - } - if l := ids.Len(); l < size { - size = l - } - i := 0 - idList := make([]ID, size) - for id := range ids { - if i >= size { - break - } - idList[i] = id - i++ - } - return idList -} - -// Equals returns true if the sets contain the same elements -func (ids Set) Equals(oIDs Set) bool { - if ids.Len() != oIDs.Len() { - return false - } - for key := range oIDs { - if _, contains := ids[key]; !contains { - return false - } - } - return true -} - -// String returns the string representation of a set -func (ids Set) String() string { - sb := strings.Builder{} - sb.WriteString("{") - first := true - for id := range ids { - if !first { - sb.WriteString(", ") - } - first = false - sb.WriteString(id.String()) - } - sb.WriteString("}") - return sb.String() -} - -// Removes and returns an element. If the set is empty, does nothing and returns -// false. -func (ids *Set) Pop() (ID, bool) { - for id := range *ids { - delete(*ids, id) - return id, true - } - return ID{}, false -} - -func (ids *Set) MarshalJSON() ([]byte, error) { - idsList := ids.List() - SortIDs(idsList) - return json.Marshal(idsList) -} diff --git a/avalanchego/ids/set_benchmark_test.go b/avalanchego/ids/set_benchmark_test.go deleted file mode 100644 index b1db94e0..00000000 --- a/avalanchego/ids/set_benchmark_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package ids - -import ( - "crypto/rand" - "strconv" - "testing" -) - -func BenchmarkSetListSmall(b *testing.B) { - smallLen := 5 - set := Set{} - for i := 0; i < smallLen; i++ { - var id ID - if _, err := rand.Read(id[:]); err != nil { - b.Fatal(err) - } - set.Add(id) - } - b.ResetTimer() - for n := 0; n < b.N; n++ { - set.List() - } -} - -func BenchmarkSetListMedium(b *testing.B) { - mediumLen := 25 - set := Set{} - for i := 0; i < mediumLen; i++ { - var id ID - if _, err := rand.Read(id[:]); err != nil { - b.Fatal(err) - } - set.Add(id) - } - b.ResetTimer() - - for n := 0; n < b.N; n++ { - set.List() - } -} - -func BenchmarkSetListLarge(b *testing.B) { - largeLen := 100000 - set := Set{} - for i := 0; i < largeLen; i++ { - var id ID - if _, err := rand.Read(id[:]); err != nil { - b.Fatal(err) - } - set.Add(id) - } - b.ResetTimer() - for n := 0; n < b.N; n++ { - set.List() - } -} - -func BenchmarkSetClear(b *testing.B) { - for _, numElts := range []int{10, 25, 50, 100, 250, 500, 1000} { - b.Run(strconv.Itoa(numElts), func(b *testing.B) { - set := NewSet(numElts) - for n := 0; n < b.N; n++ { - set.Add(make([]ID, numElts)...) - set.Clear() - } - }) - } -} diff --git a/avalanchego/ids/set_test.go b/avalanchego/ids/set_test.go deleted file mode 100644 index 22bf26ec..00000000 --- a/avalanchego/ids/set_test.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package ids - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSet(t *testing.T) { - id1 := ID{1} - - ids := Set{} - - ids.Add(id1) - if !ids.Contains(id1) { - t.Fatalf("Initial value not set correctly") - } - - ids.Remove(id1) - if ids.Contains(id1) { - t.Fatalf("Value not removed correctly") - } - - ids.Add(id1) - if !ids.Contains(id1) { - t.Fatalf("Initial value not set correctly") - } else if ids.Len() != 1 { - t.Fatalf("Bad set size") - } else if list := ids.List(); len(list) != 1 { - t.Fatalf("Bad list size") - } else if list[0] != id1 { - t.Fatalf("Set value not correct") - } - - ids.Clear() - if ids.Contains(id1) { - t.Fatalf("Value not removed correctly") - } - - ids.Add(id1) - - ids2 := Set{} - - if ids.Overlaps(ids2) { - t.Fatalf("Empty set shouldn't overlap") - } - - ids2.Union(ids) - if !ids2.Contains(id1) { - t.Fatalf("Value not union added correctly") - } - - if !ids.Overlaps(ids2) { - t.Fatalf("Sets overlap") - } - - ids2.Difference(ids) - if ids2.Contains(id1) { - t.Fatalf("Value not difference removed correctly") - } - - if ids.Overlaps(ids2) { - t.Fatalf("Sets don't overlap") - } -} - -func TestSetCappedList(t *testing.T) { - set := Set{} - - id := Empty - - if list := set.CappedList(0); len(list) != 0 { - t.Fatalf("List should have been empty but was %v", list) - } - - set.Add(id) - - if list := set.CappedList(0); len(list) != 0 { - t.Fatalf("List should have been empty but was %v", list) - } else if list := set.CappedList(1); len(list) != 1 { - t.Fatalf("List should have had length %d but had %d", 1, len(list)) - } else if returnedID := list[0]; id != returnedID { - t.Fatalf("List should have been %s but was %s", id, returnedID) - } else if list := set.CappedList(2); len(list) != 1 { - t.Fatalf("List should have had length %d but had %d", 1, len(list)) - } else if returnedID := list[0]; id != returnedID { - t.Fatalf("List should have been %s but was %s", id, returnedID) - } - - id2 := ID{1} - 
set.Add(id2) - - if list := set.CappedList(0); len(list) != 0 { - t.Fatalf("List should have been empty but was %v", list) - } else if list := set.CappedList(1); len(list) != 1 { - t.Fatalf("List should have had length %d but had %d", 1, len(list)) - } else if returnedID := list[0]; id != returnedID && id2 != returnedID { - t.Fatalf("List should have been %s but was %s", id, returnedID) - } else if list := set.CappedList(2); len(list) != 2 { - t.Fatalf("List should have had length %d but had %d", 2, len(list)) - } else if list := set.CappedList(3); len(list) != 2 { - t.Fatalf("List should have had length %d but had %d", 2, len(list)) - } else if returnedID := list[0]; id != returnedID && id2 != returnedID { - t.Fatalf("list contains unexpected element %s", returnedID) - } else if returnedID := list[1]; id != returnedID && id2 != returnedID { - t.Fatalf("list contains unexpected element %s", returnedID) - } -} - -// Test that Clear() works with both the iterative and set-to-nil path -func TestSetClearLarge(t *testing.T) { - // Using iterative clear path - set := Set{} - for i := 0; i < clearSizeThreshold; i++ { - set.Add(GenerateTestID()) - } - set.Clear() - if set.Len() != 0 { - t.Fatal("length should be 0") - } - set.Add(GenerateTestID()) - if set.Len() != 1 { - t.Fatal("length should be 1") - } - - // Using bulk (set map to nil) path - set = Set{} - for i := 0; i < clearSizeThreshold+1; i++ { - set.Add(GenerateTestID()) - } - set.Clear() - if set.Len() != 0 { - t.Fatal("length should be 0") - } - set.Add(GenerateTestID()) - if set.Len() != 1 { - t.Fatal("length should be 1") - } -} - -func TestSetPop(t *testing.T) { - var s Set - _, ok := s.Pop() - require.False(t, ok) - - s = make(Set) - _, ok = s.Pop() - require.False(t, ok) - - id1, id2 := GenerateTestID(), GenerateTestID() - s.Add(id1, id2) - - got, ok := s.Pop() - require.True(t, ok) - require.True(t, got == id1 || got == id2) - require.EqualValues(t, 1, s.Len()) - - got, ok = s.Pop() - require.True(t, ok) - 
require.True(t, got == id1 || got == id2) - require.EqualValues(t, 0, s.Len()) - - _, ok = s.Pop() - require.False(t, ok) -} - -func TestSetMarshalJSON(t *testing.T) { - require := require.New(t) - set := Set{} - { - asJSON, err := set.MarshalJSON() - require.NoError(err) - require.Equal("[]", string(asJSON)) - } - id1, id2 := GenerateTestID(), GenerateTestID() - set.Add(id1) - { - asJSON, err := set.MarshalJSON() - require.NoError(err) - require.Equal(fmt.Sprintf("[\"%s\"]", id1), string(asJSON)) - } - set.Add(id2) - { - asJSON, err := set.MarshalJSON() - require.NoError(err) - require.Equal(fmt.Sprintf("[\"%s\",\"%s\"]", id1, id2), string(asJSON)) - } -} - -func TestSortedList(t *testing.T) { - require := require.New(t) - - set := Set{} - require.Len(set.SortedList(), 0) - - set.Add(ID{0}) - sorted := set.SortedList() - require.Len(sorted, 1) - require.Equal(ID{0}, sorted[0]) - - set.Add(ID{1}) - sorted = set.SortedList() - require.Len(sorted, 2) - require.Equal(ID{0}, sorted[0]) - require.Equal(ID{1}, sorted[1]) - - set.Add(ID{2}) - sorted = set.SortedList() - require.Len(sorted, 3) - require.Equal(ID{0}, sorted[0]) - require.Equal(ID{1}, sorted[1]) - require.Equal(ID{2}, sorted[2]) -} diff --git a/avalanchego/ids/short.go b/avalanchego/ids/short.go index 244523f1..1ae86348 100644 --- a/avalanchego/ids/short.go +++ b/avalanchego/ids/short.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ids @@ -7,15 +7,19 @@ import ( "bytes" "encoding/hex" "fmt" - "sort" "strings" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/hashing" ) // ShortEmpty is a useful all zero value -var ShortEmpty = ShortID{} +var ( + ShortEmpty = ShortID{} + + _ utils.Sortable[ShortID] = ShortID{} +) // ShortID wraps a 20 byte hash as an identifier type ShortID [20]byte @@ -79,10 +83,14 @@ func (id *ShortID) UnmarshalText(text []byte) error { // Bytes returns the 20 byte hash as a slice. It is assumed this slice is not // modified. -func (id ShortID) Bytes() []byte { return id[:] } +func (id ShortID) Bytes() []byte { + return id[:] +} // Hex returns a hex encoded string of this id. -func (id ShortID) Hex() string { return hex.EncodeToString(id.Bytes()) } +func (id ShortID) Hex() string { + return hex.EncodeToString(id.Bytes()) +} func (id ShortID) String() string { // We assume that the maximum size of a byte slice that @@ -100,34 +108,8 @@ func (id ShortID) MarshalText() ([]byte, error) { return []byte(id.String()), nil } -type sortShortIDData []ShortID - -func (ids sortShortIDData) Less(i, j int) bool { - return bytes.Compare( - ids[i].Bytes(), - ids[j].Bytes()) == -1 -} -func (ids sortShortIDData) Len() int { return len(ids) } -func (ids sortShortIDData) Swap(i, j int) { ids[j], ids[i] = ids[i], ids[j] } - -// SortShortIDs sorts the ids lexicographically -func SortShortIDs(ids []ShortID) { sort.Sort(sortShortIDData(ids)) } - -// IsSortedAndUniqueShortIDs returns true if the ids are sorted and unique -func IsSortedAndUniqueShortIDs(ids []ShortID) bool { - for i := 0; i < len(ids)-1; i++ { - if bytes.Compare(ids[i].Bytes(), ids[i+1].Bytes()) != -1 { - return false - } - } - return true -} - -// IsUniqueShortIDs returns true iff [ids] are unique -func IsUniqueShortIDs(ids []ShortID) bool { - set := ShortSet{} - set.Add(ids...) 
- return set.Len() == len(ids) +func (id ShortID) Less(other ShortID) bool { + return bytes.Compare(id[:], other[:]) == -1 } // ShortIDsToStrings converts an array of shortIDs to an array of their string diff --git a/avalanchego/ids/short_set.go b/avalanchego/ids/short_set.go deleted file mode 100644 index 4386e719..00000000 --- a/avalanchego/ids/short_set.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ids - -import "strings" - -const ( - minShortSetSize = 16 -) - -// ShortSet is a set of ShortIDs -type ShortSet map[ShortID]struct{} - -// Return a new ShortSet with initial capacity [size]. -// More or less than [size] elements can be added to this set. -// Using NewShortSet() rather than ids.ShortSet{} is just an optimization that can -// be used if you know how many elements will be put in this set. -func NewShortSet(size int) ShortSet { - if size < 0 { - return ShortSet{} - } - return make(map[ShortID]struct{}, size) -} - -func (ids *ShortSet) init(size int) { - if *ids == nil { - if minShortSetSize > size { - size = minShortSetSize - } - *ids = make(map[ShortID]struct{}, size) - } -} - -// Add all the ids to this set, if the id is already in the set, nothing happens -func (ids *ShortSet) Add(idList ...ShortID) { - ids.init(2 * len(idList)) - for _, id := range idList { - (*ids)[id] = struct{}{} - } -} - -// Union adds all the ids from the provided set to this set. -func (ids *ShortSet) Union(idSet ShortSet) { - ids.init(2 * idSet.Len()) - for id := range idSet { - (*ids)[id] = struct{}{} - } -} - -// Difference removes all the ids from the provided set to this set. 
-func (ids *ShortSet) Difference(idSet ShortSet) { - for id := range idSet { - delete(*ids, id) - } -} - -// Contains returns true if the set contains this id, false otherwise -func (ids *ShortSet) Contains(id ShortID) bool { - ids.init(1) - _, contains := (*ids)[id] - return contains -} - -// Len returns the number of ids in this set -func (ids ShortSet) Len() int { return len(ids) } - -// Remove all the id from this set, if the id isn't in the set, nothing happens -func (ids *ShortSet) Remove(idList ...ShortID) { - ids.init(1) - for _, id := range idList { - delete(*ids, id) - } -} - -// Clear empties this set -func (ids *ShortSet) Clear() { *ids = nil } - -// CappedList returns a list of length at most [size]. -// Size should be >= 0. If size < 0, returns nil. -func (ids ShortSet) CappedList(size int) []ShortID { - if size < 0 { - return nil - } - if l := ids.Len(); l < size { - size = l - } - i := 0 - idList := make([]ShortID, size) - for id := range ids { - if i >= size { - break - } - idList[i] = id - i++ - } - return idList -} - -// List converts this set into a list -func (ids ShortSet) List() []ShortID { - idList := make([]ShortID, len(ids)) - i := 0 - for id := range ids { - idList[i] = id - i++ - } - return idList -} - -// SortedList returns this set as a sorted list -func (ids ShortSet) SortedList() []ShortID { - lst := ids.List() - SortShortIDs(lst) - return lst -} - -// Equals returns true if the sets contain the same elements -func (ids ShortSet) Equals(oIDs ShortSet) bool { - if ids.Len() != oIDs.Len() { - return false - } - for key := range oIDs { - if _, contains := ids[key]; !contains { - return false - } - } - return true -} - -// String returns the string representation of a set -func (ids ShortSet) String() string { - sb := strings.Builder{} - sb.WriteString("{") - first := true - for id := range ids { - if !first { - sb.WriteString(", ") - } - first = false - sb.WriteString(id.String()) - } - sb.WriteString("}") - return sb.String() -} - -// 
Returns an element. If the set is empty, returns false -func (ids *ShortSet) Peek() (ShortID, bool) { - for id := range *ids { - return id, true - } - return ShortID{}, false -} - -// Removes and returns an element. If the set is empty, does nothing and returns -// false -func (ids *ShortSet) Pop() (ShortID, bool) { - for id := range *ids { - delete(*ids, id) - return id, true - } - return ShortID{}, false -} diff --git a/avalanchego/ids/short_set_test.go b/avalanchego/ids/short_set_test.go deleted file mode 100644 index beb4ee8e..00000000 --- a/avalanchego/ids/short_set_test.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ids - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestShortSetContains(t *testing.T) { - set := ShortSet{} - - id0 := ShortID{0} - id1 := ShortID{1} - - switch { - case set.Contains(id0): - t.Fatalf("Sets shouldn't contain %s", id0) - case set.Contains(id1): - t.Fatalf("Sets shouldn't contain %s", id1) - } - - set.Add(id0) - - switch { - case !set.Contains(id0): - t.Fatalf("Set should contain %s", id0) - case set.Contains(id1): - t.Fatalf("Set shouldn't contain %s", id1) - } - - set.Add(id1) - - switch { - case !set.Contains(id0): - t.Fatalf("Set should contain %s", id0) - case !set.Contains(id1): - t.Fatalf("Set should contain %s", id1) - } - - set.Remove(id0) - - switch { - case set.Contains(id0): - t.Fatalf("Sets shouldn't contain %s", id0) - case !set.Contains(id1): - t.Fatalf("Set should contain %s", id1) - } - - set.Add(id0) - - switch { - case !set.Contains(id0): - t.Fatalf("Set should contain %s", id0) - case !set.Contains(id1): - t.Fatalf("Set should contain %s", id1) - } -} - -func TestShortSetUnion(t *testing.T) { - set := ShortSet{} - unionSet := ShortSet{} - - id0 := ShortID{0} - id1 := ShortID{1} - - unionSet.Add(id0) - set.Union(unionSet) - - switch { - case !set.Contains(id0): - 
t.Fatalf("Set should contain %s", id0) - case set.Contains(id1): - t.Fatalf("Set shouldn't contain %s", id1) - } - - unionSet.Add(id1) - set.Union(unionSet) - - switch { - case !set.Contains(id0): - t.Fatalf("Set should contain %s", id0) - case !set.Contains(id1): - t.Fatalf("Set should contain %s", id1) - } - - set.Remove(id0) - - switch { - case set.Contains(id0): - t.Fatalf("Sets shouldn't contain %s", id0) - case !set.Contains(id1): - t.Fatalf("Set should contain %s", id1) - } - - set.Clear() - set.Union(unionSet) - - switch { - case !set.Contains(id0): - t.Fatalf("Set should contain %s", id0) - case !set.Contains(id1): - t.Fatalf("Set should contain %s", id1) - } -} - -func TestShortSetDifference(t *testing.T) { - set := ShortSet{} - diffSet := ShortSet{} - - id0 := ShortID{0} - id1 := ShortID{1} - - diffSet.Add(id0) - set.Add(id0, id1) - - switch { - case !set.Contains(id0): - t.Fatalf("Set should contain %s", id0) - case !set.Contains(id1): - t.Fatalf("Set should contain %s", id1) - } - - set.Difference(diffSet) - - switch { - case set.Contains(id0): - t.Fatalf("Set shouldn't contain %s", id0) - case !set.Contains(id1): - t.Fatalf("Set should contain %s", id1) - } -} - -func TestShortSetEquals(t *testing.T) { - set := ShortSet{} - otherSet := ShortSet{} - if !set.Equals(otherSet) { - t.Fatal("Empty sets should be equal") - } - if !otherSet.Equals(set) { - t.Fatal("Empty sets should be equal") - } - - set.Add(ShortID{1, 2, 3, 4, 5}) - if set.Equals(otherSet) { - t.Fatal("Sets should be unequal") - } - if otherSet.Equals(set) { - t.Fatal("Sets should be unequal") - } - - otherSet.Add(ShortID{1, 2, 3, 4, 5}) - if !set.Equals(otherSet) { - t.Fatal("sets should be equal") - } - if !otherSet.Equals(set) { - t.Fatal("sets should be equal") - } - - otherSet.Add(ShortID{6, 7, 8, 9, 10}) - if set.Equals(otherSet) { - t.Fatal("Sets should be unequal") - } - if otherSet.Equals(set) { - t.Fatal("Sets should be unequal") - } - - set.Add(ShortID{6, 7, 8, 9, 10}) - if 
!set.Equals(otherSet) { - t.Fatal("sets should be equal") - } - if !otherSet.Equals(set) { - t.Fatal("sets should be equal") - } - - otherSet.Add(ShortID{11, 12, 13, 14, 15}) - if set.Equals(otherSet) { - t.Fatal("Sets should be unequal") - } - if otherSet.Equals(set) { - t.Fatal("Sets should be unequal") - } - - set.Add(ShortID{11, 12, 13, 14, 16}) - if set.Equals(otherSet) { - t.Fatal("Sets should be unequal") - } - if otherSet.Equals(set) { - t.Fatal("Sets should be unequal") - } -} - -func TestShortSetList(t *testing.T) { - set := ShortSet{} - otherSet := ShortSet{} - - id0 := ShortID{0} - id1 := ShortID{1} - - set.Add(id0) - otherSet.Add(set.List()...) - - if !set.Equals(otherSet) { - t.Fatalf("Sets should be equal but are:\n%s\n%s", set, otherSet) - } - - set.Add(id1) - otherSet.Clear() - otherSet.Add(set.List()...) - - if !set.Equals(otherSet) { - t.Fatalf("Sets should be equal but are:\n%s\n%s", set, otherSet) - } -} - -func TestShortSetCappedList(t *testing.T) { - set := ShortSet{} - - id := ShortEmpty - - if list := set.CappedList(0); len(list) != 0 { - t.Fatalf("List should have been empty but was %v", list) - } - - set.Add(id) - - if list := set.CappedList(0); len(list) != 0 { - t.Fatalf("List should have been empty but was %v", list) - } else if list := set.CappedList(1); len(list) != 1 { - t.Fatalf("List should have had length %d but had %d", 1, len(list)) - } else if returnedID := list[0]; id != returnedID { - t.Fatalf("List should have been %s but was %s", id, returnedID) - } else if list := set.CappedList(2); len(list) != 1 { - t.Fatalf("List should have had length %d but had %d", 1, len(list)) - } else if returnedID := list[0]; id != returnedID { - t.Fatalf("List should have been %s but was %s", id, returnedID) - } -} - -func TestShortSetString(t *testing.T) { - set := ShortSet{} - - id0 := ShortID{0} - id1 := ShortID{1} - - if str := set.String(); str != "{}" { - t.Fatalf("Set should have been %s but was %s", "{}", str) - } - - set.Add(id0) - - 
if str := set.String(); str != "{111111111111111111116DBWJs}" { - t.Fatalf("Set should have been %s but was %s", "{111111111111111111116DBWJs}", str) - } - - set.Add(id1) - - if str := set.String(); !strings.Contains(str, "111111111111111111116DBWJs") { - t.Fatalf("Set should have contained %s", "111111111111111111116DBWJs") - } else if !strings.Contains(str, "6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt") { - t.Fatalf("Set should have contained %s", "6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt") - } else if count := strings.Count(str, ","); count != 1 { - t.Fatalf("Should only have one %s in %s", ",", str) - } -} - -func TestShortSetPop(t *testing.T) { - var s ShortSet - _, ok := s.Pop() - require.False(t, ok) - - s = make(ShortSet) - _, ok = s.Pop() - require.False(t, ok) - - id1, id2 := GenerateTestShortID(), GenerateTestShortID() - s.Add(id1, id2) - - got, ok := s.Pop() - require.True(t, ok) - require.True(t, got == id1 || got == id2) - require.EqualValues(t, 1, s.Len()) - - got, ok = s.Pop() - require.True(t, ok) - require.True(t, got == id1 || got == id2) - require.EqualValues(t, 0, s.Len()) - - _, ok = s.Pop() - require.False(t, ok) -} - -func TestShortSortedList(t *testing.T) { - require := require.New(t) - - set := ShortSet{} - require.Len(set.SortedList(), 0) - - set.Add(ShortID{0}) - sorted := set.SortedList() - require.Len(sorted, 1) - require.Equal(ShortID{0}, sorted[0]) - - set.Add(ShortID{1}) - sorted = set.SortedList() - require.Len(sorted, 2) - require.Equal(ShortID{0}, sorted[0]) - require.Equal(ShortID{1}, sorted[1]) - - set.Add(ShortID{2}) - sorted = set.SortedList() - require.Len(sorted, 3) - require.Equal(ShortID{0}, sorted[0]) - require.Equal(ShortID{1}, sorted[1]) - require.Equal(ShortID{2}, sorted[2]) -} diff --git a/avalanchego/ids/short_test.go b/avalanchego/ids/short_test.go deleted file mode 100644 index f0c90f56..00000000 --- a/avalanchego/ids/short_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package ids - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestShortString(t *testing.T) { - id := ShortID{1} - - xPrefixedID := id.PrefixedString("X-") - pPrefixedID := id.PrefixedString("P-") - - newID, err := ShortFromPrefixedString(xPrefixedID, "X-") - if err != nil { - t.Fatal(err) - } - if newID != id { - t.Fatalf("ShortFromPrefixedString did not produce the identical ID") - } - - _, err = ShortFromPrefixedString(pPrefixedID, "X-") - if err == nil { - t.Fatal("Using the incorrect prefix did not cause an error") - } - - tooLongPrefix := "hasnfaurnourieurn3eiur3nriu3nri34iurni34unr3iunrasfounaeouern3ur" - _, err = ShortFromPrefixedString(xPrefixedID, tooLongPrefix) - if err == nil { - t.Fatal("Using the incorrect prefix did not cause an error") - } -} - -func TestIsUniqueShortIDs(t *testing.T) { - ids := []ShortID{} - if IsUniqueShortIDs(ids) == false { - t.Fatal("should be unique") - } - id1 := GenerateTestShortID() - ids = append(ids, id1) - if IsUniqueShortIDs(ids) == false { - t.Fatal("should be unique") - } - ids = append(ids, GenerateTestShortID()) - if IsUniqueShortIDs(ids) == false { - t.Fatal("should be unique") - } - ids = append(ids, id1) - if IsUniqueShortIDs(ids) == true { - t.Fatal("should not be unique") - } -} - -func TestIsSortedAndUniqueShortIDs(t *testing.T) { - id0 := ShortID{0} - id1 := ShortID{1} - id2 := ShortID{2} - - tests := []struct { - arr []ShortID - isSorted bool - }{ - { - arr: nil, - isSorted: true, - }, - { - arr: []ShortID{}, - isSorted: true, - }, - { - arr: []ShortID{GenerateTestShortID()}, - isSorted: true, - }, - { - arr: []ShortID{id0, id0}, - isSorted: false, - }, - { - arr: []ShortID{id0, id1}, - isSorted: true, - }, - { - arr: []ShortID{id1, id0}, - isSorted: false, - }, - { - arr: []ShortID{id0, id1, id2}, - isSorted: true, - }, - { - arr: []ShortID{id0, id1, id2, id2}, - isSorted: false, - }, - { - arr: 
[]ShortID{id0, id1, id1}, - isSorted: false, - }, - { - arr: []ShortID{id0, id0, id1}, - isSorted: false, - }, - { - arr: []ShortID{id2, id1, id0}, - isSorted: false, - }, - { - arr: []ShortID{id2, id1, id2}, - isSorted: false, - }, - } - for _, test := range tests { - t.Run(fmt.Sprintf("%v", test.arr), func(t *testing.T) { - if test.isSorted { - if !IsSortedAndUniqueShortIDs(test.arr) { - t.Fatal("should have been marked as sorted and unique") - } - } else if IsSortedAndUniqueShortIDs(test.arr) { - t.Fatal("shouldn't have been marked as sorted and unique") - } - }) - } -} - -func TestShortIDMapMarshalling(t *testing.T) { - originalMap := map[ShortID]int{ - {'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}: 1, - {'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}: 2, - } - mapJSON, err := json.Marshal(originalMap) - if err != nil { - t.Fatal(err) - } - - var unmarshalledMap map[ShortID]int - err = json.Unmarshal(mapJSON, &unmarshalledMap) - if err != nil { - t.Fatal(err) - } - - if len(originalMap) != len(unmarshalledMap) { - t.Fatalf("wrong map lengths") - } - for originalID, num := range originalMap { - if unmarshalledMap[originalID] != num { - t.Fatalf("map was incorrectly Unmarshalled") - } - } -} - -func TestShortIDsToStrings(t *testing.T) { - shortIDs := []ShortID{{1}, {2}, {2}} - expected := []string{"6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt", "BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp", "BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp"} - shortStrings := ShortIDsToStrings(shortIDs) - require.EqualValues(t, expected, shortStrings) -} diff --git a/avalanchego/ids/slice.go b/avalanchego/ids/slice.go deleted file mode 100644 index 4013f72e..00000000 --- a/avalanchego/ids/slice.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package ids - -// Equals returns true if the arrays are equal -func Equals(a, b []ID) bool { - if len(a) != len(b) { - return false - } - - for i, aID := range a { - if aID != b[i] { - return false - } - } - return true -} - -// UnsortedEquals returns true if the have the same number of each ID -func UnsortedEquals(a, b []ID) bool { - if len(a) != len(b) { - return false - } - - aBag := Bag{} - aBag.Add(a...) - - bBag := Bag{} - bBag.Add(b...) - - return aBag.Equals(bBag) -} diff --git a/avalanchego/ids/test_aliases.go b/avalanchego/ids/test_aliases.go index b9f8210e..06a7fe3a 100644 --- a/avalanchego/ids/test_aliases.go +++ b/avalanchego/ids/test_aliases.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids @@ -15,7 +15,7 @@ var AliasTests = []func(require *require.Assertions, r AliaserReader, w AliaserW AliaserRemoveAliasTest, } -func AliaserLookupErrorTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { +func AliaserLookupErrorTest(require *require.Assertions, r AliaserReader, _ AliaserWriter) { _, err := r.Lookup("Batman") require.Error(err, "expected an error due to missing alias") } @@ -30,7 +30,7 @@ func AliaserLookupTest(require *require.Assertions, r AliaserReader, w AliaserWr require.Equal(id, res) } -func AliaserAliasesEmptyTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { +func AliaserAliasesEmptyTest(require *require.Assertions, r AliaserReader, _ AliaserWriter) { id := ID{'J', 'a', 'm', 'e', 's', ' ', 'G', 'o', 'r', 'd', 'o', 'n'} aliases, err := r.Aliases(id) @@ -71,7 +71,7 @@ func AliaserPrimaryAliasTest(require *require.Assertions, r AliaserReader, w Ali require.Equal(expected, res) } -func AliaserAliasClashTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { +func AliaserAliasClashTest(require *require.Assertions, _ AliaserReader, w 
AliaserWriter) { id1 := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} id2 := ID{'D', 'i', 'c', 'k', ' ', 'G', 'r', 'a', 'y', 's', 'o', 'n'} err := w.Alias(id1, "Batman") diff --git a/avalanchego/ids/test_generator.go b/avalanchego/ids/test_generator.go index 06ae5d33..2c1344af 100644 --- a/avalanchego/ids/test_generator.go +++ b/avalanchego/ids/test_generator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ids diff --git a/avalanchego/ids/unique_bag.go b/avalanchego/ids/unique_bag.go deleted file mode 100644 index d0a95443..00000000 --- a/avalanchego/ids/unique_bag.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ids - -import ( - "fmt" - "strings" -) - -const ( - minUniqueBagSize = 16 -) - -type UniqueBag map[ID]BitSet64 - -func (b *UniqueBag) init() { - if *b == nil { - *b = make(map[ID]BitSet64, minUniqueBagSize) - } -} - -func (b *UniqueBag) Add(setID uint, idSet ...ID) { - bs := BitSet64(0) - bs.Add(setID) - - for _, id := range idSet { - b.UnionSet(id, bs) - } -} - -func (b *UniqueBag) UnionSet(id ID, set BitSet64) { - b.init() - - previousSet := (*b)[id] - previousSet.Union(set) - (*b)[id] = previousSet -} - -func (b *UniqueBag) DifferenceSet(id ID, set BitSet64) { - b.init() - - previousSet := (*b)[id] - previousSet.Difference(set) - (*b)[id] = previousSet -} - -func (b *UniqueBag) Difference(diff *UniqueBag) { - b.init() - - for id, previousSet := range *b { - if previousSetDiff, exists := (*diff)[id]; exists { - previousSet.Difference(previousSetDiff) - } - (*b)[id] = previousSet - } -} - -func (b *UniqueBag) GetSet(id ID) BitSet64 { return (*b)[id] } - -func (b *UniqueBag) RemoveSet(id ID) { delete(*b, id) } - -func (b *UniqueBag) List() []ID { - idList := make([]ID, len(*b)) - i := 0 
- for id := range *b { - idList[i] = id - i++ - } - return idList -} - -func (b *UniqueBag) Bag(alpha int) Bag { - bag := Bag{ - counts: make(map[ID]int, len(*b)), - } - bag.SetThreshold(alpha) - for id, bs := range *b { - bag.AddCount(id, bs.Len()) - } - return bag -} - -func (b *UniqueBag) PrefixedString(prefix string) string { - sb := strings.Builder{} - - sb.WriteString(fmt.Sprintf("UniqueBag: (Size = %d)", len(*b))) - for id, set := range *b { - sb.WriteString(fmt.Sprintf("\n%s ID[%s]: Members = %s", prefix, id, set)) - } - - return sb.String() -} - -func (b *UniqueBag) String() string { return b.PrefixedString("") } - -func (b *UniqueBag) Clear() { - for id := range *b { - delete(*b, id) - } -} diff --git a/avalanchego/ids/unique_bag_test.go b/avalanchego/ids/unique_bag_test.go deleted file mode 100644 index 13221dcc..00000000 --- a/avalanchego/ids/unique_bag_test.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package ids - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestUniqueBag(t *testing.T) { - var ub1 UniqueBag - - ub1.init() - - if ub1 == nil { - t.Fatalf("Unique Bag still nil after initialized") - } else if len(ub1.List()) != 0 { - t.Fatalf("Unique Bag should be empty") - } - - id1 := Empty.Prefix(1) - id2 := Empty.Prefix(2) - - ub2 := make(UniqueBag) - ub2.Add(1, id1, id2) - - if !ub2.GetSet(id1).Contains(1) { - t.Fatalf("Set missing element") - } else if !ub2.GetSet(id2).Contains(1) { - t.Fatalf("Set missing element") - } - - var bs1 BitSet64 - bs1.Add(2) - bs1.Add(4) - - ub3 := make(UniqueBag) - - ub3.UnionSet(id1, bs1) - - bs1.Clear() - bs1 = ub3.GetSet(id1) - switch { - case bs1.Len() != 2: - t.Fatalf("Incorrect length of set") - case !bs1.Contains(2): - t.Fatalf("Set missing element") - case !bs1.Contains(4): - t.Fatalf("Set missing element") - } - - // Difference test - bs1.Clear() - - ub4 := make(UniqueBag) - ub4.Add(1, id1) - ub4.Add(2, id1) - ub4.Add(5, id2) - ub4.Add(8, id2) - - ub5 := make(UniqueBag) - ub5.Add(5, id2) - ub5.Add(5, id1) - - ub4.Difference(&ub5) - - if len(ub5.List()) != 2 { - t.Fatalf("Incorrect number of ids in Unique Bag") - } - - ub4id1 := ub4.GetSet(id1) - switch { - case ub4id1.Len() != 2: - t.Fatalf("Set of Unique Bag has incorrect length") - case !ub4id1.Contains(1): - t.Fatalf("Set of Unique Bag missing element") - case !ub4id1.Contains(2): - t.Fatalf("Set of Unique Bag missing element") - } - - ub4id2 := ub4.GetSet(id2) - if ub4id2.Len() != 1 { - t.Fatalf("Set of Unique Bag has incorrect length") - } else if !ub4id2.Contains(8) { - t.Fatalf("Set of Unique Bag missing element") - } - - // DifferenceSet test - - ub6 := make(UniqueBag) - ub6.Add(1, id1) - ub6.Add(2, id1) - ub6.Add(7, id1) - - diffBitSet := BitSet64(0) - diffBitSet.Add(1) - diffBitSet.Add(7) - - ub6.DifferenceSet(id1, diffBitSet) - - ub6id1 := ub6.GetSet(id1) - - if ub6id1.Len() != 1 { - t.Fatalf("Set of Unique Bag missing 
element") - } else if !ub6id1.Contains(2) { - t.Fatalf("Set of Unique Bag missing element") - } -} - -func TestUniqueBagClear(t *testing.T) { - b := UniqueBag{} - id1, id2 := GenerateTestID(), GenerateTestID() - b.Add(0, id1) - b.Add(1, id1, id2) - - b.Clear() - require.Len(t, b.List(), 0) - - bs := b.GetSet(id1) - require.EqualValues(t, 0, bs.Len()) - - bs = b.GetSet(id2) - require.EqualValues(t, 0, bs.Len()) -} diff --git a/avalanchego/indexer/client.go b/avalanchego/indexer/client.go index 14fb84eb..785018e3 100644 --- a/avalanchego/indexer/client.go +++ b/avalanchego/indexer/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ Client = &client{} +var _ Client = (*client)(nil) // Client interface for Avalanche Indexer API Endpoint type Client interface { @@ -24,14 +24,14 @@ type Client interface { GetContainerRange(ctx context.Context, startIndex uint64, numToFetch int, options ...rpc.Option) ([]Container, error) // Get a container by its index GetContainerByIndex(ctx context.Context, index uint64, options ...rpc.Option) (Container, error) - // Get the most recently accepted container - GetLastAccepted(context.Context, ...rpc.Option) (Container, error) + // Get the most recently accepted container and its index + GetLastAccepted(context.Context, ...rpc.Option) (Container, uint64, error) // Returns 1 less than the number of containers accepted on this chain GetIndex(ctx context.Context, containerID ids.ID, options ...rpc.Option) (uint64, error) // Returns true if the given container is accepted IsAccepted(ctx context.Context, containerID ids.ID, options ...rpc.Option) (bool, error) - // Get a container by its index - GetContainerByID(ctx context.Context, containerID ids.ID, options ...rpc.Option) (Container, error) + // Get 
a container and its index by its ID + GetContainerByID(ctx context.Context, containerID ids.ID, options ...rpc.Option) (Container, uint64, error) } // Client implementation for Avalanche Indexer API Endpoint @@ -47,13 +47,13 @@ type client struct { // - http://1.2.3.4:9650/ext/index/X/tx func NewClient(uri string) Client { return &client{ - requester: rpc.NewEndpointRequester(uri, "index"), + requester: rpc.NewEndpointRequester(uri), } } func (c *client) GetContainerRange(ctx context.Context, startIndex uint64, numToFetch int, options ...rpc.Option) ([]Container, error) { var fcs GetContainerRangeResponse - err := c.requester.SendRequest(ctx, "getContainerRange", &GetContainerRangeArgs{ + err := c.requester.SendRequest(ctx, "index.getContainerRange", &GetContainerRangeArgs{ StartIndex: json.Uint64(startIndex), NumToFetch: json.Uint64(numToFetch), Encoding: formatting.Hex, @@ -79,7 +79,7 @@ func (c *client) GetContainerRange(ctx context.Context, startIndex uint64, numTo func (c *client) GetContainerByIndex(ctx context.Context, index uint64, options ...rpc.Option) (Container, error) { var fc FormattedContainer - err := c.requester.SendRequest(ctx, "getContainerByIndex", &GetContainerByIndexArgs{ + err := c.requester.SendRequest(ctx, "index.getContainerByIndex", &GetContainerByIndexArgs{ Index: json.Uint64(index), Encoding: formatting.Hex, }, &fc, options...) @@ -98,29 +98,29 @@ func (c *client) GetContainerByIndex(ctx context.Context, index uint64, options }, nil } -func (c *client) GetLastAccepted(ctx context.Context, options ...rpc.Option) (Container, error) { +func (c *client) GetLastAccepted(ctx context.Context, options ...rpc.Option) (Container, uint64, error) { var fc FormattedContainer - err := c.requester.SendRequest(ctx, "getLastAccepted", &GetLastAcceptedArgs{ + err := c.requester.SendRequest(ctx, "index.getLastAccepted", &GetLastAcceptedArgs{ Encoding: formatting.Hex, }, &fc, options...) 
if err != nil { - return Container{}, nil + return Container{}, 0, err } containerBytes, err := formatting.Decode(fc.Encoding, fc.Bytes) if err != nil { - return Container{}, fmt.Errorf("couldn't decode container %s: %w", fc.ID, err) + return Container{}, 0, fmt.Errorf("couldn't decode container %s: %w", fc.ID, err) } return Container{ ID: fc.ID, Timestamp: fc.Timestamp.Unix(), Bytes: containerBytes, - }, nil + }, uint64(fc.Index), nil } func (c *client) GetIndex(ctx context.Context, id ids.ID, options ...rpc.Option) (uint64, error) { var index GetIndexResponse - err := c.requester.SendRequest(ctx, "getIndex", &GetIndexArgs{ + err := c.requester.SendRequest(ctx, "index.getIndex", &GetIndexArgs{ ID: id, }, &index, options...) return uint64(index.Index), err @@ -128,29 +128,29 @@ func (c *client) GetIndex(ctx context.Context, id ids.ID, options ...rpc.Option) func (c *client) IsAccepted(ctx context.Context, id ids.ID, options ...rpc.Option) (bool, error) { var res IsAcceptedResponse - err := c.requester.SendRequest(ctx, "isAccepted", &IsAcceptedArgs{ + err := c.requester.SendRequest(ctx, "index.isAccepted", &IsAcceptedArgs{ ID: id, }, &res, options...) return res.IsAccepted, err } -func (c *client) GetContainerByID(ctx context.Context, id ids.ID, options ...rpc.Option) (Container, error) { +func (c *client) GetContainerByID(ctx context.Context, id ids.ID, options ...rpc.Option) (Container, uint64, error) { var fc FormattedContainer - err := c.requester.SendRequest(ctx, "getContainerByID", &GetContainerByIDArgs{ + err := c.requester.SendRequest(ctx, "index.getContainerByID", &GetContainerByIDArgs{ ID: id, Encoding: formatting.Hex, }, &fc, options...) 
if err != nil { - return Container{}, err + return Container{}, 0, err } containerBytes, err := formatting.Decode(fc.Encoding, fc.Bytes) if err != nil { - return Container{}, fmt.Errorf("couldn't decode container %s: %w", fc.ID, err) + return Container{}, 0, fmt.Errorf("couldn't decode container %s: %w", fc.ID, err) } return Container{ ID: fc.ID, Timestamp: fc.Timestamp.Unix(), Bytes: containerBytes, - }, nil + }, uint64(fc.Index), nil } diff --git a/avalanchego/indexer/client_test.go b/avalanchego/indexer/client_test.go index 1e0c6e89..4fb34cf9 100644 --- a/avalanchego/indexer/client_test.go +++ b/avalanchego/indexer/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/rpc" ) @@ -21,7 +22,7 @@ type mockClient struct { onSendRequestF func(reply interface{}) error } -func (mc *mockClient) SendRequest(ctx context.Context, method string, _ interface{}, reply interface{}, options ...rpc.Option) error { +func (mc *mockClient) SendRequest(_ context.Context, method string, _ interface{}, reply interface{}, _ ...rpc.Option) error { mc.require.Equal(mc.expectedMethod, method) return mc.onSendRequestF(reply) } @@ -33,7 +34,7 @@ func TestIndexClient(t *testing.T) { // Test GetIndex client.requester = &mockClient{ require: require, - expectedMethod: "getIndex", + expectedMethod: "index.getIndex", onSendRequestF: func(reply interface{}) error { *(reply.(*GetIndexResponse)) = GetIndexResponse{Index: 5} return nil @@ -51,19 +52,21 @@ func TestIndexClient(t *testing.T) { require.NoError(err) client.requester = &mockClient{ require: require, - expectedMethod: "getLastAccepted", 
+ expectedMethod: "index.getLastAccepted", onSendRequestF: func(reply interface{}) error { *(reply.(*FormattedContainer)) = FormattedContainer{ ID: id, Bytes: bytesStr, + Index: json.Uint64(10), } return nil }, } - container, err := client.GetLastAccepted(context.Background()) + container, index, err := client.GetLastAccepted(context.Background()) require.NoError(err) require.EqualValues(id, container.ID) require.EqualValues(bytes, container.Bytes) + require.EqualValues(index, 10) } { // Test GetContainerRange @@ -73,7 +76,7 @@ func TestIndexClient(t *testing.T) { require.NoError(err) client.requester = &mockClient{ require: require, - expectedMethod: "getContainerRange", + expectedMethod: "index.getContainerRange", onSendRequestF: func(reply interface{}) error { *(reply.(*GetContainerRangeResponse)) = GetContainerRangeResponse{Containers: []FormattedContainer{{ ID: id, @@ -92,7 +95,7 @@ func TestIndexClient(t *testing.T) { // Test IsAccepted client.requester = &mockClient{ require: require, - expectedMethod: "isAccepted", + expectedMethod: "index.isAccepted", onSendRequestF: func(reply interface{}) error { *(reply.(*IsAcceptedResponse)) = IsAcceptedResponse{IsAccepted: true} return nil @@ -110,18 +113,20 @@ func TestIndexClient(t *testing.T) { require.NoError(err) client.requester = &mockClient{ require: require, - expectedMethod: "getContainerByID", + expectedMethod: "index.getContainerByID", onSendRequestF: func(reply interface{}) error { *(reply.(*FormattedContainer)) = FormattedContainer{ ID: id, Bytes: bytesStr, + Index: json.Uint64(10), } return nil }, } - container, err := client.GetContainerByID(context.Background(), id) + container, index, err := client.GetContainerByID(context.Background(), id) require.NoError(err) require.EqualValues(id, container.ID) require.EqualValues(bytes, container.Bytes) + require.EqualValues(index, 10) } } diff --git a/avalanchego/indexer/container.go b/avalanchego/indexer/container.go index f4c03f87..c640fdd9 100644 --- 
a/avalanchego/indexer/container.go +++ b/avalanchego/indexer/container.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer diff --git a/avalanchego/indexer/examples/p-chain/main.go b/avalanchego/indexer/examples/p-chain/main.go new file mode 100644 index 00000000..257591f4 --- /dev/null +++ b/avalanchego/indexer/examples/p-chain/main.go @@ -0,0 +1,55 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/ava-labs/avalanchego/indexer" + "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/proposervm/block" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +// This example program continuously polls for the next P-Chain block +// and prints the ID of the block and its transactions. 
+func main() { + var ( + uri = fmt.Sprintf("%s/ext/index/P/block", primary.LocalAPIURI) + client = indexer.NewClient(uri) + ctx = context.Background() + nextIndex uint64 + ) + for { + container, err := client.GetContainerByIndex(ctx, nextIndex) + if err != nil { + time.Sleep(time.Second) + log.Printf("polling for next accepted block\n") + continue + } + + platformvmBlockBytes := container.Bytes + proposerVMBlock, err := block.Parse(container.Bytes) + if err == nil { + platformvmBlockBytes = proposerVMBlock.Block() + } + + platformvmBlock, err := blocks.Parse(blocks.Codec, platformvmBlockBytes) + if err != nil { + log.Fatalf("failed to parse platformvm block: %s\n", err) + } + + acceptedTxs := platformvmBlock.Txs() + log.Printf("accepted block %s with %d transactions\n", platformvmBlock.ID(), len(acceptedTxs)) + + for _, tx := range acceptedTxs { + log.Printf("accepted transaction %s\n", tx.ID()) + } + + nextIndex++ + } +} diff --git a/avalanchego/indexer/examples/x-chain-blocks/main.go b/avalanchego/indexer/examples/x-chain-blocks/main.go new file mode 100644 index 00000000..a995f961 --- /dev/null +++ b/avalanchego/indexer/examples/x-chain-blocks/main.go @@ -0,0 +1,55 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/ava-labs/avalanchego/indexer" + "github.com/ava-labs/avalanchego/vms/proposervm/block" + "github.com/ava-labs/avalanchego/wallet/chain/x" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +// This example program continuously polls for the next X-Chain block +// and prints the ID of the block and its transactions. 
+func main() { + var ( + uri = fmt.Sprintf("%s/ext/index/X/block", primary.LocalAPIURI) + client = indexer.NewClient(uri) + ctx = context.Background() + nextIndex uint64 + ) + for { + container, err := client.GetContainerByIndex(ctx, nextIndex) + if err != nil { + time.Sleep(time.Second) + log.Printf("polling for next accepted block\n") + continue + } + + proposerVMBlock, err := block.Parse(container.Bytes) + if err != nil { + log.Fatalf("failed to parse proposervm block: %s\n", err) + } + + avmBlockBytes := proposerVMBlock.Block() + avmBlock, err := x.Parser.ParseBlock(avmBlockBytes) + if err != nil { + log.Fatalf("failed to parse avm block: %s\n", err) + } + + acceptedTxs := avmBlock.Txs() + log.Printf("accepted block %s with %d transactions\n", avmBlock.ID(), len(acceptedTxs)) + + for _, tx := range acceptedTxs { + log.Printf("accepted transaction %s\n", tx.ID()) + } + + nextIndex++ + } +} diff --git a/avalanchego/indexer/index.go b/avalanchego/indexer/index.go index a51246c0..07de46ee 100644 --- a/avalanchego/indexer/index.go +++ b/avalanchego/indexer/index.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package indexer @@ -37,7 +37,7 @@ var ( errNoneAccepted = errors.New("no containers have been accepted") errNumToFetchZero = fmt.Errorf("numToFetch must be in [1,%d]", MaxFetchedByRange) - _ Index = &index{} + _ Index = (*index)(nil) ) // Index indexes containers in their order of acceptance @@ -212,7 +212,7 @@ func (i *index) getContainerByIndexBytes(indexBytes []byte) (Container, error) { return Container{}, fmt.Errorf("couldn't read from database: %w", err) } var container Container - if _, err = i.codec.Unmarshal(containerBytes, &container); err != nil { + if _, err := i.codec.Unmarshal(containerBytes, &container); err != nil { return Container{}, fmt.Errorf("couldn't unmarshal container: %w", err) } return container, nil @@ -241,7 +241,7 @@ func (i *index) GetContainerRange(startIndex, numToFetch uint64) ([]Container, e } // Calculate the last index we will fetch - lastIndex := math.Min64(startIndex+numToFetch-1, lastAcceptedIndex) + lastIndex := math.Min(startIndex+numToFetch-1, lastAcceptedIndex) // [lastIndex] is always >= [startIndex] so this is safe. // [numToFetch] is limited to [MaxFetchedByRange] so [containers] is bounded in size. containers := make([]Container, int(lastIndex)-int(startIndex)+1) @@ -293,9 +293,10 @@ func (i *index) GetLastAccepted() (Container, error) { // Assumes i.lock is held // Returns: -// 1) The index of the most recently accepted transaction, -// or 0 if no transactions have been accepted -// 2) Whether at least 1 transaction has been accepted +// +// 1. The index of the most recently accepted transaction, or 0 if no +// transactions have been accepted +// 2. 
Whether at least 1 transaction has been accepted func (i *index) lastAcceptedIndex() (uint64, bool) { return i.nextAcceptedIndex - 1, i.nextAcceptedIndex != 0 } diff --git a/avalanchego/indexer/index_test.go b/avalanchego/indexer/index_test.go index 22e54465..79ef4a5b 100644 --- a/avalanchego/indexer/index_test.go +++ b/avalanchego/indexer/index_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) @@ -99,7 +100,7 @@ func TestIndex(t *testing.T) { // Ensure that the data is correct lastTimestamp := int64(0) - sawContainers := ids.Set{} + sawContainers := set.Set[ids.ID]{} for _, container := range containersList { require.False(sawContainers.Contains(container.ID)) // Should only see this container once require.Contains(containers, container.ID) diff --git a/avalanchego/indexer/indexer.go b/avalanchego/indexer/indexer.go index ad15e5b1..8936e57f 100644 --- a/avalanchego/indexer/indexer.go +++ b/avalanchego/indexer/indexer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package indexer @@ -21,9 +21,9 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/avalanche" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/snow/engine/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/json" @@ -52,19 +52,20 @@ var ( previouslyIndexedPrefix = byte(0x05) hasRunKey = []byte{0x07} - _ Indexer = &indexer{} + _ Indexer = (*indexer)(nil) ) // Config for an indexer type Config struct { - DB database.Database - Log logging.Logger - IndexingEnabled bool - AllowIncompleteIndex bool - DecisionAcceptorGroup snow.AcceptorGroup - ConsensusAcceptorGroup snow.AcceptorGroup - APIServer server.PathAdder - ShutdownF func() + DB database.Database + Log logging.Logger + IndexingEnabled bool + AllowIncompleteIndex bool + BlockAcceptorGroup snow.AcceptorGroup + TxAcceptorGroup snow.AcceptorGroup + VertexAcceptorGroup snow.AcceptorGroup + APIServer server.PathAdder + ShutdownF func() } // Indexer causes accepted containers for a given chain @@ -80,18 +81,19 @@ type Indexer interface { // NewIndexer returns a new Indexer and registers a new endpoint on the given API server. 
func NewIndexer(config Config) (Indexer, error) { indexer := &indexer{ - codec: codec.NewManager(codecMaxSize), - log: config.Log, - db: config.DB, - allowIncompleteIndex: config.AllowIncompleteIndex, - indexingEnabled: config.IndexingEnabled, - decisionAcceptorGroup: config.DecisionAcceptorGroup, - consensusAcceptorGroup: config.ConsensusAcceptorGroup, - txIndices: map[ids.ID]Index{}, - vtxIndices: map[ids.ID]Index{}, - blockIndices: map[ids.ID]Index{}, - pathAdder: config.APIServer, - shutdownF: config.ShutdownF, + codec: codec.NewManager(codecMaxSize), + log: config.Log, + db: config.DB, + allowIncompleteIndex: config.AllowIncompleteIndex, + indexingEnabled: config.IndexingEnabled, + blockAcceptorGroup: config.BlockAcceptorGroup, + txAcceptorGroup: config.TxAcceptorGroup, + vertexAcceptorGroup: config.VertexAcceptorGroup, + txIndices: map[ids.ID]Index{}, + vtxIndices: map[ids.ID]Index{}, + blockIndices: map[ids.ID]Index{}, + pathAdder: config.APIServer, + shutdownF: config.ShutdownF, } if err := indexer.codec.RegisterCodec( @@ -139,28 +141,29 @@ type indexer struct { // Chain ID --> index of txs of that chain (if applicable) txIndices map[ids.ID]Index + // Notifies of newly accepted blocks + blockAcceptorGroup snow.AcceptorGroup // Notifies of newly accepted transactions - decisionAcceptorGroup snow.AcceptorGroup - // Notifies of newly accepted blocks and vertices - consensusAcceptorGroup snow.AcceptorGroup + txAcceptorGroup snow.AcceptorGroup + // Notifies of newly accepted vertices + vertexAcceptorGroup snow.AcceptorGroup } -// Assumes [engine]'s context lock is not held -func (i *indexer) RegisterChain(name string, engine common.Engine) { +// Assumes [ctx.Lock] is not held +func (i *indexer) RegisterChain(chainName string, ctx *snow.ConsensusContext, vm common.VM) { i.lock.Lock() defer i.lock.Unlock() - ctx := engine.Context() if i.closed { i.log.Debug("not registering chain to indexer", zap.String("reason", "indexer is closed"), - zap.String("chainName", 
name), + zap.String("chainName", chainName), ) return } else if ctx.SubnetID != constants.PrimaryNetworkID { i.log.Debug("not registering chain to indexer", zap.String("reason", "not in the primary network"), - zap.String("chainName", name), + zap.String("chainName", chainName), ) return } @@ -177,7 +180,7 @@ func (i *indexer) RegisterChain(name string, engine common.Engine) { isIncomplete, err := i.isIncomplete(chainID) if err != nil { i.log.Error("couldn't get whether chain is incomplete", - zap.String("chainName", name), + zap.String("chainName", chainName), zap.Error(err), ) if err := i.close(); err != nil { @@ -192,7 +195,7 @@ func (i *indexer) RegisterChain(name string, engine common.Engine) { previouslyIndexed, err := i.previouslyIndexed(chainID) if err != nil { i.log.Error("couldn't get whether chain was previously indexed", - zap.String("chainName", name), + zap.String("chainName", chainName), zap.Error(err), ) if err := i.close(); err != nil { @@ -208,7 +211,7 @@ func (i *indexer) RegisterChain(name string, engine common.Engine) { // We indexed this chain in a previous run but not in this run. // This would create an incomplete index, which is not allowed, so exit. i.log.Fatal("running would cause index to become incomplete but incomplete indices are disabled", - zap.String("chainName", name), + zap.String("chainName", chainName), ) if err := i.close(); err != nil { i.log.Error("failed to close indexer", @@ -224,7 +227,7 @@ func (i *indexer) RegisterChain(name string, engine common.Engine) { return } i.log.Fatal("couldn't mark chain as incomplete", - zap.String("chainName", name), + zap.String("chainName", chainName), zap.Error(err), ) if err := i.close(); err != nil { @@ -237,7 +240,7 @@ func (i *indexer) RegisterChain(name string, engine common.Engine) { if !i.allowIncompleteIndex && isIncomplete && (previouslyIndexed || i.hasRunBefore) { i.log.Fatal("index is incomplete but incomplete indices are disabled. 
Shutting down", - zap.String("chainName", name), + zap.String("chainName", chainName), ) if err := i.close(); err != nil { i.log.Error("failed to close indexer", @@ -250,7 +253,7 @@ func (i *indexer) RegisterChain(name string, engine common.Engine) { // Mark that in this run, this chain was indexed if err := i.markPreviouslyIndexed(chainID); err != nil { i.log.Error("couldn't mark chain as indexed", - zap.String("chainName", name), + zap.String("chainName", chainName), zap.Error(err), ) if err := i.close(); err != nil { @@ -261,27 +264,29 @@ func (i *indexer) RegisterChain(name string, engine common.Engine) { return } - switch engine.(type) { - case snowman.Engine: - index, err := i.registerChainHelper(chainID, blockPrefix, name, "block", i.consensusAcceptorGroup) - if err != nil { - i.log.Fatal("failed to create block index", - zap.String("chainName", name), + index, err := i.registerChainHelper(chainID, blockPrefix, chainName, "block", i.blockAcceptorGroup) + if err != nil { + i.log.Fatal("failed to create index", + zap.String("chainName", chainName), + zap.String("endpoint", "block"), + zap.Error(err), + ) + if err := i.close(); err != nil { + i.log.Error("failed to close indexer", zap.Error(err), ) - if err := i.close(); err != nil { - i.log.Error("failed to close indexer", - zap.Error(err), - ) - } - return } - i.blockIndices[chainID] = index - case avalanche.Engine: - vtxIndex, err := i.registerChainHelper(chainID, vtxPrefix, name, "vtx", i.consensusAcceptorGroup) + return + } + i.blockIndices[chainID] = index + + switch vm.(type) { + case vertex.DAGVM: + vtxIndex, err := i.registerChainHelper(chainID, vtxPrefix, chainName, "vtx", i.vertexAcceptorGroup) if err != nil { - i.log.Fatal("couldn't create vertex index", - zap.String("chainName", name), + i.log.Fatal("couldn't create index", + zap.String("chainName", chainName), + zap.String("endpoint", "vtx"), zap.Error(err), ) if err := i.close(); err != nil { @@ -293,31 +298,32 @@ func (i *indexer) 
RegisterChain(name string, engine common.Engine) { } i.vtxIndices[chainID] = vtxIndex - txIndex, err := i.registerChainHelper(chainID, txPrefix, name, "tx", i.decisionAcceptorGroup) + txIndex, err := i.registerChainHelper(chainID, txPrefix, chainName, "tx", i.txAcceptorGroup) if err != nil { - i.log.Fatal("couldn't create tx index for", - zap.String("chainName", name), + i.log.Fatal("couldn't create index", + zap.String("chainName", chainName), + zap.String("endpoint", "tx"), zap.Error(err), ) if err := i.close(); err != nil { - i.log.Error("failed to close indexer:", + i.log.Error("failed to close indexer", zap.Error(err), ) } return } i.txIndices[chainID] = txIndex + case block.ChainVM: default: - engineType := fmt.Sprintf("%T", engine) - i.log.Error("got unexpected engine type", - zap.String("engineType", engineType), + vmType := fmt.Sprintf("%T", vm) + i.log.Error("got unexpected vm type", + zap.String("vmType", vmType), ) if err := i.close(); err != nil { i.log.Error("failed to close indexer", zap.Error(err), ) } - return } } @@ -381,19 +387,19 @@ func (i *indexer) close() error { for chainID, txIndex := range i.txIndices { errs.Add( txIndex.Close(), - i.decisionAcceptorGroup.DeregisterAcceptor(chainID, fmt.Sprintf("%s%s", indexNamePrefix, chainID)), + i.txAcceptorGroup.DeregisterAcceptor(chainID, fmt.Sprintf("%s%s", indexNamePrefix, chainID)), ) } for chainID, vtxIndex := range i.vtxIndices { errs.Add( vtxIndex.Close(), - i.consensusAcceptorGroup.DeregisterAcceptor(chainID, fmt.Sprintf("%s%s", indexNamePrefix, chainID)), + i.vertexAcceptorGroup.DeregisterAcceptor(chainID, fmt.Sprintf("%s%s", indexNamePrefix, chainID)), ) } for chainID, blockIndex := range i.blockIndices { errs.Add( blockIndex.Close(), - i.consensusAcceptorGroup.DeregisterAcceptor(chainID, fmt.Sprintf("%s%s", indexNamePrefix, chainID)), + i.blockAcceptorGroup.DeregisterAcceptor(chainID, fmt.Sprintf("%s%s", indexNamePrefix, chainID)), ) } errs.Add(i.db.Close()) diff --git 
a/avalanchego/indexer/indexer_test.go b/avalanchego/indexer/indexer_test.go index fa67985d..79af29fb 100644 --- a/avalanchego/indexer/indexer_test.go +++ b/avalanchego/indexer/indexer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -19,19 +19,19 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/logging" - - avengmocks "github.com/ava-labs/avalanchego/snow/engine/avalanche/mocks" - avvtxmocks "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex/mocks" - smblockmocks "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" - smengmocks "github.com/ava-labs/avalanchego/snow/engine/snowman/mocks" ) -var _ server.PathAdder = &apiServerMock{} +var ( + _ server.PathAdder = (*apiServerMock)(nil) + + errUnimplemented = errors.New("unimplemented") +) type apiServerMock struct { timesCalled int @@ -46,22 +46,23 @@ func (a *apiServerMock) AddRoute(_ *common.HTTPHandler, _ *sync.RWMutex, base, e return nil } -func (a *apiServerMock) AddAliases(string, ...string) error { - return errors.New("unimplemented") +func (*apiServerMock) AddAliases(string, ...string) error { + return errUnimplemented } // Test that newIndexer sets fields correctly func TestNewIndexer(t *testing.T) { require := require.New(t) config := Config{ - IndexingEnabled: true, - AllowIncompleteIndex: true, - Log: logging.NoLog{}, - DB: memdb.New(), - 
DecisionAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), - ConsensusAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), - APIServer: &apiServerMock{}, - ShutdownF: func() {}, + IndexingEnabled: true, + AllowIncompleteIndex: true, + Log: logging.NoLog{}, + DB: memdb.New(), + BlockAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + TxAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + VertexAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + APIServer: &apiServerMock{}, + ShutdownF: func() {}, } idxrIntf, err := NewIndexer(config) @@ -81,8 +82,9 @@ func TestNewIndexer(t *testing.T) { require.Len(idxr.txIndices, 0) require.NotNil(idxr.vtxIndices) require.Len(idxr.vtxIndices, 0) - require.NotNil(idxr.consensusAcceptorGroup) - require.NotNil(idxr.decisionAcceptorGroup) + require.NotNil(idxr.blockAcceptorGroup) + require.NotNil(idxr.txAcceptorGroup) + require.NotNil(idxr.vertexAcceptorGroup) require.NotNil(idxr.shutdownF) require.False(idxr.hasRunBefore) } @@ -95,13 +97,14 @@ func TestMarkHasRunAndShutdown(t *testing.T) { shutdown := &sync.WaitGroup{} shutdown.Add(1) config := Config{ - IndexingEnabled: true, - Log: logging.NoLog{}, - DB: db, - DecisionAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), - ConsensusAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), - APIServer: &apiServerMock{}, - ShutdownF: func() { shutdown.Done() }, + IndexingEnabled: true, + Log: logging.NoLog{}, + DB: db, + BlockAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + TxAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + VertexAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + APIServer: &apiServerMock{}, + ShutdownF: shutdown.Done, } idxrIntf, err := NewIndexer(config) @@ -131,15 +134,17 @@ func TestIndexer(t *testing.T) { baseDB := memdb.New() db := versiondb.New(baseDB) + server := &apiServerMock{} config := Config{ - IndexingEnabled: true, - AllowIncompleteIndex: false, - Log: logging.NoLog{}, - DB: db, - DecisionAcceptorGroup: 
snow.NewAcceptorGroup(logging.NoLog{}), - ConsensusAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), - APIServer: &apiServerMock{}, - ShutdownF: func() {}, + IndexingEnabled: true, + AllowIncompleteIndex: false, + Log: logging.NoLog{}, + DB: db, + BlockAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + TxAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + VertexAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + APIServer: server, + ShutdownF: func() {}, } // Create indexer @@ -161,19 +166,14 @@ func TestIndexer(t *testing.T) { require.False(previouslyIndexed) // Register this chain, creating a new index - chainVM := smblockmocks.NewMockChainVM(ctrl) - chainEngine := &smengmocks.Engine{} - chainEngine.On("Context").Return(chain1Ctx) - chainEngine.On("GetVM").Return(chainVM) - - idxr.RegisterChain("chain1", chainEngine) + chainVM := mocks.NewMockChainVM(ctrl) + idxr.RegisterChain("chain1", chain1Ctx, chainVM) isIncomplete, err = idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) require.False(isIncomplete) previouslyIndexed, err = idxr.previouslyIndexed(chain1Ctx.ChainID) require.NoError(err) require.True(previouslyIndexed) - server := config.APIServer.(*apiServerMock) require.EqualValues(1, server.timesCalled) require.EqualValues("index/chain1", server.bases[0]) require.EqualValues("/block", server.endpoints[0]) @@ -189,7 +189,7 @@ func TestIndexer(t *testing.T) { Timestamp: now.UnixNano(), } - require.NoError(config.ConsensusAcceptorGroup.Accept(chain1Ctx, blkID, blkBytes)) + require.NoError(config.BlockAcceptorGroup.Accept(chain1Ctx, blkID, blkBytes)) blkIdx := idxr.blockIndices[chain1Ctx.ChainID] require.NotNil(blkIdx) @@ -251,12 +251,14 @@ func TestIndexer(t *testing.T) { require.False(isIncomplete) // Register the same chain as before - idxr.RegisterChain("chain1", chainEngine) + idxr.RegisterChain("chain1", chain1Ctx, chainVM) blkIdx = idxr.blockIndices[chain1Ctx.ChainID] require.NotNil(blkIdx) container, err = 
blkIdx.GetLastAccepted() require.NoError(err) require.Equal(blkID, container.ID) + require.EqualValues(1, server.timesCalled) // block index for chain + require.Contains(server.endpoints, "/block") // Register a DAG chain chain2Ctx := snow.DefaultConsensusContextTest() @@ -267,18 +269,15 @@ func TestIndexer(t *testing.T) { previouslyIndexed, err = idxr.previouslyIndexed(chain2Ctx.ChainID) require.NoError(err) require.False(previouslyIndexed) - dagVM := &avvtxmocks.DAGVM{} - dagEngine := &avengmocks.Engine{} - dagEngine.On("Context").Return(chain2Ctx) - dagEngine.On("GetVM").Return(dagVM).Once() - idxr.RegisterChain("chain2", dagEngine) - require.NoError(err) - server = config.APIServer.(*apiServerMock) - require.EqualValues(3, server.timesCalled) // block index, vtx index, tx index + dagVM := vertex.NewMockLinearizableVM(ctrl) + idxr.RegisterChain("chain2", chain2Ctx, dagVM) + require.NoError(err) + require.EqualValues(4, server.timesCalled) // block index for chain, block index for dag, vtx index, tx index require.Contains(server.bases, "index/chain2") + require.Contains(server.endpoints, "/block") require.Contains(server.endpoints, "/vtx") require.Contains(server.endpoints, "/tx") - require.Len(idxr.blockIndices, 1) + require.Len(idxr.blockIndices, 2) require.Len(idxr.txIndices, 1) require.Len(idxr.vtxIndices, 1) @@ -286,21 +285,11 @@ func TestIndexer(t *testing.T) { vtxID, vtxBytes := ids.GenerateTestID(), utils.RandomBytes(32) expectedVtx := Container{ ID: vtxID, - Bytes: blkBytes, + Bytes: vtxBytes, Timestamp: now.UnixNano(), } - // Mocked VM knows about this block now - dagEngine.On("GetVtx", vtxID).Return( - &avalanche.TestVertex{ - TestDecidable: choices.TestDecidable{ - StatusV: choices.Accepted, - IDV: vtxID, - }, - BytesV: vtxBytes, - }, nil, - ).Once() - require.NoError(config.ConsensusAcceptorGroup.Accept(chain2Ctx, vtxID, blkBytes)) + require.NoError(config.VertexAcceptorGroup.Accept(chain2Ctx, vtxID, vtxBytes)) vtxIdx := 
idxr.vtxIndices[chain2Ctx.ChainID] require.NotNil(vtxIdx) @@ -339,7 +328,7 @@ func TestIndexer(t *testing.T) { Timestamp: now.UnixNano(), } // Mocked VM knows about this tx now - dagVM.On("GetTx", txID).Return( + dagVM.EXPECT().GetTx(gomock.Any(), txID).Return( &snowstorm.TestTx{ TestDecidable: choices.TestDecidable{ IDV: txID, @@ -347,9 +336,9 @@ func TestIndexer(t *testing.T) { }, BytesV: txBytes, }, nil, - ).Once() + ).AnyTimes() - require.NoError(config.DecisionAcceptorGroup.Accept(chain2Ctx, txID, blkBytes)) + require.NoError(config.TxAcceptorGroup.Accept(chain2Ctx, txID, blkBytes)) txIdx := idxr.txIndices[chain2Ctx.ChainID] require.NotNil(txIdx) @@ -402,8 +391,8 @@ func TestIndexer(t *testing.T) { require.NoError(err) idxr, ok = idxrIntf.(*indexer) require.True(ok) - idxr.RegisterChain("chain1", chainEngine) - idxr.RegisterChain("chain2", dagEngine) + idxr.RegisterChain("chain1", chain1Ctx, chainVM) + idxr.RegisterChain("chain2", chain2Ctx, dagVM) // Verify state lastAcceptedTx, err = idxr.txIndices[chain2Ctx.ChainID].GetLastAccepted() @@ -426,14 +415,15 @@ func TestIncompleteIndex(t *testing.T) { baseDB := memdb.New() config := Config{ - IndexingEnabled: false, - AllowIncompleteIndex: false, - Log: logging.NoLog{}, - DB: versiondb.New(baseDB), - DecisionAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), - ConsensusAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), - APIServer: &apiServerMock{}, - ShutdownF: func() {}, + IndexingEnabled: false, + AllowIncompleteIndex: false, + Log: logging.NoLog{}, + DB: versiondb.New(baseDB), + BlockAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + TxAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + VertexAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + APIServer: &apiServerMock{}, + ShutdownF: func() {}, } idxrIntf, err := NewIndexer(config) require.NoError(err) @@ -450,9 +440,8 @@ func TestIncompleteIndex(t *testing.T) { previouslyIndexed, err := idxr.previouslyIndexed(chain1Ctx.ChainID) 
require.NoError(err) require.False(previouslyIndexed) - chainEngine := &smengmocks.Engine{} - chainEngine.On("Context").Return(chain1Ctx) - idxr.RegisterChain("chain1", chainEngine) + chainVM := mocks.NewMockChainVM(ctrl) + idxr.RegisterChain("chain1", chain1Ctx, chainVM) isIncomplete, err = idxr.isIncomplete(chain1Ctx.ChainID) require.NoError(err) require.True(isIncomplete) @@ -471,7 +460,7 @@ func TestIncompleteIndex(t *testing.T) { // Register the chain again. Should die due to incomplete index. require.NoError(config.DB.(*versiondb.Database).Commit()) - idxr.RegisterChain("chain1", chainEngine) + idxr.RegisterChain("chain1", chain1Ctx, chainVM) require.True(idxr.closed) // Close and re-open the indexer, this time with indexing enabled @@ -486,7 +475,7 @@ func TestIncompleteIndex(t *testing.T) { require.True(idxr.allowIncompleteIndex) // Register the chain again. Should be OK - idxr.RegisterChain("chain1", chainEngine) + idxr.RegisterChain("chain1", chain1Ctx, chainVM) require.False(idxr.closed) // Close the indexer and re-open with indexing disabled and @@ -510,14 +499,15 @@ func TestIgnoreNonDefaultChains(t *testing.T) { baseDB := memdb.New() db := versiondb.New(baseDB) config := Config{ - IndexingEnabled: true, - AllowIncompleteIndex: false, - Log: logging.NoLog{}, - DB: db, - DecisionAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), - ConsensusAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), - APIServer: &apiServerMock{}, - ShutdownF: func() {}, + IndexingEnabled: true, + AllowIncompleteIndex: false, + Log: logging.NoLog{}, + DB: db, + BlockAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + TxAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + VertexAcceptorGroup: snow.NewAcceptorGroup(logging.NoLog{}), + APIServer: &apiServerMock{}, + ShutdownF: func() {}, } // Create indexer @@ -532,10 +522,7 @@ func TestIgnoreNonDefaultChains(t *testing.T) { chain1Ctx.SubnetID = ids.GenerateTestID() // RegisterChain should return without adding an 
index for this chain - chainVM := smblockmocks.NewMockChainVM(ctrl) - chainEngine := &smengmocks.Engine{} - chainEngine.On("Context").Return(chain1Ctx) - chainEngine.On("GetVM").Return(chainVM) - idxr.RegisterChain("chain1", chainEngine) + chainVM := mocks.NewMockChainVM(ctrl) + idxr.RegisterChain("chain1", chain1Ctx, chainVM) require.Len(idxr.blockIndices, 0) } diff --git a/avalanchego/indexer/service.go b/avalanchego/indexer/service.go index 664d7675..98bc91e9 100644 --- a/avalanchego/indexer/service.go +++ b/avalanchego/indexer/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -91,7 +91,7 @@ type GetContainerRangeResponse struct { // If [startIndex] > the last accepted index, returns an error (unless the above apply.) // If [n] > [MaxFetchedByRange], returns an error. // If we run out of transactions, returns the ones fetched before running out. 
-func (s *service) GetContainerRange(r *http.Request, args *GetContainerRangeArgs, reply *GetContainerRangeResponse) error { +func (s *service) GetContainerRange(_ *http.Request, args *GetContainerRangeArgs, reply *GetContainerRangeResponse) error { containers, err := s.Index.GetContainerRange(uint64(args.StartIndex), uint64(args.NumToFetch)) if err != nil { return err @@ -119,7 +119,7 @@ type GetIndexResponse struct { Index json.Uint64 `json:"index"` } -func (s *service) GetIndex(r *http.Request, args *GetIndexArgs, reply *GetIndexResponse) error { +func (s *service) GetIndex(_ *http.Request, args *GetIndexArgs, reply *GetIndexResponse) error { index, err := s.Index.GetIndex(args.ID) reply.Index = json.Uint64(index) return err @@ -133,7 +133,7 @@ type IsAcceptedResponse struct { IsAccepted bool `json:"isAccepted"` } -func (s *service) IsAccepted(r *http.Request, args *IsAcceptedArgs, reply *IsAcceptedResponse) error { +func (s *service) IsAccepted(_ *http.Request, args *IsAcceptedArgs, reply *IsAcceptedResponse) error { _, err := s.Index.GetIndex(args.ID) if err == nil { reply.IsAccepted = true @@ -151,7 +151,7 @@ type GetContainerByIDArgs struct { Encoding formatting.Encoding `json:"encoding"` } -func (s *service) GetContainerByID(r *http.Request, args *GetContainerByIDArgs, reply *FormattedContainer) error { +func (s *service) GetContainerByID(_ *http.Request, args *GetContainerByIDArgs, reply *FormattedContainer) error { container, err := s.Index.GetContainerByID(args.ID) if err != nil { return err diff --git a/avalanchego/ipcs/chainipc.go b/avalanchego/ipcs/chainipc.go index 3ef1450d..56d43933 100644 --- a/avalanchego/ipcs/chainipc.go +++ b/avalanchego/ipcs/chainipc.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package ipcs @@ -9,6 +9,8 @@ import ( "go.uber.org/zap" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/logging" @@ -33,23 +35,33 @@ type context struct { // ChainIPCs maintains IPCs for a set of chains type ChainIPCs struct { context - chains map[ids.ID]*EventSockets - consensusAcceptorGroup snow.AcceptorGroup - decisionAcceptorGroup snow.AcceptorGroup + chains map[ids.ID]*EventSockets + blockAcceptorGroup snow.AcceptorGroup + txAcceptorGroup snow.AcceptorGroup + vertexAcceptorGroup snow.AcceptorGroup } // NewChainIPCs creates a new *ChainIPCs that writes consensus and decision // events to IPC sockets -func NewChainIPCs(log logging.Logger, path string, networkID uint32, consensusAcceptorGroup, decisionAcceptorGroup snow.AcceptorGroup, defaultChainIDs []ids.ID) (*ChainIPCs, error) { +func NewChainIPCs( + log logging.Logger, + path string, + networkID uint32, + blockAcceptorGroup snow.AcceptorGroup, + txAcceptorGroup snow.AcceptorGroup, + vertexAcceptorGroup snow.AcceptorGroup, + defaultChainIDs []ids.ID, +) (*ChainIPCs, error) { cipcs := &ChainIPCs{ context: context{ log: log, networkID: networkID, path: path, }, - chains: make(map[ids.ID]*EventSockets), - consensusAcceptorGroup: consensusAcceptorGroup, - decisionAcceptorGroup: decisionAcceptorGroup, + chains: make(map[ids.ID]*EventSockets), + blockAcceptorGroup: blockAcceptorGroup, + txAcceptorGroup: txAcceptorGroup, + vertexAcceptorGroup: vertexAcceptorGroup, } for _, chainID := range defaultChainIDs { if _, err := cipcs.Publish(chainID); err != nil { @@ -68,7 +80,13 @@ func (cipcs *ChainIPCs) Publish(chainID ids.ID) (*EventSockets, error) { return es, nil } - es, err := newEventSockets(cipcs.context, chainID, cipcs.consensusAcceptorGroup, cipcs.decisionAcceptorGroup) + es, err := newEventSockets( + cipcs.context, + chainID, + cipcs.blockAcceptorGroup, + cipcs.txAcceptorGroup, + cipcs.vertexAcceptorGroup, + ) if err 
!= nil { cipcs.log.Error("can't create ipcs", zap.Error(err), @@ -98,13 +116,7 @@ func (cipcs *ChainIPCs) Unpublish(chainID ids.ID) (bool, error) { // GetPublishedBlockchains returns the chains that are currently being published func (cipcs *ChainIPCs) GetPublishedBlockchains() []ids.ID { - chainIds := make([]ids.ID, 0, len(cipcs.chains)) - - for id := range cipcs.chains { - chainIds = append(chainIds, id) - } - - return chainIds + return maps.Keys(cipcs.chains) } func (cipcs *ChainIPCs) Shutdown() error { diff --git a/avalanchego/ipcs/eventsocket.go b/avalanchego/ipcs/eventsocket.go index 0892b7d3..37b370c3 100644 --- a/avalanchego/ipcs/eventsocket.go +++ b/avalanchego/ipcs/eventsocket.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ipcs @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -var _ snow.Acceptor = &EventSockets{} +var _ snow.Acceptor = (*EventSockets)(nil) // EventSockets is a set of named eventSockets type EventSockets struct { @@ -24,13 +24,31 @@ type EventSockets struct { } // newEventSockets creates a *ChainIPCs with both consensus and decisions IPCs -func newEventSockets(ctx context, chainID ids.ID, consensusAcceptorGroup, decisionAcceptorGroup snow.AcceptorGroup) (*EventSockets, error) { - consensusIPC, err := newEventIPCSocket(ctx, chainID, ipcConsensusIdentifier, consensusAcceptorGroup) +func newEventSockets( + ctx context, + chainID ids.ID, + blockAcceptorGroup snow.AcceptorGroup, + txAcceptorGroup snow.AcceptorGroup, + vertexAcceptorGroup snow.AcceptorGroup, +) (*EventSockets, error) { + consensusIPC, err := newEventIPCSocket( + ctx, + chainID, + ipcConsensusIdentifier, + blockAcceptorGroup, + vertexAcceptorGroup, + ) if err != nil { return nil, err } - decisionsIPC, err := newEventIPCSocket(ctx, chainID, ipcDecisionsIdentifier, decisionAcceptorGroup) + 
decisionsIPC, err := newEventIPCSocket( + ctx, + chainID, + ipcDecisionsIdentifier, + blockAcceptorGroup, + txAcceptorGroup, + ) if err != nil { return nil, err } @@ -93,7 +111,13 @@ type eventSocket struct { // newEventIPCSocket creates a *eventSocket for the given chain and // EventDispatcher that writes to a local IPC socket -func newEventIPCSocket(ctx context, chainID ids.ID, name string, acceptorGroup snow.AcceptorGroup) (*eventSocket, error) { +func newEventIPCSocket( + ctx context, + chainID ids.ID, + name string, + snowmanAcceptorGroup snow.AcceptorGroup, + avalancheAcceptorGroup snow.AcceptorGroup, +) (*eventSocket, error) { var ( url = ipcURL(ctx, chainID, name) ipcName = ipcIdentifierPrefix + "-" + name @@ -109,7 +133,12 @@ func newEventIPCSocket(ctx context, chainID ids.ID, name string, acceptorGroup s url: url, socket: socket.NewSocket(url, ctx.log), unregisterFn: func() error { - return acceptorGroup.DeregisterAcceptor(chainID, ipcName) + errs := wrappers.Errs{} + errs.Add( + snowmanAcceptorGroup.DeregisterAcceptor(chainID, ipcName), + avalancheAcceptorGroup.DeregisterAcceptor(chainID, ipcName), + ) + return errs.Err }, } @@ -120,7 +149,14 @@ func newEventIPCSocket(ctx context, chainID ids.ID, name string, acceptorGroup s return nil, err } - if err := acceptorGroup.RegisterAcceptor(chainID, ipcName, eis, false); err != nil { + if err := snowmanAcceptorGroup.RegisterAcceptor(chainID, ipcName, eis, false); err != nil { + if err := eis.stop(); err != nil { + return nil, err + } + return nil, err + } + + if err := avalancheAcceptorGroup.RegisterAcceptor(chainID, ipcName, eis, false); err != nil { if err := eis.stop(); err != nil { return nil, err } diff --git a/avalanchego/ipcs/socket/socket.go b/avalanchego/ipcs/socket/socket.go index 786fb36d..d3ca391d 100644 --- a/avalanchego/ipcs/socket/socket.go +++ b/avalanchego/ipcs/socket/socket.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package socket diff --git a/avalanchego/ipcs/socket/socket_test.go b/avalanchego/ipcs/socket/socket_test.go index 4baf0a4c..3489fef2 100644 --- a/avalanchego/ipcs/socket/socket_test.go +++ b/avalanchego/ipcs/socket/socket_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package socket @@ -47,11 +47,11 @@ func TestSocketSendAndReceive(t *testing.T) { // Test max message size client.SetMaxMessageSize(msgLen) - if _, err = client.Recv(); err != nil { + if _, err := client.Recv(); err != nil { t.Fatal("Failed to receive from socket:", err.Error()) } client.SetMaxMessageSize(msgLen - 1) - if _, err = client.Recv(); err != ErrMessageTooLarge { + if _, err := client.Recv(); err != ErrMessageTooLarge { t.Fatal("Should have received message too large error, got:", err) } } diff --git a/avalanchego/ipcs/socket/socket_unix.go b/avalanchego/ipcs/socket/socket_unix.go index 578a29df..cf5d7c1a 100644 --- a/avalanchego/ipcs/socket/socket_unix.go +++ b/avalanchego/ipcs/socket/socket_unix.go @@ -1,7 +1,7 @@ //go:build !windows && !plan9 && !js // +build !windows,!plan9,!js -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package socket @@ -23,18 +23,19 @@ func listen(addr string) (net.Listener, error) { return nil, err } - // Try to listen on the socket. If that fails we check to see if it's a stale - // socket and remove it if it is. Then we try to listen one more time. + // Try to listen on the socket. 
l, err := net.ListenUnix("unix", uAddr) - if err != nil { - if err = removeIfStaleUnixSocket(addr); err != nil { - return nil, err - } - if l, err = net.ListenUnix("unix", uAddr); err != nil { - return nil, err - } + if err == nil { + return l, nil } - return l, err + + // Check to see if the socket is stale and remove it if it is. + if err := removeIfStaleUnixSocket(addr); err != nil { + return nil, err + } + + // Try listening again now that it shouldn't be stale. + return net.ListenUnix("unix", uAddr) } // Dial creates a new *Client connected to the given address over a Unix socket @@ -59,13 +60,16 @@ func Dial(addr string) (*Client, error) { // that is refusing connections func removeIfStaleUnixSocket(socketPath string) error { // Ensure it's a socket; if not return without an error - if st, err := os.Stat(socketPath); err != nil || st.Mode()&os.ModeType != os.ModeSocket { + st, err := os.Stat(socketPath) + if err != nil { + return nil + } + if st.Mode()&os.ModeType != os.ModeSocket { return nil } // Try to connect conn, err := net.DialTimeout("unix", socketPath, staleSocketTimeout) - switch { // The connection was refused so this socket is stale; remove it case isSyscallError(err, syscall.ECONNREFUSED): @@ -74,6 +78,8 @@ func removeIfStaleUnixSocket(socketPath string) error { // The socket is alive so close this connection and leave the socket alone case err == nil: return conn.Close() + + default: + return nil } - return nil } diff --git a/avalanchego/ipcs/socket/socket_windows.go b/avalanchego/ipcs/socket/socket_windows.go index 3f034ae5..ea61157f 100644 --- a/avalanchego/ipcs/socket/socket_windows.go +++ b/avalanchego/ipcs/socket/socket_windows.go @@ -1,7 +1,7 @@ //go:build windows // +build windows -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package socket @@ -10,6 +10,7 @@ import ( "net" "github.com/Microsoft/go-winio" + "github.com/ava-labs/avalanchego/utils/constants" ) diff --git a/avalanchego/main/main.go b/avalanchego/main/main.go index 297eb06b..93bfa54f 100644 --- a/avalanchego/main/main.go +++ b/avalanchego/main/main.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package main @@ -10,7 +10,9 @@ import ( "github.com/spf13/pflag" - "github.com/ava-labs/avalanchego/app/runner" + "golang.org/x/term" + + "github.com/ava-labs/avalanchego/app" "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/version" ) @@ -29,18 +31,12 @@ func main() { os.Exit(1) } - runnerConfig, err := config.GetRunnerConfig(v) - if err != nil { - fmt.Printf("couldn't load process config: %s\n", err) - os.Exit(1) - } - - if runnerConfig.DisplayVersionAndExit { + if v.GetBool(config.VersionKey) { fmt.Print(version.String) os.Exit(0) } - nodeConfig, err := config.GetNodeConfig(v, runnerConfig.BuildDir) + nodeConfig, err := config.GetNodeConfig(v) if err != nil { fmt.Printf("couldn't load node config: %s\n", err) os.Exit(1) @@ -49,5 +45,11 @@ func main() { // Flare specific: set the application prefix (flare for songbird and avalanche for flare) version.InitApplicationPrefix(nodeConfig.NetworkID) - runner.Run(runnerConfig, nodeConfig) + nodeApp := app.New(nodeConfig) // Create node wrapper + if term.IsTerminal(int(os.Stdout.Fd())) { + fmt.Println(app.Header) + } + + exitCode := app.Run(nodeApp) + os.Exit(exitCode) } diff --git a/avalanchego/message/builder_test.go b/avalanchego/message/builder_test.go deleted file mode 100644 index 7cb168cb..00000000 --- a/avalanchego/message/builder_test.go +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "net" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/ips" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/version" -) - -var ( - UncompressingBuilder OutboundMsgBuilder - TestInboundMsgBuilder InboundMsgBuilder - TestCodec Codec - - dummyNodeID = ids.EmptyNodeID - dummyOnFinishedHandling = func() {} -) - -func init() { - codec, err := NewCodecWithMemoryPool("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) - if err != nil { - panic(err) - } - TestCodec = codec - UncompressingBuilder = NewOutboundBuilderWithPacker(codec, false /*compress*/) - TestInboundMsgBuilder = NewInboundBuilderWithPacker(codec) -} - -func TestBuildVersion(t *testing.T) { - networkID := uint32(12345) - myTime := uint64(time.Now().Unix()) - ip := ips.IPPort{ - IP: net.IPv4(1, 2, 3, 4), - } - - myVersion := &version.Semantic{ - Major: 1, - Minor: 2, - Patch: 3, - } - myVersionStr := myVersion.String() - myVersionTime := uint64(time.Now().Unix()) - sig := make([]byte, 65) - subnetID := ids.Empty.Prefix(1) - subnetIDs := [][]byte{subnetID[:]} - msg, err := UncompressingBuilder.Version( - networkID, - myTime, - ip, - myVersionStr, - myVersionTime, - sig, - []ids.ID{subnetID}, - ) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, Version, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - - require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, Version, parsedMsg.Op()) - - networkIDIntf, err := parsedMsg.Get(NetworkID) - require.NoError(t, err) - require.EqualValues(t, networkID, networkIDIntf) - - myTimeIntf, err := parsedMsg.Get(MyTime) - require.NoError(t, err) - require.EqualValues(t, myTime, myTimeIntf) - - ipIntf, err := parsedMsg.Get(IP) - require.NoError(t, err) - 
require.EqualValues(t, ip, ipIntf) - - versionStrIntf, err := parsedMsg.Get(VersionStr) - require.NoError(t, err) - require.EqualValues(t, myVersionStr, versionStrIntf) - - versionTimeIntf, err := parsedMsg.Get(VersionTime) - require.NoError(t, err) - require.EqualValues(t, myVersionTime, versionTimeIntf) - - sigBytesIntf, err := parsedMsg.Get(SigBytes) - require.NoError(t, err) - require.EqualValues(t, sig, sigBytesIntf) - - trackedSubnetsIntf, err := parsedMsg.Get(TrackedSubnets) - require.NoError(t, err) - require.EqualValues(t, subnetIDs, trackedSubnetsIntf) -} - -func TestBuildGetAcceptedFrontier(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - deadline := uint64(15) - - msg, err := UncompressingBuilder.GetAcceptedFrontier(chainID, requestID, time.Duration(deadline)) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, GetAcceptedFrontier, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, GetAcceptedFrontier, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - deadlineIntf, err := parsedMsg.Get(Deadline) - require.NoError(t, err) - require.Equal(t, deadline, deadlineIntf) -} - -func TestBuildAcceptedFrontier(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - containerID := ids.Empty.Prefix(1) - containerIDs := [][]byte{containerID[:]} - - msg, err := UncompressingBuilder.AcceptedFrontier(chainID, requestID, []ids.ID{containerID}) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, AcceptedFrontier, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, 
parsedMsg) - require.Equal(t, AcceptedFrontier, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - containerIDsIntf, err := parsedMsg.Get(ContainerIDs) - require.NoError(t, err) - require.Equal(t, containerIDs, containerIDsIntf) -} - -func TestBuildGetAccepted(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - deadline := uint64(15) - containerID := ids.Empty.Prefix(1) - containerIDs := [][]byte{containerID[:]} - - msg, err := UncompressingBuilder.GetAccepted(chainID, requestID, time.Duration(deadline), []ids.ID{containerID}) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, GetAccepted, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, GetAccepted, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - deadlineIntf, err := parsedMsg.Get(Deadline) - require.NoError(t, err) - require.Equal(t, deadline, deadlineIntf) - - containerIDsIntf, err := parsedMsg.Get(ContainerIDs) - require.NoError(t, err) - require.Equal(t, containerIDs, containerIDsIntf) -} - -func TestBuildAccepted(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - containerID := ids.Empty.Prefix(1) - containerIDs := [][]byte{containerID[:]} - - msg, err := UncompressingBuilder.Accepted(chainID, requestID, []ids.ID{containerID}) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, Accepted, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - 
require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, Accepted, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - containerIDsIntf, err := parsedMsg.Get(ContainerIDs) - require.NoError(t, err) - require.Equal(t, containerIDs, containerIDsIntf) -} - -func TestBuildGet(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - deadline := uint64(15) - containerID := ids.Empty.Prefix(1) - - msg, err := UncompressingBuilder.Get(chainID, requestID, time.Duration(deadline), containerID) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, Get, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, Get, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - deadlineIntf, err := parsedMsg.Get(Deadline) - require.NoError(t, err) - require.Equal(t, deadline, deadlineIntf) - - containerIDIntf, err := parsedMsg.Get(ContainerID) - require.NoError(t, err) - require.Equal(t, containerID[:], containerIDIntf) -} - -func TestBuildPut(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - container := []byte{2} - - for _, compress := range []bool{false, true} { - builder := NewOutboundBuilderWithPacker(TestCodec, compress) - msg, err := builder.Put(chainID, requestID, container) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, Put, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - 
require.NotNil(t, parsedMsg) - require.Equal(t, Put, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - containerIDIntf, err := parsedMsg.Get(ContainerID) - require.NoError(t, err) - require.Equal(t, ids.Empty[:], containerIDIntf) - - containerIntf, err := parsedMsg.Get(ContainerBytes) - require.NoError(t, err) - require.Equal(t, container, containerIntf) - } -} - -func TestBuildPushQuery(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - deadline := uint64(15) - container := []byte{2} - - for _, compress := range []bool{false, true} { - builder := NewOutboundBuilderWithPacker(TestCodec, compress) - msg, err := builder.PushQuery(chainID, requestID, time.Duration(deadline), container) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, PushQuery, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, PushQuery, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - deadlineIntf, err := parsedMsg.Get(Deadline) - require.NoError(t, err) - require.Equal(t, deadline, deadlineIntf) - - containerIDIntf, err := parsedMsg.Get(ContainerID) - require.NoError(t, err) - require.Equal(t, ids.Empty[:], containerIDIntf) - - containerIntf, err := parsedMsg.Get(ContainerBytes) - require.NoError(t, err) - require.Equal(t, container, containerIntf) - } -} - -func TestBuildPullQuery(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - deadline := uint64(15) - containerID := ids.Empty.Prefix(1) - - 
msg, err := UncompressingBuilder.PullQuery(chainID, requestID, time.Duration(deadline), containerID) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, PullQuery, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, PullQuery, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - deadlineIntf, err := parsedMsg.Get(Deadline) - require.NoError(t, err) - require.Equal(t, deadline, deadlineIntf) - - containerIDIntf, err := parsedMsg.Get(ContainerID) - require.NoError(t, err) - require.Equal(t, containerID[:], containerIDIntf) -} - -func TestBuildChits(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - containerID := ids.Empty.Prefix(1) - containerIDs := [][]byte{containerID[:]} - - msg, err := UncompressingBuilder.Chits(chainID, requestID, []ids.ID{containerID}) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, Chits, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, Chits, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - containerIDsIntf, err := parsedMsg.Get(ContainerIDs) - require.NoError(t, err) - require.Equal(t, containerIDs, containerIDsIntf) -} - -func TestBuildAncestors(t *testing.T) { - chainID := ids.Empty.Prefix(0) - requestID := uint32(5) - container := ids.Empty.Prefix(1) - container2 := ids.Empty.Prefix(2) - containers := 
[][]byte{container[:], container2[:]} - - for _, compress := range []bool{false, true} { - builder := NewOutboundBuilderWithPacker(TestCodec, compress) - msg, err := builder.Ancestors(chainID, requestID, containers) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, Ancestors, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, Ancestors, parsedMsg.Op()) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.Equal(t, requestID, requestIDIntf) - - multiContainerBytesIntf, err := parsedMsg.Get(MultiContainerBytes) - require.NoError(t, err) - require.Equal(t, containers, multiContainerBytesIntf) - } -} - -func TestBuildAppRequestMsg(t *testing.T) { - chainID := ids.GenerateTestID() - appRequestBytes := make([]byte, 1024) - appRequestBytes[0] = 1 - appRequestBytes[len(appRequestBytes)-1] = 1 - deadline := uint64(time.Now().Unix()) - - for _, compress := range []bool{false, true} { - builder := NewOutboundBuilderWithPacker(TestCodec, compress) - msg, err := builder.AppRequest(chainID, 1, time.Duration(deadline), appRequestBytes) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, AppRequest, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, parsedMsg) - require.Equal(t, AppRequest, parsedMsg.Op()) - } -} - -func TestBuildAppResponseMsg(t *testing.T) { - chainID := ids.GenerateTestID() - appResponseBytes := make([]byte, 1024) - appResponseBytes[0] = 1 - appResponseBytes[len(appResponseBytes)-1] = 1 - - for _, compress := range []bool{false, true} { - builder := NewOutboundBuilderWithPacker(TestCodec, compress) - msg, err := builder.AppResponse(chainID, 1, appResponseBytes) - 
require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, AppResponse, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, AppResponse, msg.Op()) - - requestIDIntf, err := parsedMsg.Get(RequestID) - require.NoError(t, err) - require.EqualValues(t, 1, requestIDIntf) - - appBytesIntf, err := parsedMsg.Get(AppBytes) - require.NoError(t, err) - require.Equal(t, appResponseBytes, appBytesIntf) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - } -} - -func TestBuildAppGossipMsg(t *testing.T) { - chainID := ids.GenerateTestID() - appGossipBytes := make([]byte, 1024) - appGossipBytes[0] = 1 - appGossipBytes[len(appGossipBytes)-1] = 1 - - for _, compress := range []bool{false, true} { - testBuilder := NewOutboundBuilderWithPacker(TestCodec, compress) - msg, err := testBuilder.AppGossip(chainID, appGossipBytes) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, AppGossip, msg.Op()) - - parsedMsg, err := TestCodec.Parse(msg.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err) - require.NotNil(t, msg) - require.Equal(t, AppGossip, msg.Op()) - - appBytesIntf, err := parsedMsg.Get(AppBytes) - require.NoError(t, err) - require.Equal(t, appGossipBytes, appBytesIntf) - - chainIDIntf, err := parsedMsg.Get(ChainID) - require.NoError(t, err) - require.Equal(t, chainID[:], chainIDIntf) - } -} diff --git a/avalanchego/message/codec.go b/avalanchego/message/codec.go deleted file mode 100644 index 182ad434..00000000 --- a/avalanchego/message/codec.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "errors" - "fmt" - "math" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/compression" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -var ( - errMissingField = errors.New("message missing field") - errBadOp = errors.New("input field has invalid operation") - - _ Codec = &codec{} -) - -type Packer interface { - Pack( - op Op, - fieldValues map[Field]interface{}, - compress bool, - bypassThrottling bool, - ) (OutboundMessage, error) -} - -type Parser interface { - SetTime(t time.Time) // useful in UTs - - // Parse reads given bytes as InboundMessage - // Overrides client specified deadline in a message to maxDeadlineDuration - Parse(bytes []byte, nodeID ids.NodeID, onFinishedHandling func()) (InboundMessage, error) -} - -type Codec interface { - Packer - Parser -} - -// codec defines the serialization and deserialization of network messages. -// It's safe for multiple goroutines to call Pack and Parse concurrently. -type codec struct { - // Contains []byte. Used as an optimization. - // Can be accessed by multiple goroutines concurrently. 
- byteSlicePool sync.Pool - - clock mockable.Clock - - compressTimeMetrics map[Op]metric.Averager - decompressTimeMetrics map[Op]metric.Averager - compressor compression.Compressor - maxMessageTimeout time.Duration -} - -func NewCodecWithMemoryPool(namespace string, metrics prometheus.Registerer, maxMessageSize int64, maxMessageTimeout time.Duration) (Codec, error) { - cpr, err := compression.NewGzipCompressor(maxMessageSize) - if err != nil { - return nil, err - } - c := &codec{ - byteSlicePool: sync.Pool{ - New: func() interface{} { - return make([]byte, 0, constants.DefaultByteSliceCap) - }, - }, - compressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), - decompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), - compressor: cpr, - maxMessageTimeout: maxMessageTimeout, - } - - errs := wrappers.Errs{} - for _, op := range ExternalOps { - if !op.Compressible() { - continue - } - - c.compressTimeMetrics[op] = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_compress_time", op), - fmt.Sprintf("time (in ns) to compress %s messages", op), - metrics, - &errs, - ) - c.decompressTimeMetrics[op] = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_decompress_time", op), - fmt.Sprintf("time (in ns) to decompress %s messages", op), - metrics, - &errs, - ) - } - return c, errs.Err -} - -func (c *codec) SetTime(t time.Time) { - c.clock.Set(t) -} - -// Pack attempts to pack a map of fields into a message. -// The first byte of the message is the opcode of the message. -// Uses [buffer] to hold the message's byte repr. -// [buffer]'s contents may be overwritten by this method. -// [buffer] may be nil. -// If [compress], compress the payload. -// If [bypassThrottling], mark the message to avoid outbound throttling checks. 
-func (c *codec) Pack( - op Op, - fieldValues map[Field]interface{}, - compress bool, - bypassThrottling bool, -) (OutboundMessage, error) { - msgFields, ok := messages[op] - if !ok { - return nil, errBadOp - } - - buffer := c.byteSlicePool.Get().([]byte) - p := wrappers.Packer{ - MaxSize: math.MaxInt32, - Bytes: buffer[:0], - } - // Pack the op code (message type) - p.PackByte(byte(op)) - - // Optionally, pack whether the payload is compressed - if op.Compressible() { - p.PackBool(compress) - } - - // Pack the uncompressed payload - for _, field := range msgFields { - data, ok := fieldValues[field] - if !ok { - return nil, errMissingField - } - field.Packer()(&p, data) - } - if p.Err != nil { - return nil, p.Err - } - msg := &outboundMessageWithPacker{ - outboundMessage: outboundMessage{ - op: op, - bytes: p.Bytes, - bypassThrottling: bypassThrottling, - }, - - refs: 1, - c: c, - } - if !compress { - return msg, nil - } - - // If [compress], compress the payload (not the op code, not isCompressed). - // The slice below is guaranteed to be in-bounds because [p.Err] == nil - // implies that len(msg.bytes) >= 2 - payloadBytes := msg.bytes[wrappers.BoolLen+wrappers.ByteLen:] - startTime := time.Now() - compressedPayloadBytes, err := c.compressor.Compress(payloadBytes) - if err != nil { - return nil, fmt.Errorf("couldn't compress payload of %s message: %w", op, err) - } - c.compressTimeMetrics[op].Observe(float64(time.Since(startTime))) - msg.bytesSavedCompression = len(payloadBytes) - len(compressedPayloadBytes) // may be negative - // Remove the uncompressed payload (keep just the message type and isCompressed) - msg.bytes = msg.bytes[:wrappers.BoolLen+wrappers.ByteLen] - // Attach the compressed payload - msg.bytes = append(msg.bytes, compressedPayloadBytes...) - return msg, nil -} - -// Parse attempts to convert bytes into a message. -// The first byte of the message is the opcode of the message. 
-// Overrides client specified deadline in a message to maxDeadlineDuration -func (c *codec) Parse(bytes []byte, nodeID ids.NodeID, onFinishedHandling func()) (InboundMessage, error) { - p := wrappers.Packer{Bytes: bytes} - - // Unpack the op code (message type) - op := Op(p.UnpackByte()) - - msgFields, ok := messages[op] - if !ok { // Unknown message type - return nil, errBadOp - } - - // See if messages of this type may be compressed - compressed := false - if op.Compressible() { - compressed = p.UnpackBool() - } - if p.Err != nil { - return nil, p.Err - } - - bytesSaved := 0 - - // If the payload is compressed, decompress it - if compressed { - // The slice below is guaranteed to be in-bounds because [p.Err] == nil - compressedPayloadBytes := p.Bytes[wrappers.ByteLen+wrappers.BoolLen:] - startTime := time.Now() - payloadBytes, err := c.compressor.Decompress(compressedPayloadBytes) - if err != nil { - return nil, fmt.Errorf("couldn't decompress payload of %s message: %w", op, err) - } - c.decompressTimeMetrics[op].Observe(float64(time.Since(startTime))) - // Replace the compressed payload with the decompressed payload. - // Remove the compressed payload and isCompressed; keep just the message type - p.Bytes = p.Bytes[:wrappers.ByteLen] - // Rewind offset by 1 because we removed the bool flag - // since the data now is uncompressed - p.Offset -= wrappers.BoolLen - // Attach the decompressed payload. - p.Bytes = append(p.Bytes, payloadBytes...) 
- bytesSaved = len(payloadBytes) - len(compressedPayloadBytes) - } - - // Parse each field of the payload - fieldValues := make(map[Field]interface{}, len(msgFields)) - for _, field := range msgFields { - fieldValues[field] = field.Unpacker()(&p) - } - if p.Err != nil { - return nil, p.Err - } - - if p.Offset != len(p.Bytes) { - return nil, fmt.Errorf("expected length %d but got %d", p.Offset, len(p.Bytes)) - } - - var expirationTime time.Time - if deadline, hasDeadline := fieldValues[Deadline]; hasDeadline { - deadlineDuration := time.Duration(deadline.(uint64)) - if deadlineDuration > c.maxMessageTimeout { - deadlineDuration = c.maxMessageTimeout - } - expirationTime = c.clock.Time().Add(deadlineDuration) - } - - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: op, - bytesSavedCompression: bytesSaved, - nodeID: nodeID, - expirationTime: expirationTime, - onFinishedHandling: onFinishedHandling, - }, - fields: fieldValues, - }, nil -} diff --git a/avalanchego/message/codec_test.go b/avalanchego/message/codec_test.go deleted file mode 100644 index 190d7184..00000000 --- a/avalanchego/message/codec_test.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "math" - "net" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/staking" - "github.com/ava-labs/avalanchego/utils/ips" - "github.com/ava-labs/avalanchego/utils/units" -) - -func TestCodecPackInvalidOp(t *testing.T) { - codec, err := NewCodecWithMemoryPool("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) - require.NoError(t, err) - - _, err = codec.Pack(math.MaxUint8, make(map[Field]interface{}), false, false) - require.Error(t, err) - - _, err = codec.Pack(math.MaxUint8, make(map[Field]interface{}), true, false) - require.Error(t, err) -} - -func TestCodecPackMissingField(t *testing.T) { - codec, err := NewCodecWithMemoryPool("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) - require.NoError(t, err) - - _, err = codec.Pack(Get, make(map[Field]interface{}), false, false) - require.Error(t, err) - - _, err = codec.Pack(Get, make(map[Field]interface{}), true, false) - require.Error(t, err) -} - -func TestCodecParseInvalidOp(t *testing.T) { - codec, err := NewCodecWithMemoryPool("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) - require.NoError(t, err) - - _, err = codec.Parse([]byte{math.MaxUint8}, dummyNodeID, dummyOnFinishedHandling) - require.Error(t, err) -} - -func TestCodecParseExtraSpace(t *testing.T) { - codec, err := NewCodecWithMemoryPool("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) - require.NoError(t, err) - - _, err = codec.Parse([]byte{byte(Ping), 0x00, 0x00}, dummyNodeID, dummyOnFinishedHandling) - require.Error(t, err) - - _, err = codec.Parse([]byte{byte(Ping), 0x00, 0x01}, dummyNodeID, dummyOnFinishedHandling) - require.Error(t, err) -} - -func TestDeadlineOverride(t *testing.T) { - c, err := NewCodecWithMemoryPool("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) - require.NoError(t, err) - - id := ids.GenerateTestID() - m 
:= inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: PushQuery, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - Deadline: uint64(time.Now().Add(1337 * time.Hour).Unix()), - ContainerID: id[:], - ContainerBytes: make([]byte, 1024), - }, - } - - packedIntf, err := c.Pack(m.op, m.fields, m.op.Compressible(), false) - require.NoError(t, err, "failed to pack on operation %s", m.op) - - unpackedIntf, err := c.Parse(packedIntf.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err, "failed to parse w/ compression on operation %s", m.op) - - unpacked := unpackedIntf.(*inboundMessageWithPacker) - require.NotEqual(t, unpacked.ExpirationTime(), time.Now().Add(1337*time.Hour)) - require.True(t, time.Since(unpacked.ExpirationTime()) <= 10*time.Second) -} - -// Test packing and then parsing messages -// when using a gzip compressor -func TestCodecPackParseGzip(t *testing.T) { - c, err := NewCodecWithMemoryPool("", prometheus.DefaultRegisterer, 2*units.MiB, 10*time.Second) - require.NoError(t, err) - id := ids.GenerateTestID() - - tlsCert, err := staking.NewTLSCert() - require.NoError(t, err) - cert := tlsCert.Leaf - - msgs := []inboundMessageWithPacker{ - { - inboundMessage: inboundMessage{ - op: Version, - }, - fields: map[Field]interface{}{ - NetworkID: uint32(0), - NodeID: uint32(1337), - MyTime: uint64(time.Now().Unix()), - IP: ips.IPPort{IP: net.IPv4(1, 2, 3, 4)}, - VersionStr: "v1.2.3", - VersionTime: uint64(time.Now().Unix()), - SigBytes: []byte{'y', 'e', 'e', 't'}, - TrackedSubnets: [][]byte{id[:]}, - }, - }, - { - inboundMessage: inboundMessage{ - op: PeerList, - }, - fields: map[Field]interface{}{ - Peers: []ips.ClaimedIPPort{ - { - Cert: cert, - IPPort: ips.IPPort{IP: net.IPv4(1, 2, 3, 4)}, - Timestamp: uint64(time.Now().Unix()), - Signature: make([]byte, 65), - }, - }, - }, - }, - { - inboundMessage: inboundMessage{ - op: Ping, - }, - fields: map[Field]interface{}{}, - }, - { - inboundMessage: 
inboundMessage{ - op: Pong, - }, - fields: map[Field]interface{}{ - Uptime: uint8(80), - }, - }, - { - inboundMessage: inboundMessage{ - op: GetAcceptedFrontier, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - Deadline: uint64(time.Now().Unix()), - }, - }, - { - inboundMessage: inboundMessage{ - op: AcceptedFrontier, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - ContainerIDs: [][]byte{id[:]}, - }, - }, - { - inboundMessage: inboundMessage{ - op: GetAccepted, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - Deadline: uint64(time.Now().Unix()), - ContainerIDs: [][]byte{id[:]}, - }, - }, - { - inboundMessage: inboundMessage{ - op: Accepted, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - ContainerIDs: [][]byte{id[:]}, - }, - }, - { - inboundMessage: inboundMessage{ - op: Ancestors, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - MultiContainerBytes: [][]byte{id[:]}, - }, - }, - { - inboundMessage: inboundMessage{ - op: Get, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - Deadline: uint64(time.Now().Unix()), - ContainerID: id[:], - }, - }, - { - inboundMessage: inboundMessage{ - op: Put, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - ContainerID: id[:], - ContainerBytes: make([]byte, 1024), - }, - }, - { - inboundMessage: inboundMessage{ - op: PushQuery, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - Deadline: uint64(time.Now().Unix()), - ContainerID: id[:], - ContainerBytes: make([]byte, 1024), - }, - }, - { - inboundMessage: inboundMessage{ - op: PullQuery, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - Deadline: uint64(time.Now().Unix()), - ContainerID: id[:], - }, - }, - { - inboundMessage: inboundMessage{ - op: Chits, - }, - fields: 
map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(1337), - ContainerIDs: [][]byte{id[:]}, - }, - }, - } - for _, m := range msgs { - packedIntf, err := c.Pack(m.op, m.fields, m.op.Compressible(), false) - require.NoError(t, err, "failed to pack on operation %s", m.op) - - unpackedIntf, err := c.Parse(packedIntf.Bytes(), dummyNodeID, dummyOnFinishedHandling) - require.NoError(t, err, "failed to parse w/ compression on operation %s", m.op) - - unpacked := unpackedIntf.(*inboundMessageWithPacker) - - require.EqualValues(t, len(m.fields), len(unpacked.fields)) - } -} diff --git a/avalanchego/message/creator.go b/avalanchego/message/creator.go index 059c5ec9..f1a6def2 100644 --- a/avalanchego/message/creator.go +++ b/avalanchego/message/creator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -9,46 +9,42 @@ import ( "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/compression" + "github.com/ava-labs/avalanchego/utils/logging" ) -var _ Creator = &creator{} +var _ Creator = (*creator)(nil) type Creator interface { OutboundMsgBuilder InboundMsgBuilder - InternalMsgBuilder } type creator struct { OutboundMsgBuilder InboundMsgBuilder - InternalMsgBuilder } -func NewCreator(metrics prometheus.Registerer, parentNamespace string, compressionEnabled bool, maxInboundMessageTimeout time.Duration) (Creator, error) { +func NewCreator( + log logging.Logger, + metrics prometheus.Registerer, + parentNamespace string, + compressionType compression.Type, + maxMessageTimeout time.Duration, +) (Creator, error) { namespace := fmt.Sprintf("%s_codec", parentNamespace) - codec, err := NewCodecWithMemoryPool(namespace, metrics, int64(constants.DefaultMaxMessageSize), maxInboundMessageTimeout) + builder, err := newMsgBuilder( + log, 
+ namespace, + metrics, + maxMessageTimeout, + ) if err != nil { return nil, err } - return &creator{ - OutboundMsgBuilder: NewOutboundBuilderWithPacker(codec, compressionEnabled), - InboundMsgBuilder: NewInboundBuilderWithPacker(codec), - InternalMsgBuilder: NewInternalBuilder(), - }, nil -} -func NewCreatorWithProto(metrics prometheus.Registerer, parentNamespace string, compressionEnabled bool, maxInboundMessageTimeout time.Duration) (Creator, error) { - // different namespace, not to be in conflict with packer - namespace := fmt.Sprintf("%s_proto_codec", parentNamespace) - builder, err := newMsgBuilderProtobuf(namespace, metrics, int64(constants.DefaultMaxMessageSize), maxInboundMessageTimeout) - if err != nil { - return nil, err - } return &creator{ - OutboundMsgBuilder: newOutboundBuilderWithProto(compressionEnabled, builder), - InboundMsgBuilder: newInboundBuilderWithProto(builder), - InternalMsgBuilder: NewInternalBuilder(), + OutboundMsgBuilder: newOutboundBuilder(compressionType, builder), + InboundMsgBuilder: newInboundBuilder(builder), }, nil } diff --git a/avalanchego/message/fields.go b/avalanchego/message/fields.go index d1b8d248..87bffe51 100644 --- a/avalanchego/message/fields.go +++ b/avalanchego/message/fields.go @@ -1,151 +1,142 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message import ( - "github.com/ava-labs/avalanchego/utils/wrappers" + "errors" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/constants" ) -// Field that may be packed into a message -type Field uint32 - -// Fields that may be packed. These values are not sent over the wire. -const ( - VersionStr Field = iota // Used in handshake - NetworkID // Used in handshake - NodeID // TODO: remove NodeID. 
Used in handshake - MyTime // Used in handshake - IP // Used in handshake - ChainID // Used for dispatching - RequestID // Used for all messages - Deadline // Used for request messages - ContainerID // Used for querying - ContainerBytes // Used for gossiping - ContainerIDs // Used for querying - MultiContainerBytes // Used in Ancestors - SigBytes // Used in handshake / peer gossiping - VersionTime // Used in handshake / peer gossiping - Peers // Used in peer gossiping - TrackedSubnets // Used in handshake / peer gossiping - AppBytes // Used at application level - VMMessage // Used internally - Uptime // Used for Pong - SummaryBytes // Used for state sync - SummaryHeights // Used for state sync - SummaryIDs // Used for state sync - VersionStruct // Used internally +var ( + errMissingField = errors.New("message missing field") + + _ chainIDGetter = (*p2p.GetStateSummaryFrontier)(nil) + _ chainIDGetter = (*p2p.StateSummaryFrontier)(nil) + _ chainIDGetter = (*p2p.GetAcceptedStateSummary)(nil) + _ chainIDGetter = (*p2p.AcceptedStateSummary)(nil) + _ chainIDGetter = (*p2p.GetAcceptedFrontier)(nil) + _ chainIDGetter = (*p2p.AcceptedFrontier)(nil) + _ chainIDGetter = (*p2p.GetAccepted)(nil) + _ chainIDGetter = (*p2p.Accepted)(nil) + _ chainIDGetter = (*p2p.GetAncestors)(nil) + _ chainIDGetter = (*p2p.Ancestors)(nil) + _ chainIDGetter = (*p2p.Get)(nil) + _ chainIDGetter = (*p2p.Put)(nil) + _ chainIDGetter = (*p2p.PushQuery)(nil) + _ chainIDGetter = (*p2p.PullQuery)(nil) + _ chainIDGetter = (*p2p.Chits)(nil) + _ chainIDGetter = (*p2p.AppRequest)(nil) + _ chainIDGetter = (*p2p.AppResponse)(nil) + _ chainIDGetter = (*p2p.AppGossip)(nil) + + _ requestIDGetter = (*p2p.GetStateSummaryFrontier)(nil) + _ requestIDGetter = (*p2p.StateSummaryFrontier)(nil) + _ requestIDGetter = (*p2p.GetAcceptedStateSummary)(nil) + _ requestIDGetter = (*p2p.AcceptedStateSummary)(nil) + _ requestIDGetter = (*p2p.GetAcceptedFrontier)(nil) + _ requestIDGetter = (*p2p.AcceptedFrontier)(nil) + _ 
requestIDGetter = (*p2p.GetAccepted)(nil) + _ requestIDGetter = (*p2p.Accepted)(nil) + _ requestIDGetter = (*p2p.GetAncestors)(nil) + _ requestIDGetter = (*p2p.Ancestors)(nil) + _ requestIDGetter = (*p2p.Get)(nil) + _ requestIDGetter = (*p2p.Put)(nil) + _ requestIDGetter = (*p2p.PushQuery)(nil) + _ requestIDGetter = (*p2p.PullQuery)(nil) + _ requestIDGetter = (*p2p.Chits)(nil) + _ requestIDGetter = (*p2p.AppRequest)(nil) + _ requestIDGetter = (*p2p.AppResponse)(nil) + + _ engineTypeGetter = (*p2p.GetAcceptedFrontier)(nil) + _ engineTypeGetter = (*p2p.GetAccepted)(nil) + _ engineTypeGetter = (*p2p.GetAncestors)(nil) + _ engineTypeGetter = (*p2p.Get)(nil) + _ engineTypeGetter = (*p2p.Put)(nil) + _ engineTypeGetter = (*p2p.PushQuery)(nil) + _ engineTypeGetter = (*p2p.PullQuery)(nil) + + _ deadlineGetter = (*p2p.GetStateSummaryFrontier)(nil) + _ deadlineGetter = (*p2p.GetAcceptedStateSummary)(nil) + _ deadlineGetter = (*p2p.GetAcceptedFrontier)(nil) + _ deadlineGetter = (*p2p.GetAccepted)(nil) + _ deadlineGetter = (*p2p.GetAncestors)(nil) + _ deadlineGetter = (*p2p.Get)(nil) + _ deadlineGetter = (*p2p.PushQuery)(nil) + _ deadlineGetter = (*p2p.PullQuery)(nil) + _ deadlineGetter = (*p2p.AppRequest)(nil) ) -// Packer returns the packer function that can be used to pack this field. 
-func (f Field) Packer() func(*wrappers.Packer, interface{}) { - switch f { - case VersionStr: - return wrappers.TryPackStr - case NetworkID, NodeID, RequestID: - return wrappers.TryPackInt - case MyTime, Deadline, VersionTime: - return wrappers.TryPackLong - case IP: - return wrappers.TryPackIP - case ChainID, ContainerID: // TODO: This will be shortened to use a modified varint spec - return wrappers.TryPackHash - case ContainerBytes, AppBytes, SigBytes, SummaryBytes: - return wrappers.TryPackBytes - case ContainerIDs, TrackedSubnets, SummaryIDs: - return wrappers.TryPackHashes - case MultiContainerBytes: - return wrappers.TryPack2DBytes - case Peers: - return wrappers.TryPackClaimedIPPortList - case Uptime: - return wrappers.TryPackByte - case SummaryHeights: - return wrappers.TryPackUint64Slice - default: - return nil +type chainIDGetter interface { + GetChainId() []byte +} + +func GetChainID(m any) (ids.ID, error) { + msg, ok := m.(chainIDGetter) + if !ok { + return ids.Empty, errMissingField + } + chainIDBytes := msg.GetChainId() + return ids.ToID(chainIDBytes) +} + +type sourceChainIDGetter interface { + GetSourceChainID() ids.ID +} + +func GetSourceChainID(m any) (ids.ID, error) { + msg, ok := m.(sourceChainIDGetter) + if !ok { + return GetChainID(m) } + return msg.GetSourceChainID(), nil } -// Unpacker returns the unpacker function that can be used to unpack this field. 
-func (f Field) Unpacker() func(*wrappers.Packer) interface{} { - switch f { - case VersionStr: - return wrappers.TryUnpackStr - case NetworkID, NodeID, RequestID: - return wrappers.TryUnpackInt - case MyTime, Deadline, VersionTime: - return wrappers.TryUnpackLong - case IP: - return wrappers.TryUnpackIP - case ChainID, ContainerID: // TODO: This will be shortened to use a modified varint spec - return wrappers.TryUnpackHash - case ContainerBytes, AppBytes, SigBytes, SummaryBytes: - return wrappers.TryUnpackBytes - case ContainerIDs, TrackedSubnets, SummaryIDs: - return wrappers.TryUnpackHashes - case MultiContainerBytes: - return wrappers.TryUnpack2DBytes - case Peers: - return wrappers.TryUnpackClaimedIPPortList - case Uptime: - return wrappers.TryUnpackByte - case SummaryHeights: - return wrappers.TryUnpackUint64Slice - default: - return nil +type requestIDGetter interface { + GetRequestId() uint32 +} + +func GetRequestID(m any) (uint32, bool) { + if msg, ok := m.(requestIDGetter); ok { + requestID := msg.GetRequestId() + return requestID, true + } + + // AppGossip is the only message currently not containing a requestID + // Here we assign the requestID already in use for gossiped containers + // to allow a uniform handling of all messages + if _, ok := m.(*p2p.AppGossip); ok { + return constants.GossipMsgRequestID, true + } + + return 0, false +} + +type engineTypeGetter interface { + GetEngineType() p2p.EngineType +} + +func GetEngineType(m any) (p2p.EngineType, bool) { + msg, ok := m.(engineTypeGetter) + if !ok { + return p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, false } + return msg.GetEngineType(), true +} + +type deadlineGetter interface { + GetDeadline() uint64 } -func (f Field) String() string { - switch f { - case VersionStr: - return "VersionStr" - case NetworkID: - return "NetworkID" - case NodeID: - return "NodeID" - case MyTime: - return "MyTime" - case IP: - return "IP" - case ChainID: - return "ChainID" - case RequestID: - return "RequestID" - case 
Deadline: - return "Deadline" - case ContainerID: - return "ContainerID" - case ContainerBytes: - return "Container Bytes" - case ContainerIDs: - return "Container IDs" - case MultiContainerBytes: - return "MultiContainerBytes" - case AppBytes: - return "AppBytes" - case SigBytes: - return "SigBytes" - case VersionTime: - return "VersionTime" - case Peers: - return "Peers" - case TrackedSubnets: - return "TrackedSubnets" - case VMMessage: - return "VMMessage" - case Uptime: - return "Uptime" - case SummaryBytes: - return "Summary" - case SummaryHeights: - return "SummaryHeights" - case SummaryIDs: - return "SummaryIDs" - case VersionStruct: - return "VersionStruct" - default: - return "Unknown Field" +func GetDeadline(m any) (time.Duration, bool) { + msg, ok := m.(deadlineGetter) + if !ok { + return 0, false } + deadline := msg.GetDeadline() + return time.Duration(deadline), true } diff --git a/avalanchego/message/inbound_msg_builder.go b/avalanchego/message/inbound_msg_builder.go index 9dcd99e5..1cc1edcd 100644 --- a/avalanchego/message/inbound_msg_builder.go +++ b/avalanchego/message/inbound_msg_builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message @@ -7,213 +7,92 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) -var _ InboundMsgBuilder = &inMsgBuilderWithPacker{} +var _ InboundMsgBuilder = (*inMsgBuilder)(nil) type InboundMsgBuilder interface { - Parser - - InboundGetStateSummaryFrontier( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - nodeID ids.NodeID, - ) InboundMessage - - InboundStateSummaryFrontier( - chainID ids.ID, - requestID uint32, - summary []byte, - nodeID ids.NodeID, - ) InboundMessage - - InboundGetAcceptedStateSummary( - chainID ids.ID, - requestID uint32, - heights []uint64, - deadline time.Duration, - nodeID ids.NodeID, - ) InboundMessage - - InboundAcceptedStateSummary( - chainID ids.ID, - requestID uint32, - summaryIDs []ids.ID, - nodeID ids.NodeID, - ) InboundMessage - - InboundGetAcceptedFrontier( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - nodeID ids.NodeID, - ) InboundMessage - - InboundAcceptedFrontier( - chainID ids.ID, - requestID uint32, - containerIDs []ids.ID, - nodeID ids.NodeID, - ) InboundMessage - - InboundGetAccepted( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerIDs []ids.ID, - nodeID ids.NodeID, - ) InboundMessage - - InboundAccepted( - chainID ids.ID, - requestID uint32, - containerIDs []ids.ID, - nodeID ids.NodeID, - ) InboundMessage - - InboundAncestors( - chainID ids.ID, - requestID uint32, - containers [][]byte, - nodeID ids.NodeID, - ) InboundMessage // used in UTs only - - InboundPushQuery( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - container []byte, - nodeID ids.NodeID, - ) InboundMessage - - InboundPullQuery( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerID ids.ID, + // Parse reads given bytes as InboundMessage + Parse( + bytes []byte, nodeID ids.NodeID, - ) InboundMessage - - InboundChits( - chainID ids.ID, - 
requestID uint32, - containerIDs []ids.ID, - nodeID ids.NodeID, - ) InboundMessage - - InboundAppRequest( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - msg []byte, - nodeID ids.NodeID, - ) InboundMessage - - InboundAppResponse( - chainID ids.ID, - requestID uint32, - msg []byte, - nodeID ids.NodeID, - ) InboundMessage - - InboundGet( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerID ids.ID, - nodeID ids.NodeID, - ) InboundMessage - - InboundPut( - chainID ids.ID, - requestID uint32, - container []byte, - nodeID ids.NodeID, - ) InboundMessage // used in UTs only + onFinishedHandling func(), + ) (InboundMessage, error) } -type inMsgBuilderWithPacker struct { - Codec - clock mockable.Clock +type inMsgBuilder struct { + builder *msgBuilder } -func NewInboundBuilderWithPacker(c Codec) InboundMsgBuilder { - return &inMsgBuilderWithPacker{ - Codec: c, +func newInboundBuilder(builder *msgBuilder) InboundMsgBuilder { + return &inMsgBuilder{ + builder: builder, } } -func (b *inMsgBuilderWithPacker) SetTime(t time.Time) { - b.clock.Set(t) - b.Codec.SetTime(t) +func (b *inMsgBuilder) Parse(bytes []byte, nodeID ids.NodeID, onFinishedHandling func()) (InboundMessage, error) { + return b.builder.parseInbound(bytes, nodeID, onFinishedHandling) } -func (b *inMsgBuilderWithPacker) InboundGetStateSummaryFrontier( +func InboundGetStateSummaryFrontier( chainID ids.ID, requestID uint32, deadline time.Duration, nodeID ids.NodeID, ) InboundMessage { - received := b.clock.Time() - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: GetStateSummaryFrontier, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, + return &inboundMessage{ + nodeID: nodeID, + op: GetStateSummaryFrontierOp, + message: &p2p.GetStateSummaryFrontier{ + ChainId: chainID[:], + RequestId: requestID, Deadline: uint64(deadline), }, + expiration: 
time.Now().Add(deadline), } } -func (b *inMsgBuilderWithPacker) InboundStateSummaryFrontier( +func InboundStateSummaryFrontier( chainID ids.ID, requestID uint32, summary []byte, nodeID ids.NodeID, ) InboundMessage { - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: StateSummaryFrontier, - nodeID: nodeID, - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - SummaryBytes: summary, - }, + return &inboundMessage{ + nodeID: nodeID, + op: StateSummaryFrontierOp, + message: &p2p.StateSummaryFrontier{ + ChainId: chainID[:], + RequestId: requestID, + Summary: summary, + }, + expiration: mockable.MaxTime, } } -func (b *inMsgBuilderWithPacker) InboundGetAcceptedStateSummary( +func InboundGetAcceptedStateSummary( chainID ids.ID, requestID uint32, heights []uint64, deadline time.Duration, nodeID ids.NodeID, ) InboundMessage { - received := b.clock.Time() - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: GetAcceptedStateSummary, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - SummaryHeights: heights, + return &inboundMessage{ + nodeID: nodeID, + op: GetAcceptedStateSummaryOp, + message: &p2p.GetAcceptedStateSummary{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + Heights: heights, }, + expiration: time.Now().Add(deadline), } } -func (b *inMsgBuilderWithPacker) InboundAcceptedStateSummary( +func InboundAcceptedStateSummary( chainID ids.ID, requestID uint32, summaryIDs []ids.ID, @@ -221,41 +100,39 @@ func (b *inMsgBuilderWithPacker) InboundAcceptedStateSummary( ) InboundMessage { summaryIDBytes := make([][]byte, len(summaryIDs)) encodeIDs(summaryIDs, summaryIDBytes) - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: AcceptedStateSummary, - nodeID: nodeID, - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], 
- RequestID: requestID, - SummaryIDs: summaryIDBytes, - }, + return &inboundMessage{ + nodeID: nodeID, + op: AcceptedStateSummaryOp, + message: &p2p.AcceptedStateSummary{ + ChainId: chainID[:], + RequestId: requestID, + SummaryIds: summaryIDBytes, + }, + expiration: mockable.MaxTime, } } -func (b *inMsgBuilderWithPacker) InboundGetAcceptedFrontier( +func InboundGetAcceptedFrontier( chainID ids.ID, requestID uint32, deadline time.Duration, nodeID ids.NodeID, + engineType p2p.EngineType, ) InboundMessage { - received := b.clock.Time() - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: GetAcceptedFrontier, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - }, + return &inboundMessage{ + nodeID: nodeID, + op: GetAcceptedFrontierOp, + message: &p2p.GetAcceptedFrontier{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + EngineType: engineType, + }, + expiration: time.Now().Add(deadline), } } -func (b *inMsgBuilderWithPacker) InboundAcceptedFrontier( +func InboundAcceptedFrontier( chainID ids.ID, requestID uint32, containerIDs []ids.ID, @@ -263,45 +140,43 @@ func (b *inMsgBuilderWithPacker) InboundAcceptedFrontier( ) InboundMessage { containerIDBytes := make([][]byte, len(containerIDs)) encodeIDs(containerIDs, containerIDBytes) - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: AcceptedFrontier, - nodeID: nodeID, - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - ContainerIDs: containerIDBytes, - }, + return &inboundMessage{ + nodeID: nodeID, + op: AcceptedFrontierOp, + message: &p2p.AcceptedFrontier{ + ChainId: chainID[:], + RequestId: requestID, + ContainerIds: containerIDBytes, + }, + expiration: mockable.MaxTime, } } -func (b *inMsgBuilderWithPacker) InboundGetAccepted( +func InboundGetAccepted( chainID ids.ID, requestID uint32, 
deadline time.Duration, containerIDs []ids.ID, nodeID ids.NodeID, + engineType p2p.EngineType, ) InboundMessage { - received := b.clock.Time() containerIDBytes := make([][]byte, len(containerIDs)) encodeIDs(containerIDs, containerIDBytes) - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: GetAccepted, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, + return &inboundMessage{ + nodeID: nodeID, + op: GetAcceptedOp, + message: &p2p.GetAccepted{ + ChainId: chainID[:], + RequestId: requestID, Deadline: uint64(deadline), - ContainerIDs: containerIDBytes, + ContainerIds: containerIDBytes, + EngineType: engineType, }, + expiration: time.Now().Add(deadline), } } -func (b *inMsgBuilderWithPacker) InboundAccepted( +func InboundAccepted( chainID ids.ID, requestID uint32, containerIDs []ids.ID, @@ -309,186 +184,121 @@ func (b *inMsgBuilderWithPacker) InboundAccepted( ) InboundMessage { containerIDBytes := make([][]byte, len(containerIDs)) encodeIDs(containerIDs, containerIDBytes) - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Accepted, - nodeID: nodeID, - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - ContainerIDs: containerIDBytes, - }, + return &inboundMessage{ + nodeID: nodeID, + op: AcceptedOp, + message: &p2p.Accepted{ + ChainId: chainID[:], + RequestId: requestID, + ContainerIds: containerIDBytes, + }, + expiration: mockable.MaxTime, } } -func (b *inMsgBuilderWithPacker) InboundPushQuery( +func InboundPushQuery( chainID ids.ID, requestID uint32, deadline time.Duration, container []byte, nodeID ids.NodeID, + engineType p2p.EngineType, ) InboundMessage { - received := b.clock.Time() - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: PushQuery, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - 
RequestID: requestID, - Deadline: uint64(deadline), - ContainerBytes: container, - }, + return &inboundMessage{ + nodeID: nodeID, + op: PushQueryOp, + message: &p2p.PushQuery{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + Container: container, + EngineType: engineType, + }, + expiration: time.Now().Add(deadline), } } -func (b *inMsgBuilderWithPacker) InboundPullQuery( +func InboundPullQuery( chainID ids.ID, requestID uint32, deadline time.Duration, containerID ids.ID, nodeID ids.NodeID, + engineType p2p.EngineType, ) InboundMessage { - received := b.clock.Time() - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: PullQuery, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, + return &inboundMessage{ + nodeID: nodeID, + op: PullQueryOp, + message: &p2p.PullQuery{ + ChainId: chainID[:], + RequestId: requestID, Deadline: uint64(deadline), - ContainerID: containerID[:], + ContainerId: containerID[:], + EngineType: engineType, }, + expiration: time.Now().Add(deadline), } } -func (b *inMsgBuilderWithPacker) InboundChits( +func InboundChits( chainID ids.ID, requestID uint32, - containerIDs []ids.ID, + preferredContainerIDs []ids.ID, + acceptedContainerIDs []ids.ID, nodeID ids.NodeID, ) InboundMessage { - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Chits, - nodeID: nodeID, - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - ContainerIDs: containerIDBytes, - }, + preferredContainerIDBytes := make([][]byte, len(preferredContainerIDs)) + encodeIDs(preferredContainerIDs, preferredContainerIDBytes) + acceptedContainerIDBytes := make([][]byte, len(acceptedContainerIDs)) + encodeIDs(acceptedContainerIDs, acceptedContainerIDBytes) + return &inboundMessage{ + nodeID: 
nodeID, + op: ChitsOp, + message: &p2p.Chits{ + ChainId: chainID[:], + RequestId: requestID, + PreferredContainerIds: preferredContainerIDBytes, + AcceptedContainerIds: acceptedContainerIDBytes, + }, + expiration: mockable.MaxTime, } } -func (b *inMsgBuilderWithPacker) InboundAppRequest( +func InboundAppRequest( chainID ids.ID, requestID uint32, deadline time.Duration, msg []byte, nodeID ids.NodeID, ) InboundMessage { - received := b.clock.Time() - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: AppRequest, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, + return &inboundMessage{ + nodeID: nodeID, + op: AppRequestOp, + message: &p2p.AppRequest{ + ChainId: chainID[:], + RequestId: requestID, Deadline: uint64(deadline), AppBytes: msg, }, + expiration: time.Now().Add(deadline), } } -func (b *inMsgBuilderWithPacker) InboundAppResponse( +func InboundAppResponse( chainID ids.ID, requestID uint32, msg []byte, nodeID ids.NodeID, ) InboundMessage { - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: AppResponse, - nodeID: nodeID, - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, + return &inboundMessage{ + nodeID: nodeID, + op: AppResponseOp, + message: &p2p.AppResponse{ + ChainId: chainID[:], + RequestId: requestID, AppBytes: msg, }, - } -} - -func (b *inMsgBuilderWithPacker) InboundGet( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerID ids.ID, - nodeID ids.NodeID, -) InboundMessage { // used in UTs only - received := b.clock.Time() - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Put, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - ContainerID: containerID[:], - }, - } -} - -func (b *inMsgBuilderWithPacker) 
InboundPut( - chainID ids.ID, - requestID uint32, - container []byte, - nodeID ids.NodeID, -) InboundMessage { // used in UTs only - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Put, - nodeID: nodeID, - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - ContainerBytes: container, - }, - } -} - -func (b *inMsgBuilderWithPacker) InboundAncestors( - chainID ids.ID, - requestID uint32, - containers [][]byte, - nodeID ids.NodeID, -) InboundMessage { // used in UTs only - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Ancestors, - nodeID: nodeID, - }, - fields: map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - MultiContainerBytes: containers, - }, + expiration: mockable.MaxTime, } } diff --git a/avalanchego/message/inbound_msg_builder_proto.go b/avalanchego/message/inbound_msg_builder_proto.go deleted file mode 100644 index c3681755..00000000 --- a/avalanchego/message/inbound_msg_builder_proto.go +++ /dev/null @@ -1,439 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/proto/pb/p2p" -) - -var _ InboundMsgBuilder = &inMsgBuilderWithProto{} - -type inMsgBuilderWithProto struct { - protoBuilder *msgBuilderProtobuf -} - -// Use "message.NewCreatorWithProto" to import this function -// since we do not expose "msgBuilderProtobuf" yet -func newInboundBuilderWithProto(protoBuilder *msgBuilderProtobuf) InboundMsgBuilder { - return &inMsgBuilderWithProto{ - protoBuilder: protoBuilder, - } -} - -func (b *inMsgBuilderWithProto) SetTime(t time.Time) { - b.protoBuilder.clock.Set(t) -} - -func (b *inMsgBuilderWithProto) Parse(bytes []byte, nodeID ids.NodeID, onFinishedHandling func()) (InboundMessage, error) { - return b.protoBuilder.parseInbound(bytes, nodeID, onFinishedHandling) -} - -func (b *inMsgBuilderWithProto) InboundGetStateSummaryFrontier( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - nodeID ids.NodeID, -) InboundMessage { - received := b.protoBuilder.clock.Time() - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: GetStateSummaryFrontier, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - msg: &p2p.Message{ - Message: &p2p.Message_GetStateSummaryFrontier{ - GetStateSummaryFrontier: &p2p.GetStateSummaryFrontier{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundStateSummaryFrontier( - chainID ids.ID, - requestID uint32, - summary []byte, - nodeID ids.NodeID, -) InboundMessage { - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: StateSummaryFrontier, - nodeID: nodeID, - }, - msg: &p2p.Message{ - Message: &p2p.Message_StateSummaryFrontier_{ - StateSummaryFrontier_: &p2p.StateSummaryFrontier{ - ChainId: chainID[:], - RequestId: requestID, - Summary: summary, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundGetAcceptedStateSummary( - 
chainID ids.ID, - requestID uint32, - heights []uint64, - deadline time.Duration, - nodeID ids.NodeID, -) InboundMessage { - received := b.protoBuilder.clock.Time() - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: GetAcceptedStateSummary, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - msg: &p2p.Message{ - Message: &p2p.Message_GetAcceptedStateSummary{ - GetAcceptedStateSummary: &p2p.GetAcceptedStateSummary{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - Heights: heights, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundAcceptedStateSummary( - chainID ids.ID, - requestID uint32, - summaryIDs []ids.ID, - nodeID ids.NodeID, -) InboundMessage { - summaryIDBytes := make([][]byte, len(summaryIDs)) - encodeIDs(summaryIDs, summaryIDBytes) - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: AcceptedStateSummary, - nodeID: nodeID, - }, - msg: &p2p.Message{ - Message: &p2p.Message_AcceptedStateSummary_{ - AcceptedStateSummary_: &p2p.AcceptedStateSummary{ - ChainId: chainID[:], - RequestId: requestID, - SummaryIds: summaryIDBytes, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundGetAcceptedFrontier( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - nodeID ids.NodeID, -) InboundMessage { - received := b.protoBuilder.clock.Time() - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: GetAcceptedFrontier, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - msg: &p2p.Message{ - Message: &p2p.Message_GetAcceptedFrontier{ - GetAcceptedFrontier: &p2p.GetAcceptedFrontier{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundAcceptedFrontier( - chainID ids.ID, - requestID uint32, - containerIDs []ids.ID, - nodeID ids.NodeID, -) InboundMessage { - containerIDBytes := make([][]byte, len(containerIDs)) - 
encodeIDs(containerIDs, containerIDBytes) - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: AcceptedFrontier, - nodeID: nodeID, - }, - msg: &p2p.Message{ - Message: &p2p.Message_AcceptedFrontier_{ - AcceptedFrontier_: &p2p.AcceptedFrontier{ - ChainId: chainID[:], - RequestId: requestID, - ContainerIds: containerIDBytes, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundGetAccepted( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerIDs []ids.ID, - nodeID ids.NodeID, -) InboundMessage { - received := b.protoBuilder.clock.Time() - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: GetAccepted, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - msg: &p2p.Message{ - Message: &p2p.Message_GetAccepted{ - GetAccepted: &p2p.GetAccepted{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - ContainerIds: containerIDBytes, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundAccepted( - chainID ids.ID, - requestID uint32, - containerIDs []ids.ID, - nodeID ids.NodeID, -) InboundMessage { - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: Accepted, - nodeID: nodeID, - }, - msg: &p2p.Message{ - Message: &p2p.Message_Accepted_{ - Accepted_: &p2p.Accepted{ - ChainId: chainID[:], - RequestId: requestID, - ContainerIds: containerIDBytes, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundPushQuery( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - container []byte, - nodeID ids.NodeID, -) InboundMessage { - received := b.protoBuilder.clock.Time() - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: PushQuery, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - 
msg: &p2p.Message{ - Message: &p2p.Message_PushQuery{ - PushQuery: &p2p.PushQuery{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - Container: container, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundPullQuery( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerID ids.ID, - nodeID ids.NodeID, -) InboundMessage { - received := b.protoBuilder.clock.Time() - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: PullQuery, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - msg: &p2p.Message{ - Message: &p2p.Message_PullQuery{ - PullQuery: &p2p.PullQuery{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - ContainerId: containerID[:], - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundChits( - chainID ids.ID, - requestID uint32, - containerIDs []ids.ID, - nodeID ids.NodeID, -) InboundMessage { - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: Chits, - nodeID: nodeID, - }, - msg: &p2p.Message{ - Message: &p2p.Message_Chits{ - Chits: &p2p.Chits{ - ChainId: chainID[:], - RequestId: requestID, - ContainerIds: containerIDBytes, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundAppRequest( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - msg []byte, - nodeID ids.NodeID, -) InboundMessage { - received := b.protoBuilder.clock.Time() - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: AppRequest, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - msg: &p2p.Message{ - Message: &p2p.Message_AppRequest{ - AppRequest: &p2p.AppRequest{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - AppBytes: msg, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundAppResponse( - chainID ids.ID, - requestID uint32, - msg 
[]byte, - nodeID ids.NodeID, -) InboundMessage { - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: AppResponse, - nodeID: nodeID, - }, - msg: &p2p.Message{ - Message: &p2p.Message_AppResponse{ - AppResponse: &p2p.AppResponse{ - ChainId: chainID[:], - RequestId: requestID, - AppBytes: msg, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundGet( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerID ids.ID, - nodeID ids.NodeID, -) InboundMessage { // used in UTs only - received := b.protoBuilder.clock.Time() - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: Get, - nodeID: nodeID, - expirationTime: received.Add(deadline), - }, - msg: &p2p.Message{ - Message: &p2p.Message_Get{ - Get: &p2p.Get{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - ContainerId: containerID[:], - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundPut( - chainID ids.ID, - requestID uint32, - container []byte, - nodeID ids.NodeID, -) InboundMessage { // used in UTs only - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: Put, - nodeID: nodeID, - }, - msg: &p2p.Message{ - Message: &p2p.Message_Put{ - Put: &p2p.Put{ - ChainId: chainID[:], - RequestId: requestID, - Container: container, - }, - }, - }, - } -} - -func (b *inMsgBuilderWithProto) InboundAncestors( - chainID ids.ID, - requestID uint32, - containers [][]byte, - nodeID ids.NodeID, -) InboundMessage { // used in UTs only - return &inboundMessageWithProto{ - inboundMessage: inboundMessage{ - op: Ancestors, - nodeID: nodeID, - }, - msg: &p2p.Message{ - Message: &p2p.Message_Ancestors_{ - Ancestors_: &p2p.Ancestors{ - ChainId: chainID[:], - RequestId: requestID, - Containers: containers, - }, - }, - }, - } -} diff --git a/avalanchego/message/inbound_msg_builder_proto_test.go b/avalanchego/message/inbound_msg_builder_proto_test.go deleted file mode 100644 index a29bf2c2..00000000 --- 
a/avalanchego/message/inbound_msg_builder_proto_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" -) - -func Test_newInboundBuilderWithProto(t *testing.T) { - t.Parallel() - require := require.New(t) - - mb, err := newMsgBuilderProtobuf("test", prometheus.NewRegistry(), int64(constants.DefaultMaxMessageSize), 5*time.Second) - require.NoError(err) - - builder := newInboundBuilderWithProto(mb) - - inMsg := builder.InboundAccepted( - ids.GenerateTestID(), - uint32(12345), - []ids.ID{ids.GenerateTestID()}, - ids.GenerateTestNodeID(), - ) - - t.Logf("outbound message built %q", inMsg.Op().String()) -} diff --git a/avalanchego/message/inbound_msg_builder_test.go b/avalanchego/message/inbound_msg_builder_test.go new file mode 100644 index 00000000..667a205d --- /dev/null +++ b/avalanchego/message/inbound_msg_builder_test.go @@ -0,0 +1,406 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package message + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" +) + +func Test_newMsgBuilder(t *testing.T) { + t.Parallel() + require := require.New(t) + + mb, err := newMsgBuilder( + logging.NoLog{}, + "test", + prometheus.NewRegistry(), + 10*time.Second, + ) + require.NoError(err) + require.NotNil(mb) +} + +func TestInboundMsgBuilder(t *testing.T) { + var ( + chainID = ids.GenerateTestID() + requestID uint32 = 12345 + deadline = time.Hour + nodeID = ids.GenerateTestNodeID() + summary = []byte{9, 8, 7} + appBytes = []byte{1, 3, 3, 7} + container = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} + containerIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + acceptedContainerIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + summaryIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + heights = []uint64{1000, 2000} + engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN + ) + + t.Run( + "InboundGetStateSummaryFrontier", + func(t *testing.T) { + require := require.New(t) + + start := time.Now() + msg := InboundGetStateSummaryFrontier( + chainID, + requestID, + deadline, + nodeID, + ) + end := time.Now() + + require.Equal(GetStateSummaryFrontierOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.False(msg.Expiration().Before(start.Add(deadline))) + require.False(end.Add(deadline).Before(msg.Expiration())) + innerMsg, ok := msg.Message().(*p2p.GetStateSummaryFrontier) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + }, + ) + + t.Run( + "InboundStateSummaryFrontier", + func(t *testing.T) { + require := require.New(t) + + msg := InboundStateSummaryFrontier( + chainID, + requestID, + summary, + nodeID, + ) + + 
require.Equal(StateSummaryFrontierOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.Equal(mockable.MaxTime, msg.Expiration()) + innerMsg, ok := msg.Message().(*p2p.StateSummaryFrontier) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(summary, innerMsg.Summary) + }, + ) + + t.Run( + "InboundGetAcceptedStateSummary", + func(t *testing.T) { + require := require.New(t) + + start := time.Now() + msg := InboundGetAcceptedStateSummary( + chainID, + requestID, + heights, + deadline, + nodeID, + ) + end := time.Now() + + require.Equal(GetAcceptedStateSummaryOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.False(msg.Expiration().Before(start.Add(deadline))) + require.False(end.Add(deadline).Before(msg.Expiration())) + innerMsg, ok := msg.Message().(*p2p.GetAcceptedStateSummary) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(heights, innerMsg.Heights) + }, + ) + + t.Run( + "InboundAcceptedStateSummary", + func(t *testing.T) { + require := require.New(t) + + msg := InboundAcceptedStateSummary( + chainID, + requestID, + summaryIDs, + nodeID, + ) + + require.Equal(AcceptedStateSummaryOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.Equal(mockable.MaxTime, msg.Expiration()) + innerMsg, ok := msg.Message().(*p2p.AcceptedStateSummary) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + summaryIDsBytes := make([][]byte, len(summaryIDs)) + for i, id := range summaryIDs { + id := id + summaryIDsBytes[i] = id[:] + } + require.Equal(summaryIDsBytes, innerMsg.SummaryIds) + }, + ) + + t.Run( + "InboundGetAcceptedFrontier", + func(t *testing.T) { + require := require.New(t) + + start := time.Now() + msg := InboundGetAcceptedFrontier( + chainID, + requestID, + deadline, + nodeID, + engineType, + ) + end := time.Now() + + 
require.Equal(GetAcceptedFrontierOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.False(msg.Expiration().Before(start.Add(deadline))) + require.False(end.Add(deadline).Before(msg.Expiration())) + innerMsg, ok := msg.Message().(*p2p.GetAcceptedFrontier) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(engineType, innerMsg.EngineType) + }, + ) + + t.Run( + "InboundAcceptedFrontier", + func(t *testing.T) { + require := require.New(t) + + msg := InboundAcceptedFrontier( + chainID, + requestID, + containerIDs, + nodeID, + ) + + require.Equal(AcceptedFrontierOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.Equal(mockable.MaxTime, msg.Expiration()) + innerMsg, ok := msg.Message().(*p2p.AcceptedFrontier) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + containerIDsBytes := make([][]byte, len(containerIDs)) + for i, id := range containerIDs { + id := id + containerIDsBytes[i] = id[:] + } + require.Equal(containerIDsBytes, innerMsg.ContainerIds) + }, + ) + + t.Run( + "InboundGetAccepted", + func(t *testing.T) { + require := require.New(t) + + start := time.Now() + msg := InboundGetAccepted( + chainID, + requestID, + deadline, + containerIDs, + nodeID, + engineType, + ) + end := time.Now() + + require.Equal(GetAcceptedOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.False(msg.Expiration().Before(start.Add(deadline))) + require.False(end.Add(deadline).Before(msg.Expiration())) + innerMsg, ok := msg.Message().(*p2p.GetAccepted) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(engineType, innerMsg.EngineType) + }, + ) + + t.Run( + "InboundAccepted", + func(t *testing.T) { + require := require.New(t) + + msg := InboundAccepted( + chainID, + requestID, + containerIDs, + nodeID, + ) + + require.Equal(AcceptedOp, msg.Op()) + 
require.Equal(nodeID, msg.NodeID()) + require.Equal(mockable.MaxTime, msg.Expiration()) + innerMsg, ok := msg.Message().(*p2p.Accepted) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + containerIDsBytes := make([][]byte, len(containerIDs)) + for i, id := range containerIDs { + id := id + containerIDsBytes[i] = id[:] + } + require.Equal(containerIDsBytes, innerMsg.ContainerIds) + }, + ) + + t.Run( + "InboundPushQuery", + func(t *testing.T) { + require := require.New(t) + + start := time.Now() + msg := InboundPushQuery( + chainID, + requestID, + deadline, + container, + nodeID, + engineType, + ) + end := time.Now() + + require.Equal(PushQueryOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.False(msg.Expiration().Before(start.Add(deadline))) + require.False(end.Add(deadline).Before(msg.Expiration())) + innerMsg, ok := msg.Message().(*p2p.PushQuery) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(container, innerMsg.Container) + require.Equal(engineType, innerMsg.EngineType) + }, + ) + + t.Run( + "InboundPullQuery", + func(t *testing.T) { + require := require.New(t) + + start := time.Now() + msg := InboundPullQuery( + chainID, + requestID, + deadline, + containerIDs[0], + nodeID, + engineType, + ) + end := time.Now() + + require.Equal(PullQueryOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.False(msg.Expiration().Before(start.Add(deadline))) + require.False(end.Add(deadline).Before(msg.Expiration())) + innerMsg, ok := msg.Message().(*p2p.PullQuery) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(containerIDs[0][:], innerMsg.ContainerId) + require.Equal(engineType, innerMsg.EngineType) + }, + ) + + t.Run( + "InboundChits", + func(t *testing.T) { + require := require.New(t) + + msg := InboundChits( + chainID, + requestID, + 
containerIDs, + acceptedContainerIDs, + nodeID, + ) + + require.Equal(ChitsOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.Equal(mockable.MaxTime, msg.Expiration()) + innerMsg, ok := msg.Message().(*p2p.Chits) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + containerIDsBytes := make([][]byte, len(containerIDs)) + for i, id := range containerIDs { + id := id + containerIDsBytes[i] = id[:] + } + require.Equal(containerIDsBytes, innerMsg.PreferredContainerIds) + acceptedContainerIDsBytes := make([][]byte, len(acceptedContainerIDs)) + for i, id := range acceptedContainerIDs { + id := id + acceptedContainerIDsBytes[i] = id[:] + } + require.Equal(acceptedContainerIDsBytes, innerMsg.AcceptedContainerIds) + }, + ) + + t.Run( + "InboundAppRequest", + func(t *testing.T) { + require := require.New(t) + + start := time.Now() + msg := InboundAppRequest( + chainID, + requestID, + deadline, + appBytes, + nodeID, + ) + end := time.Now() + + require.Equal(AppRequestOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.False(msg.Expiration().Before(start.Add(deadline))) + require.False(end.Add(deadline).Before(msg.Expiration())) + innerMsg, ok := msg.Message().(*p2p.AppRequest) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(appBytes, innerMsg.AppBytes) + }, + ) + + t.Run( + "InboundAppResponse", + func(t *testing.T) { + require := require.New(t) + + msg := InboundAppResponse( + chainID, + requestID, + appBytes, + nodeID, + ) + + require.Equal(AppResponseOp, msg.Op()) + require.Equal(nodeID, msg.NodeID()) + require.Equal(mockable.MaxTime, msg.Expiration()) + innerMsg, ok := msg.Message().(*p2p.AppResponse) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(appBytes, innerMsg.AppBytes) + }, + ) +} diff --git 
a/avalanchego/message/internal_msg_builder.go b/avalanchego/message/internal_msg_builder.go index 3f988eed..9f27d256 100644 --- a/avalanchego/message/internal_msg_builder.go +++ b/avalanchego/message/internal_msg_builder.go @@ -1,106 +1,530 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. +//nolint:stylecheck // proto generates interfaces that fail linting package message import ( + "time" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/version" ) -var _ InternalMsgBuilder = internalMsgBuilder{} +var ( + disconnected = &Disconnected{} + timeout = &Timeout{} + gossipRequest = &GossipRequest{} + + _ chainIDGetter = (*GetStateSummaryFrontierFailed)(nil) + _ requestIDGetter = (*GetStateSummaryFrontierFailed)(nil) + + _ chainIDGetter = (*GetAcceptedStateSummaryFailed)(nil) + _ requestIDGetter = (*GetAcceptedStateSummaryFailed)(nil) + + _ chainIDGetter = (*GetAcceptedFrontierFailed)(nil) + _ requestIDGetter = (*GetAcceptedFrontierFailed)(nil) + _ engineTypeGetter = (*GetAcceptedFrontierFailed)(nil) + + _ chainIDGetter = (*GetAcceptedFailed)(nil) + _ requestIDGetter = (*GetAcceptedFailed)(nil) + _ engineTypeGetter = (*GetAcceptedFailed)(nil) + + _ chainIDGetter = (*GetAncestorsFailed)(nil) + _ requestIDGetter = (*GetAncestorsFailed)(nil) + _ engineTypeGetter = (*GetAncestorsFailed)(nil) -type InternalMsgBuilder interface { - InternalFailedRequest( - op Op, - nodeID ids.NodeID, - chainID ids.ID, - requestID uint32, - ) InboundMessage + _ chainIDGetter = (*GetFailed)(nil) + _ requestIDGetter = (*GetFailed)(nil) + _ engineTypeGetter = (*GetFailed)(nil) + + _ chainIDGetter = (*QueryFailed)(nil) + _ requestIDGetter = (*QueryFailed)(nil) + _ engineTypeGetter = (*QueryFailed)(nil) + + _ chainIDGetter = 
(*AppRequestFailed)(nil) + _ requestIDGetter = (*AppRequestFailed)(nil) + + _ sourceChainIDGetter = (*CrossChainAppRequest)(nil) + _ chainIDGetter = (*CrossChainAppRequest)(nil) + _ requestIDGetter = (*CrossChainAppRequest)(nil) + + _ sourceChainIDGetter = (*CrossChainAppRequestFailed)(nil) + _ chainIDGetter = (*CrossChainAppRequestFailed)(nil) + _ requestIDGetter = (*CrossChainAppRequestFailed)(nil) + + _ sourceChainIDGetter = (*CrossChainAppResponse)(nil) + _ chainIDGetter = (*CrossChainAppResponse)(nil) + _ requestIDGetter = (*CrossChainAppResponse)(nil) +) - InternalTimeout(nodeID ids.NodeID) InboundMessage - InternalConnected(nodeID ids.NodeID, nodeVersion *version.Application) InboundMessage - InternalDisconnected(nodeID ids.NodeID) InboundMessage - InternalVMMessage(nodeID ids.NodeID, notification uint32) InboundMessage - InternalGossipRequest(nodeID ids.NodeID) InboundMessage +type GetStateSummaryFrontierFailed struct { + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` } -type internalMsgBuilder struct{} +func (m *GetStateSummaryFrontierFailed) GetChainId() []byte { + return m.ChainID[:] +} -func NewInternalBuilder() InternalMsgBuilder { - return internalMsgBuilder{} +func (m *GetStateSummaryFrontierFailed) GetRequestId() uint32 { + return m.RequestID } -func (internalMsgBuilder) InternalFailedRequest( - op Op, +func InternalGetStateSummaryFrontierFailed( nodeID ids.NodeID, chainID ids.ID, requestID uint32, ) InboundMessage { - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: op, - nodeID: nodeID, + return &inboundMessage{ + nodeID: nodeID, + op: GetStateSummaryFrontierFailedOp, + message: &GetStateSummaryFrontierFailed{ + ChainID: chainID, + RequestID: requestID, }, - fields: map[Field]interface{}{ - ChainID: chainID[:], + expiration: mockable.MaxTime, + } +} + +type GetAcceptedStateSummaryFailed struct { + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 
`json:"request_id,omitempty"` +} + +func (m *GetAcceptedStateSummaryFailed) GetChainId() []byte { + return m.ChainID[:] +} + +func (m *GetAcceptedStateSummaryFailed) GetRequestId() uint32 { + return m.RequestID +} + +func InternalGetAcceptedStateSummaryFailed( + nodeID ids.NodeID, + chainID ids.ID, + requestID uint32, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: GetAcceptedStateSummaryFailedOp, + message: &GetAcceptedStateSummaryFailed{ + ChainID: chainID, RequestID: requestID, }, + expiration: mockable.MaxTime, } } -func (internalMsgBuilder) InternalTimeout(nodeID ids.NodeID) InboundMessage { - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Timeout, - nodeID: nodeID, +type GetAcceptedFrontierFailed struct { + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` + EngineType p2p.EngineType `json:"engine_type,omitempty"` +} + +func (m *GetAcceptedFrontierFailed) GetChainId() []byte { + return m.ChainID[:] +} + +func (m *GetAcceptedFrontierFailed) GetRequestId() uint32 { + return m.RequestID +} + +func (m *GetAcceptedFrontierFailed) GetEngineType() p2p.EngineType { + return m.EngineType +} + +func InternalGetAcceptedFrontierFailed( + nodeID ids.NodeID, + chainID ids.ID, + requestID uint32, + engineType p2p.EngineType, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: GetAcceptedFrontierFailedOp, + message: &GetAcceptedFrontierFailed{ + ChainID: chainID, + RequestID: requestID, + EngineType: engineType, }, + expiration: mockable.MaxTime, } } -func (internalMsgBuilder) InternalConnected(nodeID ids.NodeID, nodeVersion *version.Application) InboundMessage { - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Connected, - nodeID: nodeID, +type GetAcceptedFailed struct { + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` + EngineType p2p.EngineType `json:"engine_type,omitempty"` +} + +func (m 
*GetAcceptedFailed) GetChainId() []byte { + return m.ChainID[:] +} + +func (m *GetAcceptedFailed) GetRequestId() uint32 { + return m.RequestID +} + +func (m *GetAcceptedFailed) GetEngineType() p2p.EngineType { + return m.EngineType +} + +func InternalGetAcceptedFailed( + nodeID ids.NodeID, + chainID ids.ID, + requestID uint32, + engineType p2p.EngineType, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: GetAcceptedFailedOp, + message: &GetAcceptedFailed{ + ChainID: chainID, + RequestID: requestID, + EngineType: engineType, }, - fields: map[Field]interface{}{ - VersionStruct: nodeVersion, + expiration: mockable.MaxTime, + } +} + +type GetAncestorsFailed struct { + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` + EngineType p2p.EngineType `json:"engine_type,omitempty"` +} + +func (m *GetAncestorsFailed) GetChainId() []byte { + return m.ChainID[:] +} + +func (m *GetAncestorsFailed) GetRequestId() uint32 { + return m.RequestID +} + +func (m *GetAncestorsFailed) GetEngineType() p2p.EngineType { + return m.EngineType +} + +func InternalGetAncestorsFailed( + nodeID ids.NodeID, + chainID ids.ID, + requestID uint32, + engineType p2p.EngineType, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: GetAncestorsFailedOp, + message: &GetAncestorsFailed{ + ChainID: chainID, + RequestID: requestID, + EngineType: engineType, }, + expiration: mockable.MaxTime, } } -func (internalMsgBuilder) InternalDisconnected(nodeID ids.NodeID) InboundMessage { - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Disconnected, - nodeID: nodeID, +type GetFailed struct { + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` + EngineType p2p.EngineType `json:"engine_type,omitempty"` +} + +func (m *GetFailed) GetChainId() []byte { + return m.ChainID[:] +} + +func (m *GetFailed) GetRequestId() uint32 { + return m.RequestID +} + +func (m *GetFailed) 
GetEngineType() p2p.EngineType { + return m.EngineType +} + +func InternalGetFailed( + nodeID ids.NodeID, + chainID ids.ID, + requestID uint32, + engineType p2p.EngineType, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: GetFailedOp, + message: &GetFailed{ + ChainID: chainID, + RequestID: requestID, + EngineType: engineType, }, + expiration: mockable.MaxTime, } } -func (internalMsgBuilder) InternalVMMessage( +type QueryFailed struct { + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` + EngineType p2p.EngineType `json:"engine_type,omitempty"` +} + +func (m *QueryFailed) GetChainId() []byte { + return m.ChainID[:] +} + +func (m *QueryFailed) GetRequestId() uint32 { + return m.RequestID +} + +func (m *QueryFailed) GetEngineType() p2p.EngineType { + return m.EngineType +} + +func InternalQueryFailed( nodeID ids.NodeID, - notification uint32, + chainID ids.ID, + requestID uint32, + engineType p2p.EngineType, ) InboundMessage { - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Notify, - nodeID: nodeID, + return &inboundMessage{ + nodeID: nodeID, + op: QueryFailedOp, + message: &QueryFailed{ + ChainID: chainID, + RequestID: requestID, + EngineType: engineType, }, - fields: map[Field]interface{}{ - VMMessage: notification, + expiration: mockable.MaxTime, + } +} + +type AppRequestFailed struct { + ChainID ids.ID `json:"chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` +} + +func (m *AppRequestFailed) GetChainId() []byte { + return m.ChainID[:] +} + +func (m *AppRequestFailed) GetRequestId() uint32 { + return m.RequestID +} + +func InternalAppRequestFailed( + nodeID ids.NodeID, + chainID ids.ID, + requestID uint32, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: AppRequestFailedOp, + message: &AppRequestFailed{ + ChainID: chainID, + RequestID: requestID, + }, + expiration: mockable.MaxTime, + } +} + +type CrossChainAppRequest struct { + 
SourceChainID ids.ID `json:"source_chain_id,omitempty"` + DestinationChainID ids.ID `json:"destination_chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` + Message []byte `json:"message,omitempty"` +} + +func (m *CrossChainAppRequest) GetSourceChainID() ids.ID { + return m.SourceChainID +} + +func (m *CrossChainAppRequest) GetChainId() []byte { + return m.DestinationChainID[:] +} + +func (m *CrossChainAppRequest) GetRequestId() uint32 { + return m.RequestID +} + +func InternalCrossChainAppRequest( + nodeID ids.NodeID, + sourceChainID ids.ID, + destinationChainID ids.ID, + requestID uint32, + deadline time.Duration, + msg []byte, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: CrossChainAppRequestOp, + message: &CrossChainAppRequest{ + SourceChainID: sourceChainID, + DestinationChainID: destinationChainID, + RequestID: requestID, + Message: msg, + }, + expiration: time.Now().Add(deadline), + } +} + +type CrossChainAppRequestFailed struct { + SourceChainID ids.ID `json:"source_chain_id,omitempty"` + DestinationChainID ids.ID `json:"destination_chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` +} + +func (m *CrossChainAppRequestFailed) GetSourceChainID() ids.ID { + return m.SourceChainID +} + +func (m *CrossChainAppRequestFailed) GetChainId() []byte { + return m.DestinationChainID[:] +} + +func (m *CrossChainAppRequestFailed) GetRequestId() uint32 { + return m.RequestID +} + +func InternalCrossChainAppRequestFailed( + nodeID ids.NodeID, + sourceChainID ids.ID, + destinationChainID ids.ID, + requestID uint32, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: CrossChainAppRequestFailedOp, + message: &CrossChainAppRequestFailed{ + SourceChainID: sourceChainID, + DestinationChainID: destinationChainID, + RequestID: requestID, }, + expiration: mockable.MaxTime, } } -func (internalMsgBuilder) InternalGossipRequest( +type CrossChainAppResponse struct { + SourceChainID ids.ID 
`json:"source_chain_id,omitempty"` + DestinationChainID ids.ID `json:"destination_chain_id,omitempty"` + RequestID uint32 `json:"request_id,omitempty"` + Message []byte `json:"message,omitempty"` +} + +func (m *CrossChainAppResponse) GetSourceChainID() ids.ID { + return m.SourceChainID +} + +func (m *CrossChainAppResponse) GetChainId() []byte { + return m.DestinationChainID[:] +} + +func (m *CrossChainAppResponse) GetRequestId() uint32 { + return m.RequestID +} + +func InternalCrossChainAppResponse( + nodeID ids.NodeID, + sourceChainID ids.ID, + destinationChainID ids.ID, + requestID uint32, + msg []byte, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: CrossChainAppResponseOp, + message: &CrossChainAppResponse{ + SourceChainID: sourceChainID, + DestinationChainID: destinationChainID, + RequestID: requestID, + Message: msg, + }, + expiration: mockable.MaxTime, + } +} + +type Connected struct { + NodeVersion *version.Application `json:"node_version,omitempty"` +} + +func InternalConnected(nodeID ids.NodeID, nodeVersion *version.Application) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: ConnectedOp, + message: &Connected{ + NodeVersion: nodeVersion, + }, + expiration: mockable.MaxTime, + } +} + +// ConnectedSubnet contains the subnet ID of the subnet that the node is +// connected to. +type ConnectedSubnet struct { + SubnetID ids.ID `json:"subnet_id,omitempty"` +} + +// InternalConnectedSubnet returns a message that indicates the node with [nodeID] is +// connected to the subnet with the given [subnetID]. 
+func InternalConnectedSubnet(nodeID ids.NodeID, subnetID ids.ID) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: ConnectedSubnetOp, + message: &ConnectedSubnet{ + SubnetID: subnetID, + }, + expiration: mockable.MaxTime, + } +} + +type Disconnected struct{} + +func InternalDisconnected(nodeID ids.NodeID) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: DisconnectedOp, + message: disconnected, + expiration: mockable.MaxTime, + } +} + +type VMMessage struct { + Notification uint32 `json:"notification,omitempty"` +} + +func InternalVMMessage( nodeID ids.NodeID, + notification uint32, ) InboundMessage { - return &inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: GossipRequest, - nodeID: nodeID, + return &inboundMessage{ + nodeID: nodeID, + op: NotifyOp, + message: &VMMessage{ + Notification: notification, }, + expiration: mockable.MaxTime, + } +} + +type GossipRequest struct{} + +func InternalGossipRequest( + nodeID ids.NodeID, +) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: GossipRequestOp, + message: gossipRequest, + expiration: mockable.MaxTime, + } +} + +type Timeout struct{} + +func InternalTimeout(nodeID ids.NodeID) InboundMessage { + return &inboundMessage{ + nodeID: nodeID, + op: TimeoutOp, + message: timeout, + expiration: mockable.MaxTime, } } diff --git a/avalanchego/message/messages.go b/avalanchego/message/messages.go index 0fc2d5f9..99ac9dc6 100644 --- a/avalanchego/message/messages.go +++ b/avalanchego/message/messages.go @@ -1,564 +1,200 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message import ( - "crypto/x509" "errors" "fmt" - "net" - "strings" - "sync" "time" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" + "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/utils/compression" - "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/wrappers" - - p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" ) var ( - _ InboundMessage = &inboundMessageWithPacker{} - _ InboundMessage = &inboundMessageWithProto{} - _ OutboundMessage = &outboundMessageWithPacker{} - _ OutboundMessage = &outboundMessageWithProto{} - - errUnknownMessageTypeForOp = errors.New("unknown message type for Op") - errUnexpectedCompressedOp = errors.New("unexpected compressed Op") + _ InboundMessage = (*inboundMessage)(nil) + _ OutboundMessage = (*outboundMessage)(nil) - errInvalidIPAddrLen = errors.New("invalid IP address field length (expected 16-byte)") - errInvalidCert = errors.New("invalid TLS certificate field") + errUnknownCompressionType = errors.New("message is compressed with an unknown compression type") ) -// InboundMessage represents a set of fields for an inbound message that can be serialized into a byte stream +// InboundMessage represents a set of fields for an inbound message type InboundMessage interface { - fmt.Stringer - - BytesSavedCompression() int - Op() Op - Get(Field) (interface{}, error) + // NodeID returns the ID of the node that sent this message NodeID() ids.NodeID - ExpirationTime() time.Time + // Op returns the op that describes this message type + Op() Op + // Message returns the message that was sent + Message() any + // Expiration returns 
the time that the sender will have already timed out + // this request + Expiration() time.Time + // OnFinishedHandling must be called one time when this message has been + // handled by the message handler OnFinishedHandling() + // BytesSavedCompression returns the number of bytes that this message saved + // due to being compressed + BytesSavedCompression() int } type inboundMessage struct { - op Op - bytesSavedCompression int nodeID ids.NodeID - expirationTime time.Time + op Op + message any + expiration time.Time onFinishedHandling func() + bytesSavedCompression int } -// Op returns the value of the specified operation in this message -func (inMsg *inboundMessage) Op() Op { return inMsg.op } - -// BytesSavedCompression returns the number of bytes this message saved due to -// compression. That is, the number of bytes we did not receive over the -// network due to the message being compressed. 0 for messages that were not -// compressed. -func (inMsg *inboundMessage) BytesSavedCompression() int { - return inMsg.bytesSavedCompression +func (m *inboundMessage) NodeID() ids.NodeID { + return m.nodeID } -// NodeID returns the node that the msg was sent by. -func (inMsg *inboundMessage) NodeID() ids.NodeID { return inMsg.nodeID } - -// ExpirationTime returns the time this message doesn't need to be responded to. -// A zero time means message does not expire. -func (inMsg *inboundMessage) ExpirationTime() time.Time { return inMsg.expirationTime } - -// OnFinishedHandling is the function to be called once inboundMessage is -// complete. 
-func (inMsg *inboundMessage) OnFinishedHandling() { - if inMsg.onFinishedHandling != nil { - inMsg.onFinishedHandling() - } +func (m *inboundMessage) Op() Op { + return m.op } -type inboundMessageWithPacker struct { - inboundMessage - - fields map[Field]interface{} +func (m *inboundMessage) Message() any { + return m.message } -// Field returns the value of the specified field in this message -func (inMsg *inboundMessageWithPacker) Get(field Field) (interface{}, error) { - value, ok := inMsg.fields[field] - if !ok { - return nil, fmt.Errorf("%w: %s", errMissingField, field) - } - return value, nil +func (m *inboundMessage) Expiration() time.Time { + return m.expiration } -func (inMsg *inboundMessageWithPacker) String() string { - sb := strings.Builder{} - sb.WriteString(fmt.Sprintf("(Op: %s, NodeID: %s", inMsg.op, inMsg.nodeID)) - if requestIDIntf, exists := inMsg.fields[RequestID]; exists { - sb.WriteString(fmt.Sprintf(", RequestID: %d", requestIDIntf.(uint32))) +func (m *inboundMessage) OnFinishedHandling() { + if m.onFinishedHandling != nil { + m.onFinishedHandling() } - if !inMsg.expirationTime.IsZero() { - sb.WriteString(fmt.Sprintf(", Deadline: %d", inMsg.expirationTime.Unix())) - } - switch inMsg.op { - case GetAccepted, Accepted, Chits, AcceptedFrontier: - sb.WriteString(fmt.Sprintf(", NumContainerIDs: %d)", len(inMsg.fields[ContainerIDs].([][]byte)))) - case Get, GetAncestors, PullQuery: - sb.WriteString(fmt.Sprintf(", ContainerID: 0x%x)", inMsg.fields[ContainerID].([]byte))) - case Ancestors: - sb.WriteString(fmt.Sprintf(", NumContainers: %d)", len(inMsg.fields[MultiContainerBytes].([][]byte)))) - case Notify: - sb.WriteString(fmt.Sprintf(", Notification: %d)", inMsg.fields[VMMessage].(uint32))) - case AppRequest, AppResponse, AppGossip: - sb.WriteString(fmt.Sprintf(", len(AppMsg): %d)", len(inMsg.fields[AppBytes].([]byte)))) - default: - sb.WriteString(")") - } - - return sb.String() -} - -type inboundMessageWithProto struct { - inboundMessage - - msg 
*p2ppb.Message -} - -func (inMsg *inboundMessageWithProto) String() string { - return inMsg.msg.String() } -func (inMsg *inboundMessageWithProto) Get(field Field) (interface{}, error) { - return getField(inMsg.msg, field) -} - -// TODO: once protobuf-based p2p messaging is fully activated, -// move the semantic checks out of this package -func getField(m *p2ppb.Message, field Field) (interface{}, error) { - switch m.GetMessage().(type) { - case *p2ppb.Message_Pong: - msg := m.GetPong() - if field == Uptime { - // the original packer-based pong base uses uint8 - return uint8(msg.UptimePct), nil - } - - case *p2ppb.Message_Version: - msg := m.GetVersion() - switch field { - case NetworkID: - return msg.NetworkId, nil - case MyTime: - return msg.MyTime, nil - case IP: - // "net.IP" type in Golang is 16-byte - // regardless of whether it's IPV4 or 6 (see net.IPv6len) - // however, proto message does not enforce the length - // so we need to verify here - // TODO: once we complete the migration - // move this semantic verification outside of this package - if len(msg.IpAddr) != net.IPv6len { - return nil, fmt.Errorf( - "%w: invalid IP address length %d in version message", - errInvalidIPAddrLen, - len(msg.IpAddr), - ) - } - return ips.IPPort{ - IP: net.IP(msg.IpAddr), - Port: uint16(msg.IpPort), - }, nil - case VersionStr: - return msg.MyVersion, nil - case VersionTime: - return msg.MyVersionTime, nil - case SigBytes: - return msg.Sig, nil - case TrackedSubnets: - return msg.TrackedSubnets, nil - } - - case *p2ppb.Message_PeerList: - msg := m.GetPeerList() - if field == Peers { - peers := make([]ips.ClaimedIPPort, len(msg.GetClaimedIpPorts())) - for i, p := range msg.GetClaimedIpPorts() { - tlsCert, err := x509.ParseCertificate(p.X509Certificate) - if err != nil { - // this certificate is different than the certificate received - // during the TLS handshake (and so this error can occur) - return nil, fmt.Errorf( - "%w: failed to parse peer certificate for peer_list 
message (%v)", - errInvalidCert, - err, - ) - } - // TODO: once we complete the migration - // move this semantic verification outside of this package - if len(p.IpAddr) != net.IPv6len { - return nil, fmt.Errorf( - "%w: invalid IP address length %d in peer_list message", - errInvalidIPAddrLen, - len(p.IpAddr), - ) - } - peers[i] = ips.ClaimedIPPort{ - Cert: tlsCert, - IPPort: ips.IPPort{ - IP: net.IP(p.IpAddr), - Port: uint16(p.IpPort), - }, - Timestamp: p.Timestamp, - Signature: p.Signature, - } - } - return peers, nil - } - - case *p2ppb.Message_GetStateSummaryFrontier: - msg := m.GetGetStateSummaryFrontier() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case Deadline: - return msg.Deadline, nil - } - - case *p2ppb.Message_StateSummaryFrontier_: - msg := m.GetStateSummaryFrontier_() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case SummaryBytes: - return msg.Summary, nil - } - - case *p2ppb.Message_GetAcceptedStateSummary: - msg := m.GetGetAcceptedStateSummary() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case Deadline: - return msg.Deadline, nil - case SummaryHeights: - return msg.Heights, nil - } - - case *p2ppb.Message_AcceptedStateSummary_: - msg := m.GetAcceptedStateSummary_() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case SummaryIDs: - return msg.SummaryIds, nil - } - - case *p2ppb.Message_GetAcceptedFrontier: - msg := m.GetGetAcceptedFrontier() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case Deadline: - return msg.Deadline, nil - } - - case *p2ppb.Message_AcceptedFrontier_: - msg := m.GetAcceptedFrontier_() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case ContainerIDs: - return 
msg.ContainerIds, nil - } - - case *p2ppb.Message_GetAccepted: - msg := m.GetGetAccepted() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case Deadline: - return msg.Deadline, nil - case ContainerIDs: - return msg.ContainerIds, nil - } - - case *p2ppb.Message_Accepted_: - msg := m.GetAccepted_() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case ContainerIDs: - return msg.ContainerIds, nil - } - - case *p2ppb.Message_GetAncestors: - msg := m.GetGetAncestors() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case Deadline: - return msg.Deadline, nil - case ContainerID: - return msg.ContainerId, nil - } - - case *p2ppb.Message_Ancestors_: - msg := m.GetAncestors_() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case MultiContainerBytes: - return msg.Containers, nil - } - - case *p2ppb.Message_Get: - msg := m.GetGet() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case Deadline: - return msg.Deadline, nil - case ContainerID: - return msg.ContainerId, nil - } - - case *p2ppb.Message_Put: - msg := m.GetPut() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case ContainerBytes: - return msg.Container, nil - } - - case *p2ppb.Message_PushQuery: - msg := m.GetPushQuery() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case Deadline: - return msg.Deadline, nil - case ContainerBytes: - return msg.Container, nil - } - - case *p2ppb.Message_PullQuery: - msg := m.GetPullQuery() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case Deadline: - return msg.Deadline, nil - case ContainerID: - return msg.ContainerId, 
nil - } - - case *p2ppb.Message_Chits: - msg := m.GetChits() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case ContainerIDs: - return msg.ContainerIds, nil - } - - case *p2ppb.Message_AppRequest: - msg := m.GetAppRequest() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case Deadline: - return msg.Deadline, nil - case AppBytes: - return msg.AppBytes, nil - } - - case *p2ppb.Message_AppResponse: - msg := m.GetAppResponse() - switch field { - case ChainID: - return msg.ChainId, nil - case RequestID: - return msg.RequestId, nil - case AppBytes: - return msg.AppBytes, nil - } - - case *p2ppb.Message_AppGossip: - msg := m.GetAppGossip() - switch field { - case ChainID: - return msg.ChainId, nil - case AppBytes: - return msg.AppBytes, nil - } - } - return nil, fmt.Errorf("%w: %s", errMissingField, field) +func (m *inboundMessage) BytesSavedCompression() int { + return m.bytesSavedCompression } // OutboundMessage represents a set of fields for an outbound message that can // be serialized into a byte stream type OutboundMessage interface { - BytesSavedCompression() int - Bytes() []byte - Op() Op + // BypassThrottling returns true if we should send this message, regardless + // of any outbound message throttling BypassThrottling() bool - - AddRef() - DecRef() - - IsProto() bool + // Op returns the op that describes this message type + Op() Op + // Bytes returns the bytes that will be sent + Bytes() []byte + // BytesSavedCompression returns the number of bytes that this message saved + // due to being compressed + BytesSavedCompression() int } type outboundMessage struct { + bypassThrottling bool op Op bytes []byte bytesSavedCompression int - bypassThrottling bool } -// Op returns the value of the specified operation in this message -func (outMsg *outboundMessage) Op() Op { return outMsg.op } - -// Bytes returns this message in bytes -func (outMsg 
*outboundMessage) Bytes() []byte { return outMsg.bytes } - -// BytesSavedCompression returns the number of bytes this message saved due to -// compression. That is, the number of bytes we did not send over the -// network due to the message being compressed. 0 for messages that were not -// compressed. -func (outMsg *outboundMessage) BytesSavedCompression() int { - return outMsg.bytesSavedCompression +func (m *outboundMessage) BypassThrottling() bool { + return m.bypassThrottling } -// BypassThrottling when attempting to send this message -func (outMsg *outboundMessage) BypassThrottling() bool { return outMsg.bypassThrottling } - -type outboundMessageWithPacker struct { - outboundMessage - - refLock sync.Mutex - refs int - c *codec +func (m *outboundMessage) Op() Op { + return m.op } -func (outMsg *outboundMessageWithPacker) AddRef() { - outMsg.refLock.Lock() - defer outMsg.refLock.Unlock() - - outMsg.refs++ +func (m *outboundMessage) Bytes() []byte { + return m.bytes } -// Once the reference count of this message goes to 0, the byte slice should not -// be inspected. 
-func (outMsg *outboundMessageWithPacker) DecRef() { - outMsg.refLock.Lock() - defer outMsg.refLock.Unlock() - - outMsg.refs-- - if outMsg.refs == 0 { - outMsg.c.byteSlicePool.Put(outMsg.bytes) - } +func (m *outboundMessage) BytesSavedCompression() int { + return m.bytesSavedCompression } -func (outMsg *outboundMessageWithPacker) IsProto() bool { return false } - -type outboundMessageWithProto struct { - outboundMessage - - msg *p2ppb.Message -} - -func (outMsg *outboundMessageWithProto) AddRef() {} -func (outMsg *outboundMessageWithProto) DecRef() {} -func (outMsg *outboundMessageWithProto) IsProto() bool { return true } - // TODO: add other compression algorithms with extended interface -type msgBuilderProtobuf struct { - gzipCompressor compression.Compressor - clock mockable.Clock +type msgBuilder struct { + log logging.Logger - compressTimeMetrics map[Op]metric.Averager - decompressTimeMetrics map[Op]metric.Averager + gzipCompressor compression.Compressor + gzipCompressTimeMetrics map[Op]metric.Averager + gzipDecompressTimeMetrics map[Op]metric.Averager + + zstdCompressor compression.Compressor + zstdCompressTimeMetrics map[Op]metric.Averager + zstdDecompressTimeMetrics map[Op]metric.Averager maxMessageTimeout time.Duration } -// NOTE: the metrics registration paths are the same as "NewCodecWithMemoryPool"! -// To avoid conflicts, use the different namespace if created at the same time. 
-func newMsgBuilderProtobuf(namespace string, metrics prometheus.Registerer, maxMessageSize int64, maxMessageTimeout time.Duration) (*msgBuilderProtobuf, error) { - cpr, err := compression.NewGzipCompressor(maxMessageSize) +func newMsgBuilder( + log logging.Logger, + namespace string, + metrics prometheus.Registerer, + maxMessageTimeout time.Duration, +) (*msgBuilder, error) { + gzipCompressor, err := compression.NewGzipCompressor(constants.DefaultMaxMessageSize) + if err != nil { + return nil, err + } + zstdCompressor, err := compression.NewZstdCompressor(constants.DefaultMaxMessageSize) if err != nil { return nil, err } - mb := &msgBuilderProtobuf{ - gzipCompressor: cpr, + mb := &msgBuilder{ + log: log, - compressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), - decompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), + gzipCompressor: gzipCompressor, + gzipCompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), + gzipDecompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), + + zstdCompressor: zstdCompressor, + zstdCompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), + zstdDecompressTimeMetrics: make(map[Op]metric.Averager, len(ExternalOps)), maxMessageTimeout: maxMessageTimeout, } errs := wrappers.Errs{} for _, op := range ExternalOps { - if !op.Compressible() { - continue - } - - mb.compressTimeMetrics[op] = metric.NewAveragerWithErrs( + mb.gzipCompressTimeMetrics[op] = metric.NewAveragerWithErrs( + namespace, + fmt.Sprintf("gzip_%s_compress_time", op), + fmt.Sprintf("time (in ns) to compress %s messages with gzip", op), + metrics, + &errs, + ) + mb.gzipDecompressTimeMetrics[op] = metric.NewAveragerWithErrs( + namespace, + fmt.Sprintf("gzip_%s_decompress_time", op), + fmt.Sprintf("time (in ns) to decompress %s messages with gzip", op), + metrics, + &errs, + ) + mb.zstdCompressTimeMetrics[op] = metric.NewAveragerWithErrs( namespace, - fmt.Sprintf("%s_compress_time", op), - fmt.Sprintf("time 
(in ns) to compress %s messages", op), + fmt.Sprintf("zstd_%s_compress_time", op), + fmt.Sprintf("time (in ns) to compress %s messages with zstd", op), metrics, &errs, ) - mb.decompressTimeMetrics[op] = metric.NewAveragerWithErrs( + mb.zstdDecompressTimeMetrics[op] = metric.NewAveragerWithErrs( namespace, - fmt.Sprintf("%s_decompress_time", op), - fmt.Sprintf("time (in ns) to decompress %s messages", op), + fmt.Sprintf("zstd_%s_decompress_time", op), + fmt.Sprintf("time (in ns) to decompress %s messages with zstd", op), metrics, &errs, ) @@ -566,17 +202,18 @@ func newMsgBuilderProtobuf(namespace string, metrics prometheus.Registerer, maxM return mb, errs.Err } -// NOTE THAT the passed message must be verified beforehand. -// NOTE THAT the passed message will be modified if compression is enabled. -// TODO: find a way to not in-place modify the message -func (mb *msgBuilderProtobuf) marshal(m *p2ppb.Message, gzipCompress bool) ([]byte, int, time.Duration, error) { - uncompressedMsgBytes, err := proto.Marshal(m) +func (mb *msgBuilder) marshal( + uncompressedMsg *p2p.Message, + compressionType compression.Type, +) ([]byte, int, Op, error) { + uncompressedMsgBytes, err := proto.Marshal(uncompressedMsg) if err != nil { return nil, 0, 0, err } - if !gzipCompress { - return uncompressedMsgBytes, 0, 0, nil + op, err := ToOp(uncompressedMsg) + if err != nil { + return nil, 0, 0, err } // If compression is enabled, we marshal twice: @@ -585,161 +222,160 @@ func (mb *msgBuilderProtobuf) marshal(m *p2ppb.Message, gzipCompress bool) ([]by // // This recursive packing allows us to avoid an extra compression on/off // field in the message. 
- startTime := time.Now() - compressedBytes, err := mb.gzipCompressor.Compress(uncompressedMsgBytes) + var ( + startTime = time.Now() + compressedMsg p2p.Message + opToCompressTimeMetrics map[Op]metric.Averager + ) + switch compressionType { + case compression.TypeNone: + return uncompressedMsgBytes, 0, op, nil + case compression.TypeGzip: + compressedBytes, err := mb.gzipCompressor.Compress(uncompressedMsgBytes) + if err != nil { + return nil, 0, 0, err + } + compressedMsg = p2p.Message{ + Message: &p2p.Message_CompressedGzip{ + CompressedGzip: compressedBytes, + }, + } + opToCompressTimeMetrics = mb.gzipCompressTimeMetrics + case compression.TypeZstd: + compressedBytes, err := mb.zstdCompressor.Compress(uncompressedMsgBytes) + if err != nil { + return nil, 0, 0, err + } + compressedMsg = p2p.Message{ + Message: &p2p.Message_CompressedZstd{ + CompressedZstd: compressedBytes, + }, + } + opToCompressTimeMetrics = mb.zstdCompressTimeMetrics + default: + return nil, 0, 0, errUnknownCompressionType + } + + compressedMsgBytes, err := proto.Marshal(&compressedMsg) if err != nil { return nil, 0, 0, err } compressTook := time.Since(startTime) - // Original message can be discarded for the compressed message. 
- m.Message = &p2ppb.Message_CompressedGzip{ - CompressedGzip: compressedBytes, - } - compressedMsgBytes, err := proto.Marshal(m) - if err != nil { - return nil, 0, 0, err + if compressTimeMetric, ok := opToCompressTimeMetrics[op]; ok { + compressTimeMetric.Observe(float64(compressTook)) + } else { + // Should never happen + mb.log.Warn("no compression metric found for op", + zap.Stringer("op", op), + zap.Stringer("compressionType", compressionType), + ) } bytesSaved := len(uncompressedMsgBytes) - len(compressedMsgBytes) - return compressedMsgBytes, bytesSaved, compressTook, nil + return compressedMsgBytes, bytesSaved, op, nil } -func (mb *msgBuilderProtobuf) unmarshal(b []byte) (Op, *p2ppb.Message, bool, int, time.Duration, error) { - m := new(p2ppb.Message) +func (mb *msgBuilder) unmarshal(b []byte) (*p2p.Message, int, Op, error) { + m := new(p2p.Message) if err := proto.Unmarshal(b, m); err != nil { - return 0, nil, false, 0, 0, err + return nil, 0, 0, err } - compressed := m.GetCompressedGzip() - if len(compressed) == 0 { + // Figure out what compression type, if any, was used to compress the message. 
+ var ( + opToDecompressTimeMetrics map[Op]metric.Averager + compressor compression.Compressor + compressedBytes []byte + gzipCompressed = m.GetCompressedGzip() + zstdCompressed = m.GetCompressedZstd() + ) + switch { + case len(gzipCompressed) > 0: + opToDecompressTimeMetrics = mb.gzipDecompressTimeMetrics + compressor = mb.gzipCompressor + compressedBytes = gzipCompressed + case len(zstdCompressed) > 0: + opToDecompressTimeMetrics = mb.zstdDecompressTimeMetrics + compressor = mb.zstdCompressor + compressedBytes = zstdCompressed + default: // The message wasn't compressed - op, err := msgToOp(m) - return op, m, false, 0, 0, err + op, err := ToOp(m) + return m, 0, op, err } startTime := time.Now() - decompressed, err := mb.gzipCompressor.Decompress(compressed) + + decompressed, err := compressor.Decompress(compressedBytes) if err != nil { - return 0, nil, true, 0, 0, err + return nil, 0, 0, err } - decompressTook := time.Since(startTime) + bytesSavedCompression := len(decompressed) - len(compressedBytes) if err := proto.Unmarshal(decompressed, m); err != nil { - return 0, nil, true, 0, 0, err + return nil, 0, 0, err } + decompressTook := time.Since(startTime) - op, err := msgToOp(m) + // Record decompression time metric + op, err := ToOp(m) if err != nil { - return 0, nil, true, 0, 0, err + return nil, 0, 0, err } - if !op.Compressible() { - return 0, nil, true, 0, 0, errUnexpectedCompressedOp + if decompressTimeMetric, ok := opToDecompressTimeMetrics[op]; ok { + decompressTimeMetric.Observe(float64(decompressTook)) + } else { + // Should never happen + mb.log.Warn("no decompression metric found for op", + zap.Stringer("op", op), + ) } - bytesSavedCompression := len(decompressed) - len(compressed) - return op, m, true, bytesSavedCompression, decompressTook, nil + return m, bytesSavedCompression, op, nil } -func msgToOp(m *p2ppb.Message) (Op, error) { - switch m.GetMessage().(type) { - case *p2ppb.Message_Ping: - return Ping, nil - case *p2ppb.Message_Pong: - return 
Pong, nil - case *p2ppb.Message_Version: - return Version, nil - case *p2ppb.Message_PeerList: - return PeerList, nil - case *p2ppb.Message_GetStateSummaryFrontier: - return GetStateSummaryFrontier, nil - case *p2ppb.Message_StateSummaryFrontier_: - return StateSummaryFrontier, nil - case *p2ppb.Message_GetAcceptedStateSummary: - return GetAcceptedStateSummary, nil - case *p2ppb.Message_AcceptedStateSummary_: - return AcceptedStateSummary, nil - case *p2ppb.Message_GetAcceptedFrontier: - return GetAcceptedFrontier, nil - case *p2ppb.Message_AcceptedFrontier_: - return AcceptedFrontier, nil - case *p2ppb.Message_GetAccepted: - return GetAccepted, nil - case *p2ppb.Message_Accepted_: - return Accepted, nil - case *p2ppb.Message_GetAncestors: - return GetAncestors, nil - case *p2ppb.Message_Ancestors_: - return Ancestors, nil - case *p2ppb.Message_Get: - return Get, nil - case *p2ppb.Message_Put: - return Put, nil - case *p2ppb.Message_PushQuery: - return PushQuery, nil - case *p2ppb.Message_PullQuery: - return PullQuery, nil - case *p2ppb.Message_Chits: - return Chits, nil - case *p2ppb.Message_AppRequest: - return AppRequest, nil - case *p2ppb.Message_AppResponse: - return AppResponse, nil - case *p2ppb.Message_AppGossip: - return AppGossip, nil - default: - return 0, fmt.Errorf("%w: unknown message %T", errUnknownMessageTypeForOp, m.GetMessage()) - } -} - -// NOTE THAT the passed message will be updated if compression is enabled. 
-// TODO: find a way to not in-place modify the message -func (mb *msgBuilderProtobuf) createOutbound(op Op, msg *p2ppb.Message, gzipCompress bool, bypassThrottling bool) (*outboundMessageWithProto, error) { - b, saved, compressTook, err := mb.marshal(msg, gzipCompress) +func (mb *msgBuilder) createOutbound(m *p2p.Message, compressionType compression.Type, bypassThrottling bool) (*outboundMessage, error) { + b, saved, op, err := mb.marshal(m, compressionType) if err != nil { return nil, err } - if gzipCompress { - mb.compressTimeMetrics[op].Observe(float64(compressTook)) - } - return &outboundMessageWithProto{ - outboundMessage: outboundMessage{ - op: op, - bytes: b, - bytesSavedCompression: saved, - bypassThrottling: bypassThrottling, - }, - msg: msg, + return &outboundMessage{ + bypassThrottling: bypassThrottling, + op: op, + bytes: b, + bytesSavedCompression: saved, }, nil } -func (mb *msgBuilderProtobuf) parseInbound(bytes []byte, nodeID ids.NodeID, onFinishedHandling func()) (*inboundMessageWithProto, error) { - op, m, wasCompressed, bytesSavedCompression, decompressTook, err := mb.unmarshal(bytes) +func (mb *msgBuilder) parseInbound( + bytes []byte, + nodeID ids.NodeID, + onFinishedHandling func(), +) (*inboundMessage, error) { + m, bytesSavedCompression, op, err := mb.unmarshal(bytes) if err != nil { return nil, err } - if wasCompressed { - mb.decompressTimeMetrics[op].Observe(float64(decompressTook)) + + msg, err := Unwrap(m) + if err != nil { + return nil, err } - var expirationTime time.Time - if deadline, err := getField(m, Deadline); err == nil { - deadlineDuration := time.Duration(deadline.(uint64)) - if deadlineDuration > mb.maxMessageTimeout { - deadlineDuration = mb.maxMessageTimeout - } - expirationTime = mb.clock.Time().Add(deadlineDuration) + expiration := mockable.MaxTime + if deadline, ok := GetDeadline(msg); ok { + deadline = math.Min(deadline, mb.maxMessageTimeout) + expiration = time.Now().Add(deadline) } - return &inboundMessageWithProto{ - 
inboundMessage: inboundMessage{ - op: op, - bytesSavedCompression: bytesSavedCompression, - nodeID: nodeID, - expirationTime: expirationTime, - onFinishedHandling: onFinishedHandling, - }, - msg: m, + return &inboundMessage{ + nodeID: nodeID, + op: op, + message: msg, + expiration: expiration, + onFinishedHandling: onFinishedHandling, + bytesSavedCompression: bytesSavedCompression, }, nil } diff --git a/avalanchego/message/messages_benchmark_test.go b/avalanchego/message/messages_benchmark_test.go index 346c27da..f87493fc 100644 --- a/avalanchego/message/messages_benchmark_test.go +++ b/avalanchego/message/messages_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -16,10 +16,14 @@ import ( "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/ips" - "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/compression" + "github.com/ava-labs/avalanchego/utils/logging" +) - p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" +var ( + dummyNodeID = ids.EmptyNodeID + dummyOnFinishedHandling = func() {} ) // Benchmarks marshal-ing "Version" message. 
@@ -30,54 +34,21 @@ import ( // $ go install -v golang.org/x/perf/cmd/benchstat@latest // // $ go test -run=NONE -bench=BenchmarkMarshalVersion > /tmp/cpu.before.txt -// $ USE_PROTO=true go test -run=NONE -bench=BenchmarkMarshalVersion > /tmp/cpu.after.txt -// $ USE_PROTO=true USE_PROTO_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalVersion > /tmp/cpu.after.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalVersion > /tmp/cpu.after.txt // $ benchcmp /tmp/cpu.before.txt /tmp/cpu.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/cpu.before.txt /tmp/cpu.after.txt // // $ go test -run=NONE -bench=BenchmarkMarshalVersion -benchmem > /tmp/mem.before.txt -// $ USE_PROTO=true go test -run=NONE -bench=BenchmarkMarshalVersion -benchmem > /tmp/mem.after.txt -// $ USE_PROTO=true USE_PROTO_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalVersion -benchmem > /tmp/mem.after.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkMarshalVersion -benchmem > /tmp/mem.after.txt // $ benchcmp /tmp/mem.before.txt /tmp/mem.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/mem.before.txt /tmp/mem.after.txt func BenchmarkMarshalVersion(b *testing.B) { require := require.New(b) - b.StopTimer() - id := ids.GenerateTestID() - - // version message does not require compression - // thus no in-place update for proto test cases - // which makes the benchmarks fairer to proto - // as there's no need to copy the original test message - // for each run - inboundMsg := inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Version, - }, - fields: map[Field]interface{}{ - NetworkID: uint32(1337), - NodeID: uint32(0), - MyTime: uint64(time.Now().Unix()), - IP: ips.IPPort{IP: net.IPv4(1, 2, 3, 4)}, - VersionStr: "v1.2.3", - VersionTime: uint64(time.Now().Unix()), - SigBytes: []byte{'y', 'e', 'e', 't'}, - TrackedSubnets: [][]byte{id[:]}, - }, - } - packerCodec, err := NewCodecWithMemoryPool("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) - 
require.NoError(err) - - packerMsg, err := packerCodec.Pack(inboundMsg.op, inboundMsg.fields, inboundMsg.op.Compressible(), false) - require.NoError(err) - - packerMsgN := len(packerMsg.Bytes()) - - protoMsg := p2ppb.Message{ - Message: &p2ppb.Message_Version{ - Version: &p2ppb.Version{ + msg := p2p.Message{ + Message: &p2p.Message_Version{ + Version: &p2p.Version{ NetworkId: uint32(1337), MyTime: uint64(time.Now().Unix()), IpAddr: []byte(net.IPv4(1, 2, 3, 4).To16()), @@ -89,29 +60,21 @@ func BenchmarkMarshalVersion(b *testing.B) { }, }, } - protoMsgN := proto.Size(&protoMsg) + msgLen := proto.Size(&msg) - useProto := os.Getenv("USE_PROTO") != "" - useProtoBuilder := os.Getenv("USE_PROTO_BUILDER") != "" + useBuilder := os.Getenv("USE_BUILDER") != "" - protoCodec, err := newMsgBuilderProtobuf("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) + codec, err := newMsgBuilder(logging.NoLog{}, "", prometheus.NewRegistry(), 10*time.Second) require.NoError(err) - b.Logf("marshaling packer %d-byte, proto %d-byte (use proto %v, use proto builder %v)", packerMsgN, protoMsgN, useProto, useProtoBuilder) + b.Logf("proto length %d-byte (use builder %v)", msgLen, useBuilder) - b.StartTimer() + b.ResetTimer() for i := 0; i < b.N; i++ { - if !useProto { - // version does not compress message - _, err := packerCodec.Pack(inboundMsg.op, inboundMsg.fields, false, false) - require.NoError(err) - continue - } - - if useProtoBuilder { - _, err = protoCodec.createOutbound(inboundMsg.op, &protoMsg, false, false) + if useBuilder { + _, err = codec.createOutbound(&msg, compression.TypeNone, false) } else { - _, err = proto.Marshal(&protoMsg) + _, err = proto.Marshal(&msg) } require.NoError(err) } @@ -125,14 +88,12 @@ func BenchmarkMarshalVersion(b *testing.B) { // $ go install -v golang.org/x/perf/cmd/benchstat@latest // // $ go test -run=NONE -bench=BenchmarkUnmarshalVersion > /tmp/cpu.before.txt -// $ USE_PROTO=true go test -run=NONE -bench=BenchmarkUnmarshalVersion > 
/tmp/cpu.after.txt -// $ USE_PROTO=true USE_PROTO_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalVersion > /tmp/cpu.after.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalVersion > /tmp/cpu.after.txt // $ benchcmp /tmp/cpu.before.txt /tmp/cpu.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/cpu.before.txt /tmp/cpu.after.txt // // $ go test -run=NONE -bench=BenchmarkUnmarshalVersion -benchmem > /tmp/mem.before.txt -// $ USE_PROTO=true go test -run=NONE -bench=BenchmarkUnmarshalVersion -benchmem > /tmp/mem.after.txt -// $ USE_PROTO=true USE_PROTO_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalVersion -benchmem > /tmp/mem.after.txt +// $ USE_BUILDER=true go test -run=NONE -bench=BenchmarkUnmarshalVersion -benchmem > /tmp/mem.after.txt // $ benchcmp /tmp/mem.before.txt /tmp/mem.after.txt // $ benchstat -alpha 0.03 -geomean /tmp/mem.before.txt /tmp/mem.after.txt func BenchmarkUnmarshalVersion(b *testing.B) { @@ -141,27 +102,9 @@ func BenchmarkUnmarshalVersion(b *testing.B) { b.StopTimer() id := ids.GenerateTestID() - inboundMsg := inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Version, - }, - fields: map[Field]interface{}{ - NetworkID: uint32(1337), - NodeID: uint32(0), - MyTime: uint64(time.Now().Unix()), - IP: ips.IPPort{IP: net.IPv4(1, 2, 3, 4)}, - VersionStr: "v1.2.3", - VersionTime: uint64(time.Now().Unix()), - SigBytes: []byte{'y', 'e', 'e', 't'}, - TrackedSubnets: [][]byte{id[:]}, - }, - } - packerCodec, err := NewCodecWithMemoryPool("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) - require.NoError(err) - - protoMsg := p2ppb.Message{ - Message: &p2ppb.Message_Version{ - Version: &p2ppb.Version{ + msg := p2p.Message{ + Message: &p2p.Message_Version{ + Version: &p2p.Version{ NetworkId: uint32(1337), MyTime: uint64(time.Now().Unix()), IpAddr: []byte(net.IPv4(1, 2, 3, 4).To16()), @@ -174,33 +117,20 @@ func BenchmarkUnmarshalVersion(b *testing.B) { }, } - rawMsg, err := proto.Marshal(&protoMsg) + 
rawMsg, err := proto.Marshal(&msg) require.NoError(err) - useProto := os.Getenv("USE_PROTO") != "" - if !useProto { - msgInf, err := packerCodec.Pack(inboundMsg.op, inboundMsg.fields, inboundMsg.op.Compressible(), false) - require.NoError(err) - rawMsg = msgInf.Bytes() - } - - useProtoBuilder := os.Getenv("USE_PROTO_BUILDER") != "" - protoCodec, err := newMsgBuilderProtobuf("", prometheus.NewRegistry(), 2*units.MiB, 10*time.Second) + useBuilder := os.Getenv("USE_BUILDER") != "" + codec, err := newMsgBuilder(logging.NoLog{}, "", prometheus.NewRegistry(), 10*time.Second) require.NoError(err) b.StartTimer() for i := 0; i < b.N; i++ { - if !useProto { - _, err := packerCodec.Parse(rawMsg, dummyNodeID, dummyOnFinishedHandling) - require.NoError(err) - continue - } - - if useProtoBuilder { - _, err = protoCodec.parseInbound(rawMsg, dummyNodeID, dummyOnFinishedHandling) + if useBuilder { + _, err = codec.parseInbound(rawMsg, dummyNodeID, dummyOnFinishedHandling) } else { - var protoMsg p2ppb.Message - err = proto.Unmarshal(rawMsg, &protoMsg) + var msg p2p.Message + err = proto.Unmarshal(rawMsg, &msg) } require.NoError(err) } diff --git a/avalanchego/message/messages_test.go b/avalanchego/message/messages_test.go index 7b0dcd1f..c04e3ea4 100644 --- a/avalanchego/message/messages_test.go +++ b/avalanchego/message/messages_test.go @@ -1,11 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message import ( "bytes" - "fmt" "net" "testing" "time" @@ -17,153 +16,21 @@ import ( "google.golang.org/protobuf/proto" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/staking" - "github.com/ava-labs/avalanchego/utils/ips" - "github.com/ava-labs/avalanchego/utils/units" - - p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/compression" + "github.com/ava-labs/avalanchego/utils/logging" ) -// Ensures the message size with proto not blow up compared to packer. -func TestProtoMarshalSizeVersion(t *testing.T) { - t.Parallel() - require := require.New(t) - - id := ids.GenerateTestID() - inboundMsg := inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Version, - }, - fields: map[Field]interface{}{ - NetworkID: uint32(1337), - NodeID: uint32(0), - MyTime: uint64(time.Now().Unix()), - IP: ips.IPPort{IP: net.IPv4(1, 2, 3, 4)}, - VersionStr: "v1.2.3", - VersionTime: uint64(time.Now().Unix()), - SigBytes: []byte{'y', 'e', 'e', 't'}, - TrackedSubnets: [][]byte{id[:]}, - }, - } - packerCodec, err := NewCodecWithMemoryPool( - "", - prometheus.NewRegistry(), - 2*units.MiB, - 10*time.Second, - ) - require.NoError(err) - - packerMsg, err := packerCodec.Pack( - inboundMsg.op, - inboundMsg.fields, - inboundMsg.op.Compressible(), - false, - ) - require.NoError(err) - - packerMsgN := len(packerMsg.Bytes()) - - protoMsg := p2ppb.Message{ - Message: &p2ppb.Message_Version{ - Version: &p2ppb.Version{ - NetworkId: uint32(1337), - MyTime: uint64(time.Now().Unix()), - IpAddr: []byte(net.IPv4(1, 2, 3, 4).To16()), - IpPort: 0, - MyVersion: "v1.2.3", - MyVersionTime: uint64(time.Now().Unix()), - Sig: []byte{'y', 'e', 'e', 't'}, - TrackedSubnets: [][]byte{id[:]}, - }, - }, - } - protoMsgN := proto.Size(&protoMsg) - - t.Logf("marshaled; packer %d-byte, proto %d-byte", packerMsgN, protoMsgN) - require.Greater(packerMsgN, protoMsgN) -} - -// Ensures the message 
size with proto not blow up compared to packer. -func TestProtoMarshalSizeAncestors(t *testing.T) { - t.Parallel() - require := require.New(t) - - id := ids.GenerateTestID() - inboundMsg := inboundMessageWithPacker{ - inboundMessage: inboundMessage{ - op: Ancestors, - }, - fields: map[Field]interface{}{ - ChainID: id[:], - RequestID: uint32(12345), - MultiContainerBytes: [][]byte{ - bytes.Repeat([]byte{0}, 100), - bytes.Repeat([]byte{0}, 100), - bytes.Repeat([]byte{0}, 100), - }, - }, - } - packerCodec, err := NewCodecWithMemoryPool( - "", - prometheus.NewRegistry(), - 2*units.MiB, - 10*time.Second, - ) - require.NoError(err) - - compressible := inboundMsg.op.Compressible() - require.True(compressible) - - packerMsg, err := packerCodec.Pack( - inboundMsg.op, - inboundMsg.fields, - compressible, - false, - ) - require.NoError(err) - - packerMsgN := len(packerMsg.Bytes()) - - protoMsg := p2ppb.Message{ - Message: &p2ppb.Message_Ancestors_{ - Ancestors_: &p2ppb.Ancestors{ - ChainId: id[:], - RequestId: 12345, - Containers: [][]byte{ - bytes.Repeat([]byte{0}, 100), - bytes.Repeat([]byte{0}, 100), - bytes.Repeat([]byte{0}, 100), - }, - }, - }, - } - - mb, err := newMsgBuilderProtobuf( - "test", - prometheus.NewRegistry(), - units.MiB, - 5*time.Second, - ) - require.NoError(err) - - b, _, _, err := mb.marshal(&protoMsg, compressible) - require.NoError(err) - - protoMsgN := len(b) - t.Logf("marshaled; packer %d-byte, proto %d-byte", packerMsgN, protoMsgN) - - require.GreaterOrEqual(packerMsgN, protoMsgN) -} - -func TestNewOutboundInboundMessageWithProto(t *testing.T) { +func TestMessage(t *testing.T) { t.Parallel() require := require.New(t) - mb, err := newMsgBuilderProtobuf( + mb, err := newMsgBuilder( + logging.NoLog{}, "test", prometheus.NewRegistry(), - units.MiB, 5*time.Second, ) require.NoError(err) @@ -184,969 +51,828 @@ func TestNewOutboundInboundMessageWithProto(t *testing.T) { nowUnix := time.Now().Unix() tests := []struct { - desc string - op Op - msg 
*p2ppb.Message - gzipCompress bool - bypassThrottling bool - bytesSaved bool // if true, outbound message saved bytes must be non-zero - expectedOutboundErr error // expected error for creating outbound message - fields map[Field]interface{} // expected fields from the inbound message - expectedGetFieldErr map[Field]error // expected error for getting the specified field + desc string + op Op + msg *p2p.Message + compressionType compression.Type + bypassThrottling bool + bytesSaved bool // if true, outbound message saved bytes must be non-zero }{ { - desc: "valid pong outbound message with no compression", - op: Pong, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Pong{ - Pong: &p2ppb.Pong{ - UptimePct: 1, - }, + desc: "ping message with no compression", + op: PingOp, + msg: &p2p.Message{ + Message: &p2p.Message_Ping{ + Ping: &p2p.Ping{}, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - Uptime: uint8(1), - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid ping outbound message with no compression", - op: Ping, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Ping{ - Ping: &p2ppb.Ping{}, + desc: "pong message with no compression no subnet uptimes", + op: PongOp, + msg: &p2p.Message{ + Message: &p2p.Message_Pong{ + Pong: &p2p.Pong{ + Uptime: 100, + }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: nil, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid get_accepted_frontier outbound message with no compression", - op: GetAcceptedFrontier, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_GetAcceptedFrontier{ - GetAcceptedFrontier: &p2ppb.GetAcceptedFrontier{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, + desc: "pong message with no 
compression and subnet uptimes", + op: PongOp, + msg: &p2p.Message{ + Message: &p2p.Message_Pong{ + Pong: &p2p.Pong{ + Uptime: 100, + SubnetUptimes: []*p2p.SubnetUptime{ + { + SubnetId: testID[:], + Uptime: 100, + }, + }, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid accepted_frontier outbound message with no compression", - op: AcceptedFrontier, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_AcceptedFrontier_{ - AcceptedFrontier_: &p2ppb.AcceptedFrontier{ - ChainId: testID[:], - RequestId: 1, - ContainerIds: [][]byte{testID[:], testID[:]}, + desc: "version message with no compression", + op: VersionOp, + msg: &p2p.Message{ + Message: &p2p.Message_Version{ + Version: &p2p.Version{ + NetworkId: uint32(1337), + MyTime: uint64(nowUnix), + IpAddr: []byte(net.IPv6zero), + IpPort: 9651, + MyVersion: "v1.2.3", + MyVersionTime: uint64(nowUnix), + Sig: []byte{'y', 'e', 'e', 't'}, + TrackedSubnets: [][]byte{testID[:]}, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - ContainerIDs: [][]byte{testID[:], testID[:]}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid get_accepted outbound message with no compression", - op: GetAccepted, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_GetAccepted{ - GetAccepted: &p2ppb.GetAccepted{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - ContainerIds: [][]byte{testID[:], testID[:]}, + desc: "peer_list message with no compression", + op: PeerListOp, + msg: &p2p.Message{ + Message: &p2p.Message_PeerList{ + 
PeerList: &p2p.PeerList{ + ClaimedIpPorts: []*p2p.ClaimedIpPort{ + { + X509Certificate: testTLSCert.Certificate[0], + IpAddr: []byte(net.IPv4zero), + IpPort: 10, + Timestamp: 1, + Signature: []byte{0}, + }, + }, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - ContainerIDs: [][]byte{testID[:], testID[:]}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid accepted outbound message with no compression", - op: Accepted, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Accepted_{ - Accepted_: &p2ppb.Accepted{ - ChainId: testID[:], - RequestId: 1, - ContainerIds: [][]byte{testID[:], testID[:]}, + desc: "peer_list message with gzip compression", + op: PeerListOp, + msg: &p2p.Message{ + Message: &p2p.Message_PeerList{ + PeerList: &p2p.PeerList{ + ClaimedIpPorts: []*p2p.ClaimedIpPort{ + { + X509Certificate: testTLSCert.Certificate[0], + IpAddr: []byte(net.IPv6zero), + IpPort: 9651, + Timestamp: uint64(nowUnix), + Signature: compressibleContainers[0], + }, + }, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - ContainerIDs: [][]byte{testID[:], testID[:]}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid get_ancestors outbound message with no compression", - op: GetAncestors, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_GetAncestors{ - GetAncestors: &p2ppb.GetAncestors{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - ContainerId: testID[:], + desc: "peer_list message with zstd compression", + op: PeerListOp, + msg: &p2p.Message{ + Message: &p2p.Message_PeerList{ + PeerList: &p2p.PeerList{ + 
ClaimedIpPorts: []*p2p.ClaimedIpPort{ + { + X509Certificate: testTLSCert.Certificate[0], + IpAddr: []byte(net.IPv6zero), + IpPort: 9651, + Timestamp: uint64(nowUnix), + Signature: compressibleContainers[0], + }, + }, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - ContainerID: testID[:], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeZstd, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid ancestor outbound message with no compression", - op: Ancestors, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Ancestors_{ - Ancestors_: &p2ppb.Ancestors{ - ChainId: testID[:], - RequestId: 12345, - Containers: compressibleContainers, + desc: "peer_list_ack message with no compression", + op: PeerListAckOp, + msg: &p2p.Message{ + Message: &p2p.Message_PeerListAck{ + PeerListAck: &p2p.PeerListAck{ + PeerAcks: []*p2p.PeerAck{ + { + TxId: testID[:], + Timestamp: 1, + }, + }, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(12345), - MultiContainerBytes: compressibleContainers, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: false, + bytesSaved: false, }, { - desc: "valid ancestor outbound message with compression", - op: Ancestors, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Ancestors_{ - Ancestors_: &p2ppb.Ancestors{ - ChainId: testID[:], - RequestId: 12345, - Containers: compressibleContainers, + desc: "get_state_summary_frontier message with no compression", + op: GetStateSummaryFrontierOp, + msg: &p2p.Message{ + Message: &p2p.Message_GetStateSummaryFrontier{ + GetStateSummaryFrontier: &p2p.GetStateSummaryFrontier{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, }, }, }, - gzipCompress: true, - 
bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(12345), - MultiContainerBytes: compressibleContainers, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid get outbound message with no compression", - op: Get, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Get{ - Get: &p2ppb.Get{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - ContainerId: testID[:], + desc: "state_summary_frontier message with no compression", + op: StateSummaryFrontierOp, + msg: &p2p.Message{ + Message: &p2p.Message_StateSummaryFrontier_{ + StateSummaryFrontier_: &p2p.StateSummaryFrontier{ + ChainId: testID[:], + RequestId: 1, + Summary: []byte{0}, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - ContainerID: testID[:], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid put outbound message with no compression", - op: Put, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Put{ - Put: &p2ppb.Put{ + desc: "state_summary_frontier message with gzip compression", + op: StateSummaryFrontierOp, + msg: &p2p.Message{ + Message: &p2p.Message_StateSummaryFrontier_{ + StateSummaryFrontier_: &p2p.StateSummaryFrontier{ ChainId: testID[:], RequestId: 1, - Container: []byte{0}, + Summary: compressibleContainers[0], }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - ContainerBytes: []byte{0}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid put 
outbound message with compression", - op: Put, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Put{ - Put: &p2ppb.Put{ + desc: "state_summary_frontier message with zstd compression", + op: StateSummaryFrontierOp, + msg: &p2p.Message{ + Message: &p2p.Message_StateSummaryFrontier_{ + StateSummaryFrontier_: &p2p.StateSummaryFrontier{ ChainId: testID[:], RequestId: 1, - Container: compressibleContainers[0], + Summary: compressibleContainers[0], }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - ContainerBytes: compressibleContainers[0], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeZstd, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid push_query outbound message with no compression", - op: PushQuery, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_PushQuery{ - PushQuery: &p2ppb.PushQuery{ + desc: "get_accepted_state_summary message with no compression", + op: GetAcceptedStateSummaryOp, + msg: &p2p.Message{ + Message: &p2p.Message_GetAcceptedStateSummary{ + GetAcceptedStateSummary: &p2p.GetAcceptedStateSummary{ ChainId: testID[:], RequestId: 1, Deadline: 1, - Container: []byte{0}, + Heights: []uint64{0}, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - ContainerBytes: []byte{0}, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, + }, + { + desc: "get_accepted_state_summary message with gzip compression", + op: GetAcceptedStateSummaryOp, + msg: &p2p.Message{ + Message: &p2p.Message_GetAcceptedStateSummary{ + GetAcceptedStateSummary: &p2p.GetAcceptedStateSummary{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + Heights: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + }, + 
}, }, - expectedGetFieldErr: nil, + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid push_query outbound message with compression", - op: PushQuery, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_PushQuery{ - PushQuery: &p2ppb.PushQuery{ + desc: "get_accepted_state_summary message with zstd compression", + op: GetAcceptedStateSummaryOp, + msg: &p2p.Message{ + Message: &p2p.Message_GetAcceptedStateSummary{ + GetAcceptedStateSummary: &p2p.GetAcceptedStateSummary{ ChainId: testID[:], RequestId: 1, Deadline: 1, - Container: compressibleContainers[0], + Heights: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - ContainerBytes: compressibleContainers[0], + compressionType: compression.TypeZstd, + bypassThrottling: true, + bytesSaved: true, + }, + { + desc: "accepted_state_summary message with no compression", + op: AcceptedStateSummaryOp, + msg: &p2p.Message{ + Message: &p2p.Message_AcceptedStateSummary_{ + AcceptedStateSummary_: &p2p.AcceptedStateSummary{ + ChainId: testID[:], + RequestId: 1, + SummaryIds: [][]byte{testID[:], testID[:]}, + }, + }, }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid pull_query outbound message with no compression", - op: PullQuery, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_PullQuery{ - PullQuery: &p2ppb.PullQuery{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - ContainerId: testID[:], + desc: "accepted_state_summary message with gzip compression", + op: AcceptedStateSummaryOp, + msg: &p2p.Message{ + Message: &p2p.Message_AcceptedStateSummary_{ + AcceptedStateSummary_: &p2p.AcceptedStateSummary{ + ChainId: testID[:], + RequestId: 1, + SummaryIds: 
[][]byte{testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:]}, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - ContainerID: testID[:], + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: true, + }, + { + desc: "accepted_state_summary message with zstd compression", + op: AcceptedStateSummaryOp, + msg: &p2p.Message{ + Message: &p2p.Message_AcceptedStateSummary_{ + AcceptedStateSummary_: &p2p.AcceptedStateSummary{ + ChainId: testID[:], + RequestId: 1, + SummaryIds: [][]byte{testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:]}, + }, + }, + }, + compressionType: compression.TypeZstd, + bypassThrottling: true, + bytesSaved: true, + }, + { + desc: "get_accepted_frontier message with no compression", + op: GetAcceptedFrontierOp, + msg: &p2p.Message{ + Message: &p2p.Message_GetAcceptedFrontier{ + GetAcceptedFrontier: &p2p.GetAcceptedFrontier{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + }, + }, }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid chits outbound message with no compression", - op: Chits, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Chits{ - Chits: &p2ppb.Chits{ + desc: "accepted_frontier message with no compression", + op: AcceptedFrontierOp, + msg: &p2p.Message{ + Message: &p2p.Message_AcceptedFrontier_{ + AcceptedFrontier_: &p2p.AcceptedFrontier{ ChainId: testID[:], RequestId: 1, ContainerIds: [][]byte{testID[:], testID[:]}, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - ContainerIDs: 
[][]byte{testID[:], testID[:]}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid peer_list outbound message with no compression", - op: PeerList, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_PeerList{ - PeerList: &p2ppb.PeerList{ - ClaimedIpPorts: []*p2ppb.ClaimedIpPort{ - { - X509Certificate: testTLSCert.Certificate[0], - IpAddr: []byte(net.IPv4zero), - IpPort: 10, - Timestamp: 1, - Signature: []byte{0}, - }, - }, + desc: "get_accepted message with no compression", + op: GetAcceptedOp, + msg: &p2p.Message{ + Message: &p2p.Message_GetAccepted{ + GetAccepted: &p2p.GetAccepted{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + ContainerIds: [][]byte{testID[:], testID[:]}, + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - Peers: []ips.ClaimedIPPort{ - { - Cert: testTLSCert.Leaf, - IPPort: ips.IPPort{IP: net.IPv4zero, Port: uint16(10)}, - Timestamp: uint64(1), - Signature: []byte{0}, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, + }, + { + desc: "accepted message with no compression", + op: AcceptedOp, + msg: &p2p.Message{ + Message: &p2p.Message_Accepted_{ + Accepted_: &p2p.Accepted{ + ChainId: testID[:], + RequestId: 1, + ContainerIds: [][]byte{testID[:], testID[:]}, }, }, }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "invalid peer_list inbound message with invalid cert", - op: PeerList, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_PeerList{ - PeerList: &p2ppb.PeerList{ - ClaimedIpPorts: []*p2ppb.ClaimedIpPort{ - { - X509Certificate: []byte{0}, - IpAddr: []byte(net.IPv4zero[4:]), - IpPort: 10, - Timestamp: 1, - Signature: []byte{0}, - }, - }, + desc: "get_ancestors message with no compression", + 
op: GetAncestorsOp, + msg: &p2p.Message{ + Message: &p2p.Message_GetAncestors{ + GetAncestors: &p2p.GetAncestors{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + ContainerId: testID[:], + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - Peers: nil, - }, - expectedGetFieldErr: map[Field]error{Peers: errInvalidCert}, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "invalid peer_list inbound message with invalid ip", - op: PeerList, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_PeerList{ - PeerList: &p2ppb.PeerList{ - ClaimedIpPorts: []*p2ppb.ClaimedIpPort{ - { - X509Certificate: testTLSCert.Certificate[0], - IpAddr: []byte(net.IPv4zero[4:]), - IpPort: 10, - Timestamp: 1, - Signature: []byte{0}, - }, - }, + desc: "ancestors message with no compression", + op: AncestorsOp, + msg: &p2p.Message{ + Message: &p2p.Message_Ancestors_{ + Ancestors_: &p2p.Ancestors{ + ChainId: testID[:], + RequestId: 12345, + Containers: compressibleContainers, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - Peers: nil, - }, - expectedGetFieldErr: map[Field]error{Peers: errInvalidIPAddrLen}, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid peer_list outbound message with compression", - op: PeerList, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_PeerList{ - PeerList: &p2ppb.PeerList{ - ClaimedIpPorts: []*p2ppb.ClaimedIpPort{ - { - X509Certificate: testTLSCert.Certificate[0], - IpAddr: []byte(net.IPv6zero), - IpPort: 9651, - Timestamp: uint64(nowUnix), - Signature: compressibleContainers[0], - }, - }, + desc: "ancestors message with gzip compression", + op: AncestorsOp, + msg: &p2p.Message{ + Message: &p2p.Message_Ancestors_{ + Ancestors_: 
&p2p.Ancestors{ + ChainId: testID[:], + RequestId: 12345, + Containers: compressibleContainers, }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - Peers: []ips.ClaimedIPPort{ - { - Cert: testTLSCert.Leaf, - IPPort: ips.IPPort{IP: net.IPv6zero, Port: uint16(9651)}, - Timestamp: uint64(nowUnix), - Signature: compressibleContainers[0], + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: true, + }, + { + desc: "ancestors message with zstd compression", + op: AncestorsOp, + msg: &p2p.Message{ + Message: &p2p.Message_Ancestors_{ + Ancestors_: &p2p.Ancestors{ + ChainId: testID[:], + RequestId: 12345, + Containers: compressibleContainers, }, }, }, - expectedGetFieldErr: nil, + compressionType: compression.TypeZstd, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "invalid peer_list outbound message with compression and invalid cert", - op: PeerList, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_PeerList{ - PeerList: &p2ppb.PeerList{ - ClaimedIpPorts: []*p2ppb.ClaimedIpPort{ - { - X509Certificate: testTLSCert.Certificate[0][10:], - IpAddr: []byte(net.IPv6zero), - IpPort: 9651, - Timestamp: uint64(nowUnix), - Signature: compressibleContainers[0], - }, - }, + desc: "get message with no compression", + op: GetOp, + msg: &p2p.Message{ + Message: &p2p.Message_Get{ + Get: &p2p.Get{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + ContainerId: testID[:], + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - Peers: nil, - }, - expectedGetFieldErr: map[Field]error{Peers: errInvalidCert}, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "invalid peer_list outbound message with compression and invalid ip", - op: PeerList, - msg: &p2ppb.Message{ - Message: 
&p2ppb.Message_PeerList{ - PeerList: &p2ppb.PeerList{ - ClaimedIpPorts: []*p2ppb.ClaimedIpPort{ - { - X509Certificate: testTLSCert.Certificate[0], - IpAddr: []byte(net.IPv6zero[:5]), - IpPort: 9651, - Timestamp: uint64(nowUnix), - Signature: compressibleContainers[0], - }, - }, + desc: "put message with no compression", + op: PutOp, + msg: &p2p.Message{ + Message: &p2p.Message_Put{ + Put: &p2p.Put{ + ChainId: testID[:], + RequestId: 1, + Container: []byte{0}, + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - Peers: nil, - }, - expectedGetFieldErr: map[Field]error{Peers: errInvalidIPAddrLen}, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid version outbound message with no compression", - op: Version, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Version{ - Version: &p2ppb.Version{ - NetworkId: uint32(1337), - MyTime: uint64(nowUnix), - IpAddr: []byte(net.IPv6zero), - IpPort: 9651, - MyVersion: "v1.2.3", - MyVersionTime: uint64(nowUnix), - Sig: []byte{'y', 'e', 'e', 't'}, - TrackedSubnets: [][]byte{testID[:]}, + desc: "put message with gzip compression", + op: PutOp, + msg: &p2p.Message{ + Message: &p2p.Message_Put{ + Put: &p2p.Put{ + ChainId: testID[:], + RequestId: 1, + Container: compressibleContainers[0], + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - NetworkID: uint32(1337), - MyTime: uint64(nowUnix), - IP: ips.IPPort{IP: net.IPv6zero, Port: uint16(9651)}, - VersionStr: "v1.2.3", - VersionTime: uint64(nowUnix), - SigBytes: []byte{'y', 'e', 'e', 't'}, - TrackedSubnets: [][]byte{testID[:]}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "invalid version 
inbound message with invalid ip", - op: Version, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_Version{ - Version: &p2ppb.Version{ - NetworkId: uint32(1337), - MyTime: uint64(nowUnix), - IpAddr: []byte(net.IPv6zero[1:]), - IpPort: 9651, - MyVersion: "v1.2.3", - MyVersionTime: uint64(nowUnix), - Sig: []byte{'y', 'e', 'e', 't'}, - TrackedSubnets: [][]byte{testID[:]}, + desc: "put message with zstd compression", + op: PutOp, + msg: &p2p.Message{ + Message: &p2p.Message_Put{ + Put: &p2p.Put{ + ChainId: testID[:], + RequestId: 1, + Container: compressibleContainers[0], + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - NetworkID: uint32(1337), - MyTime: uint64(nowUnix), - IP: nil, - VersionStr: "v1.2.3", - VersionTime: uint64(nowUnix), - SigBytes: []byte{'y', 'e', 'e', 't'}, - TrackedSubnets: [][]byte{testID[:]}, - }, - expectedGetFieldErr: map[Field]error{IP: errInvalidIPAddrLen}, + compressionType: compression.TypeZstd, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid app_request outbound message with no compression", - op: AppRequest, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_AppRequest{ - AppRequest: &p2ppb.AppRequest{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - AppBytes: compressibleContainers[0], + desc: "push_query message with no compression", + op: PushQueryOp, + msg: &p2p.Message{ + Message: &p2p.Message_PushQuery{ + PushQuery: &p2p.PushQuery{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + Container: []byte{0}, + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - AppBytes: compressibleContainers[0], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: 
true, + bytesSaved: false, }, { - desc: "valid app_request outbound message with compression", - op: AppRequest, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_AppRequest{ - AppRequest: &p2ppb.AppRequest{ - ChainId: testID[:], - RequestId: 1, - Deadline: 1, - AppBytes: compressibleContainers[0], + desc: "push_query message with gzip compression", + op: PushQueryOp, + msg: &p2p.Message{ + Message: &p2p.Message_PushQuery{ + PushQuery: &p2p.PushQuery{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + Container: compressibleContainers[0], + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - AppBytes: compressibleContainers[0], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid app_response outbound message with no compression", - op: AppResponse, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_AppResponse{ - AppResponse: &p2ppb.AppResponse{ - ChainId: testID[:], - RequestId: 1, - AppBytes: compressibleContainers[0], + desc: "push_query message with zstd compression", + op: PushQueryOp, + msg: &p2p.Message{ + Message: &p2p.Message_PushQuery{ + PushQuery: &p2p.PushQuery{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + Container: compressibleContainers[0], + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - AppBytes: compressibleContainers[0], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeZstd, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid app_response outbound message with compression", - op: AppResponse, - msg: &p2ppb.Message{ - Message: 
&p2ppb.Message_AppResponse{ - AppResponse: &p2ppb.AppResponse{ - ChainId: testID[:], - RequestId: 1, - AppBytes: compressibleContainers[0], + desc: "pull_query message with no compression", + op: PullQueryOp, + msg: &p2p.Message{ + Message: &p2p.Message_PullQuery{ + PullQuery: &p2p.PullQuery{ + ChainId: testID[:], + RequestId: 1, + Deadline: 1, + ContainerId: testID[:], + EngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - AppBytes: compressibleContainers[0], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid app_gossip outbound message with no compression", - op: AppGossip, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_AppGossip{ - AppGossip: &p2ppb.AppGossip{ - ChainId: testID[:], - AppBytes: compressibleContainers[0], + desc: "chits message with no compression", + op: ChitsOp, + msg: &p2p.Message{ + Message: &p2p.Message_Chits{ + Chits: &p2p.Chits{ + ChainId: testID[:], + RequestId: 1, + PreferredContainerIds: [][]byte{testID[:], testID[:]}, }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - AppBytes: compressibleContainers[0], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid app_gossip outbound message with compression", - op: AppGossip, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_AppGossip{ - AppGossip: &p2ppb.AppGossip{ - ChainId: testID[:], - AppBytes: compressibleContainers[0], + desc: "app_request message with no compression", + op: AppRequestOp, + msg: &p2p.Message{ + Message: &p2p.Message_AppRequest{ + AppRequest: &p2p.AppRequest{ + ChainId: testID[:], + RequestId: 1, + 
Deadline: 1, + AppBytes: compressibleContainers[0], }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - AppBytes: compressibleContainers[0], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid get_state_summary_frontier outbound message with no compression", - op: GetStateSummaryFrontier, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_GetStateSummaryFrontier{ - GetStateSummaryFrontier: &p2ppb.GetStateSummaryFrontier{ + desc: "app_request message with gzip compression", + op: AppRequestOp, + msg: &p2p.Message{ + Message: &p2p.Message_AppRequest{ + AppRequest: &p2p.AppRequest{ ChainId: testID[:], RequestId: 1, Deadline: 1, + AppBytes: compressibleContainers[0], }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid state_summary_frontier outbound message with no compression", - op: StateSummaryFrontier, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_StateSummaryFrontier_{ - StateSummaryFrontier_: &p2ppb.StateSummaryFrontier{ + desc: "app_request message with zstd compression", + op: AppRequestOp, + msg: &p2p.Message{ + Message: &p2p.Message_AppRequest{ + AppRequest: &p2p.AppRequest{ ChainId: testID[:], RequestId: 1, - Summary: []byte{0}, + Deadline: 1, + AppBytes: compressibleContainers[0], }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - SummaryBytes: []byte{0}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeZstd, + 
bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid state_summary_frontier outbound message with compression", - op: StateSummaryFrontier, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_StateSummaryFrontier_{ - StateSummaryFrontier_: &p2ppb.StateSummaryFrontier{ + desc: "app_response message with no compression", + op: AppResponseOp, + msg: &p2p.Message{ + Message: &p2p.Message_AppResponse{ + AppResponse: &p2p.AppResponse{ ChainId: testID[:], RequestId: 1, - Summary: compressibleContainers[0], + AppBytes: compressibleContainers[0], }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - SummaryBytes: compressibleContainers[0], - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid get_accepted_state_summary_frontier outbound message with no compression", - op: GetAcceptedStateSummary, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_GetAcceptedStateSummary{ - GetAcceptedStateSummary: &p2ppb.GetAcceptedStateSummary{ + desc: "app_response message with gzip compression", + op: AppResponseOp, + msg: &p2p.Message{ + Message: &p2p.Message_AppResponse{ + AppResponse: &p2p.AppResponse{ ChainId: testID[:], RequestId: 1, - Deadline: 1, - Heights: []uint64{0}, + AppBytes: compressibleContainers[0], }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - SummaryHeights: []uint64{0}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid get_accepted_state_summary_frontier outbound message with compression", - op: GetAcceptedStateSummary, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_GetAcceptedStateSummary{ - 
GetAcceptedStateSummary: &p2ppb.GetAcceptedStateSummary{ + desc: "app_response message with zstd compression", + op: AppResponseOp, + msg: &p2p.Message{ + Message: &p2p.Message_AppResponse{ + AppResponse: &p2p.AppResponse{ ChainId: testID[:], RequestId: 1, - Deadline: 1, - Heights: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + AppBytes: compressibleContainers[0], }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - Deadline: uint64(1), - SummaryHeights: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeZstd, + bypassThrottling: true, + bytesSaved: true, }, { - desc: "valid accepted_state_summary_frontier outbound message with no compression", - op: AcceptedStateSummary, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_AcceptedStateSummary_{ - AcceptedStateSummary_: &p2ppb.AcceptedStateSummary{ - ChainId: testID[:], - RequestId: 1, - SummaryIds: [][]byte{testID[:], testID[:]}, + desc: "app_gossip message with no compression", + op: AppGossipOp, + msg: &p2p.Message{ + Message: &p2p.Message_AppGossip{ + AppGossip: &p2p.AppGossip{ + ChainId: testID[:], + AppBytes: compressibleContainers[0], }, }, }, - gzipCompress: false, - bypassThrottling: true, - bytesSaved: false, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - SummaryIDs: [][]byte{testID[:], testID[:]}, - }, - expectedGetFieldErr: nil, + compressionType: compression.TypeNone, + bypassThrottling: true, + bytesSaved: false, }, { - desc: "valid accepted_state_summary_frontier outbound message with compression", - op: AcceptedStateSummary, - msg: &p2ppb.Message{ - Message: &p2ppb.Message_AcceptedStateSummary_{ - AcceptedStateSummary_: &p2ppb.AcceptedStateSummary{ - ChainId: testID[:], - RequestId: 1, - SummaryIds: 
[][]byte{testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:]}, + desc: "app_gossip message with gzip compression", + op: AppGossipOp, + msg: &p2p.Message{ + Message: &p2p.Message_AppGossip{ + AppGossip: &p2p.AppGossip{ + ChainId: testID[:], + AppBytes: compressibleContainers[0], }, }, }, - gzipCompress: true, - bypassThrottling: true, - bytesSaved: true, - expectedOutboundErr: nil, - fields: map[Field]interface{}{ - ChainID: testID[:], - RequestID: uint32(1), - SummaryIDs: [][]byte{testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:], testID[:]}, + compressionType: compression.TypeGzip, + bypassThrottling: true, + bytesSaved: true, + }, + { + desc: "app_gossip message with zstd compression", + op: AppGossipOp, + msg: &p2p.Message{ + Message: &p2p.Message_AppGossip{ + AppGossip: &p2p.AppGossip{ + ChainId: testID[:], + AppBytes: compressibleContainers[0], + }, + }, }, + compressionType: compression.TypeZstd, + bypassThrottling: true, + bytesSaved: true, }, } for _, tv := range tests { require.True(t.Run(tv.desc, func(t2 *testing.T) { - // copy before we in-place update via marshal - oldProtoMsgS := tv.msg.String() - - encodedMsg, err := mb.createOutbound(tv.op, tv.msg, tv.gzipCompress, tv.bypassThrottling) - require.ErrorIs(err, tv.expectedOutboundErr, fmt.Errorf("unexpected error %v (%T)", err, err)) - if tv.expectedOutboundErr != nil { - return - } + encodedMsg, err := mb.createOutbound(tv.msg, tv.compressionType, tv.bypassThrottling) + require.NoError(err) - require.Equal(encodedMsg.BypassThrottling(), tv.bypassThrottling) + require.Equal(tv.bypassThrottling, encodedMsg.BypassThrottling()) + require.Equal(tv.op, encodedMsg.Op()) bytesSaved := encodedMsg.BytesSavedCompression() - if bytesSaved > 0 { - t2.Logf("saved %d bytes", bytesSaved) - } require.Equal(tv.bytesSaved, bytesSaved > 0) - if (bytesSaved > 0) != tv.bytesSaved { - // if bytes saved expected via compression, - // the 
outbound message BytesSavedCompression should return >bytesSaved - t.Fatalf("unexpected BytesSavedCompression>0 (%d), expected bytes saved %v", bytesSaved, tv.bytesSaved) - } - parsedMsg, err := mb.parseInbound(encodedMsg.Bytes(), ids.EmptyNodeID, func() {}) require.NoError(err) + require.Equal(tv.op, parsedMsg.Op()) + })) + } +} - // before/after compression, the message should be the same - require.Equal(parsedMsg.msg.String(), oldProtoMsgS) +func TestEmptyInboundMessage(t *testing.T) { + t.Parallel() - for field, v1 := range tv.fields { - v2, err := getField(parsedMsg.msg, field) + require := require.New(t) - // expects "getField" error - if expectedGetFieldErr, ok := tv.expectedGetFieldErr[field]; ok { - require.ErrorIs(err, expectedGetFieldErr) - continue - } + mb, err := newMsgBuilder( + logging.NoLog{}, + "test", + prometheus.NewRegistry(), + 5*time.Second, + ) + require.NoError(err) - require.NoError(err) - require.Equal(v1, v2) - } - })) + msg := &p2p.Message{} + msgBytes, err := proto.Marshal(msg) + require.NoError(err) + + _, err = mb.parseInbound(msgBytes, ids.EmptyNodeID, func() {}) + require.ErrorIs(err, errUnknownMessageType) +} + +func TestNilInboundMessage(t *testing.T) { + t.Parallel() + + require := require.New(t) + + mb, err := newMsgBuilder( + logging.NoLog{}, + "test", + prometheus.NewRegistry(), + 5*time.Second, + ) + require.NoError(err) + + msg := &p2p.Message{ + Message: &p2p.Message_Ping{ + Ping: nil, + }, } + msgBytes, err := proto.Marshal(msg) + require.NoError(err) + + parsedMsg, err := mb.parseInbound(msgBytes, ids.EmptyNodeID, func() {}) + require.NoError(err) + + pingMsg, ok := parsedMsg.message.(*p2p.Ping) + require.True(ok) + require.NotNil(pingMsg) } diff --git a/avalanchego/message/mock_message.go b/avalanchego/message/mock_message.go new file mode 100644 index 00000000..a32b3366 --- /dev/null +++ b/avalanchego/message/mock_message.go @@ -0,0 +1,93 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/message (interfaces: OutboundMessage) + +// Package message is a generated GoMock package. +package message + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockOutboundMessage is a mock of OutboundMessage interface. +type MockOutboundMessage struct { + ctrl *gomock.Controller + recorder *MockOutboundMessageMockRecorder +} + +// MockOutboundMessageMockRecorder is the mock recorder for MockOutboundMessage. +type MockOutboundMessageMockRecorder struct { + mock *MockOutboundMessage +} + +// NewMockOutboundMessage creates a new mock instance. +func NewMockOutboundMessage(ctrl *gomock.Controller) *MockOutboundMessage { + mock := &MockOutboundMessage{ctrl: ctrl} + mock.recorder = &MockOutboundMessageMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockOutboundMessage) EXPECT() *MockOutboundMessageMockRecorder { + return m.recorder +} + +// BypassThrottling mocks base method. +func (m *MockOutboundMessage) BypassThrottling() bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BypassThrottling") + ret0, _ := ret[0].(bool) + return ret0 +} + +// BypassThrottling indicates an expected call of BypassThrottling. +func (mr *MockOutboundMessageMockRecorder) BypassThrottling() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BypassThrottling", reflect.TypeOf((*MockOutboundMessage)(nil).BypassThrottling)) +} + +// Bytes mocks base method. +func (m *MockOutboundMessage) Bytes() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bytes") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Bytes indicates an expected call of Bytes. 
+func (mr *MockOutboundMessageMockRecorder) Bytes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bytes", reflect.TypeOf((*MockOutboundMessage)(nil).Bytes)) +} + +// BytesSavedCompression mocks base method. +func (m *MockOutboundMessage) BytesSavedCompression() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BytesSavedCompression") + ret0, _ := ret[0].(int) + return ret0 +} + +// BytesSavedCompression indicates an expected call of BytesSavedCompression. +func (mr *MockOutboundMessageMockRecorder) BytesSavedCompression() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BytesSavedCompression", reflect.TypeOf((*MockOutboundMessage)(nil).BytesSavedCompression)) +} + +// Op mocks base method. +func (m *MockOutboundMessage) Op() Op { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Op") + ret0, _ := ret[0].(Op) + return ret0 +} + +// Op indicates an expected call of Op. +func (mr *MockOutboundMessageMockRecorder) Op() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Op", reflect.TypeOf((*MockOutboundMessage)(nil).Op)) +} diff --git a/avalanchego/message/mock_outbound_message_builder.go b/avalanchego/message/mock_outbound_message_builder.go new file mode 100644 index 00000000..c11c43d2 --- /dev/null +++ b/avalanchego/message/mock_outbound_message_builder.go @@ -0,0 +1,386 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/message (interfaces: OutboundMsgBuilder) + +// Package message is a generated GoMock package. 
+package message + +import ( + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + p2p "github.com/ava-labs/avalanchego/proto/pb/p2p" + ips "github.com/ava-labs/avalanchego/utils/ips" + gomock "github.com/golang/mock/gomock" +) + +// MockOutboundMsgBuilder is a mock of OutboundMsgBuilder interface. +type MockOutboundMsgBuilder struct { + ctrl *gomock.Controller + recorder *MockOutboundMsgBuilderMockRecorder +} + +// MockOutboundMsgBuilderMockRecorder is the mock recorder for MockOutboundMsgBuilder. +type MockOutboundMsgBuilderMockRecorder struct { + mock *MockOutboundMsgBuilder +} + +// NewMockOutboundMsgBuilder creates a new mock instance. +func NewMockOutboundMsgBuilder(ctrl *gomock.Controller) *MockOutboundMsgBuilder { + mock := &MockOutboundMsgBuilder{ctrl: ctrl} + mock.recorder = &MockOutboundMsgBuilderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockOutboundMsgBuilder) EXPECT() *MockOutboundMsgBuilderMockRecorder { + return m.recorder +} + +// Accepted mocks base method. +func (m *MockOutboundMsgBuilder) Accepted(arg0 ids.ID, arg1 uint32, arg2 []ids.ID) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Accepted", arg0, arg1, arg2) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Accepted indicates an expected call of Accepted. +func (mr *MockOutboundMsgBuilderMockRecorder) Accepted(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accepted", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Accepted), arg0, arg1, arg2) +} + +// AcceptedFrontier mocks base method. 
+func (m *MockOutboundMsgBuilder) AcceptedFrontier(arg0 ids.ID, arg1 uint32, arg2 []ids.ID) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcceptedFrontier", arg0, arg1, arg2) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AcceptedFrontier indicates an expected call of AcceptedFrontier. +func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcceptedFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AcceptedFrontier), arg0, arg1, arg2) +} + +// AcceptedStateSummary mocks base method. +func (m *MockOutboundMsgBuilder) AcceptedStateSummary(arg0 ids.ID, arg1 uint32, arg2 []ids.ID) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcceptedStateSummary", arg0, arg1, arg2) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AcceptedStateSummary indicates an expected call of AcceptedStateSummary. +func (mr *MockOutboundMsgBuilderMockRecorder) AcceptedStateSummary(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcceptedStateSummary", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AcceptedStateSummary), arg0, arg1, arg2) +} + +// Ancestors mocks base method. +func (m *MockOutboundMsgBuilder) Ancestors(arg0 ids.ID, arg1 uint32, arg2 [][]byte) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Ancestors", arg0, arg1, arg2) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Ancestors indicates an expected call of Ancestors. 
+func (mr *MockOutboundMsgBuilderMockRecorder) Ancestors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ancestors", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Ancestors), arg0, arg1, arg2) +} + +// AppGossip mocks base method. +func (m *MockOutboundMsgBuilder) AppGossip(arg0 ids.ID, arg1 []byte) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppGossip", arg0, arg1) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AppGossip indicates an expected call of AppGossip. +func (mr *MockOutboundMsgBuilderMockRecorder) AppGossip(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppGossip), arg0, arg1) +} + +// AppRequest mocks base method. +func (m *MockOutboundMsgBuilder) AppRequest(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []byte) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppRequest", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AppRequest indicates an expected call of AppRequest. +func (mr *MockOutboundMsgBuilderMockRecorder) AppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppRequest), arg0, arg1, arg2, arg3) +} + +// AppResponse mocks base method. +func (m *MockOutboundMsgBuilder) AppResponse(arg0 ids.ID, arg1 uint32, arg2 []byte) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppResponse", arg0, arg1, arg2) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AppResponse indicates an expected call of AppResponse. 
+func (mr *MockOutboundMsgBuilderMockRecorder) AppResponse(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).AppResponse), arg0, arg1, arg2) +} + +// Chits mocks base method. +func (m *MockOutboundMsgBuilder) Chits(arg0 ids.ID, arg1 uint32, arg2, arg3 []ids.ID) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Chits", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Chits indicates an expected call of Chits. +func (mr *MockOutboundMsgBuilderMockRecorder) Chits(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Chits", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Chits), arg0, arg1, arg2, arg3) +} + +// Get mocks base method. +func (m *MockOutboundMsgBuilder) Get(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 ids.ID, arg4 p2p.EngineType) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockOutboundMsgBuilderMockRecorder) Get(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Get), arg0, arg1, arg2, arg3, arg4) +} + +// GetAccepted mocks base method. 
+func (m *MockOutboundMsgBuilder) GetAccepted(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []ids.ID, arg4 p2p.EngineType) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAccepted", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAccepted indicates an expected call of GetAccepted. +func (mr *MockOutboundMsgBuilderMockRecorder) GetAccepted(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccepted", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAccepted), arg0, arg1, arg2, arg3, arg4) +} + +// GetAcceptedFrontier mocks base method. +func (m *MockOutboundMsgBuilder) GetAcceptedFrontier(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 p2p.EngineType) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAcceptedFrontier", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAcceptedFrontier indicates an expected call of GetAcceptedFrontier. +func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedFrontier(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAcceptedFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAcceptedFrontier), arg0, arg1, arg2, arg3) +} + +// GetAcceptedStateSummary mocks base method. +func (m *MockOutboundMsgBuilder) GetAcceptedStateSummary(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []uint64) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAcceptedStateSummary", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAcceptedStateSummary indicates an expected call of GetAcceptedStateSummary. 
+func (mr *MockOutboundMsgBuilderMockRecorder) GetAcceptedStateSummary(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAcceptedStateSummary", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAcceptedStateSummary), arg0, arg1, arg2, arg3) +} + +// GetAncestors mocks base method. +func (m *MockOutboundMsgBuilder) GetAncestors(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 ids.ID, arg4 p2p.EngineType) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAncestors", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetAncestors indicates an expected call of GetAncestors. +func (mr *MockOutboundMsgBuilderMockRecorder) GetAncestors(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAncestors", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetAncestors), arg0, arg1, arg2, arg3, arg4) +} + +// GetStateSummaryFrontier mocks base method. +func (m *MockOutboundMsgBuilder) GetStateSummaryFrontier(arg0 ids.ID, arg1 uint32, arg2 time.Duration) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStateSummaryFrontier", arg0, arg1, arg2) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStateSummaryFrontier indicates an expected call of GetStateSummaryFrontier. +func (mr *MockOutboundMsgBuilderMockRecorder) GetStateSummaryFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateSummaryFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).GetStateSummaryFrontier), arg0, arg1, arg2) +} + +// PeerList mocks base method. 
+func (m *MockOutboundMsgBuilder) PeerList(arg0 []ips.ClaimedIPPort, arg1 bool) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PeerList", arg0, arg1) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PeerList indicates an expected call of PeerList. +func (mr *MockOutboundMsgBuilderMockRecorder) PeerList(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerList", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PeerList), arg0, arg1) +} + +// PeerListAck mocks base method. +func (m *MockOutboundMsgBuilder) PeerListAck(arg0 []*p2p.PeerAck) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PeerListAck", arg0) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PeerListAck indicates an expected call of PeerListAck. +func (mr *MockOutboundMsgBuilderMockRecorder) PeerListAck(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeerListAck", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PeerListAck), arg0) +} + +// Ping mocks base method. +func (m *MockOutboundMsgBuilder) Ping() (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Ping") + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Ping indicates an expected call of Ping. +func (mr *MockOutboundMsgBuilderMockRecorder) Ping() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Ping", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Ping)) +} + +// Pong mocks base method. 
+func (m *MockOutboundMsgBuilder) Pong(arg0 uint32, arg1 []*p2p.SubnetUptime) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Pong", arg0, arg1) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Pong indicates an expected call of Pong. +func (mr *MockOutboundMsgBuilderMockRecorder) Pong(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Pong", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Pong), arg0, arg1) +} + +// PullQuery mocks base method. +func (m *MockOutboundMsgBuilder) PullQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 ids.ID, arg4 p2p.EngineType) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PullQuery", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PullQuery indicates an expected call of PullQuery. +func (mr *MockOutboundMsgBuilderMockRecorder) PullQuery(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PullQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PullQuery), arg0, arg1, arg2, arg3, arg4) +} + +// PushQuery mocks base method. +func (m *MockOutboundMsgBuilder) PushQuery(arg0 ids.ID, arg1 uint32, arg2 time.Duration, arg3 []byte, arg4 p2p.EngineType) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PushQuery", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// PushQuery indicates an expected call of PushQuery. 
+func (mr *MockOutboundMsgBuilderMockRecorder) PushQuery(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PushQuery", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).PushQuery), arg0, arg1, arg2, arg3, arg4) +} + +// Put mocks base method. +func (m *MockOutboundMsgBuilder) Put(arg0 ids.ID, arg1 uint32, arg2 []byte, arg3 p2p.EngineType) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Put", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Put indicates an expected call of Put. +func (mr *MockOutboundMsgBuilderMockRecorder) Put(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Put", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Put), arg0, arg1, arg2, arg3) +} + +// StateSummaryFrontier mocks base method. +func (m *MockOutboundMsgBuilder) StateSummaryFrontier(arg0 ids.ID, arg1 uint32, arg2 []byte) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateSummaryFrontier", arg0, arg1, arg2) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateSummaryFrontier indicates an expected call of StateSummaryFrontier. +func (mr *MockOutboundMsgBuilderMockRecorder) StateSummaryFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSummaryFrontier", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).StateSummaryFrontier), arg0, arg1, arg2) +} + +// Version mocks base method. 
+func (m *MockOutboundMsgBuilder) Version(arg0 uint32, arg1 uint64, arg2 ips.IPPort, arg3 string, arg4 uint64, arg5 []byte, arg6 []ids.ID) (OutboundMessage, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(OutboundMessage) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockOutboundMsgBuilderMockRecorder) Version(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockOutboundMsgBuilder)(nil).Version), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} diff --git a/avalanchego/message/ops.go b/avalanchego/message/ops.go index 4ded5283..e3ee5cf2 100644 --- a/avalanchego/message/ops.go +++ b/avalanchego/message/ops.go @@ -1,98 +1,102 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message +import ( + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/set" +) + // Op is an opcode type Op byte // Types of messages that may be sent between nodes // Note: If you add a new parseable Op below, you must also add it to ops // (declared below) -// -// "_" are used in places where old message types were defined that are no -// longer supported. When new messages are introduced these values are typically -// safe to reuse. 
const ( // Handshake: - _ Op = iota // Used to be a GetVersion message - _ // Used to be a Version message - _ // Used to be a GetPeerList message - Pong - Ping - _ // Used to be a Pong message + PingOp Op = iota + PongOp + VersionOp + PeerListOp + PeerListAckOp + // State sync: + GetStateSummaryFrontierOp + GetStateSummaryFrontierFailedOp + StateSummaryFrontierOp + GetAcceptedStateSummaryOp + GetAcceptedStateSummaryFailedOp + AcceptedStateSummaryOp // Bootstrapping: - GetAcceptedFrontier - AcceptedFrontier - GetAccepted - Accepted - GetAncestors - Ancestors + GetAcceptedFrontierOp + GetAcceptedFrontierFailedOp + AcceptedFrontierOp + GetAcceptedOp + GetAcceptedFailedOp + AcceptedOp + GetAncestorsOp + GetAncestorsFailedOp + AncestorsOp // Consensus: - Get - Put - PushQuery - PullQuery - Chits - // Handshake / peer gossiping - _ // Used to be a Version message - PeerList - Version - // Application level: - AppRequest - AppResponse - AppGossip - // State sync - GetStateSummaryFrontier - StateSummaryFrontier - GetAcceptedStateSummary - AcceptedStateSummary - - // Internal messages (External messages should be added above these): - GetAcceptedFrontierFailed - GetAcceptedFailed - GetAncestorsFailed - GetFailed - QueryFailed - AppRequestFailed - Timeout - Connected - Disconnected - Notify - GossipRequest - GetStateSummaryFrontierFailed - GetAcceptedStateSummaryFailed + GetOp + GetFailedOp + PutOp + PushQueryOp + PullQueryOp + QueryFailedOp + ChitsOp + // Application: + AppRequestOp + AppRequestFailedOp + AppResponseOp + AppGossipOp + // Cross chain: + CrossChainAppRequestOp + CrossChainAppRequestFailedOp + CrossChainAppResponseOp + // Internal: + ConnectedOp + ConnectedSubnetOp + DisconnectedOp + NotifyOp + GossipRequestOp + TimeoutOp ) var ( HandshakeOps = []Op{ - Version, - PeerList, - Ping, - Pong, + PingOp, + PongOp, + VersionOp, + PeerListOp, + PeerListAckOp, } // List of all consensus request message types ConsensusRequestOps = []Op{ - GetAcceptedFrontier, - 
GetAccepted, - GetAncestors, - Get, - PushQuery, - PullQuery, - AppRequest, - GetStateSummaryFrontier, - GetAcceptedStateSummary, + GetStateSummaryFrontierOp, + GetAcceptedStateSummaryOp, + GetAcceptedFrontierOp, + GetAcceptedOp, + GetAncestorsOp, + GetOp, + PushQueryOp, + PullQueryOp, + AppRequestOp, } ConsensusResponseOps = []Op{ - AcceptedFrontier, - Accepted, - Ancestors, - Put, - Chits, - AppResponse, - StateSummaryFrontier, - AcceptedStateSummary, + StateSummaryFrontierOp, + AcceptedStateSummaryOp, + AcceptedFrontierOp, + AcceptedOp, + AncestorsOp, + PutOp, + ChitsOp, + AppResponseOp, } // AppGossip is the only message that is sent unrequested without the // expectation of a response @@ -100,227 +104,305 @@ var ( ConsensusRequestOps, append( ConsensusResponseOps, - AppGossip, + AppGossipOp, )..., ) ConsensusInternalOps = []Op{ - GetAcceptedFrontierFailed, - GetAcceptedFailed, - GetAncestorsFailed, - GetFailed, - QueryFailed, - AppRequestFailed, - Timeout, - Connected, - Disconnected, - Notify, - GossipRequest, - GetStateSummaryFrontierFailed, - GetAcceptedStateSummaryFailed, + GetStateSummaryFrontierFailedOp, + GetAcceptedStateSummaryFailedOp, + GetAcceptedFrontierFailedOp, + GetAcceptedFailedOp, + GetAncestorsFailedOp, + GetFailedOp, + QueryFailedOp, + AppRequestFailedOp, + CrossChainAppRequestOp, + CrossChainAppRequestFailedOp, + CrossChainAppResponseOp, + ConnectedOp, + ConnectedSubnetOp, + DisconnectedOp, + NotifyOp, + GossipRequestOp, + TimeoutOp, } ConsensusOps = append(ConsensusExternalOps, ConsensusInternalOps...) ExternalOps = append(ConsensusExternalOps, HandshakeOps...) 
SynchronousOps = []Op{ - GetAcceptedFrontier, - AcceptedFrontier, - GetAccepted, - Accepted, - GetAncestors, - Ancestors, - Get, - Put, - PushQuery, - PullQuery, - Chits, - GetAcceptedFrontierFailed, - GetAcceptedFailed, - GetAncestorsFailed, - GetFailed, - QueryFailed, - Connected, - Disconnected, - // State sync - GetStateSummaryFrontier, - StateSummaryFrontier, - GetAcceptedStateSummary, - AcceptedStateSummary, - GetStateSummaryFrontierFailed, - GetAcceptedStateSummaryFailed, + GetStateSummaryFrontierOp, + GetStateSummaryFrontierFailedOp, + StateSummaryFrontierOp, + GetAcceptedStateSummaryOp, + GetAcceptedStateSummaryFailedOp, + AcceptedStateSummaryOp, + // Bootstrapping + GetAcceptedFrontierOp, + GetAcceptedFrontierFailedOp, + AcceptedFrontierOp, + GetAcceptedOp, + GetAcceptedFailedOp, + AcceptedOp, + GetAncestorsOp, + GetAncestorsFailedOp, + AncestorsOp, + // Consensus + GetOp, + GetFailedOp, + PutOp, + PushQueryOp, + PullQueryOp, + QueryFailedOp, + ChitsOp, + // Internal + ConnectedOp, + ConnectedSubnetOp, + DisconnectedOp, } AsynchronousOps = []Op{ - AppRequest, - AppGossip, - AppRequestFailed, - AppResponse, + // Application + AppRequestOp, + AppRequestFailedOp, + AppGossipOp, + AppResponseOp, + // Cross chain + CrossChainAppRequestOp, + CrossChainAppRequestFailedOp, + CrossChainAppResponseOp, } - RequestToResponseOps = map[Op]Op{ - GetAcceptedFrontier: AcceptedFrontier, - GetAccepted: Accepted, - GetAncestors: Ancestors, - Get: Put, - PushQuery: Chits, - PullQuery: Chits, - AppRequest: AppResponse, - GetStateSummaryFrontier: StateSummaryFrontier, - GetAcceptedStateSummary: AcceptedStateSummary, - } - ResponseToFailedOps = map[Op]Op{ - AcceptedFrontier: GetAcceptedFrontierFailed, - Accepted: GetAcceptedFailed, - Ancestors: GetAncestorsFailed, - Put: GetFailed, - Chits: QueryFailed, - AppResponse: AppRequestFailed, - StateSummaryFrontier: GetStateSummaryFrontierFailed, - AcceptedStateSummary: GetAcceptedStateSummaryFailed, - } FailedToResponseOps = 
map[Op]Op{ - GetStateSummaryFrontierFailed: StateSummaryFrontier, - GetAcceptedStateSummaryFailed: AcceptedStateSummary, - GetAcceptedFrontierFailed: AcceptedFrontier, - GetAcceptedFailed: Accepted, - GetAncestorsFailed: Ancestors, - GetFailed: Put, - QueryFailed: Chits, - AppRequestFailed: AppResponse, + GetStateSummaryFrontierFailedOp: StateSummaryFrontierOp, + GetAcceptedStateSummaryFailedOp: AcceptedStateSummaryOp, + GetAcceptedFrontierFailedOp: AcceptedFrontierOp, + GetAcceptedFailedOp: AcceptedOp, + GetAncestorsFailedOp: AncestorsOp, + GetFailedOp: PutOp, + QueryFailedOp: ChitsOp, + AppRequestFailedOp: AppResponseOp, + CrossChainAppRequestFailedOp: CrossChainAppResponseOp, } - UnrequestedOps = map[Op]struct{}{ - GetAcceptedFrontier: {}, - GetAccepted: {}, - GetAncestors: {}, - Get: {}, - PushQuery: {}, - PullQuery: {}, - AppRequest: {}, - AppGossip: {}, - GetStateSummaryFrontier: {}, - GetAcceptedStateSummary: {}, + UnrequestedOps = set.Set[Op]{ + GetAcceptedFrontierOp: {}, + GetAcceptedOp: {}, + GetAncestorsOp: {}, + GetOp: {}, + PushQueryOp: {}, + PullQueryOp: {}, + AppRequestOp: {}, + AppGossipOp: {}, + CrossChainAppRequestOp: {}, + GetStateSummaryFrontierOp: {}, + GetAcceptedStateSummaryOp: {}, } - // Defines the messages that can be sent/received with this network - messages = map[Op][]Field{ - // Handshake: - // TODO: remove NodeID from the Version message - Version: {NetworkID, NodeID, MyTime, IP, VersionStr, VersionTime, SigBytes, TrackedSubnets}, - PeerList: {Peers}, - Ping: {}, - Pong: {Uptime}, - // Bootstrapping: - GetAcceptedFrontier: {ChainID, RequestID, Deadline}, - AcceptedFrontier: {ChainID, RequestID, ContainerIDs}, - GetAccepted: {ChainID, RequestID, Deadline, ContainerIDs}, - Accepted: {ChainID, RequestID, ContainerIDs}, - GetAncestors: {ChainID, RequestID, Deadline, ContainerID}, - Ancestors: {ChainID, RequestID, MultiContainerBytes}, - // Consensus: - Get: {ChainID, RequestID, Deadline, ContainerID}, - Put: {ChainID, RequestID, 
ContainerID, ContainerBytes}, - PushQuery: {ChainID, RequestID, Deadline, ContainerID, ContainerBytes}, - PullQuery: {ChainID, RequestID, Deadline, ContainerID}, - Chits: {ChainID, RequestID, ContainerIDs}, - - // Application level: - AppRequest: {ChainID, RequestID, Deadline, AppBytes}, - AppResponse: {ChainID, RequestID, AppBytes}, - AppGossip: {ChainID, AppBytes}, - // State Sync - GetStateSummaryFrontier: {ChainID, RequestID, Deadline}, - StateSummaryFrontier: {ChainID, RequestID, SummaryBytes}, - GetAcceptedStateSummary: {ChainID, RequestID, Deadline, SummaryHeights}, - AcceptedStateSummary: {ChainID, RequestID, SummaryIDs}, - } + errUnknownMessageType = errors.New("unknown message type") ) -func (op Op) Compressible() bool { - switch op { - case PeerList, Put, Ancestors, PushQuery, - AppRequest, AppResponse, AppGossip, - StateSummaryFrontier, GetAcceptedStateSummary, AcceptedStateSummary: - return true - default: - return false - } -} - func (op Op) String() string { switch op { - case Version: - return "version" - case PeerList: - return "peerlist" - case Ping: + // Handshake + case PingOp: return "ping" - case Pong: + case PongOp: return "pong" - case GetAcceptedFrontier: + case VersionOp: + return "version" + case PeerListOp: + return "peerlist" + case PeerListAckOp: + return "peerlist_ack" + // State sync + case GetStateSummaryFrontierOp: + return "get_state_summary_frontier" + case GetStateSummaryFrontierFailedOp: + return "get_state_summary_frontier_failed" + case StateSummaryFrontierOp: + return "state_summary_frontier" + case GetAcceptedStateSummaryOp: + return "get_accepted_state_summary" + case GetAcceptedStateSummaryFailedOp: + return "get_accepted_state_summary_failed" + case AcceptedStateSummaryOp: + return "accepted_state_summary" + // Bootstrapping + case GetAcceptedFrontierOp: return "get_accepted_frontier" - case AcceptedFrontier: + case GetAcceptedFrontierFailedOp: + return "get_accepted_frontier_failed" + case AcceptedFrontierOp: return 
"accepted_frontier" - case GetAccepted: + case GetAcceptedOp: return "get_accepted" - case Accepted: + case GetAcceptedFailedOp: + return "get_accepted_failed" + case AcceptedOp: return "accepted" - case Get: - return "get" - case GetAncestors: + case GetAncestorsOp: return "get_ancestors" - case Put: - return "put" - case Ancestors: + case GetAncestorsFailedOp: + return "get_ancestors_failed" + case AncestorsOp: return "ancestors" - case PushQuery: + // Consensus + case GetOp: + return "get" + case GetFailedOp: + return "get_failed" + case PutOp: + return "put" + case PushQueryOp: return "push_query" - case PullQuery: + case PullQueryOp: return "pull_query" - case Chits: + case QueryFailedOp: + return "query_failed" + case ChitsOp: return "chits" - case AppRequest: + // Application + case AppRequestOp: return "app_request" - case AppResponse: + case AppRequestFailedOp: + return "app_request_failed" + case AppResponseOp: return "app_response" - case AppGossip: + case AppGossipOp: return "app_gossip" - case GetStateSummaryFrontier: - return "get_state_summary_frontier" - case StateSummaryFrontier: - return "state_summary_frontier" - case GetAcceptedStateSummary: - return "get_accepted_state_summary" - case AcceptedStateSummary: - return "accepted_state_summary" - - case GetAcceptedFrontierFailed: - return "get_accepted_frontier_failed" - case GetAcceptedFailed: - return "get_accepted_failed" - case GetAncestorsFailed: - return "get_ancestors_failed" - case GetFailed: - return "get_failed" - case QueryFailed: - return "query_failed" - case AppRequestFailed: - return "app_request_failed" - case GetStateSummaryFrontierFailed: - return "get_state_summary_frontier_failed" - case GetAcceptedStateSummaryFailed: - return "get_accepted_state_summary_failed" - case Timeout: - return "timeout" - case Connected: + // Cross chain + case CrossChainAppRequestOp: + return "cross_chain_app_request" + case CrossChainAppRequestFailedOp: + return "cross_chain_app_request_failed" + case 
CrossChainAppResponseOp: + return "cross_chain_app_response" + // Internal + case ConnectedOp: return "connected" - case Disconnected: + case ConnectedSubnetOp: + return "connected_subnet" + case DisconnectedOp: return "disconnected" - case Notify: + case NotifyOp: return "notify" - case GossipRequest: + case GossipRequestOp: return "gossip_request" + case TimeoutOp: + return "timeout" + default: + return "unknown" + } +} + +func Unwrap(m *p2p.Message) (interface{}, error) { + switch msg := m.GetMessage().(type) { + // Handshake: + case *p2p.Message_Ping: + return msg.Ping, nil + case *p2p.Message_Pong: + return msg.Pong, nil + case *p2p.Message_Version: + return msg.Version, nil + case *p2p.Message_PeerList: + return msg.PeerList, nil + case *p2p.Message_PeerListAck: + return msg.PeerListAck, nil + // State sync: + case *p2p.Message_GetStateSummaryFrontier: + return msg.GetStateSummaryFrontier, nil + case *p2p.Message_StateSummaryFrontier_: + return msg.StateSummaryFrontier_, nil + case *p2p.Message_GetAcceptedStateSummary: + return msg.GetAcceptedStateSummary, nil + case *p2p.Message_AcceptedStateSummary_: + return msg.AcceptedStateSummary_, nil + // Bootstrapping: + case *p2p.Message_GetAcceptedFrontier: + return msg.GetAcceptedFrontier, nil + case *p2p.Message_AcceptedFrontier_: + return msg.AcceptedFrontier_, nil + case *p2p.Message_GetAccepted: + return msg.GetAccepted, nil + case *p2p.Message_Accepted_: + return msg.Accepted_, nil + case *p2p.Message_GetAncestors: + return msg.GetAncestors, nil + case *p2p.Message_Ancestors_: + return msg.Ancestors_, nil + // Consensus: + case *p2p.Message_Get: + return msg.Get, nil + case *p2p.Message_Put: + return msg.Put, nil + case *p2p.Message_PushQuery: + return msg.PushQuery, nil + case *p2p.Message_PullQuery: + return msg.PullQuery, nil + case *p2p.Message_Chits: + return msg.Chits, nil + // Application: + case *p2p.Message_AppRequest: + return msg.AppRequest, nil + case *p2p.Message_AppResponse: + return 
msg.AppResponse, nil + case *p2p.Message_AppGossip: + return msg.AppGossip, nil + default: + return nil, fmt.Errorf("%w: %T", errUnknownMessageType, msg) + } +} + +func ToOp(m *p2p.Message) (Op, error) { + switch msg := m.GetMessage().(type) { + case *p2p.Message_Ping: + return PingOp, nil + case *p2p.Message_Pong: + return PongOp, nil + case *p2p.Message_Version: + return VersionOp, nil + case *p2p.Message_PeerList: + return PeerListOp, nil + case *p2p.Message_PeerListAck: + return PeerListAckOp, nil + case *p2p.Message_GetStateSummaryFrontier: + return GetStateSummaryFrontierOp, nil + case *p2p.Message_StateSummaryFrontier_: + return StateSummaryFrontierOp, nil + case *p2p.Message_GetAcceptedStateSummary: + return GetAcceptedStateSummaryOp, nil + case *p2p.Message_AcceptedStateSummary_: + return AcceptedStateSummaryOp, nil + case *p2p.Message_GetAcceptedFrontier: + return GetAcceptedFrontierOp, nil + case *p2p.Message_AcceptedFrontier_: + return AcceptedFrontierOp, nil + case *p2p.Message_GetAccepted: + return GetAcceptedOp, nil + case *p2p.Message_Accepted_: + return AcceptedOp, nil + case *p2p.Message_GetAncestors: + return GetAncestorsOp, nil + case *p2p.Message_Ancestors_: + return AncestorsOp, nil + case *p2p.Message_Get: + return GetOp, nil + case *p2p.Message_Put: + return PutOp, nil + case *p2p.Message_PushQuery: + return PushQueryOp, nil + case *p2p.Message_PullQuery: + return PullQueryOp, nil + case *p2p.Message_Chits: + return ChitsOp, nil + case *p2p.Message_AppRequest: + return AppRequestOp, nil + case *p2p.Message_AppResponse: + return AppResponseOp, nil + case *p2p.Message_AppGossip: + return AppGossipOp, nil default: - return "Unknown Op" + return 0, fmt.Errorf("%w: %T", errUnknownMessageType, msg) } } diff --git a/avalanchego/message/outbound_msg_builder.go b/avalanchego/message/outbound_msg_builder.go index 8a6c3f5f..693120b1 100644 --- a/avalanchego/message/outbound_msg_builder.go +++ b/avalanchego/message/outbound_msg_builder.go @@ -1,4 +1,4 
@@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -7,10 +7,12 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/ips" ) -var _ OutboundMsgBuilder = &outMsgBuilderWithPacker{} +var _ OutboundMsgBuilder = (*outMsgBuilder)(nil) // OutboundMsgBuilder builds outbound messages. Outbound messages are returned // with a reference count of 1. Once the reference count hits 0, the message @@ -31,9 +33,16 @@ type OutboundMsgBuilder interface { bypassThrottling bool, ) (OutboundMessage, error) + PeerListAck( + peerAcks []*p2p.PeerAck, + ) (OutboundMessage, error) + Ping() (OutboundMessage, error) - Pong(uptimePercentage uint8) (OutboundMessage, error) + Pong( + primaryUptime uint32, + subnetUptimes []*p2p.SubnetUptime, + ) (OutboundMessage, error) GetStateSummaryFrontier( chainID ids.ID, @@ -64,6 +73,7 @@ type OutboundMsgBuilder interface { chainID ids.ID, requestID uint32, deadline time.Duration, + engineType p2p.EngineType, ) (OutboundMessage, error) AcceptedFrontier( @@ -77,6 +87,7 @@ type OutboundMsgBuilder interface { requestID uint32, deadline time.Duration, containerIDs []ids.ID, + engineType p2p.EngineType, ) (OutboundMessage, error) Accepted( @@ -90,6 +101,7 @@ type OutboundMsgBuilder interface { requestID uint32, deadline time.Duration, containerID ids.ID, + engineType p2p.EngineType, ) (OutboundMessage, error) Ancestors( @@ -103,12 +115,14 @@ type OutboundMsgBuilder interface { requestID uint32, deadline time.Duration, containerID ids.ID, + engineType p2p.EngineType, ) (OutboundMessage, error) Put( chainID ids.ID, requestID uint32, container []byte, + engineType p2p.EngineType, ) (OutboundMessage, error) PushQuery( @@ -116,6 +130,7 @@ type OutboundMsgBuilder interface { 
requestID uint32, deadline time.Duration, container []byte, + engineType p2p.EngineType, ) (OutboundMessage, error) PullQuery( @@ -123,12 +138,14 @@ type OutboundMsgBuilder interface { requestID uint32, deadline time.Duration, containerID ids.ID, + engineType p2p.EngineType, ) (OutboundMessage, error) Chits( chainID ids.ID, requestID uint32, - containerIDs []ids.ID, + preferredContainerIDs []ids.ID, + acceptedContainerIDs []ids.ID, ) (OutboundMessage, error) AppRequest( @@ -150,19 +167,52 @@ type OutboundMsgBuilder interface { ) (OutboundMessage, error) } -type outMsgBuilderWithPacker struct { - c Codec - compress bool +type outMsgBuilder struct { + compressionType compression.Type + + builder *msgBuilder } -func NewOutboundBuilderWithPacker(c Codec, enableCompression bool) OutboundMsgBuilder { - return &outMsgBuilderWithPacker{ - c: c, - compress: enableCompression, +// Use "message.NewCreator" to import this function +// since we do not expose "msgBuilder" yet +func newOutboundBuilder(compressionType compression.Type, builder *msgBuilder) OutboundMsgBuilder { + return &outMsgBuilder{ + compressionType: compressionType, + builder: builder, } } -func (b *outMsgBuilderWithPacker) Version( +func (b *outMsgBuilder) Ping() (OutboundMessage, error) { + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_Ping{ + Ping: &p2p.Ping{}, + }, + }, + compression.TypeNone, + false, + ) +} + +func (b *outMsgBuilder) Pong( + primaryUptime uint32, + subnetUptimes []*p2p.SubnetUptime, +) (OutboundMessage, error) { + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_Pong{ + Pong: &p2p.Pong{ + Uptime: primaryUptime, + SubnetUptimes: subnetUptimes, + }, + }, + }, + compression.TypeNone, + false, + ) +} + +func (b *outMsgBuilder) Version( networkID uint32, myTime uint64, ip ips.IPPort, @@ -172,380 +222,455 @@ func (b *outMsgBuilderWithPacker) Version( trackedSubnets []ids.ID, ) (OutboundMessage, error) { subnetIDBytes := make([][]byte, 
len(trackedSubnets)) - for i, containerID := range trackedSubnets { - copy := containerID - subnetIDBytes[i] = copy[:] - } - return b.c.Pack( - Version, - map[Field]interface{}{ - NetworkID: networkID, - NodeID: uint32(0), - MyTime: myTime, - IP: ip, - VersionStr: myVersion, - VersionTime: myVersionTime, - SigBytes: sig, - TrackedSubnets: subnetIDBytes, + encodeIDs(trackedSubnets, subnetIDBytes) + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_Version{ + Version: &p2p.Version{ + NetworkId: networkID, + MyTime: myTime, + IpAddr: ip.IP.To16(), + IpPort: uint32(ip.Port), + MyVersion: myVersion, + MyVersionTime: myVersionTime, + Sig: sig, + TrackedSubnets: subnetIDBytes, + }, + }, }, - b.compress && Version.Compressible(), + compression.TypeNone, true, ) } -func (b *outMsgBuilderWithPacker) PeerList(peers []ips.ClaimedIPPort, bypassThrottling bool) (OutboundMessage, error) { - return b.c.Pack( - PeerList, - map[Field]interface{}{ - Peers: peers, +func (b *outMsgBuilder) PeerList(peers []ips.ClaimedIPPort, bypassThrottling bool) (OutboundMessage, error) { + claimIPPorts := make([]*p2p.ClaimedIpPort, len(peers)) + for i, p := range peers { + claimIPPorts[i] = &p2p.ClaimedIpPort{ + X509Certificate: p.Cert.Raw, + IpAddr: p.IPPort.IP.To16(), + IpPort: uint32(p.IPPort.Port), + Timestamp: p.Timestamp, + Signature: p.Signature, + TxId: p.TxID[:], + } + } + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_PeerList{ + PeerList: &p2p.PeerList{ + ClaimedIpPorts: claimIPPorts, + }, + }, }, - b.compress && PeerList.Compressible(), + b.compressionType, bypassThrottling, ) } -func (b *outMsgBuilderWithPacker) Ping() (OutboundMessage, error) { - return b.c.Pack( - Ping, - nil, - b.compress && Ping.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithPacker) Pong(uptimePercentage uint8) (OutboundMessage, error) { - return b.c.Pack( - Pong, - map[Field]interface{}{ - Uptime: uptimePercentage, +func (b *outMsgBuilder) 
PeerListAck(peerAcks []*p2p.PeerAck) (OutboundMessage, error) { + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_PeerListAck{ + PeerListAck: &p2p.PeerListAck{ + PeerAcks: peerAcks, + }, + }, }, - b.compress && Pong.Compressible(), + compression.TypeNone, false, ) } -func (b *outMsgBuilderWithPacker) GetStateSummaryFrontier( +func (b *outMsgBuilder) GetStateSummaryFrontier( chainID ids.ID, requestID uint32, deadline time.Duration, ) (OutboundMessage, error) { - return b.c.Pack( - GetStateSummaryFrontier, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_GetStateSummaryFrontier{ + GetStateSummaryFrontier: &p2p.GetStateSummaryFrontier{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + }, + }, }, - b.compress && GetStateSummaryFrontier.Compressible(), + compression.TypeNone, false, ) } -func (b *outMsgBuilderWithPacker) StateSummaryFrontier( +func (b *outMsgBuilder) StateSummaryFrontier( chainID ids.ID, requestID uint32, summary []byte, ) (OutboundMessage, error) { - return b.c.Pack( - StateSummaryFrontier, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - SummaryBytes: summary, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_StateSummaryFrontier_{ + StateSummaryFrontier_: &p2p.StateSummaryFrontier{ + ChainId: chainID[:], + RequestId: requestID, + Summary: summary, + }, + }, }, - b.compress && StateSummaryFrontier.Compressible(), + b.compressionType, false, ) } -func (b *outMsgBuilderWithPacker) GetAcceptedStateSummary( +func (b *outMsgBuilder) GetAcceptedStateSummary( chainID ids.ID, requestID uint32, deadline time.Duration, heights []uint64, ) (OutboundMessage, error) { - return b.c.Pack( - GetAcceptedStateSummary, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - SummaryHeights: 
heights, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_GetAcceptedStateSummary{ + GetAcceptedStateSummary: &p2p.GetAcceptedStateSummary{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + Heights: heights, + }, + }, }, - b.compress && GetAcceptedStateSummary.Compressible(), + b.compressionType, false, ) } -func (b *outMsgBuilderWithPacker) AcceptedStateSummary( +func (b *outMsgBuilder) AcceptedStateSummary( chainID ids.ID, requestID uint32, summaryIDs []ids.ID, ) (OutboundMessage, error) { summaryIDBytes := make([][]byte, len(summaryIDs)) encodeIDs(summaryIDs, summaryIDBytes) - return b.c.Pack( - AcceptedStateSummary, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - SummaryIDs: summaryIDBytes, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_AcceptedStateSummary_{ + AcceptedStateSummary_: &p2p.AcceptedStateSummary{ + ChainId: chainID[:], + RequestId: requestID, + SummaryIds: summaryIDBytes, + }, + }, }, - b.compress && AcceptedStateSummary.Compressible(), + b.compressionType, false, ) } -func (b *outMsgBuilderWithPacker) GetAcceptedFrontier( +func (b *outMsgBuilder) GetAcceptedFrontier( chainID ids.ID, requestID uint32, deadline time.Duration, + engineType p2p.EngineType, ) (OutboundMessage, error) { - return b.c.Pack( - GetAcceptedFrontier, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_GetAcceptedFrontier{ + GetAcceptedFrontier: &p2p.GetAcceptedFrontier{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + EngineType: engineType, + }, + }, }, - b.compress && GetAcceptedFrontier.Compressible(), + compression.TypeNone, false, ) } -func (b *outMsgBuilderWithPacker) AcceptedFrontier( +func (b *outMsgBuilder) AcceptedFrontier( chainID ids.ID, requestID uint32, containerIDs []ids.ID, ) (OutboundMessage, error) 
{ containerIDBytes := make([][]byte, len(containerIDs)) encodeIDs(containerIDs, containerIDBytes) - return b.c.Pack( - AcceptedFrontier, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - ContainerIDs: containerIDBytes, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_AcceptedFrontier_{ + AcceptedFrontier_: &p2p.AcceptedFrontier{ + ChainId: chainID[:], + RequestId: requestID, + ContainerIds: containerIDBytes, + }, + }, }, - b.compress && AcceptedFrontier.Compressible(), + compression.TypeNone, false, ) } -func (b *outMsgBuilderWithPacker) GetAccepted( +func (b *outMsgBuilder) GetAccepted( chainID ids.ID, requestID uint32, deadline time.Duration, containerIDs []ids.ID, + engineType p2p.EngineType, ) (OutboundMessage, error) { containerIDBytes := make([][]byte, len(containerIDs)) encodeIDs(containerIDs, containerIDBytes) - return b.c.Pack( - GetAccepted, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - ContainerIDs: containerIDBytes, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_GetAccepted{ + GetAccepted: &p2p.GetAccepted{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + ContainerIds: containerIDBytes, + EngineType: engineType, + }, + }, }, - b.compress && GetAccepted.Compressible(), + compression.TypeNone, false, ) } -func (b *outMsgBuilderWithPacker) Accepted( +func (b *outMsgBuilder) Accepted( chainID ids.ID, requestID uint32, containerIDs []ids.ID, ) (OutboundMessage, error) { containerIDBytes := make([][]byte, len(containerIDs)) encodeIDs(containerIDs, containerIDBytes) - return b.c.Pack( - Accepted, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - ContainerIDs: containerIDBytes, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_Accepted_{ + Accepted_: &p2p.Accepted{ + ChainId: chainID[:], + RequestId: requestID, + ContainerIds: containerIDBytes, + }, 
+ }, }, - b.compress && Accepted.Compressible(), + compression.TypeNone, false, ) } -func (b *outMsgBuilderWithPacker) GetAncestors( +func (b *outMsgBuilder) GetAncestors( chainID ids.ID, requestID uint32, deadline time.Duration, containerID ids.ID, + engineType p2p.EngineType, ) (OutboundMessage, error) { - return b.c.Pack( - GetAncestors, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - ContainerID: containerID[:], + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_GetAncestors{ + GetAncestors: &p2p.GetAncestors{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + ContainerId: containerID[:], + EngineType: engineType, + }, + }, }, - b.compress && GetAncestors.Compressible(), + compression.TypeNone, false, ) } -func (b *outMsgBuilderWithPacker) Ancestors( +func (b *outMsgBuilder) Ancestors( chainID ids.ID, requestID uint32, containers [][]byte, ) (OutboundMessage, error) { - return b.c.Pack( - Ancestors, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - MultiContainerBytes: containers, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_Ancestors_{ + Ancestors_: &p2p.Ancestors{ + ChainId: chainID[:], + RequestId: requestID, + Containers: containers, + }, + }, }, - b.compress && Ancestors.Compressible(), + b.compressionType, false, ) } -func (b *outMsgBuilderWithPacker) Get( +func (b *outMsgBuilder) Get( chainID ids.ID, requestID uint32, deadline time.Duration, containerID ids.ID, + engineType p2p.EngineType, ) (OutboundMessage, error) { - return b.c.Pack( - Get, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - ContainerID: containerID[:], + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_Get{ + Get: &p2p.Get{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + ContainerId: containerID[:], + EngineType: 
engineType, + }, + }, }, - b.compress && Get.Compressible(), + compression.TypeNone, false, ) } -func (b *outMsgBuilderWithPacker) Put( +func (b *outMsgBuilder) Put( chainID ids.ID, requestID uint32, container []byte, + engineType p2p.EngineType, ) (OutboundMessage, error) { - return b.c.Pack( - Put, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - ContainerID: ids.Empty[:], // Populated for backwards compatibility - ContainerBytes: container, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_Put{ + Put: &p2p.Put{ + ChainId: chainID[:], + RequestId: requestID, + Container: container, + EngineType: engineType, + }, + }, }, - b.compress && Put.Compressible(), + b.compressionType, false, ) } -func (b *outMsgBuilderWithPacker) PushQuery( +func (b *outMsgBuilder) PushQuery( chainID ids.ID, requestID uint32, deadline time.Duration, container []byte, + engineType p2p.EngineType, ) (OutboundMessage, error) { - return b.c.Pack( - PushQuery, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - ContainerID: ids.Empty[:], // Populated for backwards compatibility - ContainerBytes: container, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_PushQuery{ + PushQuery: &p2p.PushQuery{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + Container: container, + EngineType: engineType, + }, + }, }, - b.compress && PushQuery.Compressible(), + b.compressionType, false, ) } -func (b *outMsgBuilderWithPacker) PullQuery( +func (b *outMsgBuilder) PullQuery( chainID ids.ID, requestID uint32, deadline time.Duration, containerID ids.ID, + engineType p2p.EngineType, ) (OutboundMessage, error) { - return b.c.Pack( - PullQuery, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - ContainerID: containerID[:], + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_PullQuery{ + 
PullQuery: &p2p.PullQuery{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + ContainerId: containerID[:], + EngineType: engineType, + }, + }, }, - b.compress && PullQuery.Compressible(), + compression.TypeNone, false, ) } -func (b *outMsgBuilderWithPacker) Chits( +func (b *outMsgBuilder) Chits( chainID ids.ID, requestID uint32, - containerIDs []ids.ID, + preferredContainerIDs []ids.ID, + acceptedContainerIDs []ids.ID, ) (OutboundMessage, error) { - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) - return b.c.Pack( - Chits, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - ContainerIDs: containerIDBytes, + preferredContainerIDBytes := make([][]byte, len(preferredContainerIDs)) + encodeIDs(preferredContainerIDs, preferredContainerIDBytes) + acceptedContainerIDBytes := make([][]byte, len(acceptedContainerIDs)) + encodeIDs(acceptedContainerIDs, acceptedContainerIDBytes) + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_Chits{ + Chits: &p2p.Chits{ + ChainId: chainID[:], + RequestId: requestID, + PreferredContainerIds: preferredContainerIDBytes, + AcceptedContainerIds: acceptedContainerIDBytes, + }, + }, }, - b.compress && Chits.Compressible(), + compression.TypeNone, false, ) } -// Application level request -func (b *outMsgBuilderWithPacker) AppRequest( +func (b *outMsgBuilder) AppRequest( chainID ids.ID, requestID uint32, deadline time.Duration, msg []byte, ) (OutboundMessage, error) { - return b.c.Pack( - AppRequest, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - Deadline: uint64(deadline), - AppBytes: msg, + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_AppRequest{ + AppRequest: &p2p.AppRequest{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: uint64(deadline), + AppBytes: msg, + }, + }, }, - b.compress && AppRequest.Compressible(), + b.compressionType, false, ) } -// 
Application level response -func (b *outMsgBuilderWithPacker) AppResponse(chainID ids.ID, requestID uint32, msg []byte) (OutboundMessage, error) { - return b.c.Pack( - AppResponse, - map[Field]interface{}{ - ChainID: chainID[:], - RequestID: requestID, - AppBytes: msg, +func (b *outMsgBuilder) AppResponse(chainID ids.ID, requestID uint32, msg []byte) (OutboundMessage, error) { + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_AppResponse{ + AppResponse: &p2p.AppResponse{ + ChainId: chainID[:], + RequestId: requestID, + AppBytes: msg, + }, + }, }, - b.compress && AppResponse.Compressible(), + b.compressionType, false, ) } -// Application level gossiped message -func (b *outMsgBuilderWithPacker) AppGossip(chainID ids.ID, msg []byte) (OutboundMessage, error) { - return b.c.Pack( - AppGossip, - map[Field]interface{}{ - ChainID: chainID[:], - AppBytes: msg, +func (b *outMsgBuilder) AppGossip(chainID ids.ID, msg []byte) (OutboundMessage, error) { + return b.builder.createOutbound( + &p2p.Message{ + Message: &p2p.Message_AppGossip{ + AppGossip: &p2p.AppGossip{ + ChainId: chainID[:], + AppBytes: msg, + }, + }, }, - b.compress && AppGossip.Compressible(), + b.compressionType, false, ) } diff --git a/avalanchego/message/outbound_msg_builder_proto.go b/avalanchego/message/outbound_msg_builder_proto.go deleted file mode 100644 index a67c587e..00000000 --- a/avalanchego/message/outbound_msg_builder_proto.go +++ /dev/null @@ -1,517 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -import ( - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/ips" - - p2ppb "github.com/ava-labs/avalanchego/proto/pb/p2p" -) - -var _ OutboundMsgBuilder = &outMsgBuilderWithProto{} - -type outMsgBuilderWithProto struct { - compress bool // set to "true" if compression is enabled - - protoBuilder *msgBuilderProtobuf -} - -// Use "message.NewCreatorWithProto" to import this function -// since we do not expose "msgBuilderProtobuf" yet -func newOutboundBuilderWithProto(enableCompression bool, protoBuilder *msgBuilderProtobuf) OutboundMsgBuilder { - return &outMsgBuilderWithProto{ - compress: enableCompression, - protoBuilder: protoBuilder, - } -} - -func (b *outMsgBuilderWithProto) Version( - networkID uint32, - myTime uint64, - ip ips.IPPort, - myVersion string, - myVersionTime uint64, - sig []byte, - trackedSubnets []ids.ID, -) (OutboundMessage, error) { - subnetIDBytes := make([][]byte, len(trackedSubnets)) - for i, containerID := range trackedSubnets { - copy := containerID - subnetIDBytes[i] = copy[:] - } - return b.protoBuilder.createOutbound( - Version, - &p2ppb.Message{ - Message: &p2ppb.Message_Version{ - Version: &p2ppb.Version{ - NetworkId: networkID, - MyTime: myTime, - IpAddr: []byte(ip.IP.To16()), // ref. "wrappers.TryPackIP" - IpPort: uint32(ip.Port), - MyVersion: myVersion, - MyVersionTime: myVersionTime, - Sig: sig, - TrackedSubnets: subnetIDBytes, - }, - }, - }, - b.compress && Version.Compressible(), - true, - ) -} - -func (b *outMsgBuilderWithProto) PeerList(peers []ips.ClaimedIPPort, bypassThrottling bool) (OutboundMessage, error) { - claimIPPorts := make([]*p2ppb.ClaimedIpPort, len(peers)) - for i, p := range peers { - // ref. 
"wrappers.TryPackClaimedIPPortList", "PackX509Certificate" - claimIPPorts[i] = &p2ppb.ClaimedIpPort{ - // the inbound message parser will call "x509.ParseCertificate(p.X509Certificate)" - // to decode this message - X509Certificate: p.Cert.Raw, - IpAddr: []byte(p.IPPort.IP.To16()), - IpPort: uint32(p.IPPort.Port), - Timestamp: p.Timestamp, - Signature: p.Signature, - } - } - return b.protoBuilder.createOutbound( - PeerList, - &p2ppb.Message{ - Message: &p2ppb.Message_PeerList{ - PeerList: &p2ppb.PeerList{ - ClaimedIpPorts: claimIPPorts, - }, - }, - }, - b.compress && PeerList.Compressible(), - bypassThrottling, - ) -} - -func (b *outMsgBuilderWithProto) Ping() (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - Ping, - &p2ppb.Message{ - Message: &p2ppb.Message_Ping{ - Ping: &p2ppb.Ping{}, - }, - }, - b.compress && Ping.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) Pong(uptimePercentage uint8) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - Pong, - &p2ppb.Message{ - Message: &p2ppb.Message_Pong{ - Pong: &p2ppb.Pong{ - UptimePct: uint32(uptimePercentage), - }, - }, - }, - b.compress && Pong.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) GetStateSummaryFrontier( - chainID ids.ID, - requestID uint32, - deadline time.Duration, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - GetStateSummaryFrontier, - &p2ppb.Message{ - Message: &p2ppb.Message_GetStateSummaryFrontier{ - GetStateSummaryFrontier: &p2ppb.GetStateSummaryFrontier{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - }, - }, - }, - b.compress && GetStateSummaryFrontier.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) StateSummaryFrontier( - chainID ids.ID, - requestID uint32, - summary []byte, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - StateSummaryFrontier, - &p2ppb.Message{ - Message: &p2ppb.Message_StateSummaryFrontier_{ - 
StateSummaryFrontier_: &p2ppb.StateSummaryFrontier{ - ChainId: chainID[:], - RequestId: requestID, - Summary: summary, - }, - }, - }, - b.compress && StateSummaryFrontier.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) GetAcceptedStateSummary( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - heights []uint64, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - GetAcceptedStateSummary, - &p2ppb.Message{ - Message: &p2ppb.Message_GetAcceptedStateSummary{ - GetAcceptedStateSummary: &p2ppb.GetAcceptedStateSummary{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - Heights: heights, - }, - }, - }, - b.compress && GetAcceptedStateSummary.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) AcceptedStateSummary( - chainID ids.ID, - requestID uint32, - summaryIDs []ids.ID, -) (OutboundMessage, error) { - summaryIDBytes := make([][]byte, len(summaryIDs)) - encodeIDs(summaryIDs, summaryIDBytes) - return b.protoBuilder.createOutbound( - AcceptedStateSummary, - &p2ppb.Message{ - Message: &p2ppb.Message_AcceptedStateSummary_{ - AcceptedStateSummary_: &p2ppb.AcceptedStateSummary{ - ChainId: chainID[:], - RequestId: requestID, - SummaryIds: summaryIDBytes, - }, - }, - }, - b.compress && AcceptedStateSummary.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) GetAcceptedFrontier( - chainID ids.ID, - requestID uint32, - deadline time.Duration, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - GetAcceptedFrontier, - &p2ppb.Message{ - Message: &p2ppb.Message_GetAcceptedFrontier{ - GetAcceptedFrontier: &p2ppb.GetAcceptedFrontier{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - }, - }, - }, - b.compress && GetAcceptedFrontier.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) AcceptedFrontier( - chainID ids.ID, - requestID uint32, - containerIDs []ids.ID, -) (OutboundMessage, error) { - containerIDBytes 
:= make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) - return b.protoBuilder.createOutbound( - AcceptedFrontier, - &p2ppb.Message{ - Message: &p2ppb.Message_AcceptedFrontier_{ - AcceptedFrontier_: &p2ppb.AcceptedFrontier{ - ChainId: chainID[:], - RequestId: requestID, - ContainerIds: containerIDBytes, - }, - }, - }, - b.compress && AcceptedFrontier.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) GetAccepted( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerIDs []ids.ID, -) (OutboundMessage, error) { - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) - return b.protoBuilder.createOutbound( - GetAccepted, - &p2ppb.Message{ - Message: &p2ppb.Message_GetAccepted{ - GetAccepted: &p2ppb.GetAccepted{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - ContainerIds: containerIDBytes, - }, - }, - }, - b.compress && GetAccepted.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) Accepted( - chainID ids.ID, - requestID uint32, - containerIDs []ids.ID, -) (OutboundMessage, error) { - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) - return b.protoBuilder.createOutbound( - Accepted, - &p2ppb.Message{ - Message: &p2ppb.Message_Accepted_{ - Accepted_: &p2ppb.Accepted{ - ChainId: chainID[:], - RequestId: requestID, - ContainerIds: containerIDBytes, - }, - }, - }, - b.compress && Accepted.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) GetAncestors( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerID ids.ID, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - GetAncestors, - &p2ppb.Message{ - Message: &p2ppb.Message_GetAncestors{ - GetAncestors: &p2ppb.GetAncestors{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - ContainerId: containerID[:], - }, - }, - }, - b.compress && 
GetAncestors.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) Ancestors( - chainID ids.ID, - requestID uint32, - containers [][]byte, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - Ancestors, - &p2ppb.Message{ - Message: &p2ppb.Message_Ancestors_{ - Ancestors_: &p2ppb.Ancestors{ - ChainId: chainID[:], - RequestId: requestID, - Containers: containers, - }, - }, - }, - b.compress && Ancestors.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) Get( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerID ids.ID, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - Get, - &p2ppb.Message{ - Message: &p2ppb.Message_Get{ - Get: &p2ppb.Get{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - ContainerId: containerID[:], - }, - }, - }, - b.compress && Get.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) Put( - chainID ids.ID, - requestID uint32, - container []byte, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - Put, - &p2ppb.Message{ - Message: &p2ppb.Message_Put{ - Put: &p2ppb.Put{ - ChainId: chainID[:], - RequestId: requestID, - Container: container, - }, - }, - }, - b.compress && Put.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) PushQuery( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - container []byte, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - PushQuery, - &p2ppb.Message{ - Message: &p2ppb.Message_PushQuery{ - PushQuery: &p2ppb.PushQuery{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - Container: container, - }, - }, - }, - b.compress && PushQuery.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) PullQuery( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - containerID ids.ID, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - PullQuery, - 
&p2ppb.Message{ - Message: &p2ppb.Message_PullQuery{ - PullQuery: &p2ppb.PullQuery{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - ContainerId: containerID[:], - }, - }, - }, - b.compress && PullQuery.Compressible(), - false, - ) -} - -func (b *outMsgBuilderWithProto) Chits( - chainID ids.ID, - requestID uint32, - containerIDs []ids.ID, -) (OutboundMessage, error) { - containerIDBytes := make([][]byte, len(containerIDs)) - encodeIDs(containerIDs, containerIDBytes) - return b.protoBuilder.createOutbound( - Chits, - &p2ppb.Message{ - Message: &p2ppb.Message_Chits{ - Chits: &p2ppb.Chits{ - ChainId: chainID[:], - RequestId: requestID, - ContainerIds: containerIDBytes, - }, - }, - }, - b.compress && Chits.Compressible(), - false, - ) -} - -// Application level request -func (b *outMsgBuilderWithProto) AppRequest( - chainID ids.ID, - requestID uint32, - deadline time.Duration, - msg []byte, -) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - AppRequest, - &p2ppb.Message{ - Message: &p2ppb.Message_AppRequest{ - AppRequest: &p2ppb.AppRequest{ - ChainId: chainID[:], - RequestId: requestID, - Deadline: uint64(deadline), - AppBytes: msg, - }, - }, - }, - b.compress && AppRequest.Compressible(), - false, - ) -} - -// Application level response -func (b *outMsgBuilderWithProto) AppResponse(chainID ids.ID, requestID uint32, msg []byte) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - AppResponse, - &p2ppb.Message{ - Message: &p2ppb.Message_AppResponse{ - AppResponse: &p2ppb.AppResponse{ - ChainId: chainID[:], - RequestId: requestID, - AppBytes: msg, - }, - }, - }, - b.compress && AppResponse.Compressible(), - false, - ) -} - -// Application level gossiped message -func (b *outMsgBuilderWithProto) AppGossip(chainID ids.ID, msg []byte) (OutboundMessage, error) { - return b.protoBuilder.createOutbound( - AppGossip, - &p2ppb.Message{ - Message: &p2ppb.Message_AppGossip{ - AppGossip: &p2ppb.AppGossip{ - ChainId: 
chainID[:], - AppBytes: msg, - }, - }, - }, - b.compress && AppGossip.Compressible(), - false, - ) -} diff --git a/avalanchego/message/outbound_msg_builder_proto_test.go b/avalanchego/message/outbound_msg_builder_proto_test.go deleted file mode 100644 index 9e1af16a..00000000 --- a/avalanchego/message/outbound_msg_builder_proto_test.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package message - -import ( - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" -) - -func Test_newOutboundBuilderWithProto(t *testing.T) { - t.Parallel() - require := require.New(t) - - mb, err := newMsgBuilderProtobuf("test", prometheus.NewRegistry(), int64(constants.DefaultMaxMessageSize), 5*time.Second) - require.NoError(err) - - builder := newOutboundBuilderWithProto(true /*compress*/, mb) - - outMsg, err := builder.GetAcceptedStateSummary(ids.GenerateTestID(), uint32(12345), time.Hour, []uint64{1000, 2000}) - require.NoError(err) - - t.Logf("outbound message built %q with size %d", outMsg.Op().String(), len(outMsg.Bytes())) -} diff --git a/avalanchego/message/outbound_msg_builder_test.go b/avalanchego/message/outbound_msg_builder_test.go new file mode 100644 index 00000000..50f273bf --- /dev/null +++ b/avalanchego/message/outbound_msg_builder_test.go @@ -0,0 +1,48 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package message + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/compression" + "github.com/ava-labs/avalanchego/utils/logging" +) + +func Test_newOutboundBuilder(t *testing.T) { + t.Parallel() + + mb, err := newMsgBuilder( + logging.NoLog{}, + "test", + prometheus.NewRegistry(), + 10*time.Second, + ) + require.NoError(t, err) + + for _, compressionType := range []compression.Type{ + compression.TypeNone, + compression.TypeGzip, + compression.TypeZstd, + } { + t.Run(compressionType.String(), func(t *testing.T) { + builder := newOutboundBuilder(compressionType, mb) + + outMsg, err := builder.GetAcceptedStateSummary( + ids.GenerateTestID(), + 12345, + time.Hour, + []uint64{1000, 2000}, + ) + require.NoError(t, err) + t.Logf("outbound message with compression type %s built message with size %d", compressionType, len(outMsg.Bytes())) + }) + } +} diff --git a/avalanchego/message/test_message.go b/avalanchego/message/test_message.go deleted file mode 100644 index 92d9dec1..00000000 --- a/avalanchego/message/test_message.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package message - -type TestMsg struct { - op Op - bytes []byte - bypassThrottling bool -} - -func NewTestMsg(op Op, bytes []byte, bypassThrottling bool) *TestMsg { - return &TestMsg{ - op: op, - bytes: bytes, - bypassThrottling: bypassThrottling, - } -} - -func (m *TestMsg) Op() Op { return m.op } -func (*TestMsg) Get(Field) interface{} { return nil } -func (m *TestMsg) Bytes() []byte { return m.bytes } -func (*TestMsg) BytesSavedCompression() int { return 0 } -func (*TestMsg) AddRef() {} -func (*TestMsg) DecRef() {} -func (*TestMsg) IsProto() bool { return false } -func (m *TestMsg) BypassThrottling() bool { return m.bypassThrottling } diff --git a/avalanchego/nat/nat.go b/avalanchego/nat/nat.go index 9520921f..33749ca0 100644 --- a/avalanchego/nat/nat.go +++ b/avalanchego/nat/nat.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nat @@ -25,9 +25,9 @@ type Router interface { // True iff this router supports NAT SupportsNAT() bool // Map external port [extPort] to internal port [intPort] for [duration] - MapPort(protocol string, intPort, extPort uint16, desc string, duration time.Duration) error + MapPort(intPort, extPort uint16, desc string, duration time.Duration) error // Undo a port mapping - UnmapPort(protocol string, intPort, extPort uint16) error + UnmapPort(intPort, extPort uint16) error // Return our external IP ExternalIP() (net.IP, error) } @@ -63,13 +63,13 @@ func NewPortMapper(log logging.Logger, r Router) Mapper { // Map external port [extPort] (exposed to the internet) to internal port [intPort] (where our process is listening) // and set [ip]. Does this every [updateTime]. [ip] may be nil. 
-func (m *Mapper) Map(protocol string, intPort, extPort uint16, desc string, ip ips.DynamicIPPort, updateTime time.Duration) { +func (m *Mapper) Map(intPort, extPort uint16, desc string, ip ips.DynamicIPPort, updateTime time.Duration) { if !m.r.SupportsNAT() { return } // we attempt a port map, and log an Error if it fails. - err := m.retryMapPort(protocol, intPort, extPort, desc, mapTimeout) + err := m.retryMapPort(intPort, extPort, desc, mapTimeout) if err != nil { m.log.Error("NAT traversal failed", zap.Uint16("externalPort", extPort), @@ -83,14 +83,15 @@ func (m *Mapper) Map(protocol string, intPort, extPort uint16, desc string, ip i ) } - go m.keepPortMapping(protocol, intPort, extPort, desc, ip, updateTime) + m.wg.Add(1) + go m.keepPortMapping(intPort, extPort, desc, ip, updateTime) } // Retry port map up to maxRefreshRetries with a 1 second delay -func (m *Mapper) retryMapPort(protocol string, intPort, extPort uint16, desc string, timeout time.Duration) error { +func (m *Mapper) retryMapPort(intPort, extPort uint16, desc string, timeout time.Duration) error { var err error for retryCnt := 0; retryCnt < maxRefreshRetries; retryCnt++ { - err = m.r.MapPort(protocol, intPort, extPort, desc, timeout) + err = m.r.MapPort(intPort, extPort, desc, timeout) if err == nil { return nil } @@ -109,20 +110,17 @@ func (m *Mapper) retryMapPort(protocol string, intPort, extPort uint16, desc str // keepPortMapping runs in the background to keep a port mapped. It renews the mapping from [extPort] // to [intPort]] every [updateTime]. Updates [ip] every [updateTime]. 
-func (m *Mapper) keepPortMapping(protocol string, intPort, extPort uint16, desc string, ip ips.DynamicIPPort, updateTime time.Duration) { +func (m *Mapper) keepPortMapping(intPort, extPort uint16, desc string, ip ips.DynamicIPPort, updateTime time.Duration) { updateTimer := time.NewTimer(updateTime) - m.wg.Add(1) - defer func(extPort uint16) { updateTimer.Stop() m.log.Debug("unmapping port", - zap.String("protocol", protocol), zap.Uint16("externalPort", extPort), ) - if err := m.r.UnmapPort(protocol, intPort, extPort); err != nil { + if err := m.r.UnmapPort(intPort, extPort); err != nil { m.log.Debug("error unmapping port", zap.Uint16("externalPort", extPort), zap.Uint16("internalPort", intPort), @@ -136,7 +134,7 @@ func (m *Mapper) keepPortMapping(protocol string, intPort, extPort uint16, desc for { select { case <-updateTimer.C: - err := m.retryMapPort(protocol, intPort, extPort, desc, mapTimeout) + err := m.retryMapPort(intPort, extPort, desc, mapTimeout) if err != nil { m.log.Warn("renew NAT traversal failed", zap.Uint16("externalPort", extPort), diff --git a/avalanchego/nat/no_router.go b/avalanchego/nat/no_router.go index 32a089dd..5c894c8c 100644 --- a/avalanchego/nat/no_router.go +++ b/avalanchego/nat/no_router.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nat @@ -10,9 +10,10 @@ import ( ) var ( - errNoRouterCantMapPorts = errors.New("can't map ports without a known router") - errFetchingIP = errors.New("getting outbound IP failed") - _ Router = &noRouter{} + _ Router = (*noRouter)(nil) + + errNoRouterCantMapPorts = errors.New("can't map ports without a known router") + errFetchingIP = errors.New("getting outbound IP failed") ) const googleDNSServer = "8.8.8.8:80" @@ -26,11 +27,11 @@ func (noRouter) SupportsNAT() bool { return false } -func (noRouter) MapPort(_ string, intPort, extPort uint16, _ string, _ time.Duration) error { +func (noRouter) MapPort(uint16, uint16, string, time.Duration) error { return errNoRouterCantMapPorts } -func (noRouter) UnmapPort(string, uint16, uint16) error { +func (noRouter) UnmapPort(uint16, uint16) error { return nil } diff --git a/avalanchego/nat/pmp.go b/avalanchego/nat/pmp.go index 57df17b7..ad2032ec 100644 --- a/avalanchego/nat/pmp.go +++ b/avalanchego/nat/pmp.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nat @@ -14,11 +14,19 @@ import ( natpmp "github.com/jackpal/go-nat-pmp" ) +const ( + // pmpProtocol is intentionally lowercase and should not be confused with + // upnpProtocol. 
+ // See: + // - https://github.com/jackpal/go-nat-pmp/blob/v1.0.2/natpmp.go#L82 + pmpProtocol = "tcp" + pmpClientTimeout = 500 * time.Millisecond +) + var ( - errInvalidLifetime = errors.New("invalid mapping duration range") + _ Router = (*pmpRouter)(nil) - pmpClientTimeout = 500 * time.Millisecond - _ Router = &pmpRouter{} + errInvalidLifetime = errors.New("invalid mapping duration range") ) // pmpRouter adapts the NAT-PMP protocol implementation so it conforms to the @@ -27,18 +35,16 @@ type pmpRouter struct { client *natpmp.Client } -func (r *pmpRouter) SupportsNAT() bool { +func (*pmpRouter) SupportsNAT() bool { return true } func (r *pmpRouter) MapPort( - networkProtocol string, newInternalPort uint16, newExternalPort uint16, - mappingName string, + _ string, mappingDuration time.Duration, ) error { - protocol := networkProtocol internalPort := int(newInternalPort) externalPort := int(newExternalPort) @@ -49,19 +55,14 @@ func (r *pmpRouter) MapPort( return errInvalidLifetime } - _, err := r.client.AddPortMapping(protocol, internalPort, externalPort, int(lifetime)) + _, err := r.client.AddPortMapping(pmpProtocol, internalPort, externalPort, int(lifetime)) return err } -func (r *pmpRouter) UnmapPort( - networkProtocol string, - internalPort uint16, - _ uint16, -) error { - protocol := networkProtocol +func (r *pmpRouter) UnmapPort(internalPort uint16, _ uint16) error { internalPortInt := int(internalPort) - _, err := r.client.AddPortMapping(protocol, internalPortInt, 0, 0) + _, err := r.client.AddPortMapping(pmpProtocol, internalPortInt, 0, 0) return err } @@ -79,7 +80,9 @@ func getPMPRouter() *pmpRouter { return nil } - pmp := &pmpRouter{natpmp.NewClientWithTimeout(gatewayIP, pmpClientTimeout)} + pmp := &pmpRouter{ + client: natpmp.NewClientWithTimeout(gatewayIP, pmpClientTimeout), + } if _, err := pmp.ExternalIP(); err != nil { return nil } diff --git a/avalanchego/nat/upnp.go b/avalanchego/nat/upnp.go index 93fcf034..2571048e 100644 --- 
a/avalanchego/nat/upnp.go +++ b/avalanchego/nat/upnp.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nat @@ -15,10 +15,17 @@ import ( ) const ( + // upnpProtocol is intentionally uppercase and should not be confused with + // pmpProtocol. + // See: + // - https://github.com/huin/goupnp/blob/v1.0.3/dcps/internetgateway1/internetgateway1.go#L2361 + // - https://github.com/huin/goupnp/blob/v1.0.3/dcps/internetgateway1/internetgateway1.go#L3618 + // - https://github.com/huin/goupnp/blob/v1.0.3/dcps/internetgateway2/internetgateway2.go#L3919 + upnpProtocol = "TCP" soapRequestTimeout = 10 * time.Second ) -var _ Router = &upnpRouter{} +var _ Router = (*upnpRouter)(nil) // upnpClient is the interface used by goupnp for their client implementations type upnpClient interface { @@ -66,7 +73,7 @@ type upnpRouter struct { client upnpClient } -func (r *upnpRouter) SupportsNAT() bool { +func (*upnpRouter) SupportsNAT() bool { return true } @@ -118,7 +125,6 @@ func (r *upnpRouter) ExternalIP() (net.IP, error) { } func (r *upnpRouter) MapPort( - protocol string, intPort, extPort uint16, desc string, @@ -126,19 +132,19 @@ func (r *upnpRouter) MapPort( ) error { ip, err := r.localIP() if err != nil { - return nil + return err } lifetime := duration.Seconds() if lifetime < 0 || lifetime > math.MaxUint32 { return errInvalidLifetime } - return r.client.AddPortMapping("", extPort, protocol, intPort, + return r.client.AddPortMapping("", extPort, upnpProtocol, intPort, ip.String(), true, desc, uint32(lifetime)) } -func (r *upnpRouter) UnmapPort(protocol string, _, extPort uint16) error { - return r.client.DeletePortMapping("", extPort, protocol) +func (r *upnpRouter) UnmapPort(_, extPort uint16) error { + return r.client.DeletePortMapping("", extPort, upnpProtocol) } // create UPnP SOAP service client with URN @@ -184,7 +190,14 @@ func 
discover(target string) *upnpRouter { if _, nat, err := client.GetNATRSIPStatus(); err != nil || !nat { return } - r = &upnpRouter{dev.Root, client} + newRouter := &upnpRouter{ + dev: dev.Root, + client: client, + } + if _, err := newRouter.localIP(); err != nil { + return + } + r = newRouter }) router <- r }(&devs[i]) diff --git a/avalanchego/network/README.md b/avalanchego/network/README.md new file mode 100644 index 00000000..5364d9db --- /dev/null +++ b/avalanchego/network/README.md @@ -0,0 +1,162 @@ +# Avalanche Networking + +## Table of Contents + +- [Overview](#overview) +- [Peers](#peers) + - [Lifecycle](#lifecycle) + - [Bootstrapping](#bootstrapping) + - [Connecting](#connecting) + - [Peer Handshake](#peer-handshake) + - [Connected](#connected) + - [PeerList Gossip](#peerlist-gossip) + - [Messages](#messages) + - [Gossip](#gossip) + +## Overview + +Avalanche is a decentralized [p2p](https://en.wikipedia.org/wiki/Peer-to-peer) (peer-to-peer) network of nodes that work together to run the Avalanche blockchain protocol. + +The `network` package implements the networking layer of the protocol which allows a node to discover, connect to, and communicate with other peers. + +## Peers + +Peers are defined as members of the network that communicate with one another to participate in the Avalanche protocol. + +Peers communicate by enqueuing messages between one another. Each peer on either side of the connection asynchronously reads and writes messages to and from the remote peer. Messages include both application-level messages used to support the Avalanche protocol, as well as networking-level messages used to implement the peer-to-peer communication layer. 
+ +```mermaid +sequenceDiagram + loop + Peer-1->>Peer-2: Write outbound messages + Peer-2->>Peer-1: Read incoming messages + end + loop + Peer-2->>Peer-1: Write outbound messages + Peer-1->>Peer-2: Read incoming messages + end +``` + +### Lifecycle + +#### Bootstrapping + +When starting an Avalanche node, a node needs to be able to initiate some process that eventually allows itself to become a participating member of the network. In traditional web2 systems, it's common to use a web service by hitting the service's DNS and being routed to an available server behind a load balancer. In decentralized p2p systems however, connecting to a node is more complex as no single entity owns the network. [Avalanche consensus](https://docs.avax.network/overview/getting-started/avalanche-consensus) requires a node to repeatedly sample peers in the network, so each node needs some way of discovering and connecting to every other peer to participate in the protocol. + +In Avalanche, nodes connect to an initial set of bootstrapper nodes known as **beacons** (this is user-configurable). Once connected to a set of beacons, a node is able to discover other nodes in the network. Over time, a node eventually discovers other peers in the network through `PeerList` messages it receives through: + +- The handshake initiated between two peers when attempting to connect to a peer (see [Connecting](#connecting)). +- Periodic `PeerList` gossip messages that every peer sends to the peers it's connected to (see [Connected](#connected)). + +#### Connecting + +##### Peer Handshake + +Upon connection to any peer, a handshake is performed between the node attempting to establish the outbound connection to the peer and the peer receiving the inbound connection. + +When attempting to establish the connection, the first message that the node attempting to connect to the peer in the network is a `Version` message describing compatibility of the candidate node with the peer. 
As an example, nodes that are attempting to connect with an incompatible version of AvalancheGo or a significantly skewed local clock are rejected by the peer. + +```mermaid +sequenceDiagram +Note over Node,Peer: Initiate Handshake +Note left of Node: I want to connect to you! +Note over Node,Peer: Version message +Node->>Peer: AvalancheGo v1.0.0 +Note right of Peer: My version v1.9.4 is incompatible with your version v1.0.0. +Peer-xNode: Connection dropped +Note over Node,Peer: Handshake Failed +``` + +If the `Version` message is successfully received and the peer decides that it wants a connection with this node, it replies with a `PeerList` message that contains metadata about other peers that allows a node to connect to them. Upon reception of a `PeerList` message, a node will attempt to connect to any peers that the node is not already connected to to allow the node to discover more peers in the network. + +```mermaid +sequenceDiagram +Note over Node,Peer: Initiate Handshake +Note left of Node: I want to connect to you! +Note over Node,Peer: Version message +Node->>Peer: AvalancheGo v1.9.4 +Note right of Peer: LGTM! +Note over Node,Peer: PeerList message +Peer->>Node: Peer-X, Peer-Y, Peer-Z +Note over Node,Peer: Handshake Complete +Node->>Peer: ACK Peer-X, Peer-Y, Peer-Z +``` + +Once the node attempting to join the network receives this `PeerList` message, the handshake is complete and the node is now connected to the peer. The node attempts to connect to the new peers discovered in the `PeerList` message. Each connection results in another peer handshake, which results in the node incrementally discovering more and more peers in the network as more and more `PeerList` messages are exchanged. + +#### Connected + +Some peers aren't discovered through the `PeerList` messages exchanged through peer handshakes. This can happen if a peer is either not randomly sampled, or if a new peer joins the network after the node has already connected to the network. 
+ +```mermaid +sequenceDiagram +Node ->> Peer-1: Version - v1.9.5 +Peer-1 ->> Node: PeerList - Peer-2 +Node ->> Peer-1: ACK - Peer-2 +Note left of Node: Node is connected to Peer-1 and now tries to connect to Peer-2. +Node ->> Peer-2: Version - v1.9.5 +Peer-2 ->> Node: PeerList - Peer-1 +Node ->> Peer-2: ACK - Peer-1 +Note left of Node: Peer-3 was never sampled, so we haven't connected yet! +Node --> Peer-3: No connection +``` + +To guarantee that a node can discover all peers, each node periodically gossips a sample of the peers it knows about to other peers. + +##### PeerList Gossip + +###### Messages + +A `PeerList` is the message that is used to communicate the presence of peers in the network. Each `PeerList` message contains networking-level metadata about the peer that provides the necessary information to connect to it, alongside the corresponding transaction id that added that peer to the validator set. Transaction ids are unique hashes that only add a single validator, so it is guaranteed that there is a 1:1 mapping between a validator and its associated transaction id. + +`PeerListAck` messages are sent in response to `PeerList` messages to allow a peer to confirm which peers it will actually attempt to connect to. Because nodes only gossip peers they believe another peer doesn't already know about to optimize bandwidth, `PeerListAck` messages are important to confirm that a peer will attempt to connect to someone. Without this, a node might gossip a peer to another peer and assume a connection between the two is being established, and not re-gossip the peer in future gossip cycles. If the connection was never actually wanted by the peer being gossiped to due to a transient reason, that peer would never be able to re-discover the gossiped peer and could be isolated from a subset of the network. 
+ +Once a `PeerListAck` message is received from a peer, the node that sent the original `PeerList` message marks the corresponding acknowledged validators as already having been transmitted to the peer, so that it's excluded from subsequent iterations of `PeerList` gossip. + +###### Gossip + +Handshake messages provide a node with some knowledge of peers in the network, but offers no guarantee that learning about a subset of peers from each peer the node connects with will result in the node learning about every peer in the network. + +In order to provide a probabilistic guarantee that all peers in the network will eventually learn of one another, each node periodically gossips a sample of the peers that they're aware of to a sample of the peers that they're connected to. Over time, this probabilistically guarantees that every peer will eventually learn of every other peer. + +To optimize bandwidth usage, each node tracks which peers are guaranteed to know of which peers. A node learns this information by tracking both inbound and outbound `PeerList` gossip. + +- Inbound + - If a node ever receives `PeerList` from a peer, that peer _must_ have known about the peers in that `PeerList` message in order to have gossiped them. +- Outbound + - If a node sends a `PeerList` to a peer and the peer replies with an `PeerListAck` message, then all peers in the `PeerListAck` must be known by the peer. + +To efficiently track which peers know of which peers, the peers that each peer is aware of is represented in a [bit set](https://en.wikipedia.org/wiki/Bit_array). A peer is represented by either a `0` if it isn't known by the peer yet, or a `1` if it is known by the peer. + +An node follows the following steps for every cycle of `PeerList` gossip: + +1. Get a sample of peers in the network that the node is connected to +2. For each peer: + 1. Figure out which peers the node hasn't gossiped to them yet. + 2. Take a random sample of these unknown peers. + 3. 
Send a message describing these peers to the peer. + +```mermaid +sequenceDiagram +Note left of Node: Initialize gossip bit set for Peer-123 +Note left of Node: Peer-123: [0, 0, 0] +Node->>Peer-123: PeerList - Peer-1 +Peer-123->>Node: PeerListAck - Peer-1 +Note left of Node: Peer-123: [1, 0, 0] +Node->>Peer-123: PeerList - Peer-3 +Peer-123->>Node: PeerListAck - Peer-3 +Note left of Node: Peer-123: [1, 0, 1] +Node->>Peer-123: PeerList - Peer-2 +Peer-123->>Node: PeerListAck - Peer-2 +Note left of Node: Peer-123: [1, 1, 1] +Note left of Node: No more gossip left to send to Peer-123! +``` + +Because network state is generally expected to be stable (i.e nodes are not continuously flickering online/offline), as more and more gossip messages are exchanged nodes eventually realize that the peers that they are connected to have learned about every other peer. + +A node eventually stops gossiping peers when there's no more new peers to gossip about. `PeerList` gossip only resumes once: + +1. a new peer joins +2. a peer disconnects and reconnects +3. a new validator joins the network +4. a validator's IP is updated diff --git a/avalanchego/network/certs_test.go b/avalanchego/network/certs_test.go index fd556021..8405107d 100644 --- a/avalanchego/network/certs_test.go +++ b/avalanchego/network/certs_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/avalanchego/network/config.go b/avalanchego/network/config.go index c963ad5f..11191b83 100644 --- a/avalanchego/network/config.go +++ b/avalanchego/network/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package network @@ -10,15 +10,21 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/dialer" + "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/set" ) // HealthConfig describes parameters for network layer health checks. type HealthConfig struct { + // Marks if the health check should be enabled + Enabled bool `json:"-"` + // MinConnectedPeers is the minimum number of peers that the network should // be connected to to be considered healthy. MinConnectedPeers uint `json:"minConnectedPeers"` @@ -104,6 +110,9 @@ type Config struct { DelayConfig `json:"delayConfig"` ThrottlerConfig ThrottlerConfig `json:"throttlerConfig"` + ProxyEnabled bool `json:"proxyEnabled"` + ProxyReadHeaderTimeout time.Duration `json:"proxyReadHeaderTimeout"` + DialerConfig dialer.Config `json:"dialerConfig"` TLSConfig *tls.Config `json:"-"` @@ -117,19 +126,19 @@ type Config struct { PingFrequency time.Duration `json:"pingFrequency"` AllowPrivateIPs bool `json:"allowPrivateIPs"` - // CompressionEnabled will compress available outbound messages when set to - // true. - CompressionEnabled bool `json:"compressionEnabled"` + // The compression type to use when compressing outbound messages. + // Assumes all peers support this compression type. + CompressionType compression.Type `json:"compressionType"` // TLSKey is this node's TLS key that is used to sign IPs. TLSKey crypto.Signer `json:"-"` - // WhitelistedSubnets of the node. - WhitelistedSubnets ids.Set `json:"whitelistedSubnets"` - Beacons validators.Set `json:"beacons"` + // TrackedSubnets of the node. 
+ TrackedSubnets set.Set[ids.ID] `json:"-"` + Beacons validators.Set `json:"-"` // Validators are the current validators in the Avalanche network - Validators validators.Manager `json:"validators"` + Validators validators.Manager `json:"-"` UptimeCalculator uptime.Calculator `json:"-"` @@ -139,7 +148,7 @@ type Config struct { // UptimeRequirement is the fraction of time a validator must be online and // responsive for us to vote that they should receive a staking reward. - UptimeRequirement float64 `json:"uptimeRequirement"` + UptimeRequirement float64 `json:"-"` // RequireValidatorToConnect require that all connections must have at least // one validator between the 2 peers. This can be useful to enable if the @@ -170,4 +179,7 @@ type Config struct { // Specifies how much disk usage each peer can cause before // we rate-limit them. DiskTargeter tracker.Targeter `json:"-"` + + // Tracks which validators have been sent to which peers + GossipTracker peer.GossipTracker `json:"-"` } diff --git a/avalanchego/network/conn_test.go b/avalanchego/network/conn_test.go index ad97c759..8a48e5ac 100644 --- a/avalanchego/network/conn_test.go +++ b/avalanchego/network/conn_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -7,7 +7,7 @@ import ( "net" ) -var _ net.Conn = &testConn{} +var _ net.Conn = (*testConn)(nil) type testConn struct { net.Conn diff --git a/avalanchego/network/dialer/dialer.go b/avalanchego/network/dialer/dialer.go index 358d1852..22e8c3ba 100644 --- a/avalanchego/network/dialer/dialer.go +++ b/avalanchego/network/dialer/dialer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package dialer @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" ) -var _ Dialer = &dialer{} +var _ Dialer = (*dialer)(nil) // Dialer attempts to create a connection with the provided IP/port pair type Dialer interface { diff --git a/avalanchego/network/dialer/dialer_test.go b/avalanchego/network/dialer/dialer_test.go index f2debb61..8fd516cd 100644 --- a/avalanchego/network/dialer/dialer_test.go +++ b/avalanchego/network/dialer/dialer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dialer @@ -8,7 +8,6 @@ import ( "net" "strconv" "strings" - "sync" "testing" "time" @@ -21,25 +20,15 @@ import ( // Test that canceling a context passed into Dial results // in giving up trying to connect func TestDialerCancelDial(t *testing.T) { - done := make(chan struct{}, 1) + require := require.New(t) - var ( - l net.Listener - err error - setupWg sync.WaitGroup - connsMadeWg sync.WaitGroup - ) - setupWg.Add(1) - connsMadeWg.Add(5) + l, err := net.Listen("tcp", "127.0.0.1:") + require.NoError(err) + done := make(chan struct{}) go func() { - // Continuously accept connections from myself - l, err = net.Listen("tcp", "127.0.0.1:") - if err != nil { - t.Error(err) - } - setupWg.Done() for { + // Continuously accept connections from myself _, err := l.Accept() if err != nil { // Distinguish between an error that occurred because @@ -48,49 +37,40 @@ func TestDialerCancelDial(t *testing.T) { case <-done: return default: - t.Error(err) - return + require.FailNow(err.Error()) } } - connsMadeWg.Done() } }() - // Wait until [l] has been populated to avoid race condition - setupWg.Wait() - port, _ := strconv.Atoi(strings.Split(l.Addr().String(), ":")[1]) + port, err := strconv.Atoi(strings.Split(l.Addr().String(), ":")[1]) + require.NoError(err) myIP := ips.IPPort{ IP: net.ParseIP("127.0.0.1"), Port: 
uint16(port), } - // Create a dialer that should allow 10 outgoing connections per second - dialer := NewDialer("tcp", Config{ThrottleRps: 10, ConnectionTimeout: 30 * time.Second}, logging.NoLog{}) - // Make 5 outgoing connections. Should not be throttled. - for i := 0; i < 5; i++ { - startTime := time.Now() - _, err := dialer.Dial(context.Background(), myIP) - require.NoError(t, err) - // Connecting to myself shouldn't take more than 50 ms if outgoing - // connections aren't throttled - require.WithinDuration(t, startTime, time.Now(), 50*time.Millisecond) - } + // Create a dialer + dialer := NewDialer( + "tcp", + Config{ + ThrottleRps: 10, + ConnectionTimeout: 30 * time.Second, + }, + logging.NoLog{}, + ) - // Make another outgoing connection but immediately cancel the context - // (actually we cancel it before calling Dial but same difference) + // Make an outgoing connection with a cancelled context ctx, cancel := context.WithCancel(context.Background()) cancel() - sixthDialDone := make(chan struct{}, 1) - go func() { - _, err := dialer.Dial(ctx, myIP) - require.Error(t, err) - close(sixthDialDone) - }() + _, err = dialer.Dial(ctx, myIP) + require.Error(err) + + // Make an outgoing connection with a non-cancelled context + conn, err := dialer.Dial(context.Background(), myIP) + require.NoError(err) + _ = conn.Close() - // First 5 connections should have succeeded but not the 6th, cancelled one - connsMadeWg.Wait() - // Don't exit test before we assert that the sixth Dial attempt errors - <-sixthDialDone - done <- struct{}{} // mark that test is done + close(done) // stop listener goroutine _ = l.Close() } diff --git a/avalanchego/network/dialer_test.go b/avalanchego/network/dialer_test.go index f3ca1c00..9009985a 100644 --- a/avalanchego/network/dialer_test.go +++ b/avalanchego/network/dialer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package network @@ -15,7 +15,7 @@ import ( var ( errRefused = errors.New("connection refused") - _ dialer.Dialer = &testDialer{} + _ dialer.Dialer = (*testDialer)(nil) ) type testDialer struct { diff --git a/avalanchego/network/example_test.go b/avalanchego/network/example_test.go new file mode 100644 index 00000000..77bedc2e --- /dev/null +++ b/avalanchego/network/example_test.go @@ -0,0 +1,154 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "os" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/snow/networking/router" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +var _ router.ExternalHandler = (*testExternalHandler)(nil) + +// Note: all of the external handler's methods are called on peer goroutines. It +// is possible for multiple concurrent calls to happen with different NodeIDs. +// However, a given NodeID will only be performing one call at a time. +type testExternalHandler struct { + log logging.Logger +} + +// Note: HandleInbound will be called with raw P2P messages, the networking +// implementation does not implicitly register timeouts, so this handler is only +// called by messages explicitly sent by the peer. If timeouts are required, +// that must be handled by the user of this utility. 
+func (t *testExternalHandler) HandleInbound(_ context.Context, message message.InboundMessage) { + t.log.Info( + "receiving message", + zap.Stringer("op", message.Op()), + ) +} + +func (t *testExternalHandler) Connected(nodeID ids.NodeID, version *version.Application, subnetID ids.ID) { + t.log.Info( + "connected", + zap.Stringer("nodeID", nodeID), + zap.Stringer("version", version), + zap.Stringer("subnetID", subnetID), + ) +} + +func (t *testExternalHandler) Disconnected(nodeID ids.NodeID) { + t.log.Info( + "disconnected", + zap.Stringer("nodeID", nodeID), + ) +} + +type testAggressiveValidatorSet struct { + validators.Set +} + +func (*testAggressiveValidatorSet) Contains(ids.NodeID) bool { + return true +} + +func ExampleNewTestNetwork() { + log := logging.NewLogger( + "networking", + logging.NewWrappedCore( + logging.Info, + os.Stdout, + logging.Colors.ConsoleEncoder(), + ), + ) + + // Needs to be periodically updated by the caller to have the latest + // validator set + validators := &testAggressiveValidatorSet{ + Set: validators.NewSet(), + } + + // If we want to be able to communicate with non-primary network subnets, we + // should register them here. + trackedSubnets := set.Set[ids.ID]{} + + // Messages and connections are handled by the external handler. + handler := &testExternalHandler{ + log: log, + } + + network, err := NewTestNetwork( + log, + constants.CostwoID, + validators, + trackedSubnets, + handler, + ) + if err != nil { + log.Fatal( + "failed to create test network", + zap.Error(err), + ) + return + } + + // We need to initially connect to some nodes in the network before peer + // gossip will enable connecting to all the remaining nodes in the network. 
+ beaconIPs, beaconIDs := genesis.SampleBeacons(constants.CostwoID, 5) + for i, beaconIDStr := range beaconIDs { + beaconID, err := ids.NodeIDFromString(beaconIDStr) + if err != nil { + log.Fatal( + "failed to parse beaconID", + zap.String("beaconID", beaconIDStr), + zap.Error(err), + ) + return + } + + beaconIPStr := beaconIPs[i] + ipPort, err := ips.ToIPPort(beaconIPStr) + if err != nil { + log.Fatal( + "failed to parse beaconIP", + zap.String("beaconIP", beaconIPStr), + zap.Error(err), + ) + return + } + + network.ManuallyTrack(beaconID, ipPort) + } + + // Typically network.StartClose() should be called based on receiving a + // SIGINT or SIGTERM. For the example, we close the network after 15s. + go log.RecoverAndPanic(func() { + time.Sleep(15 * time.Second) + network.StartClose() + }) + + // network.Send(...) and network.Gossip(...) can be used here to send + // messages to peers. + + // Calling network.Dispatch() will block until a fatal error occurs or + // network.StartClose() is called. + err = network.Dispatch() + log.Info( + "network exited", + zap.Error(err), + ) +} diff --git a/avalanchego/network/handler_test.go b/avalanchego/network/handler_test.go index 5879edc5..64350b3b 100644 --- a/avalanchego/network/handler_test.go +++ b/avalanchego/network/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/version" ) -var _ router.ExternalHandler = &testHandler{} +var _ router.ExternalHandler = (*testHandler)(nil) type testHandler struct { router.InboundHandler diff --git a/avalanchego/network/listener_test.go b/avalanchego/network/listener_test.go index 1b8b78d9..1b15b006 100644 --- a/avalanchego/network/listener_test.go +++ b/avalanchego/network/listener_test.go @@ -1,20 +1,15 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network import ( - "errors" "net" "github.com/ava-labs/avalanchego/utils/ips" ) -var ( - errClosed = errors.New("closed") - - _ net.Listener = &testListener{} -) +var _ net.Listener = (*testListener)(nil) type testListener struct { ip ips.IPPort diff --git a/avalanchego/network/metrics.go b/avalanchego/network/metrics.go index 5d30a2d0..ad7826eb 100644 --- a/avalanchego/network/metrics.go +++ b/avalanchego/network/metrics.go @@ -1,35 +1,47 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network import ( + "sync" + "time" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" ) type metrics struct { - numTracked prometheus.Gauge - numPeers prometheus.Gauge - numSubnetPeers *prometheus.GaugeVec - timeSinceLastMsgSent prometheus.Gauge - timeSinceLastMsgReceived prometheus.Gauge - sendQueuePortionFull prometheus.Gauge - sendFailRate prometheus.Gauge - connected prometheus.Counter - disconnected prometheus.Counter - acceptFailed *prometheus.CounterVec - inboundConnRateLimited prometheus.Counter - inboundConnAllowed prometheus.Counter - nodeUptimeWeightedAverage prometheus.Gauge - nodeUptimeRewardingStake prometheus.Gauge + numTracked prometheus.Gauge + numPeers prometheus.Gauge + numSubnetPeers *prometheus.GaugeVec + timeSinceLastMsgSent prometheus.Gauge + timeSinceLastMsgReceived prometheus.Gauge + sendQueuePortionFull prometheus.Gauge + sendFailRate prometheus.Gauge + connected prometheus.Counter + disconnected prometheus.Counter + acceptFailed prometheus.Counter + 
inboundConnRateLimited prometheus.Counter + inboundConnAllowed prometheus.Counter + numUselessPeerListBytes prometheus.Counter + nodeUptimeWeightedAverage prometheus.Gauge + nodeUptimeRewardingStake prometheus.Gauge + nodeSubnetUptimeWeightedAverage *prometheus.GaugeVec + nodeSubnetUptimeRewardingStake *prometheus.GaugeVec + peerConnectedLifetimeAverage prometheus.Gauge + + lock sync.RWMutex + peerConnectedStartTimes map[ids.NodeID]float64 + peerConnectedStartTimesSum float64 } -func newMetrics(namespace string, registerer prometheus.Registerer, initialSubnetIDs ids.Set) (*metrics, error) { +func newMetrics(namespace string, registerer prometheus.Registerer, initialSubnetIDs set.Set[ids.ID]) (*metrics, error) { m := &metrics{ numPeers: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, @@ -79,16 +91,21 @@ func newMetrics(namespace string, registerer prometheus.Registerer, initialSubne Name: "times_disconnected", Help: "Times this node disconnected from a peer it had completed a handshake with", }), - acceptFailed: prometheus.NewCounterVec(prometheus.CounterOpts{ + acceptFailed: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "accept_failed", - Help: "Times this node failed to accept connection from a peer it had completed a handshake with", - }, []string{"error"}), + Help: "Times this node's listener failed to accept an inbound connection", + }), inboundConnAllowed: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "inbound_conn_throttler_allowed", Help: "Times this node allowed (attempted to upgrade) an inbound connection", }), + numUselessPeerListBytes: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "num_useless_peerlist_bytes", + Help: "Amount of useless bytes (i.e. 
information about nodes we already knew/don't want to connect to) received in PeerList messages", + }), inboundConnRateLimited: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "inbound_conn_throttler_rate_limited", @@ -104,6 +121,30 @@ func newMetrics(namespace string, registerer prometheus.Registerer, initialSubne Name: "node_uptime_rewarding_stake", Help: "The percentage of total stake which thinks this node is eligible for rewards", }), + nodeSubnetUptimeWeightedAverage: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "node_subnet_uptime_weighted_average", + Help: "This node's subnet uptime averages weighted by observing subnet peer stakes", + }, + []string{"subnetID"}, + ), + nodeSubnetUptimeRewardingStake: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "node_subnet_uptime_rewarding_stake", + Help: "The percentage of subnet's total stake which thinks this node is eligible for subnet's rewards", + }, + []string{"subnetID"}, + ), + peerConnectedLifetimeAverage: prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "peer_connected_duration_average", + Help: "The average duration of all peer connections in nanoseconds", + }, + ), + peerConnectedStartTimes: make(map[ids.NodeID]float64), } errs := wrappers.Errs{} @@ -119,24 +160,28 @@ func newMetrics(namespace string, registerer prometheus.Registerer, initialSubne registerer.Register(m.disconnected), registerer.Register(m.acceptFailed), registerer.Register(m.inboundConnAllowed), + registerer.Register(m.numUselessPeerListBytes), registerer.Register(m.inboundConnRateLimited), registerer.Register(m.nodeUptimeWeightedAverage), registerer.Register(m.nodeUptimeRewardingStake), + registerer.Register(m.nodeSubnetUptimeWeightedAverage), + registerer.Register(m.nodeSubnetUptimeRewardingStake), + registerer.Register(m.peerConnectedLifetimeAverage), ) - // init subnet tracker metrics with whitelisted subnets + // 
init subnet tracker metrics with tracked subnets for subnetID := range initialSubnetIDs { // no need to track primary network ID if subnetID == constants.PrimaryNetworkID { continue } // initialize to 0 - m.numSubnetPeers.WithLabelValues(subnetID.String()).Set(0) + subnetIDStr := subnetID.String() + m.numSubnetPeers.WithLabelValues(subnetIDStr).Set(0) + m.nodeSubnetUptimeWeightedAverage.WithLabelValues(subnetIDStr).Set(0) + m.nodeSubnetUptimeRewardingStake.WithLabelValues(subnetIDStr).Set(0) } - // initialize to 0 - _ = m.acceptFailed.WithLabelValues("timeout") - _ = m.acceptFailed.WithLabelValues("temporary") return m, errs.Err } @@ -148,6 +193,13 @@ func (m *metrics) markConnected(peer peer.Peer) { for subnetID := range trackedSubnets { m.numSubnetPeers.WithLabelValues(subnetID.String()).Inc() } + + m.lock.Lock() + defer m.lock.Unlock() + + now := float64(time.Now().UnixNano()) + m.peerConnectedStartTimes[peer.ID()] = now + m.peerConnectedStartTimesSum += now } func (m *metrics) markDisconnected(peer peer.Peer) { @@ -158,4 +210,26 @@ func (m *metrics) markDisconnected(peer peer.Peer) { for subnetID := range trackedSubnets { m.numSubnetPeers.WithLabelValues(subnetID.String()).Dec() } + + m.lock.Lock() + defer m.lock.Unlock() + + peerID := peer.ID() + start := m.peerConnectedStartTimes[peerID] + m.peerConnectedStartTimesSum -= start + + delete(m.peerConnectedStartTimes, peerID) +} + +func (m *metrics) updatePeerConnectionLifetimeMetrics() { + m.lock.RLock() + defer m.lock.RUnlock() + + avg := float64(0) + if n := len(m.peerConnectedStartTimes); n > 0 { + avgStartTime := m.peerConnectedStartTimesSum / float64(n) + avg = float64(time.Now().UnixNano()) - avgStartTime + } + + m.peerConnectedLifetimeAverage.Set(avg) } diff --git a/avalanchego/network/network.go b/avalanchego/network/network.go index 53111d77..8a41baba 100644 --- a/avalanchego/network/network.go +++ b/avalanchego/network/network.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network @@ -15,23 +15,31 @@ import ( gomath "math" + "github.com/pires/go-proxyproto" + "github.com/prometheus/client_golang/prometheus" "go.uber.org/zap" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/network/dialer" "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" - "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/sender" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" ) @@ -44,9 +52,15 @@ const ( ) var ( - _ sender.ExternalSender = &network{} - _ Network = &network{} - errNoPrimaryValidators = errors.New("no default subnet validators") + _ sender.ExternalSender = (*network)(nil) + _ Network = (*network)(nil) + + errMissingPrimaryValidators = errors.New("missing primary validator set") + errNotValidator = errors.New("node is not a validator") + errNotTracked = errors.New("subnet is not tracked") + errSubnetNotExist = errors.New("subnet does not exist") + errExpectedProxy = errors.New("expected proxy") + errExpectedTCPProtocol = errors.New("expected TCP protocol") ) // Network defines the functionality of the networking library. 
@@ -59,7 +73,6 @@ type Network interface { health.Checker peer.Network - common.SubnetTracker // StartClose this network and all existing connections it has. Calling // StartClose multiple times is handled gracefully. @@ -84,21 +97,31 @@ type Network interface { // info about the peers in [nodeIDs] that have finished the handshake. PeerInfo(nodeIDs []ids.NodeID) []peer.Info - NodeUptime() (UptimeResult, bool) + // NodeUptime returns given node's [subnetID] UptimeResults in the view of + // this node's peer validators. + NodeUptime(subnetID ids.ID) (UptimeResult, error) } type UptimeResult struct { + // RewardingStakePercentage shows what percent of network stake thinks we're + // above the uptime requirement. + RewardingStakePercentage float64 + + // WeightedAveragePercentage is the average perceived uptime of this node, + // weighted by stake. + // Note that this is different from RewardingStakePercentage, which shows + // the percent of the network stake that thinks this node is above the + // uptime requirement. WeightedAveragePercentage is weighted by uptime. + // i.e If uptime requirement is 85 and a peer reports 40 percent it will be + // counted (40*weight) in WeightedAveragePercentage but not in + // RewardingStakePercentage since 40 < 85 WeightedAveragePercentage float64 - RewardingStakePercentage float64 } type network struct { config *Config peerConfig *peer.Config metrics *metrics - // Signs my IP so I can send my signed IP address to other nodes in Version - // messages - ipSigner *ipSigner outboundMsgThrottler throttling.OutboundMsgThrottler @@ -122,13 +145,20 @@ type network struct { sendFailRateCalculator math.Averager - peersLock sync.RWMutex + // Tracks which peers know about which peers + gossipTracker peer.GossipTracker + peersLock sync.RWMutex + // peerIPs contains the most up to date set of signed IPs for nodes we are + // currently connected or attempting to connect to. 
+ // Note: The txID provided inside of a claimed IP is not verified and should + // not be accessed from this map. + peerIPs map[ids.NodeID]*ips.ClaimedIPPort // trackedIPs contains the set of IPs that we are currently attempting to // connect to. An entry is added to this set when we first start attempting // to connect to the peer. An entry is deleted from this set once we have // finished the handshake. trackedIPs map[ids.NodeID]*trackedIP - manuallyTrackedIDs ids.NodeIDSet + manuallyTrackedIDs set.Set[ids.NodeID] connectingPeers peer.Set connectedPeers peer.Set closing bool @@ -153,17 +183,37 @@ type network struct { func NewNetwork( config *Config, msgCreator message.Creator, - msgCreatorWithProto message.Creator, - banffTime time.Time, // TODO: remove this once we complete banff migration metricsRegisterer prometheus.Registerer, log logging.Logger, listener net.Listener, dialer dialer.Dialer, router router.ExternalHandler, ) (Network, error) { - primaryNetworkValidators, ok := config.Validators.GetValidators(constants.PrimaryNetworkID) + primaryNetworkValidators, ok := config.Validators.Get(constants.PrimaryNetworkID) if !ok { - return nil, errNoPrimaryValidators + return nil, errMissingPrimaryValidators + } + + if config.ProxyEnabled { + // Wrap the listener to process the proxy header. + listener = &proxyproto.Listener{ + Listener: listener, + Policy: func(net.Addr) (proxyproto.Policy, error) { + // Do not perform any fuzzy matching, the header must be + // provided. 
+ return proxyproto.REQUIRE, nil + }, + ValidateHeader: func(h *proxyproto.Header) error { + if !h.Command.IsProxy() { + return errExpectedProxy + } + if h.TransportProtocol != proxyproto.TCPv4 && h.TransportProtocol != proxyproto.TCPv6 { + return errExpectedTCPProtocol + } + return nil + }, + ReadHeaderTimeout: config.ProxyReadHeaderTimeout, + } } inboundMsgThrottler, err := throttling.NewInboundMsgThrottler( @@ -196,33 +246,31 @@ func NewNetwork( return nil, fmt.Errorf("initializing peer metrics failed with: %w", err) } - metrics, err := newMetrics(config.Namespace, metricsRegisterer, config.WhitelistedSubnets) + metrics, err := newMetrics(config.Namespace, metricsRegisterer, config.TrackedSubnets) if err != nil { return nil, fmt.Errorf("initializing network metrics failed with: %w", err) } peerConfig := &peer.Config{ - ReadBufferSize: config.PeerReadBufferSize, - WriteBufferSize: config.PeerWriteBufferSize, - Metrics: peerMetrics, - MessageCreator: msgCreator, - MessageCreatorWithProto: msgCreatorWithProto, - - // TODO: remove this once we complete banff migration - BanffTime: banffTime, + ReadBufferSize: config.PeerReadBufferSize, + WriteBufferSize: config.PeerWriteBufferSize, + Metrics: peerMetrics, + MessageCreator: msgCreator, Log: log, InboundMsgThrottler: inboundMsgThrottler, Network: nil, // This is set below. 
Router: router, VersionCompatibility: version.GetCompatibility(config.NetworkID), - MySubnets: config.WhitelistedSubnets, + MySubnets: config.TrackedSubnets, Beacons: config.Beacons, NetworkID: config.NetworkID, PingFrequency: config.PingFrequency, PongTimeout: config.PingPongTimeout, MaxClockDifference: config.MaxClockDifference, ResourceTracker: config.ResourceTracker, + UptimeCalculator: config.UptimeCalculator, + IPSigner: peer.NewIPSigner(config.MyIPPort, config.TLSKey), } onCloseCtx, cancel := context.WithCancel(context.Background()) @@ -230,7 +278,6 @@ func NewNetwork( config: config, peerConfig: peerConfig, metrics: metrics, - ipSigner: newIPSigner(config.MyIPPort, &peerConfig.Clock, config.TLSKey), outboundMsgThrottler: outboundMsgThrottler, inboundConnUpgradeThrottler: throttling.NewInboundConnUpgradeThrottler(log, config.ThrottlerConfig.InboundConnUpgradeThrottlerConfig), @@ -248,7 +295,9 @@ func NewNetwork( time.Now(), )), + peerIPs: make(map[ids.NodeID]*ips.ClaimedIPPort), trackedIPs: make(map[ids.NodeID]*trackedIP), + gossipTracker: config.GossipTracker, connectingPeers: peer.NewSet(), connectedPeers: peer.NewSet(), router: router, @@ -257,8 +306,8 @@ func NewNetwork( return n, nil } -func (n *network) Send(msg message.OutboundMessage, nodeIDs ids.NodeIDSet, subnetID ids.ID, validatorOnly bool) ids.NodeIDSet { - peers := n.getPeers(nodeIDs, subnetID, validatorOnly) +func (n *network) Send(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], subnetID ids.ID, allower subnets.Allower) set.Set[ids.NodeID] { + peers := n.getPeers(nodeIDs, subnetID, allower) n.peerConfig.Metrics.MultipleSendsFailed( msg.Op(), nodeIDs.Len()-len(peers), @@ -269,19 +318,19 @@ func (n *network) Send(msg message.OutboundMessage, nodeIDs ids.NodeIDSet, subne func (n *network) Gossip( msg message.OutboundMessage, subnetID ids.ID, - validatorOnly bool, numValidatorsToSend int, numNonValidatorsToSend int, numPeersToSend int, -) ids.NodeIDSet { - peers := n.samplePeers(subnetID, 
validatorOnly, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend) + allower subnets.Allower, +) set.Set[ids.NodeID] { + peers := n.samplePeers(subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) return n.send(msg, peers) } // HealthCheck returns information about several network layer health checks. // 1) Information about health check results // 2) An error if the health check reports unhealthy -func (n *network) HealthCheck() (interface{}, error) { +func (n *network) HealthCheck(context.Context) (interface{}, error) { n.peersLock.RLock() connectedTo := n.connectedPeers.Len() n.peersLock.RUnlock() @@ -298,20 +347,28 @@ func (n *network) HealthCheck() (interface{}, error) { // Make sure we've received an incoming message within the threshold now := n.peerConfig.Clock.Time() - lastMsgReceivedAt := time.Unix(atomic.LoadInt64(&n.peerConfig.LastReceived), 0) - timeSinceLastMsgReceived := now.Sub(lastMsgReceivedAt) - wasMsgReceivedRecently := timeSinceLastMsgReceived <= n.config.HealthConfig.MaxTimeSinceMsgReceived + lastMsgReceivedAt, msgReceived := n.getLastReceived() + wasMsgReceivedRecently := msgReceived + timeSinceLastMsgReceived := time.Duration(0) + if msgReceived { + timeSinceLastMsgReceived = now.Sub(lastMsgReceivedAt) + wasMsgReceivedRecently = timeSinceLastMsgReceived <= n.config.HealthConfig.MaxTimeSinceMsgReceived + details[TimeSinceLastMsgReceivedKey] = timeSinceLastMsgReceived.String() + n.metrics.timeSinceLastMsgReceived.Set(float64(timeSinceLastMsgReceived)) + } healthy = healthy && wasMsgReceivedRecently - details[TimeSinceLastMsgReceivedKey] = timeSinceLastMsgReceived.String() - n.metrics.timeSinceLastMsgReceived.Set(float64(timeSinceLastMsgReceived)) // Make sure we've sent an outgoing message within the threshold - lastMsgSentAt := time.Unix(atomic.LoadInt64(&n.peerConfig.LastSent), 0) - timeSinceLastMsgSent := now.Sub(lastMsgSentAt) - wasMsgSentRecently := timeSinceLastMsgSent <= 
n.config.HealthConfig.MaxTimeSinceMsgSent + lastMsgSentAt, msgSent := n.getLastSent() + wasMsgSentRecently := msgSent + timeSinceLastMsgSent := time.Duration(0) + if msgSent { + timeSinceLastMsgSent = now.Sub(lastMsgSentAt) + wasMsgSentRecently = timeSinceLastMsgSent <= n.config.HealthConfig.MaxTimeSinceMsgSent + details[TimeSinceLastMsgSentKey] = timeSinceLastMsgSent.String() + n.metrics.timeSinceLastMsgSent.Set(float64(timeSinceLastMsgSent)) + } healthy = healthy && wasMsgSentRecently - details[TimeSinceLastMsgSentKey] = timeSinceLastMsgSent.String() - n.metrics.timeSinceLastMsgSent.Set(float64(timeSinceLastMsgSent)) // Make sure the message send failed rate isn't too high isMsgFailRate := sendFailRate <= n.config.HealthConfig.MaxSendFailRate @@ -319,25 +376,33 @@ func (n *network) HealthCheck() (interface{}, error) { details[SendFailRateKey] = sendFailRate n.metrics.sendFailRate.Set(sendFailRate) - // Network layer is unhealthy - if !healthy { - var errorReasons []string - if !isConnected { - errorReasons = append(errorReasons, fmt.Sprintf("not connected to a minimum of %d peer(s) only %d", n.config.HealthConfig.MinConnectedPeers, connectedTo)) - } - if !wasMsgReceivedRecently { - errorReasons = append(errorReasons, fmt.Sprintf("no messages from network received in %s > %s", timeSinceLastMsgReceived, n.config.HealthConfig.MaxTimeSinceMsgReceived)) - } - if !wasMsgSentRecently { - errorReasons = append(errorReasons, fmt.Sprintf("no messages from network sent in %s > %s", timeSinceLastMsgSent, n.config.HealthConfig.MaxTimeSinceMsgSent)) - } - if !isMsgFailRate { - errorReasons = append(errorReasons, fmt.Sprintf("messages failure send rate %g > %g", sendFailRate, n.config.HealthConfig.MaxSendFailRate)) - } + // emit metrics about the lifetime of peer connections + n.metrics.updatePeerConnectionLifetimeMetrics() + + // Network layer is healthy + if healthy || !n.config.HealthConfig.Enabled { + return details, nil + } + + var errorReasons []string + if !isConnected { 
+ errorReasons = append(errorReasons, fmt.Sprintf("not connected to a minimum of %d peer(s) only %d", n.config.HealthConfig.MinConnectedPeers, connectedTo)) + } + if !msgReceived { + errorReasons = append(errorReasons, "no messages received from network") + } else if !wasMsgReceivedRecently { + errorReasons = append(errorReasons, fmt.Sprintf("no messages from network received in %s > %s", timeSinceLastMsgReceived, n.config.HealthConfig.MaxTimeSinceMsgReceived)) + } + if !msgSent { + errorReasons = append(errorReasons, "no messages sent to network") + } else if !wasMsgSentRecently { + errorReasons = append(errorReasons, fmt.Sprintf("no messages from network sent in %s > %s", timeSinceLastMsgSent, n.config.HealthConfig.MaxTimeSinceMsgSent)) + } - return details, fmt.Errorf("network layer is unhealthy reason: %s", strings.Join(errorReasons, ", ")) + if !isMsgFailRate { + errorReasons = append(errorReasons, fmt.Sprintf("messages failure send rate %g > %g", sendFailRate, n.config.HealthConfig.MaxSendFailRate)) } - return details, nil + return details, fmt.Errorf("network layer is unhealthy reason: %s", strings.Join(errorReasons, ", ")) } // Connected is called after the peer finishes the handshake. @@ -354,8 +419,33 @@ func (n *network) Connected(nodeID ids.NodeID) { return } - tracked, ok := n.trackedIPs[nodeID] - if ok { + peerIP := peer.IP() + newIP := &ips.ClaimedIPPort{ + Cert: peer.Cert(), + IPPort: peerIP.IPPort, + Timestamp: peerIP.Timestamp, + Signature: peerIP.Signature, + } + prevIP, ok := n.peerIPs[nodeID] + if !ok { + // If the IP wasn't previously tracked, then we never could have + // gossiped it. This means we don't need to reset the validator's + // tracked set. + n.peerIPs[nodeID] = newIP + } else if prevIP.Timestamp < newIP.Timestamp { + // The previous IP was stale, so we should gossip the newer IP. + n.peerIPs[nodeID] = newIP + + if !prevIP.IPPort.Equal(newIP.IPPort) { + // This IP is actually different, so we should gossip it. 
+ n.peerConfig.Log.Debug("resetting gossip due to ip change", + zap.Stringer("nodeID", nodeID), + ) + _ = n.gossipTracker.ResetValidator(nodeID) + } + } + + if tracked, ok := n.trackedIPs[nodeID]; ok { tracked.stopTracking() delete(n.trackedIPs, nodeID) } @@ -378,74 +468,179 @@ func (n *network) Connected(nodeID ids.NodeID) { // peer is a validator/beacon. func (n *network) AllowConnection(nodeID ids.NodeID) bool { return !n.config.RequireValidatorToConnect || - n.config.Validators.Contains(constants.PrimaryNetworkID, n.config.MyNodeID) || + validators.Contains(n.config.Validators, constants.PrimaryNetworkID, n.config.MyNodeID) || n.WantsConnection(nodeID) } -func (n *network) Track(claimedIPPort ips.ClaimedIPPort) bool { - nodeID := ids.NodeIDFromCert(claimedIPPort.Cert) - - // Verify that we do want to attempt to make a connection to this peer - // before verifying that the IP has been correctly signed. - // - // This check only improves performance, as the values are recalculated once - // the lock is grabbed before actually attempting to connect to the peer. - if !n.shouldTrack(nodeID, claimedIPPort) { - return false - } - - signedIP := peer.SignedIP{ - IP: peer.UnsignedIP{ - IP: claimedIPPort.IPPort, - Timestamp: claimedIPPort.Timestamp, - }, - Signature: claimedIPPort.Signature, - } - - if err := signedIP.Verify(claimedIPPort.Cert); err != nil { - n.peerConfig.Log.Debug("signature verification failed", - zap.Stringer("nodeID", nodeID), +func (n *network) Track(peerID ids.NodeID, claimedIPPorts []*ips.ClaimedIPPort) ([]*p2p.PeerAck, error) { + // Perform all signature verification and hashing before grabbing the peer + // lock. + // Note: Avoiding signature verification when the IP isn't needed is a + // **significant** performance optimization. + // Note: To avoid signature verification when the IP isn't needed, we + // optimistically filter out IPs. This can result in us not tracking an IP + // that we otherwise would have. 
This case can only happen if the node + // became a validator between the time we verified the signature and when we + // processed the IP; which should be very rare. + ipAuths, err := n.authenticateIPs(claimedIPPorts) + if err != nil { + n.peerConfig.Log.Debug("authenticating claimed IPs failed", + zap.Stringer("nodeID", peerID), zap.Error(err), ) - return false + return nil, err } + // Information for them to update about us + ipLen := len(claimedIPPorts) + newestTimestamp := make(map[ids.ID]uint64, ipLen) + // Information for us to update about them + txIDsWithUpToDateIP := make([]ids.ID, 0, ipLen) + + // Atomically modify peer data n.peersLock.Lock() defer n.peersLock.Unlock() + for i, ip := range claimedIPPorts { + ipAuth := ipAuths[i] + nodeID := ipAuth.nodeID + // Invariant: [ip] is only used to modify local node state if + // [verifiedIP] is true. + // Note: modifying peer-level state is allowed regardless of + // [verifiedIP]. + verifiedIP := ipAuth.verified + + // Re-fetch latest info for a [nodeID] in case it changed since we last + // held [peersLock]. + prevIP, previouslyTracked, shouldUpdateOurIP, shouldDial := n.peerIPStatus(nodeID, ip) + tracked, isTracked := n.trackedIPs[nodeID] + + // Evaluate if the gossiped IP is useful to us or to the peer that + // shared it with us. + switch { + case previouslyTracked && prevIP.Timestamp > ip.Timestamp: + // Our previous IP was more up to date. We should tell the peer + // not to gossip their IP to us. We should still gossip our IP to + // them. + newestTimestamp[ip.TxID] = prevIP.Timestamp + + n.metrics.numUselessPeerListBytes.Add(float64(ip.BytesLen())) + case previouslyTracked && prevIP.Timestamp == ip.Timestamp: + // Our previous IP was equally fresh. We should tell the peer + // not to gossip this IP to us. We should not gossip our IP to them. 
+ newestTimestamp[ip.TxID] = prevIP.Timestamp + txIDsWithUpToDateIP = append(txIDsWithUpToDateIP, ip.TxID) + + n.metrics.numUselessPeerListBytes.Add(float64(ip.BytesLen())) + case verifiedIP && shouldUpdateOurIP: + // This IP is more up to date. We should tell the peer not to gossip + // this IP to us. We should not gossip our IP to them. + newestTimestamp[ip.TxID] = ip.Timestamp + txIDsWithUpToDateIP = append(txIDsWithUpToDateIP, ip.TxID) + + // In the future, we should gossip this IP rather than the old IP. + n.peerIPs[nodeID] = ip + + // If the new IP is equal to the old IP, there is no reason to + // refresh the references to it. This can happen when a node + // restarts but does not change their IP. + if prevIP.IPPort.Equal(ip.IPPort) { + continue + } - if _, connected := n.connectedPeers.GetByID(nodeID); connected { - // If I'm currently connected to [nodeID] then they will have told me - // how to connect to them in the future, and I don't need to attempt to - // connect to them now. - return false + // We should gossip this new IP to all our peers. + n.peerConfig.Log.Debug("resetting gossip due to ip change", + zap.Stringer("nodeID", nodeID), + ) + _ = n.gossipTracker.ResetValidator(nodeID) + + // We should update any existing outbound connection attempts. + if isTracked { + // Stop tracking the old IP and start tracking the new one. + tracked := tracked.trackNewIP(ip.IPPort) + n.trackedIPs[nodeID] = tracked + n.dial(n.onCloseCtx, nodeID, tracked) + } + case verifiedIP && shouldDial: + // Invariant: [isTracked] is false here. + + // This is the first we've heard of this IP and we want to connect + // to it. We should tell the peer not to gossip this IP to us again. + newestTimestamp[ip.TxID] = ip.Timestamp + // We should not gossip this IP back to them. + txIDsWithUpToDateIP = append(txIDsWithUpToDateIP, ip.TxID) + + // We don't need to reset gossip about this validator because + // we've never gossiped it before. 
+ n.peerIPs[nodeID] = ip + + tracked := newTrackedIP(ip.IPPort) + n.trackedIPs[nodeID] = tracked + n.dial(n.onCloseCtx, nodeID, tracked) + default: + // This IP isn't desired + n.metrics.numUselessPeerListBytes.Add(float64(ip.BytesLen())) + } + } + + txIDsToAck := maps.Keys(newestTimestamp) + txIDsToAck, ok := n.gossipTracker.AddKnown(peerID, txIDsWithUpToDateIP, txIDsToAck) + if !ok { + n.peerConfig.Log.Error("failed to update known peers", + zap.Stringer("nodeID", peerID), + ) + return nil, nil } - tracked, isTracked := n.trackedIPs[nodeID] - switch { - case isTracked: - if tracked.ip.Timestamp >= claimedIPPort.Timestamp { - return false + peerAcks := make([]*p2p.PeerAck, len(txIDsToAck)) + for i, txID := range txIDsToAck { + txID := txID + peerAcks[i] = &p2p.PeerAck{ + TxId: txID[:], + // By responding with the highest timestamp, not just the timestamp + // the peer provided us, we may be able to avoid some unnecessary + // gossip in the case that the peer is about to update this + // validator's IP. + Timestamp: newestTimestamp[txID], + } + } + return peerAcks, nil +} + +func (n *network) MarkTracked(peerID ids.NodeID, ips []*p2p.PeerAck) error { + txIDs := make([]ids.ID, 0, len(ips)) + + n.peersLock.RLock() + defer n.peersLock.RUnlock() + + for _, ip := range ips { + txID, err := ids.ToID(ip.TxId) + if err != nil { + return err + } + + // If [txID]'s corresponding nodeID isn't known, then they must no + // longer be a validator. Therefore we wouldn't gossip their IP anyways. + nodeID, ok := n.gossipTracker.GetNodeID(txID) + if !ok { + continue + } + + // If the peer returns a lower timestamp than I currently have, then I + // have updated the IP since I sent the PeerList message this is in + // response to. That means that I should re-gossip this node's IP to the + // peer. 
+ myIP, previouslyTracked := n.peerIPs[nodeID] + if previouslyTracked && myIP.Timestamp <= ip.Timestamp { + txIDs = append(txIDs, txID) } - // Stop tracking the old IP and instead start tracking new one. - tracked := tracked.trackNewIP(&peer.UnsignedIP{ - IP: claimedIPPort.IPPort, - Timestamp: claimedIPPort.Timestamp, - }) - n.trackedIPs[nodeID] = tracked - n.dial(n.onCloseCtx, nodeID, tracked) - return true - case n.wantsConnection(nodeID): - tracked := newTrackedIP(&peer.UnsignedIP{ - IP: claimedIPPort.IPPort, - Timestamp: claimedIPPort.Timestamp, - }) - n.trackedIPs[nodeID] = tracked - n.dial(n.onCloseCtx, nodeID, tracked) - return true - default: - // This node isn't tracked and we don't want to connect to it. - return false } + + if _, ok := n.gossipTracker.AddKnown(peerID, txIDs, nil); !ok { + n.peerConfig.Log.Error("failed to update known peers", + zap.Stringer("nodeID", peerID), + ) + } + return nil } // Disconnected is called after the peer's handling has been shutdown. @@ -454,6 +649,13 @@ func (n *network) Track(claimedIPPort ips.ClaimedIPPort) bool { // call. Note that this is from the perspective of a single peer object, because // a peer with the same ID can reconnect to this network instance. 
func (n *network) Disconnected(nodeID ids.NodeID) { + if !n.gossipTracker.StopTrackingPeer(nodeID) { + n.peerConfig.Log.Error( + "stopped non-existent peer tracker", + zap.Stringer("nodeID", nodeID), + ) + } + n.peersLock.RLock() _, connecting := n.connectingPeers.GetByID(nodeID) peer, connected := n.connectedPeers.GetByID(nodeID) @@ -467,35 +669,59 @@ func (n *network) Disconnected(nodeID ids.NodeID) { } } -func (n *network) Version() (message.OutboundMessage, error) { - mySignedIP, err := n.ipSigner.getSignedIP() - if err != nil { +func (n *network) Peers(peerID ids.NodeID) ([]ips.ClaimedIPPort, error) { + // Only select validators that we haven't already sent to this peer + unknownValidators, ok := n.gossipTracker.GetUnknown(peerID) + if !ok { + n.peerConfig.Log.Debug( + "unable to find peer to gossip to", + zap.Stringer("nodeID", peerID), + ) + return nil, nil + } + + // We select a random sample of validators to gossip to avoid starving out a + // validator from being gossiped for an extended period of time. + s := sampler.NewUniform() + if err := s.Initialize(uint64(len(unknownValidators))); err != nil { return nil, err } - return n.peerConfig.GetMessageCreator().Version( - n.peerConfig.NetworkID, - n.peerConfig.Clock.Unix(), - mySignedIP.IP.IP, - n.peerConfig.VersionCompatibility.Version().String(), - mySignedIP.IP.Timestamp, - mySignedIP.Signature, - n.peerConfig.MySubnets.List(), - ) -} -func (n *network) Peers() (message.OutboundMessage, error) { - peers := n.sampleValidatorIPs() - return n.peerConfig.GetMessageCreator().PeerList(peers, true) -} + // Calculate the unknown information we need to send to this peer. 
+ validatorIPs := make([]ips.ClaimedIPPort, 0, int(n.config.PeerListNumValidatorIPs)) + for i := 0; i < len(unknownValidators) && len(validatorIPs) < int(n.config.PeerListNumValidatorIPs); i++ { + drawn, err := s.Next() + if err != nil { + return nil, err + } -func (n *network) Pong(nodeID ids.NodeID) (message.OutboundMessage, error) { - uptimePercentFloat, err := n.config.UptimeCalculator.CalculateUptimePercent(nodeID) - if err != nil { - uptimePercentFloat = 0 + validator := unknownValidators[drawn] + n.peersLock.RLock() + _, isConnected := n.connectedPeers.GetByID(validator.NodeID) + peerIP := n.peerIPs[validator.NodeID] + n.peersLock.RUnlock() + if !isConnected { + n.peerConfig.Log.Verbo( + "unable to find validator in connected peers", + zap.Stringer("nodeID", validator.NodeID), + ) + continue + } + + // Note: peerIP isn't used directly here because the TxID may be + // incorrect. + validatorIPs = append(validatorIPs, + ips.ClaimedIPPort{ + Cert: peerIP.Cert, + IPPort: peerIP.IPPort, + Timestamp: peerIP.Timestamp, + Signature: peerIP.Signature, + TxID: validator.TxID, + }, + ) } - uptimePercentInt := uint8(uptimePercentFloat * 100) - return n.peerConfig.GetMessageCreator().Pong(uptimePercentInt) + return validatorIPs, nil } // Dispatch starts accepting connections from other nodes attempting to connect @@ -505,58 +731,64 @@ func (n *network) Dispatch() error { go n.inboundConnUpgradeThrottler.Dispatch() errs := wrappers.Errs{} for { // Continuously accept new connections - conn, err := n.listener.Accept() // Returns error when n.Close() is called - if err != nil { - if netErr, ok := err.(net.Error); ok { - if netErr.Timeout() { - n.metrics.acceptFailed.WithLabelValues("timeout").Inc() - } - - // TODO: deprecate "Temporary" and use "Timeout" - if netErr.Temporary() { - n.metrics.acceptFailed.WithLabelValues("temporary").Inc() - - // Sleep for a small amount of time to try to wait for the - // temporary error to go away. 
- time.Sleep(time.Millisecond) - continue - } - } - - n.peerConfig.Log.Debug("error during server accept", - zap.Error(err), - ) + if n.onCloseCtx.Err() != nil { break } - // We pessimistically drop an incoming connection if the remote - // address is found in connectedIPs, myIPs, or peerAliasIPs. - // This protects our node from spending CPU cycles on TLS - // handshakes to upgrade connections from existing peers. - // Specifically, this can occur when one of our existing - // peers attempts to connect to one our IP aliases (that they - // aren't yet aware is an alias). - remoteAddr := conn.RemoteAddr().String() - ip, err := ips.ToIPPort(remoteAddr) + conn, err := n.listener.Accept() // Returns error when n.Close() is called if err != nil { - errs.Add(fmt.Errorf("unable to convert remote address %s to IP: %w", remoteAddr, err)) - break + n.peerConfig.Log.Debug("error during server accept", zap.Error(err)) + // Sleep for a small amount of time to try to wait for the + // error to go away. + time.Sleep(time.Millisecond) + n.metrics.acceptFailed.Inc() + continue } - if !n.inboundConnUpgradeThrottler.ShouldUpgrade(ip) { - n.peerConfig.Log.Debug("failed to upgrade connection", - zap.String("reason", "rate-limiting"), + // Note: listener.Accept is rate limited outside of this package, so a + // peer can not just arbitrarily spin up goroutines here. + go func() { + // We pessimistically drop an incoming connection if the remote + // address is found in connectedIPs, myIPs, or peerAliasIPs. This + // protects our node from spending CPU cycles on TLS handshakes to + // upgrade connections from existing peers. Specifically, this can + // occur when one of our existing peers attempts to connect to one + // our IP aliases (that they aren't yet aware is an alias). + // + // Note: Calling [RemoteAddr] with the Proxy protocol enabled may + // block for up to ProxyReadHeaderTimeout. 
Therefore, we ensure to + // call this function inside the go-routine, rather than the main + // accept loop. + remoteAddr := conn.RemoteAddr().String() + ip, err := ips.ToIPPort(remoteAddr) + if err != nil { + n.peerConfig.Log.Error("failed to parse remote address", + zap.String("peerIP", remoteAddr), + zap.Error(err), + ) + _ = conn.Close() + return + } + + if !n.inboundConnUpgradeThrottler.ShouldUpgrade(ip) { + n.peerConfig.Log.Debug("failed to upgrade connection", + zap.String("reason", "rate-limiting"), + zap.Stringer("peerIP", ip), + ) + n.metrics.inboundConnRateLimited.Inc() + _ = conn.Close() + return + } + n.metrics.inboundConnAllowed.Inc() + + n.peerConfig.Log.Verbo("starting to upgrade connection", + zap.String("direction", "inbound"), zap.Stringer("peerIP", ip), ) - n.metrics.inboundConnRateLimited.Inc() - _ = conn.Close() - continue - } - n.metrics.inboundConnAllowed.Inc() - go func() { if err := n.upgrade(conn, n.serverUpgrader); err != nil { - n.peerConfig.Log.Verbo("failed to upgrade inbound connection", + n.peerConfig.Log.Verbo("failed to upgrade connection", + zap.String("direction", "inbound"), zap.Error(err), ) } @@ -584,7 +816,7 @@ func (n *network) WantsConnection(nodeID ids.NodeID) bool { } func (n *network) wantsConnection(nodeID ids.NodeID) bool { - return n.config.Validators.Contains(constants.PrimaryNetworkID, nodeID) || + return validators.Contains(n.config.Validators, constants.PrimaryNetworkID, nodeID) || n.manuallyTrackedIDs.Contains(nodeID) } @@ -604,67 +836,24 @@ func (n *network) ManuallyTrack(nodeID ids.NodeID, ip ips.IPPort) { _, isTracked := n.trackedIPs[nodeID] if !isTracked { - tracked := newTrackedIP(&peer.UnsignedIP{ - IP: ip, - Timestamp: 0, - }) + tracked := newTrackedIP(ip) n.trackedIPs[nodeID] = tracked n.dial(n.onCloseCtx, nodeID, tracked) } } -func (n *network) TracksSubnet(nodeID ids.NodeID, subnetID ids.ID) bool { - if n.config.MyNodeID == nodeID { - return subnetID == constants.PrimaryNetworkID || 
n.config.WhitelistedSubnets.Contains(subnetID) - } - - n.peersLock.RLock() - defer n.peersLock.RUnlock() - - peer, connected := n.connectedPeers.GetByID(nodeID) - if !connected { - return false - } - trackedSubnets := peer.TrackedSubnets() - return subnetID == constants.PrimaryNetworkID || trackedSubnets.Contains(subnetID) -} - -func (n *network) sampleValidatorIPs() []ips.ClaimedIPPort { - n.peersLock.RLock() - peers := n.connectedPeers.Sample( - int(n.config.PeerListNumValidatorIPs), - func(p peer.Peer) bool { - // Only sample validators - return n.config.Validators.Contains(constants.PrimaryNetworkID, p.ID()) - }, - ) - n.peersLock.RUnlock() - - sampledIPs := make([]ips.ClaimedIPPort, len(peers)) - for i, peer := range peers { - peerIP := peer.IP() - sampledIPs[i] = ips.ClaimedIPPort{ - Cert: peer.Cert(), - IPPort: peerIP.IP.IP, - Timestamp: peerIP.IP.Timestamp, - Signature: peerIP.Signature, - } - } - return sampledIPs -} - // getPeers returns a slice of connected peers from a set of [nodeIDs]. // -// - [nodeIDs] the IDs of the peers that should be returned if they are -// connected. -// - [subnetID] the subnetID whose membership should be considered if -// [validatorOnly] is set to true. -// - [validatorOnly] is the flag to drop any nodes from [nodeIDs] that are not -// validators in [subnetID]. +// - [nodeIDs] the IDs of the peers that should be returned if they are +// connected. +// - [subnetID] the subnetID whose membership should be considered if +// [validatorOnly] is set to true. +// - [validatorOnly] is the flag to drop any nodes from [nodeIDs] that are not +// validators in [subnetID]. 
func (n *network) getPeers( - nodeIDs ids.NodeIDSet, + nodeIDs set.Set[ids.NodeID], subnetID ids.ID, - validatorOnly bool, + allower subnets.Allower, ) []peer.Peer { peers := make([]peer.Peer, 0, nodeIDs.Len()) @@ -682,7 +871,9 @@ func (n *network) getPeers( continue } - if validatorOnly && !n.config.Validators.Contains(subnetID, nodeID) { + isValidator := validators.Contains(n.config.Validators, subnetID, nodeID) + // check if the peer is allowed to connect to the subnet + if !allower.IsAllowed(nodeID, isValidator) { continue } @@ -694,15 +885,21 @@ func (n *network) getPeers( func (n *network) samplePeers( subnetID ids.ID, - validatorOnly bool, numValidatorsToSample, numNonValidatorsToSample int, numPeersToSample int, + allower subnets.Allower, ) []peer.Peer { - if validatorOnly { - numValidatorsToSample += numNonValidatorsToSample + numPeersToSample - numNonValidatorsToSample = 0 - numPeersToSample = 0 + subnetValidators, ok := n.config.Validators.Get(subnetID) + if !ok { + return nil + } + + // If there are fewer validators than [numValidatorsToSample], then only + // sample [numValidatorsToSample] validators. + subnetValidatorsLen := subnetValidators.Len() + if subnetValidatorsLen < numValidatorsToSample { + numValidatorsToSample = subnetValidatorsLen } n.peersLock.RLock() @@ -717,12 +914,19 @@ func (n *network) samplePeers( return false } + peerID := p.ID() + isValidator := subnetValidators.Contains(peerID) + // check if the peer is allowed to connect to the subnet + if !allower.IsAllowed(peerID, isValidator) { + return false + } + if numPeersToSample > 0 { numPeersToSample-- return true } - if n.config.Validators.Contains(subnetID, p.ID()) { + if isValidator { numValidatorsToSample-- return numValidatorsToSample >= 0 } @@ -738,15 +942,12 @@ func (n *network) samplePeers( // send takes ownership of the provided message reference. So, the provided // message should only be inspected if the reference has been externally // increased. 
-func (n *network) send(msg message.OutboundMessage, peers []peer.Peer) ids.NodeIDSet { - sentTo := ids.NewNodeIDSet(len(peers)) +func (n *network) send(msg message.OutboundMessage, peers []peer.Peer) set.Set[ids.NodeID] { + sentTo := set.NewSet[ids.NodeID](len(peers)) now := n.peerConfig.Clock.Time() // send to peer and update metrics for _, peer := range peers { - // Add a reference to the message so that if it is sent, it won't be - // collected until it is done being processed. - msg.AddRef() if peer.Send(n.onCloseCtx, msg) { sentTo.Add(peer.ID()) @@ -758,10 +959,6 @@ func (n *network) send(msg message.OutboundMessage, peers []peer.Peer) ids.NodeI n.sendFailRateCalculator.Observe(1, now) } } - - // The message has been passed to all peers that it will be sent to, so we - // can decrease the sender reference now. - msg.DecRef() return sentTo } @@ -780,6 +977,7 @@ func (n *network) disconnectedFromConnecting(nodeID ids.NodeID) { n.dial(n.onCloseCtx, nodeID, tracked) } else { tracked.stopTracking() + delete(n.peerIPs, nodeID) delete(n.trackedIPs, nodeID) } } @@ -797,43 +995,64 @@ func (n *network) disconnectedFromConnected(peer peer.Peer, nodeID ids.NodeID) { // The peer that is disconnecting from us finished the handshake if n.wantsConnection(nodeID) { - tracked := newTrackedIP(&peer.IP().IP) + prevIP := n.peerIPs[nodeID] + tracked := newTrackedIP(prevIP.IPPort) n.trackedIPs[nodeID] = tracked n.dial(n.onCloseCtx, nodeID, tracked) } else { - delete(n.trackedIPs, nodeID) + delete(n.peerIPs, nodeID) } n.metrics.markDisconnected(peer) } -func (n *network) shouldTrack(nodeID ids.NodeID, ip ips.ClaimedIPPort) bool { - if !n.config.AllowPrivateIPs && ip.IPPort.IP.IsPrivate() { - n.peerConfig.Log.Verbo( - "not connecting to suggested peer", - zap.String("reason", "peer IP is private"), - zap.Stringer("nodeID", nodeID), - zap.Stringer("peerIPPort", ip.IPPort), - ) - return false - } +// ipAuth is a helper struct used to convey information about an +// 
[*ips.ClaimedIPPort]. +type ipAuth struct { + nodeID ids.NodeID + verified bool +} - n.peersLock.RLock() - defer n.peersLock.RUnlock() +func (n *network) authenticateIPs(ips []*ips.ClaimedIPPort) ([]*ipAuth, error) { + ipAuths := make([]*ipAuth, len(ips)) + for i, ip := range ips { + nodeID := ids.NodeIDFromCert(ip.Cert) + n.peersLock.RLock() + _, _, shouldUpdateOurIP, shouldDial := n.peerIPStatus(nodeID, ip) + n.peersLock.RUnlock() + if !shouldUpdateOurIP && !shouldDial { + ipAuths[i] = &ipAuth{ + nodeID: nodeID, + } + continue + } - _, connected := n.connectedPeers.GetByID(nodeID) - if connected { - // If I'm currently connected to [nodeID] then they will have told me - // how to connect to them in the future, and I don't need to attempt to - // connect to them now. - return false + // Verify signature if needed + signedIP := peer.SignedIP{ + UnsignedIP: peer.UnsignedIP{ + IPPort: ip.IPPort, + Timestamp: ip.Timestamp, + }, + Signature: ip.Signature, + } + if err := signedIP.Verify(ip.Cert); err != nil { + return nil, err + } + ipAuths[i] = &ipAuth{ + nodeID: nodeID, + verified: true, + } } + return ipAuths, nil +} - tracked, isTracked := n.trackedIPs[nodeID] - if isTracked { - return tracked.ip.Timestamp < ip.Timestamp - } - return n.wantsConnection(nodeID) +// peerIPStatus assumes the caller holds [peersLock] +func (n *network) peerIPStatus(nodeID ids.NodeID, ip *ips.ClaimedIPPort) (*ips.ClaimedIPPort, bool, bool, bool) { + prevIP, previouslyTracked := n.peerIPs[nodeID] + _, connected := n.connectedPeers.GetByID(nodeID) + shouldUpdateOurIP := previouslyTracked && prevIP.Timestamp < ip.Timestamp + shouldDial := !previouslyTracked && !connected && n.wantsConnection(nodeID) + return prevIP, previouslyTracked, shouldUpdateOurIP, shouldDial } // dial will spin up a new goroutine and attempt to establish a connection with @@ -877,6 +1096,7 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { // race conditions before removing the entry. 
if ip, exists := n.trackedIPs[nodeID]; exists { ip.stopTracking() + delete(n.peerIPs, nodeID) delete(n.trackedIPs, nodeID) } n.peersLock.Unlock() @@ -908,7 +1128,7 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { n.config.MaxReconnectDelay, ) - conn, err := n.dialer.Dial(ctx, ip.ip.IP) + conn, err := n.dialer.Dial(ctx, ip.ip) if err != nil { n.peerConfig.Log.Verbo( "failed to reach peer, attempting again", @@ -918,6 +1138,11 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { continue } + n.peerConfig.Log.Verbo("starting to upgrade connection", + zap.String("direction", "outbound"), + zap.Stringer("peerIP", ip.ip.IP), + ) + err = n.upgrade(conn, n.clientUpgrader) if err != nil { n.peerConfig.Log.Verbo( @@ -941,16 +1166,6 @@ func (n *network) dial(ctx context.Context, nodeID ids.NodeID, ip *trackedIP) { // connection will be used to create a new peer. Otherwise the connection will // be immediately closed. func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader) error { - if conn, ok := conn.(*net.TCPConn); ok { - // If a connection is closed, we shouldn't bother keeping any messages - // in memory. 
- if err := conn.SetLinger(0); err != nil { - n.peerConfig.Log.Warn("failed to set no linger", - zap.Error(err), - ) - } - } - upgradeTimeout := n.peerConfig.Clock.Time().Add(n.config.ReadHandshakeTimeout) if err := conn.SetReadDeadline(upgradeTimeout); err != nil { _ = conn.Close() @@ -996,9 +1211,9 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader) error { } n.peersLock.Lock() - defer n.peersLock.Unlock() - if n.closing { + n.peersLock.Unlock() + _ = tlsConn.Close() n.peerConfig.Log.Verbo( "dropping connection", @@ -1009,6 +1224,8 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader) error { } if _, connecting := n.connectingPeers.GetByID(nodeID); connecting { + n.peersLock.Unlock() + _ = tlsConn.Close() n.peerConfig.Log.Verbo( "dropping connection", @@ -1019,6 +1236,8 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader) error { } if _, connected := n.connectedPeers.GetByID(nodeID); connected { + n.peersLock.Unlock() + _ = tlsConn.Close() n.peerConfig.Log.Verbo( "dropping connection", @@ -1032,6 +1251,13 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader) error { zap.Stringer("nodeID", nodeID), ) + if !n.gossipTracker.StartTrackingPeer(nodeID) { + n.peerConfig.Log.Error( + "started duplicate peer tracker", + zap.Stringer("nodeID", nodeID), + ) + } + // peer.Start requires there is only ever one peer instance running with the // same [peerConfig.InboundMsgThrottler]. This is guaranteed by the above // de-duplications for [connectingPeers] and [connectedPeers]. 
@@ -1048,6 +1274,7 @@ func (n *network) upgrade(conn net.Conn, upgrader peer.Upgrader) error { ), ) n.connectingPeers.Add(peer) + n.peersLock.Unlock() return nil } @@ -1079,6 +1306,7 @@ func (n *network) StartClose() { for nodeID, tracked := range n.trackedIPs { tracked.stopTracking() + delete(n.peerIPs, nodeID) delete(n.trackedIPs, nodeID) } @@ -1094,19 +1322,23 @@ func (n *network) StartClose() { }) } -func (n *network) NodeUptime() (UptimeResult, bool) { - primaryValidators, ok := n.config.Validators.GetValidators(constants.PrimaryNetworkID) +func (n *network) NodeUptime(subnetID ids.ID) (UptimeResult, error) { + if subnetID != constants.PrimaryNetworkID && !n.config.TrackedSubnets.Contains(subnetID) { + return UptimeResult{}, errNotTracked + } + + validators, ok := n.config.Validators.Get(subnetID) if !ok { - return UptimeResult{}, false + return UptimeResult{}, errSubnetNotExist } - myStake, isValidator := primaryValidators.GetWeight(n.config.MyNodeID) - if !isValidator { - return UptimeResult{}, false + myStake := validators.GetWeight(n.config.MyNodeID) + if myStake == 0 { + return UptimeResult{}, errNotValidator } var ( - totalWeight = float64(primaryValidators.Weight()) + totalWeight = float64(validators.Weight()) totalWeightedPercent = 100 * float64(myStake) rewardingStake = float64(myStake) ) @@ -1118,18 +1350,22 @@ func (n *network) NodeUptime() (UptimeResult, bool) { peer, _ := n.connectedPeers.GetByIndex(i) nodeID := peer.ID() - weight, ok := primaryValidators.GetWeight(nodeID) - if !ok { + weight := validators.GetWeight(nodeID) + if weight == 0 { // this is not a validator skip it. 
continue } - observedUptime := peer.ObservedUptime() + observedUptime, exist := peer.ObservedUptime(subnetID) + if !exist { + observedUptime = 0 + } percent := float64(observedUptime) weightFloat := float64(weight) totalWeightedPercent += percent * weightFloat // if this peer thinks we're above requirement add the weight + // TODO: use subnet-specific uptime requirements if percent/100 >= n.config.UptimeRequirement { rewardingStake += weightFloat } @@ -1138,7 +1374,7 @@ func (n *network) NodeUptime() (UptimeResult, bool) { return UptimeResult{ WeightedAveragePercentage: gomath.Abs(totalWeightedPercent / totalWeight), RewardingStakePercentage: gomath.Abs(100 * rewardingStake / totalWeight), - }, true + }, nil } func (n *network) runTimers() { @@ -1154,36 +1390,60 @@ func (n *network) runTimers() { case <-n.onCloseCtx.Done(): return case <-gossipPeerlists.C: - validatorIPs := n.sampleValidatorIPs() - if len(validatorIPs) == 0 { - n.peerConfig.Log.Debug("skipping validator IP gossiping as no IPs are connected") - continue - } - - msg, err := n.peerConfig.GetMessageCreator().PeerList(validatorIPs, false) + n.gossipPeerLists() + case <-updateUptimes.C: + primaryUptime, err := n.NodeUptime(constants.PrimaryNetworkID) if err != nil { - n.peerConfig.Log.Error( - "failed to gossip", - zap.Int("peerListLen", len(validatorIPs)), + n.peerConfig.Log.Debug("failed to get primary network uptime", zap.Error(err), ) - continue } + n.metrics.nodeUptimeWeightedAverage.Set(primaryUptime.WeightedAveragePercentage) + n.metrics.nodeUptimeRewardingStake.Set(primaryUptime.RewardingStakePercentage) + + for subnetID := range n.config.TrackedSubnets { + result, err := n.NodeUptime(subnetID) + if err != nil { + n.peerConfig.Log.Debug("failed to get subnet uptime", + zap.Stringer("subnetID", subnetID), + zap.Error(err), + ) + } + subnetIDStr := subnetID.String() + n.metrics.nodeSubnetUptimeWeightedAverage.WithLabelValues(subnetIDStr).Set(result.WeightedAveragePercentage) + 
n.metrics.nodeSubnetUptimeRewardingStake.WithLabelValues(subnetIDStr).Set(result.RewardingStakePercentage) + } + } + } +} - n.Gossip( - msg, - constants.PrimaryNetworkID, - false, - int(n.config.PeerListValidatorGossipSize), - int(n.config.PeerListNonValidatorGossipSize), - int(n.config.PeerListPeersGossipSize), - ) +// gossipPeerLists gossips validators to peers in the network +func (n *network) gossipPeerLists() { + peers := n.samplePeers( + constants.PrimaryNetworkID, + int(n.config.PeerListValidatorGossipSize), + int(n.config.PeerListNonValidatorGossipSize), + int(n.config.PeerListPeersGossipSize), + subnets.NoOpAllower, + ) - case <-updateUptimes.C: + for _, p := range peers { + p.StartSendPeerList() + } +} - result, _ := n.NodeUptime() - n.metrics.nodeUptimeWeightedAverage.Set(result.WeightedAveragePercentage) - n.metrics.nodeUptimeRewardingStake.Set(result.RewardingStakePercentage) - } +func (n *network) getLastReceived() (time.Time, bool) { + lastReceived := atomic.LoadInt64(&n.peerConfig.LastReceived) + if lastReceived == 0 { + return time.Time{}, false + } + return time.Unix(lastReceived, 0), true +} + +func (n *network) getLastSent() (time.Time, bool) { + lastSent := atomic.LoadInt64(&n.peerConfig.LastSent) + if lastSent == 0 { + return time.Time{}, false } + return time.Unix(lastSent, 0), true } diff --git a/avalanchego/network/network_test.go b/avalanchego/network/network_test.go index 5c3dbc9d..63efee69 100644 --- a/avalanchego/network/network_test.go +++ b/avalanchego/network/network_test.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package network import ( + "context" "crypto" - "fmt" "net" "sync" "testing" @@ -18,16 +18,20 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/network/dialer" + "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/network/throttling" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/version" ) @@ -106,7 +110,7 @@ var ( PingFrequency: constants.DefaultPingFrequency, AllowPrivateIPs: true, - CompressionEnabled: true, + CompressionType: constants.DefaultNetworkCompressionType, UptimeCalculator: uptime.NewManager(uptime.NewTestState()), UptimeMetricFreq: 30 * time.Second, @@ -139,7 +143,12 @@ func newDefaultTargeter(t tracker.Tracker) tracker.Targeter { } func newDefaultResourceTracker() tracker.ResourceTracker { - tracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, 10*time.Second) + tracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + 10*time.Second, + ) if err != nil { panic(err) } @@ -170,26 +179,19 @@ func newTestNetwork(t *testing.T, count int) (*testDialer, []*testListener, []id return dialer, listeners, nodeIDs, configs } -func newMessageCreator(t *testing.T) (message.Creator, message.Creator) { +func newMessageCreator(t 
*testing.T) message.Creator { t.Helper() mc, err := message.NewCreator( + logging.NoLog{}, prometheus.NewRegistry(), "", - true, - 10*time.Second, - ) - require.NoError(t, err) - - mcProto, err := message.NewCreatorWithProto( - prometheus.NewRegistry(), - "", - true, + constants.DefaultNetworkCompressionType, 10*time.Second, ) require.NoError(t, err) - return mc, mcProto + return mc } func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler) ([]ids.NodeID, []Network, *sync.WaitGroup) { @@ -197,18 +199,6 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler dialer, listeners, nodeIDs, configs := newTestNetwork(t, len(handlers)) - beacons := validators.NewSet() - err := beacons.AddWeight(nodeIDs[0], 1) - require.NoError(err) - - vdrs := validators.NewManager() - for _, nodeID := range nodeIDs { - err := vdrs.AddWeight(constants.PrimaryNetworkID, nodeID, 1) - require.NoError(err) - } - - msgCreator, msgCreatorWithProto := newMessageCreator(t) - var ( networks = make([]Network, len(configs)) @@ -218,19 +208,44 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler onAllConnected = make(chan struct{}) ) for i, config := range configs { + msgCreator := newMessageCreator(t) + registry := prometheus.NewRegistry() + + g, err := peer.NewGossipTracker(registry, "foobar") + require.NoError(err) + + log := logging.NoLog{} + gossipTrackerCallback := peer.GossipTrackerCallback{ + Log: log, + GossipTracker: g, + } + + beacons := validators.NewSet() + err = beacons.Add(nodeIDs[0], nil, ids.GenerateTestID(), 1) + require.NoError(err) + + primaryVdrs := validators.NewSet() + primaryVdrs.RegisterCallbackListener(&gossipTrackerCallback) + for _, nodeID := range nodeIDs { + err := primaryVdrs.Add(nodeID, nil, ids.GenerateTestID(), 1) + require.NoError(err) + } + + vdrs := validators.NewManager() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) + config := config + config.GossipTracker = g 
config.Beacons = beacons config.Validators = vdrs - var connected ids.NodeIDSet + var connected set.Set[ids.NodeID] net, err := NewNetwork( config, msgCreator, - msgCreatorWithProto, - time.Now().Add(time.Hour), // TODO: test proto with banff activated - prometheus.NewRegistry(), - logging.NoLog{}, + registry, + log, listeners[i], dialer, &testHandler{ @@ -300,52 +315,89 @@ func TestNewNetwork(t *testing.T) { func TestSend(t *testing.T) { require := require.New(t) - for _, useProto := range []bool{false, true} { - t.Run(fmt.Sprintf("use proto buf message creator %v", useProto), func(tt *testing.T) { - received := make(chan message.InboundMessage) - nodeIDs, networks, wg := newFullyConnectedTestNetwork( - tt, - []router.InboundHandler{ - router.InboundHandlerFunc(func(message.InboundMessage) { - tt.Fatal("unexpected message received") - }), - router.InboundHandlerFunc(func(msg message.InboundMessage) { - received <- msg - }), - router.InboundHandlerFunc(func(message.InboundMessage) { - tt.Fatal("unexpected message received") - }), - }, - ) - - net0 := networks[0] - - mc, mcProto := newMessageCreator(tt) - var ( - outboundGetMsg message.OutboundMessage - err error - ) - if !useProto { - outboundGetMsg, err = mc.Get(ids.Empty, 1, time.Second, ids.Empty) - } else { - outboundGetMsg, err = mcProto.Get(ids.Empty, 1, time.Second, ids.Empty) - } - require.NoError(err) + received := make(chan message.InboundMessage) + nodeIDs, networks, wg := newFullyConnectedTestNetwork( + t, + []router.InboundHandler{ + router.InboundHandlerFunc(func(context.Context, message.InboundMessage) { + t.Fatal("unexpected message received") + }), + router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { + received <- msg + }), + router.InboundHandlerFunc(func(context.Context, message.InboundMessage) { + t.Fatal("unexpected message received") + }), + }, + ) + + net0 := networks[0] + + mc := newMessageCreator(t) + outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, 
ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) + require.NoError(err) + + toSend := set.Set[ids.NodeID]{} + toSend.Add(nodeIDs[1]) + sentTo := net0.Send(outboundGetMsg, toSend, constants.PrimaryNetworkID, subnets.NoOpAllower) + require.EqualValues(toSend, sentTo) + + inboundGetMsg := <-received + require.Equal(message.GetOp, inboundGetMsg.Op()) + + for _, net := range networks { + net.StartClose() + } + wg.Wait() +} - toSend := ids.NodeIDSet{} - toSend.Add(nodeIDs[1]) - sentTo := net0.Send(outboundGetMsg, toSend, constants.PrimaryNetworkID, false) - require.EqualValues(toSend, sentTo) +func TestSendAndGossipWithFilter(t *testing.T) { + require := require.New(t) + + received := make(chan message.InboundMessage) + nodeIDs, networks, wg := newFullyConnectedTestNetwork( + t, + []router.InboundHandler{ + router.InboundHandlerFunc(func(context.Context, message.InboundMessage) { + t.Fatal("unexpected message received") + }), + router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { + received <- msg + }), + router.InboundHandlerFunc(func(context.Context, message.InboundMessage) { + t.Fatal("unexpected message received") + }), + }, + ) + + net0 := networks[0] + + mc := newMessageCreator(t) + outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) + require.NoError(err) + + toSend := set.NewSet[ids.NodeID](3) + validNodeID := nodeIDs[1] + toSend.Add(nodeIDs...) 
+ sentTo := net0.Send(outboundGetMsg, toSend, constants.PrimaryNetworkID, newNodeIDConnector(validNodeID)) + require.Len(sentTo, 1) + require.Contains(sentTo, validNodeID) + + inboundGetMsg := <-received + require.Equal(message.GetOp, inboundGetMsg.Op()) + + // Test Gossip now + sentTo = net0.Gossip(outboundGetMsg, constants.PrimaryNetworkID, 0, 0, len(nodeIDs), newNodeIDConnector(validNodeID)) + require.Len(sentTo, 1) + require.Contains(sentTo, validNodeID) - inboundGetMsg := <-received - require.Equal(message.Get, inboundGetMsg.Op()) + inboundGetMsg = <-received + require.Equal(message.GetOp, inboundGetMsg.Op()) - for _, net := range networks { - net.StartClose() - } - wg.Wait() - }) + for _, net := range networks { + net.StartClose() } + wg.Wait() } func TestTrackVerifiesSignatures(t *testing.T) { @@ -355,10 +407,10 @@ func TestTrackVerifiesSignatures(t *testing.T) { network := networks[0].(*network) nodeID, tlsCert, _ := getTLS(t, 1) - err := network.config.Validators.AddWeight(constants.PrimaryNetworkID, nodeID, 1) + err := validators.Add(network.config.Validators, constants.PrimaryNetworkID, nodeID, nil, ids.Empty, 1) require.NoError(err) - useful := network.Track(ips.ClaimedIPPort{ + _, err = network.Track(ids.EmptyNodeID, []*ips.ClaimedIPPort{{ Cert: tlsCert.Leaf, IPPort: ips.IPPort{ IP: net.IPv4(123, 132, 123, 123), @@ -366,9 +418,9 @@ func TestTrackVerifiesSignatures(t *testing.T) { }, Timestamp: 1000, Signature: nil, - }) + }}) // The signature is wrong so this peer tracking info isn't useful. - require.False(useful) + require.Error(err) network.peersLock.RLock() require.Empty(network.trackedIPs) diff --git a/avalanchego/network/peer/config.go b/avalanchego/network/peer/config.go index 3821b548..2ad13a19 100644 --- a/avalanchego/network/peer/config.go +++ b/avalanchego/network/peer/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package peer @@ -11,8 +11,10 @@ import ( "github.com/ava-labs/avalanchego/network/throttling" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/version" ) @@ -21,21 +23,17 @@ type Config struct { // Size, in bytes, of the buffer this peer reads messages into ReadBufferSize int // Size, in bytes, of the buffer this peer writes messages into - WriteBufferSize int - Clock mockable.Clock - Metrics *Metrics - MessageCreator message.Creator - MessageCreatorWithProto message.Creator - - // TODO: remove this once we complete banff migration - BanffTime time.Time + WriteBufferSize int + Clock mockable.Clock + Metrics *Metrics + MessageCreator message.Creator Log logging.Logger InboundMsgThrottler throttling.InboundMsgThrottler Network Network Router router.InboundHandler VersionCompatibility version.Compatibility - MySubnets ids.Set + MySubnets set.Set[ids.ID] Beacons validators.Set NetworkID uint32 PingFrequency time.Duration @@ -48,16 +46,10 @@ type Config struct { // Tracks CPU/disk usage caused by each peer. 
ResourceTracker tracker.ResourceTracker -} -func (c *Config) GetMessageCreator() message.Creator { - now := c.Clock.Time() - if c.IsBanffActivated(now) { - return c.MessageCreatorWithProto - } - return c.MessageCreator -} + // Calculates uptime of peers + UptimeCalculator uptime.Calculator -func (c *Config) IsBanffActivated(time time.Time) bool { - return !time.Before(c.BanffTime) + // Signs my IP so I can send my signed IP address in the Version message + IPSigner *IPSigner } diff --git a/avalanchego/network/peer/example_test.go b/avalanchego/network/peer/example_test.go index 8530fd48..75eaecee 100644 --- a/avalanchego/network/peer/example_test.go +++ b/avalanchego/network/peer/example_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -28,7 +28,7 @@ func ExampleStartTestPeer() { ctx, peerIP, constants.LocalID, - router.InboundHandlerFunc(func(msg message.InboundMessage) { + router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { fmt.Printf("handling %s\n", msg.Op()) }), ) diff --git a/avalanchego/network/peer/gossip_tracker.go b/avalanchego/network/peer/gossip_tracker.go new file mode 100644 index 00000000..5676b073 --- /dev/null +++ b/avalanchego/network/peer/gossip_tracker.go @@ -0,0 +1,323 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package peer + +import ( + "fmt" + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +// GossipTracker tracks the validators that we're currently aware of, as well as +// the validators we've told each peers about. This data is stored in a bitset +// to optimize space, where only N (num validators) bits will be used per peer. 
+// +// This is done by recording some state information of both what validators this +// node is aware of, and what validators we've told each peer about. +// As an example, say we track three peers and three validators (MSB first): +// +// trackedPeers: { +// p1: [1, 1, 1] // we have already told [p1] about all validators +// p2: [0, 1, 1] // [p2] doesn't know about [v3] +// p3: [0, 0, 1] // [p3] knows only about [v3] +// } +// +// GetUnknown computes the validators we haven't sent to a given peer. Ex: +// +// GetUnknown(p1) - [0, 0, 0] +// GetUnknown(p2) - [1, 0, 0] +// GetUnknown(p3) - [1, 1, 0] +// +// Using the gossipTracker, we can quickly compute the validators each peer +// doesn't know about using GetUnknown so that in subsequent PeerList gossip +// messages we only send information that this peer (most likely) doesn't +// already know about. The only case where we'll send a redundant set of +// bytes is if another remote peer gossips to the same peer we're trying to +// gossip to first. +type GossipTracker interface { + // Tracked returns if a peer is being tracked + // Returns: + // bool: False if [peerID] is not tracked. True otherwise. + Tracked(peerID ids.NodeID) bool + + // StartTrackingPeer starts tracking a peer + // Returns: + // bool: False if [peerID] was already tracked. True otherwise. + StartTrackingPeer(peerID ids.NodeID) bool + // StopTrackingPeer stops tracking a given peer + // Returns: + // bool: False if [peerID] was not tracked. True otherwise. + StopTrackingPeer(peerID ids.NodeID) bool + + // AddValidator adds a validator that can be gossiped about + // bool: False if a validator with the same node ID or txID as [validator] + // is present. True otherwise. + AddValidator(validator ValidatorID) bool + // GetNodeID maps a txID into a nodeIDs + // nodeID: The nodeID that was registered by [txID] + // bool: False if [validator] was not present. True otherwise. 
+ GetNodeID(txID ids.ID) (ids.NodeID, bool) + // RemoveValidator removes a validator that can be gossiped about + // bool: False if [validator] was already not present. True otherwise. + RemoveValidator(validatorID ids.NodeID) bool + // ResetValidator resets known gossip status of [validatorID] to unknown + // for all peers + // bool: False if [validator] was not present. True otherwise. + ResetValidator(validatorID ids.NodeID) bool + + // AddKnown adds [knownTxIDs] to the txIDs known by [peerID] and filters + // [txIDs] for non-validators. + // Returns: + // txIDs: The txIDs in [txIDs] that are currently validators. + // bool: False if [peerID] is not tracked. True otherwise. + AddKnown( + peerID ids.NodeID, + knownTxIDs []ids.ID, + txIDs []ids.ID, + ) ([]ids.ID, bool) + // GetUnknown gets the peers that we haven't sent to this peer + // Returns: + // []ValidatorID: a slice of ValidatorIDs that [peerID] doesn't know about. + // bool: False if [peerID] is not tracked. True otherwise. + GetUnknown(peerID ids.NodeID) ([]ValidatorID, bool) +} + +type gossipTracker struct { + lock sync.RWMutex + // a mapping of txIDs => the validator added to the validiator set by that + // tx. 
+ txIDsToNodeIDs map[ids.ID]ids.NodeID + // a mapping of validators => the index they occupy in the bitsets + nodeIDsToIndices map[ids.NodeID]int + // each validator in the index it occupies in the bitset + validatorIDs []ValidatorID + // a mapping of each peer => the validators they know about + trackedPeers map[ids.NodeID]set.Bits + + metrics gossipTrackerMetrics +} + +// NewGossipTracker returns an instance of gossipTracker +func NewGossipTracker( + registerer prometheus.Registerer, + namespace string, +) (GossipTracker, error) { + m, err := newGossipTrackerMetrics(registerer, fmt.Sprintf("%s_gossip_tracker", namespace)) + if err != nil { + return nil, err + } + + return &gossipTracker{ + txIDsToNodeIDs: make(map[ids.ID]ids.NodeID), + nodeIDsToIndices: make(map[ids.NodeID]int), + trackedPeers: make(map[ids.NodeID]set.Bits), + metrics: m, + }, nil +} + +func (g *gossipTracker) Tracked(peerID ids.NodeID) bool { + g.lock.RLock() + defer g.lock.RUnlock() + + _, ok := g.trackedPeers[peerID] + return ok +} + +func (g *gossipTracker) StartTrackingPeer(peerID ids.NodeID) bool { + g.lock.Lock() + defer g.lock.Unlock() + + // don't track the peer if it's already being tracked + if _, ok := g.trackedPeers[peerID]; ok { + return false + } + + // start tracking the peer. Initialize their bitset to zero since we + // haven't sent them anything yet. 
+ g.trackedPeers[peerID] = set.NewBits() + + // emit metrics + g.metrics.trackedPeersSize.Set(float64(len(g.trackedPeers))) + + return true +} + +func (g *gossipTracker) StopTrackingPeer(peerID ids.NodeID) bool { + g.lock.Lock() + defer g.lock.Unlock() + + // only stop tracking peers that are actually being tracked + if _, ok := g.trackedPeers[peerID]; !ok { + return false + } + + // stop tracking the peer by removing them + delete(g.trackedPeers, peerID) + g.metrics.trackedPeersSize.Set(float64(len(g.trackedPeers))) + + return true +} + +func (g *gossipTracker) AddValidator(validator ValidatorID) bool { + g.lock.Lock() + defer g.lock.Unlock() + + // only add validators that are not already present + if _, ok := g.txIDsToNodeIDs[validator.TxID]; ok { + return false + } + if _, ok := g.nodeIDsToIndices[validator.NodeID]; ok { + return false + } + + // add the validator to the MSB of the bitset. + msb := len(g.validatorIDs) + g.txIDsToNodeIDs[validator.TxID] = validator.NodeID + g.nodeIDsToIndices[validator.NodeID] = msb + g.validatorIDs = append(g.validatorIDs, validator) + + // emit metrics + g.metrics.validatorsSize.Set(float64(len(g.validatorIDs))) + + return true +} + +func (g *gossipTracker) GetNodeID(txID ids.ID) (ids.NodeID, bool) { + g.lock.RLock() + defer g.lock.RUnlock() + + nodeID, ok := g.txIDsToNodeIDs[txID] + return nodeID, ok +} + +func (g *gossipTracker) RemoveValidator(validatorID ids.NodeID) bool { + g.lock.Lock() + defer g.lock.Unlock() + + // only remove validators that are already present + indexToRemove, ok := g.nodeIDsToIndices[validatorID] + if !ok { + return false + } + validatorToRemove := g.validatorIDs[indexToRemove] + + // swap the validator-to-be-removed with the validator in the last index + // if the element we're swapping with is ourselves, we can skip this swap + // since we only need to delete instead + lastIndex := len(g.validatorIDs) - 1 + if indexToRemove != lastIndex { + lastValidator := g.validatorIDs[lastIndex] + + 
g.nodeIDsToIndices[lastValidator.NodeID] = indexToRemove + g.validatorIDs[indexToRemove] = lastValidator + } + + delete(g.txIDsToNodeIDs, validatorToRemove.TxID) + delete(g.nodeIDsToIndices, validatorID) + g.validatorIDs = g.validatorIDs[:lastIndex] + + // Invariant: We must remove the validator from everyone else's validator + // bitsets to make sure that each validator occupies the same position in + // each bitset. + for _, knownPeers := range g.trackedPeers { + // swap the element to be removed with the msb + if indexToRemove != lastIndex { + if knownPeers.Contains(lastIndex) { + knownPeers.Add(indexToRemove) + } else { + knownPeers.Remove(indexToRemove) + } + } + knownPeers.Remove(lastIndex) + } + + // emit metrics + g.metrics.validatorsSize.Set(float64(len(g.validatorIDs))) + + return true +} + +func (g *gossipTracker) ResetValidator(validatorID ids.NodeID) bool { + g.lock.Lock() + defer g.lock.Unlock() + + // only reset validators that exist + indexToReset, ok := g.nodeIDsToIndices[validatorID] + if !ok { + return false + } + + for _, knownPeers := range g.trackedPeers { + knownPeers.Remove(indexToReset) + } + + return true +} + +// AddKnown invariants: +// +// 1. [peerID] SHOULD only be a nodeID that has been tracked with +// StartTrackingPeer(). +func (g *gossipTracker) AddKnown( + peerID ids.NodeID, + knownTxIDs []ids.ID, + txIDs []ids.ID, +) ([]ids.ID, bool) { + g.lock.Lock() + defer g.lock.Unlock() + + knownPeers, ok := g.trackedPeers[peerID] + if !ok { + return nil, false + } + for _, txID := range knownTxIDs { + nodeID, ok := g.txIDsToNodeIDs[txID] + if !ok { + // We don't know about this txID, this can happen due to differences + // between our current validator set and the peer's current + // validator set. + continue + } + + // Because we fetched the nodeID from [g.txIDsToNodeIDs], we are + // guaranteed that the index is populated. 
+ index := g.nodeIDsToIndices[nodeID] + knownPeers.Add(index) + } + + validatorTxIDs := make([]ids.ID, 0, len(txIDs)) + for _, txID := range txIDs { + if _, ok := g.txIDsToNodeIDs[txID]; ok { + validatorTxIDs = append(validatorTxIDs, txID) + } + } + return validatorTxIDs, true +} + +func (g *gossipTracker) GetUnknown(peerID ids.NodeID) ([]ValidatorID, bool) { + g.lock.RLock() + defer g.lock.RUnlock() + + // return false if this peer isn't tracked + knownPeers, ok := g.trackedPeers[peerID] + if !ok { + return nil, false + } + + // Calculate the unknown information we need to send to this peer. We do + // this by computing the difference between the validators we know about + // and the validators we know we've sent to [peerID]. + result := make([]ValidatorID, 0, len(g.validatorIDs)) + for i, validatorID := range g.validatorIDs { + if !knownPeers.Contains(i) { + result = append(result, validatorID) + } + } + + return result, true +} diff --git a/avalanchego/network/peer/gossip_tracker_callback.go b/avalanchego/network/peer/gossip_tracker_callback.go new file mode 100644 index 00000000..28514ac1 --- /dev/null +++ b/avalanchego/network/peer/gossip_tracker_callback.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package peer + +import ( + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/logging" +) + +var _ validators.SetCallbackListener = (*GossipTrackerCallback)(nil) + +// GossipTrackerCallback synchronizes GossipTracker's validator state with the +// validator set it's registered to. 
+type GossipTrackerCallback struct { + Log logging.Logger + GossipTracker GossipTracker +} + +// OnValidatorAdded adds [validatorID] to the set of validators that can be +// gossiped about +func (g *GossipTrackerCallback) OnValidatorAdded( + nodeID ids.NodeID, + _ *bls.PublicKey, + txID ids.ID, + _ uint64, +) { + vdr := ValidatorID{ + NodeID: nodeID, + TxID: txID, + } + if !g.GossipTracker.AddValidator(vdr) { + g.Log.Error("failed to add a validator", + zap.Stringer("nodeID", nodeID), + zap.Stringer("txID", txID), + ) + } +} + +// OnValidatorRemoved removes [validatorID] from the set of validators that can +// be gossiped about. +func (g *GossipTrackerCallback) OnValidatorRemoved(nodeID ids.NodeID, _ uint64) { + if !g.GossipTracker.RemoveValidator(nodeID) { + g.Log.Error("failed to remove a validator", + zap.Stringer("nodeID", nodeID), + ) + } +} + +// OnValidatorWeightChanged does nothing because PeerList gossip doesn't care +// about validator weights. +func (*GossipTrackerCallback) OnValidatorWeightChanged(ids.NodeID, uint64, uint64) {} diff --git a/avalanchego/network/peer/gossip_tracker_metrics.go b/avalanchego/network/peer/gossip_tracker_metrics.go new file mode 100644 index 00000000..be167ebf --- /dev/null +++ b/avalanchego/network/peer/gossip_tracker_metrics.go @@ -0,0 +1,42 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package peer + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +type gossipTrackerMetrics struct { + trackedPeersSize prometheus.Gauge + validatorsSize prometheus.Gauge +} + +func newGossipTrackerMetrics(registerer prometheus.Registerer, namespace string) (gossipTrackerMetrics, error) { + m := gossipTrackerMetrics{ + trackedPeersSize: prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "tracked_peers_size", + Help: "amount of peers that are being tracked", + }, + ), + validatorsSize: prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "validators_size", + Help: "number of validators this node is tracking", + }, + ), + } + + errs := wrappers.Errs{} + errs.Add( + registerer.Register(m.trackedPeersSize), + registerer.Register(m.validatorsSize), + ) + + return m, errs.Err +} diff --git a/avalanchego/network/peer/gossip_tracker_test.go b/avalanchego/network/peer/gossip_tracker_test.go new file mode 100644 index 00000000..1bd420c4 --- /dev/null +++ b/avalanchego/network/peer/gossip_tracker_test.go @@ -0,0 +1,620 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package peer + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +var ( + // peers + p1 = ids.GenerateTestNodeID() + p2 = ids.GenerateTestNodeID() + p3 = ids.GenerateTestNodeID() + + // validators + v1 = ValidatorID{ + NodeID: ids.GenerateTestNodeID(), + TxID: ids.GenerateTestID(), + } + v2 = ValidatorID{ + NodeID: ids.GenerateTestNodeID(), + TxID: ids.GenerateTestID(), + } + v3 = ValidatorID{ + NodeID: ids.GenerateTestNodeID(), + TxID: ids.GenerateTestID(), + } +) + +func TestGossipTracker_Contains(t *testing.T) { + tests := []struct { + name string + track []ids.NodeID + contains ids.NodeID + expected bool + }{ + { + name: "empty", + track: []ids.NodeID{}, + contains: p1, + expected: false, + }, + { + name: "populated - does not contain", + track: []ids.NodeID{p1, p2}, + contains: p3, + expected: false, + }, + { + name: "populated - contains", + track: []ids.NodeID{p1, p2, p3}, + contains: p3, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + for _, add := range test.track { + require.True(g.StartTrackingPeer(add)) + } + + require.Equal(test.expected, g.Tracked(test.contains)) + }) + } +} + +func TestGossipTracker_StartTrackingPeer(t *testing.T) { + tests := []struct { + name string + toStartTracking []ids.NodeID + expected []bool + }{ + { + // Tracking new peers always works + name: "unique adds", + toStartTracking: []ids.NodeID{p1, p2, p3}, + expected: []bool{true, true, true}, + }, + { + // We shouldn't be able to track a peer more than once + name: "duplicate adds", + toStartTracking: []ids.NodeID{p1, p1, p1}, + expected: []bool{true, false, false}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + g, err := 
NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + for i, p := range test.toStartTracking { + require.Equal(test.expected[i], g.StartTrackingPeer(p)) + require.True(g.Tracked(p)) + } + }) + } +} + +func TestGossipTracker_StopTrackingPeer(t *testing.T) { + tests := []struct { + name string + toStartTracking []ids.NodeID + expectedStartTracking []bool + toStopTracking []ids.NodeID + expectedStopTracking []bool + }{ + { + // We should be able to stop tracking that we are tracking + name: "stop tracking tracked peers", + toStartTracking: []ids.NodeID{p1, p2, p3}, + toStopTracking: []ids.NodeID{p1, p2, p3}, + expectedStopTracking: []bool{true, true, true}, + }, + { + // We shouldn't be able to stop tracking peers we've stopped tracking + name: "stop tracking twice", + toStartTracking: []ids.NodeID{p1}, + toStopTracking: []ids.NodeID{p1, p1}, + expectedStopTracking: []bool{true, false}, + }, + { + // We shouldn't be able to stop tracking peers we were never tracking + name: "remove non-existent elements", + toStartTracking: []ids.NodeID{}, + expectedStartTracking: []bool{}, + toStopTracking: []ids.NodeID{p1, p2, p3}, + expectedStopTracking: []bool{false, false, false}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + for _, add := range test.toStartTracking { + require.True(g.StartTrackingPeer(add)) + require.True(g.Tracked(add)) + } + + for i, p := range test.toStopTracking { + require.Equal(test.expectedStopTracking[i], g.StopTrackingPeer(p)) + } + }) + } +} + +func TestGossipTracker_AddValidator(t *testing.T) { + type args struct { + validator ValidatorID + } + + tests := []struct { + name string + validators []ValidatorID + args args + expected bool + }{ + { + name: "not present", + validators: []ValidatorID{}, + args: args{validator: v1}, + expected: true, + }, + { + name: "already 
present txID but with different nodeID", + validators: []ValidatorID{v1}, + args: args{validator: ValidatorID{ + NodeID: ids.GenerateTestNodeID(), + TxID: v1.TxID, + }}, + expected: false, + }, + { + name: "already present nodeID but with different txID", + validators: []ValidatorID{v1}, + args: args{validator: ValidatorID{ + NodeID: v1.NodeID, + TxID: ids.GenerateTestID(), + }}, + expected: false, + }, + { + name: "already present validatorID", + validators: []ValidatorID{v1}, + args: args{validator: v1}, + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + for _, v := range test.validators { + require.True(g.AddValidator(v)) + } + + require.Equal(test.expected, g.AddValidator(test.args.validator)) + }) + } +} + +func TestGossipTracker_RemoveValidator(t *testing.T) { + type args struct { + id ids.NodeID + } + + tests := []struct { + name string + validators []ValidatorID + args args + expected bool + }{ + { + name: "not already present", + validators: []ValidatorID{}, + args: args{id: v1.NodeID}, + expected: false, + }, + { + name: "already present", + validators: []ValidatorID{v1}, + args: args{id: v1.NodeID}, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + for _, v := range test.validators { + require.True(g.AddValidator(v)) + } + + require.Equal(test.expected, g.RemoveValidator(test.args.id)) + }) + } +} + +func TestGossipTracker_ResetValidator(t *testing.T) { + type args struct { + id ids.NodeID + } + + tests := []struct { + name string + validators []ValidatorID + args args + expected bool + }{ + { + name: "non-existent validator", + validators: []ValidatorID{}, + args: args{id: v1.NodeID}, + expected: false, + }, + { + 
name: "existing validator", + validators: []ValidatorID{v1}, + args: args{id: v1.NodeID}, + expected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + require.True(g.StartTrackingPeer(p1)) + + for _, v := range test.validators { + require.True(g.AddValidator(v)) + g.AddKnown(p1, []ids.ID{v.TxID}, nil) + + unknown, ok := g.GetUnknown(p1) + require.True(ok) + require.NotContains(unknown, v) + } + + require.Equal(test.expected, g.ResetValidator(test.args.id)) + + for _, v := range test.validators { + unknown, ok := g.GetUnknown(p1) + require.True(ok) + require.Contains(unknown, v) + } + }) + } +} + +func TestGossipTracker_AddKnown(t *testing.T) { + type args struct { + peerID ids.NodeID + txIDs []ids.ID + } + + tests := []struct { + name string + trackedPeers []ids.NodeID + validators []ValidatorID + args args + expectedTxIDs []ids.ID + expectedOk bool + }{ + { + // We should not be able to update an untracked peer + name: "untracked peer - empty", + trackedPeers: []ids.NodeID{}, + validators: []ValidatorID{}, + args: args{peerID: p1, txIDs: []ids.ID{}}, + expectedTxIDs: nil, + expectedOk: false, + }, + { + // We should not be able to update an untracked peer + name: "untracked peer - populated", + trackedPeers: []ids.NodeID{p2, p3}, + validators: []ValidatorID{}, + args: args{peerID: p1, txIDs: []ids.ID{}}, + expectedTxIDs: nil, + expectedOk: false, + }, + { + // We shouldn't be able to look up a peer that isn't tracked + name: "untracked peer - unknown validator", + trackedPeers: []ids.NodeID{}, + validators: []ValidatorID{}, + args: args{peerID: p1, txIDs: []ids.ID{v1.TxID}}, + expectedTxIDs: nil, + expectedOk: false, + }, + { + // We shouldn't fail on a validator that's not registered + name: "tracked peer - unknown validator", + trackedPeers: []ids.NodeID{p1}, + validators: []ValidatorID{}, + args: 
args{peerID: p1, txIDs: []ids.ID{v1.TxID}}, + expectedTxIDs: []ids.ID{}, + expectedOk: true, + }, + { + // We should be able to update a tracked validator + name: "update tracked validator", + trackedPeers: []ids.NodeID{p1, p2, p3}, + validators: []ValidatorID{v1}, + args: args{peerID: p1, txIDs: []ids.ID{v1.TxID}}, + expectedTxIDs: []ids.ID{v1.TxID}, + expectedOk: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + for _, p := range test.trackedPeers { + require.True(g.StartTrackingPeer(p)) + require.True(g.Tracked(p)) + } + + for _, v := range test.validators { + require.True(g.AddValidator(v)) + } + + txIDs, ok := g.AddKnown(test.args.peerID, test.args.txIDs, test.args.txIDs) + require.Equal(test.expectedOk, ok) + require.Equal(test.expectedTxIDs, txIDs) + }) + } +} + +func TestGossipTracker_GetUnknown(t *testing.T) { + tests := []struct { + name string + peerID ids.NodeID + peersToTrack []ids.NodeID + validators []ValidatorID + expectedUnknown []ValidatorID + expectedOk bool + }{ + { + name: "non tracked peer", + peerID: p1, + validators: []ValidatorID{v2}, + peersToTrack: []ids.NodeID{}, + expectedUnknown: nil, + expectedOk: false, + }, + { + name: "only validators", + peerID: p1, + peersToTrack: []ids.NodeID{p1}, + validators: []ValidatorID{v2}, + expectedUnknown: []ValidatorID{v2}, + expectedOk: true, + }, + { + name: "only non-validators", + peerID: p1, + peersToTrack: []ids.NodeID{p1, p2}, + validators: []ValidatorID{}, + expectedUnknown: []ValidatorID{}, + expectedOk: true, + }, + { + name: "validators and non-validators", + peerID: p1, + peersToTrack: []ids.NodeID{p1, p3}, + validators: []ValidatorID{v2}, + expectedUnknown: []ValidatorID{v2}, + expectedOk: true, + }, + { + name: "same as limit", + peerID: p1, + peersToTrack: []ids.NodeID{p1}, + validators: []ValidatorID{v2, v3}, + expectedUnknown: 
[]ValidatorID{v2, v3}, + expectedOk: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + // add our validators + for _, validator := range test.validators { + require.True(g.AddValidator(validator)) + } + + // start tracking our peers + for _, nonValidator := range test.peersToTrack { + require.True(g.StartTrackingPeer(nonValidator)) + require.True(g.Tracked(nonValidator)) + } + + // get the unknown peers for this peer + result, ok := g.GetUnknown(test.peerID) + require.Equal(test.expectedOk, ok) + require.Len(result, len(test.expectedUnknown)) + for _, v := range test.expectedUnknown { + require.Contains(result, v) + } + }) + } +} + +func TestGossipTracker_E2E(t *testing.T) { + require := require.New(t) + + g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + // [v1, v2, v3] are validators + require.True(g.AddValidator(v1)) + require.True(g.AddValidator(v2)) + + // we should get an empty unknown since we're not tracking anything + unknown, ok := g.GetUnknown(p1) + require.False(ok) + require.Nil(unknown) + + // we should get a unknown of [v1, v2] since v1 and v2 are registered + require.True(g.StartTrackingPeer(p1)) + require.True(g.Tracked(p1)) + + // check p1's unknown + unknown, ok = g.GetUnknown(p1) + require.True(ok) + require.Contains(unknown, v1) + require.Contains(unknown, v2) + require.Len(unknown, 2) + + // Check p2's unknown. We should get nothing since we're not tracking it + // yet. 
+ unknown, ok = g.GetUnknown(p2) + require.False(ok) + require.Nil(unknown) + + // Start tracking p2 + require.True(g.StartTrackingPeer(p2)) + + // check p2's unknown + unknown, ok = g.GetUnknown(p2) + require.True(ok) + require.Contains(unknown, v1) + require.Contains(unknown, v2) + require.Len(unknown, 2) + + // p1 now knows about v1, but not v2, so it should see [v2] in its unknown + // p2 still knows nothing, so it should see both + txIDs, ok := g.AddKnown(p1, []ids.ID{v1.TxID}, []ids.ID{v1.TxID}) + require.True(ok) + require.Equal([]ids.ID{v1.TxID}, txIDs) + + // p1 should have an unknown of [v2], since it knows v1 + unknown, ok = g.GetUnknown(p1) + require.True(ok) + require.Contains(unknown, v2) + require.Len(unknown, 1) + + // p2 should have a unknown of [v1, v2], since it knows nothing + unknown, ok = g.GetUnknown(p2) + require.True(ok) + require.Contains(unknown, v1) + require.Contains(unknown, v2) + require.Len(unknown, 2) + + // Add v3 + require.True(g.AddValidator(v3)) + + // track p3, who knows of v1, v2, and v3 + // p1 and p2 still don't know of v3 + require.True(g.StartTrackingPeer(p3)) + + txIDs, ok = g.AddKnown(p3, []ids.ID{v1.TxID, v2.TxID, v3.TxID}, []ids.ID{v1.TxID, v2.TxID, v3.TxID}) + require.True(ok) + require.Equal([]ids.ID{v1.TxID, v2.TxID, v3.TxID}, txIDs) + + // p1 doesn't know about [v2, v3] + unknown, ok = g.GetUnknown(p1) + require.True(ok) + require.Contains(unknown, v2) + require.Contains(unknown, v3) + require.Len(unknown, 2) + + // p2 doesn't know about [v1, v2, v3] + unknown, ok = g.GetUnknown(p2) + require.True(ok) + require.Contains(unknown, v1) + require.Contains(unknown, v2) + require.Contains(unknown, v3) + require.Len(unknown, 3) + + // p3 knows about everyone + unknown, ok = g.GetUnknown(p3) + require.True(ok) + require.Empty(unknown) + + // stop tracking p2 + require.True(g.StopTrackingPeer(p2)) + unknown, ok = g.GetUnknown(p2) + require.False(ok) + require.Nil(unknown) + + // p1 doesn't know about [v2, v3] because v2 is 
still registered as + // a validator + unknown, ok = g.GetUnknown(p1) + require.True(ok) + require.Contains(unknown, v2) + require.Contains(unknown, v3) + require.Len(unknown, 2) + + // Remove p2 from the validator set + require.True(g.RemoveValidator(v2.NodeID)) + + // p1 doesn't know about [v3] since v2 left the validator set + unknown, ok = g.GetUnknown(p1) + require.True(ok) + require.Contains(unknown, v3) + require.Len(unknown, 1) + + // p3 knows about everyone since it learned about v1 and v3 earlier. + unknown, ok = g.GetUnknown(p3) + require.Empty(unknown) + require.True(ok) +} + +func TestGossipTracker_Regression_IncorrectTxIDDeletion(t *testing.T) { + require := require.New(t) + + g, err := NewGossipTracker(prometheus.NewRegistry(), "foobar") + require.NoError(err) + + require.True(g.AddValidator(v1)) + require.True(g.AddValidator(v2)) + + require.True(g.RemoveValidator(v1.NodeID)) + + require.False(g.AddValidator(ValidatorID{ + NodeID: ids.GenerateTestNodeID(), + TxID: v2.TxID, + })) +} diff --git a/avalanchego/network/peer/info.go b/avalanchego/network/peer/info.go index ec48bbb3..45f7a3cd 100644 --- a/avalanchego/network/peer/info.go +++ b/avalanchego/network/peer/info.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -11,12 +11,13 @@ import ( ) type Info struct { - IP string `json:"ip"` - PublicIP string `json:"publicIP,omitempty"` - ID ids.NodeID `json:"nodeID"` - Version string `json:"version"` - LastSent time.Time `json:"lastSent"` - LastReceived time.Time `json:"lastReceived"` - ObservedUptime json.Uint8 `json:"observedUptime"` - TrackedSubnets []ids.ID `json:"trackedSubnets"` + IP string `json:"ip"` + PublicIP string `json:"publicIP,omitempty"` + ID ids.NodeID `json:"nodeID"` + Version string `json:"version"` + LastSent time.Time `json:"lastSent"` + LastReceived time.Time `json:"lastReceived"` + ObservedUptime json.Uint32 `json:"observedUptime"` + ObservedSubnetUptimes map[ids.ID]json.Uint32 `json:"observedSubnetUptimes"` + TrackedSubnets []ids.ID `json:"trackedSubnets"` } diff --git a/avalanchego/network/peer/ip.go b/avalanchego/network/peer/ip.go index f441eaf9..720a1cd8 100644 --- a/avalanchego/network/peer/ip.go +++ b/avalanchego/network/peer/ip.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -17,7 +17,7 @@ import ( // ensure that the most updated IP claim is tracked by peers for a given // validator. type UnsignedIP struct { - IP ips.IPPort + ips.IPPort Timestamp uint64 } @@ -29,8 +29,8 @@ func (ip *UnsignedIP) Sign(signer crypto.Signer) (*SignedIP, error) { crypto.SHA256, ) return &SignedIP{ - IP: *ip, - Signature: sig, + UnsignedIP: *ip, + Signature: sig, }, err } @@ -38,21 +38,21 @@ func (ip *UnsignedIP) bytes() []byte { p := wrappers.Packer{ Bytes: make([]byte, wrappers.IPLen+wrappers.LongLen), } - p.PackIP(ip.IP) + ips.PackIP(&p, ip.IPPort) p.PackLong(ip.Timestamp) return p.Bytes } // SignedIP is a wrapper of an UnsignedIP with the signature from a signer. 
type SignedIP struct { - IP UnsignedIP + UnsignedIP Signature []byte } func (ip *SignedIP) Verify(cert *x509.Certificate) error { return cert.CheckSignature( cert.SignatureAlgorithm, - ip.IP.bytes(), + ip.UnsignedIP.bytes(), ip.Signature, ) } diff --git a/avalanchego/network/ip_signer.go b/avalanchego/network/peer/ip_signer.go similarity index 65% rename from avalanchego/network/ip_signer.go rename to avalanchego/network/peer/ip_signer.go index 4b543f37..b524d346 100644 --- a/avalanchego/network/ip_signer.go +++ b/avalanchego/network/peer/ip_signer.go @@ -1,55 +1,52 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package network +package peer import ( "crypto" "sync" - "github.com/ava-labs/avalanchego/network/peer" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) -// ipSigner will return a signedIP for the current value of our dynamic IP. -type ipSigner struct { +// IPSigner will return a signedIP for the current value of our dynamic IP. +type IPSigner struct { ip ips.DynamicIPPort - clock *mockable.Clock + clock mockable.Clock signer crypto.Signer // Must be held while accessing [signedIP] signedIPLock sync.RWMutex // Note that the values in [*signedIP] are constants and can be inspected // without holding [signedIPLock]. - signedIP *peer.SignedIP + signedIP *SignedIP } -func newIPSigner( +func NewIPSigner( ip ips.DynamicIPPort, - clock *mockable.Clock, signer crypto.Signer, -) *ipSigner { - return &ipSigner{ +) *IPSigner { + return &IPSigner{ ip: ip, - clock: clock, signer: signer, } } -// getSignedIP returns the signedIP of the current value of the provided +// GetSignedIP returns the signedIP of the current value of the provided // dynamicIP. If the dynamicIP hasn't changed since the prior call to -// getSignedIP, then the same [SignedIP] will be returned. 
+// GetSignedIP, then the same [SignedIP] will be returned. // -// It's safe for multiple goroutines to concurrently call getSignedIP. -func (s *ipSigner) getSignedIP() (*peer.SignedIP, error) { +// It's safe for multiple goroutines to concurrently call GetSignedIP. +func (s *IPSigner) GetSignedIP() (*SignedIP, error) { // Optimistically, the IP should already be signed. By grabbing a read lock // here we enable full concurrency of new connections. s.signedIPLock.RLock() signedIP := s.signedIP s.signedIPLock.RUnlock() ip := s.ip.IPPort() - if signedIP != nil && signedIP.IP.IP.Equal(ip) { + if signedIP != nil && signedIP.IPPort.Equal(ip) { return signedIP, nil } @@ -61,13 +58,13 @@ func (s *ipSigner) getSignedIP() (*peer.SignedIP, error) { // same time, we should verify that we are the first thread to attempt to // update it. signedIP = s.signedIP - if signedIP != nil && signedIP.IP.IP.Equal(ip) { + if signedIP != nil && signedIP.IPPort.Equal(ip) { return signedIP, nil } // We should now sign our new IP at the current timestamp. - unsignedIP := peer.UnsignedIP{ - IP: ip, + unsignedIP := UnsignedIP{ + IPPort: ip, Timestamp: s.clock.Unix(), } signedIP, err := unsignedIP.Sign(s.signer) diff --git a/avalanchego/network/ip_signer_test.go b/avalanchego/network/peer/ip_signer_test.go similarity index 50% rename from avalanchego/network/ip_signer_test.go rename to avalanchego/network/peer/ip_signer_test.go index 3872c931..1633c7e6 100644 --- a/avalanchego/network/ip_signer_test.go +++ b/avalanchego/network/peer/ip_signer_test.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package network +package peer import ( "crypto" @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/ips" - "github.com/ava-labs/avalanchego/utils/timer/mockable" ) func TestIPSigner(t *testing.T) { @@ -23,34 +22,34 @@ func TestIPSigner(t *testing.T) { net.IPv6loopback, 0, ) - clock := mockable.Clock{} - clock.Set(time.Unix(10, 0)) tlsCert, err := staking.NewTLSCert() require.NoError(err) key := tlsCert.PrivateKey.(crypto.Signer) - s := newIPSigner(dynIP, &clock, key) + s := NewIPSigner(dynIP, key) - signedIP1, err := s.getSignedIP() + s.clock.Set(time.Unix(10, 0)) + + signedIP1, err := s.GetSignedIP() require.NoError(err) - require.EqualValues(dynIP.IPPort(), signedIP1.IP.IP) - require.EqualValues(10, signedIP1.IP.Timestamp) + require.EqualValues(dynIP.IPPort(), signedIP1.IPPort) + require.EqualValues(10, signedIP1.Timestamp) - clock.Set(time.Unix(11, 0)) + s.clock.Set(time.Unix(11, 0)) - signedIP2, err := s.getSignedIP() + signedIP2, err := s.GetSignedIP() require.NoError(err) - require.EqualValues(dynIP.IPPort(), signedIP2.IP.IP) - require.EqualValues(10, signedIP2.IP.Timestamp) + require.EqualValues(dynIP.IPPort(), signedIP2.IPPort) + require.EqualValues(10, signedIP2.Timestamp) require.EqualValues(signedIP1.Signature, signedIP2.Signature) dynIP.SetIP(net.IPv4(1, 2, 3, 4)) - signedIP3, err := s.getSignedIP() + signedIP3, err := s.GetSignedIP() require.NoError(err) - require.EqualValues(dynIP.IPPort(), signedIP3.IP.IP) - require.EqualValues(11, signedIP3.IP.Timestamp) + require.EqualValues(dynIP.IPPort(), signedIP3.IPPort) + require.EqualValues(11, signedIP3.Timestamp) require.NotEqualValues(signedIP2.Signature, signedIP3.Signature) } diff --git a/avalanchego/network/peer/message_queue.go b/avalanchego/network/peer/message_queue.go index c8755128..b9d38996 100644 --- a/avalanchego/network/peer/message_queue.go +++ b/avalanchego/network/peer/message_queue.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava 
Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -19,8 +19,8 @@ import ( const initialQueueSize = 64 var ( - _ MessageQueue = &throttledMessageQueue{} - _ MessageQueue = &blockingMessageQueue{} + _ MessageQueue = (*throttledMessageQueue)(nil) + _ MessageQueue = (*blockingMessageQueue)(nil) ) type SendFailedCallback interface { @@ -29,7 +29,9 @@ type SendFailedCallback interface { type SendFailedFunc func(message.OutboundMessage) -func (f SendFailedFunc) SendFailed(msg message.OutboundMessage) { f(msg) } +func (f SendFailedFunc) SendFailed(msg message.OutboundMessage) { + f(msg) +} type MessageQueue interface { // Push attempts to add the message to the queue. If the context is @@ -67,7 +69,7 @@ type throttledMessageQueue struct { // queue of the messages // [cond.L] must be held while accessing [queue]. - queue buffer.UnboundedQueue[message.OutboundMessage] + queue buffer.Deque[message.OutboundMessage] } func NewThrottledMessageQueue( @@ -82,7 +84,7 @@ func NewThrottledMessageQueue( log: log, outboundMsgThrottler: outboundMsgThrottler, cond: sync.NewCond(&sync.Mutex{}), - queue: buffer.NewUnboundedSliceQueue[message.OutboundMessage](initialQueueSize), + queue: buffer.NewUnboundedDeque[message.OutboundMessage](initialQueueSize), } } @@ -129,7 +131,7 @@ func (q *throttledMessageQueue) Push(ctx context.Context, msg message.OutboundMe return false } - q.queue.Enqueue(msg) + q.queue.PushRight(msg) q.cond.Signal() return true } @@ -166,7 +168,7 @@ func (q *throttledMessageQueue) PopNow() (message.OutboundMessage, bool) { } func (q *throttledMessageQueue) pop() message.OutboundMessage { - msg, _ := q.queue.Dequeue() + msg, _ := q.queue.PopLeft() q.outboundMsgThrottler.Release(msg, q.id) return msg @@ -183,7 +185,7 @@ func (q *throttledMessageQueue) Close() { q.closed = true for q.queue.Len() > 0 { - msg, _ := q.queue.Dequeue() + msg, _ := q.queue.PopLeft() 
q.outboundMsgThrottler.Release(msg, q.id) q.onFailed.SendFailed(msg) } diff --git a/avalanchego/network/peer/message_queue_test.go b/avalanchego/network/peer/message_queue_test.go index 1db29108..7c7f4d3a 100644 --- a/avalanchego/network/peer/message_queue_test.go +++ b/avalanchego/network/peer/message_queue_test.go @@ -1,64 +1,88 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer import ( "context" - "fmt" "testing" "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/utils/logging" ) -func TestBlockingMessageQueue(t *testing.T) { +func TestMessageQueue(t *testing.T) { require := require.New(t) - for _, useProto := range []bool{false, true} { - t.Run(fmt.Sprintf("use proto buf message creator %v", useProto), func(tt *testing.T) { - q := NewBlockingMessageQueue( - SendFailedFunc(func(msg message.OutboundMessage) { - t.Fail() - }), - logging.NoLog{}, - 0, - ) + expectFail := false + q := NewBlockingMessageQueue( + SendFailedFunc(func(msg message.OutboundMessage) { + require.True(expectFail) + }), + logging.NoLog{}, + 0, + ) - mc, mcProto := newMessageCreator(tt) + mc := newMessageCreator(t) + msgs := []message.OutboundMessage{} + numToSend := 10 - var ( - msg message.OutboundMessage - err error - ) - if useProto { - msg, err = mcProto.Ping() - } else { - msg, err = mc.Ping() - } - require.NoError(err) + // Assert that the messages are popped in the same order they were pushed + for i := 0; i < numToSend; i++ { + testID := ids.GenerateTestID() + testID2 := ids.GenerateTestID() + m, err := mc.Pong(uint32(i), + []*p2p.SubnetUptime{ + {SubnetId: testID[:], Uptime: uint32(i)}, + {SubnetId: testID2[:], Uptime: uint32(i)}, + }) + require.NoError(err) + msgs = append(msgs, m) + } + + go 
func() { + for i := 0; i < numToSend; i++ { + q.Push(context.Background(), msgs[i]) + } + }() - numToSend := 10 - go func() { - for i := 0; i < numToSend; i++ { - q.Push(context.Background(), msg) - } - }() + for i := 0; i < numToSend; i++ { + msg, ok := q.Pop() + require.True(ok) + require.Equal(msgs[i], msg) + } - for i := 0; i < numToSend; i++ { - _, ok := q.Pop() - require.True(ok) - } + // Assert that PopNow returns false when the queue is empty + _, ok := q.PopNow() + require.False(ok) - _, ok := q.PopNow() - require.False(ok) + // Assert that Push returns false when the context is canceled + ctx, cancel := context.WithCancel(context.Background()) + cancel() + expectFail = true + done := make(chan struct{}) + go func() { + ok := q.Push(ctx, msgs[0]) + require.False(ok) + close(done) + }() + <-done - q.Close() + // Assert that Push returns false when the queue is closed + done = make(chan struct{}) + go func() { + ok := q.Push(context.Background(), msgs[0]) + require.False(ok) + close(done) + }() + q.Close() + <-done - _, ok = q.Pop() - require.False(ok) - }) - } + // Assert Pop returns false when the queue is closed + _, ok = q.Pop() + require.False(ok) } diff --git a/avalanchego/network/peer/metrics.go b/avalanchego/network/peer/metrics.go index 6eef6f23..ea5290ae 100644 --- a/avalanchego/network/peer/metrics.go +++ b/avalanchego/network/peer/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -62,33 +62,27 @@ func NewMessageMetrics( metrics.Register(msg.SentBytes), ) - if op.Compressible() { - msg.SavedReceivedBytes = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_compression_saved_received_bytes", op), - fmt.Sprintf("bytes saved (not received) due to compression of %s messages", op), - metrics, - errs, - ) - msg.SavedSentBytes = metric.NewAveragerWithErrs( - namespace, - fmt.Sprintf("%s_compression_saved_sent_bytes", op), - fmt.Sprintf("bytes saved (not sent) due to compression of %s messages", op), - metrics, - errs, - ) - } else { - msg.SavedReceivedBytes = metric.NewNoAverager() - msg.SavedSentBytes = metric.NewNoAverager() - } + msg.SavedReceivedBytes = metric.NewAveragerWithErrs( + namespace, + fmt.Sprintf("%s_compression_saved_received_bytes", op), + fmt.Sprintf("bytes saved (not received) due to compression of %s messages", op), + metrics, + errs, + ) + msg.SavedSentBytes = metric.NewAveragerWithErrs( + namespace, + fmt.Sprintf("%s_compression_saved_sent_bytes", op), + fmt.Sprintf("bytes saved (not sent) due to compression of %s messages", op), + metrics, + errs, + ) return msg } type Metrics struct { - Log logging.Logger - FailedToParse prometheus.Counter - NumUselessPeerListBytes prometheus.Counter - MessageMetrics map[message.Op]*MessageMetrics + Log logging.Logger + FailedToParse prometheus.Counter + MessageMetrics map[message.Op]*MessageMetrics } func NewMetrics( @@ -103,18 +97,12 @@ func NewMetrics( Name: "msgs_failed_to_parse", Help: "Number of messages that could not be parsed or were invalidly formed", }), - NumUselessPeerListBytes: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "num_useless_peerlist_bytes", - Help: "Amount of useless bytes (i.e. 
information about nodes we already knew/don't want to connect to) received in PeerList messages", - }), MessageMetrics: make(map[message.Op]*MessageMetrics, len(message.ExternalOps)), } errs := wrappers.Errs{} errs.Add( registerer.Register(m.FailedToParse), - registerer.Register(m.NumUselessPeerListBytes), ) for _, op := range message.ExternalOps { m.MessageMetrics[op] = NewMessageMetrics(op, namespace, registerer, &errs) @@ -122,8 +110,7 @@ func NewMetrics( return m, errs.Err } -// Sent updates the metrics for having sent [msg] and removes a reference from -// the [msg]. +// Sent updates the metrics for having sent [msg]. func (m *Metrics) Sent(msg message.OutboundMessage) { op := msg.Op() msgMetrics := m.MessageMetrics[op] @@ -132,7 +119,6 @@ func (m *Metrics) Sent(msg message.OutboundMessage) { "unknown message being sent", zap.Stringer("messageOp", op), ) - msg.DecRef() return } msgMetrics.NumSent.Inc() @@ -141,7 +127,6 @@ func (m *Metrics) Sent(msg message.OutboundMessage) { if saved := msg.BytesSavedCompression(); saved != 0 { msgMetrics.SavedSentBytes.Observe(float64(saved)) } - msg.DecRef() } func (m *Metrics) MultipleSendsFailed(op message.Op, count int) { @@ -157,8 +142,7 @@ func (m *Metrics) MultipleSendsFailed(op message.Op, count int) { msgMetrics.NumFailed.Add(float64(count)) } -// SendFailed updates the metrics for having failed to send [msg] and removes a -// reference from the [msg]. +// SendFailed updates the metrics for having failed to send [msg]. 
func (m *Metrics) SendFailed(msg message.OutboundMessage) { op := msg.Op() msgMetrics := m.MessageMetrics[op] @@ -167,11 +151,9 @@ func (m *Metrics) SendFailed(msg message.OutboundMessage) { "unknown message failed to be sent", zap.Stringer("messageOp", op), ) - msg.DecRef() return } msgMetrics.NumFailed.Inc() - msg.DecRef() } func (m *Metrics) Received(msg message.InboundMessage, msgLen uint32) { diff --git a/avalanchego/network/peer/mock_gossip_tracker.go b/avalanchego/network/peer/mock_gossip_tracker.go new file mode 100644 index 00000000..9ab60bcc --- /dev/null +++ b/avalanchego/network/peer/mock_gossip_tracker.go @@ -0,0 +1,167 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/network/peer (interfaces: GossipTracker) + +// Package peer is a generated GoMock package. +package peer + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + gomock "github.com/golang/mock/gomock" +) + +// MockGossipTracker is a mock of GossipTracker interface. +type MockGossipTracker struct { + ctrl *gomock.Controller + recorder *MockGossipTrackerMockRecorder +} + +// MockGossipTrackerMockRecorder is the mock recorder for MockGossipTracker. +type MockGossipTrackerMockRecorder struct { + mock *MockGossipTracker +} + +// NewMockGossipTracker creates a new mock instance. +func NewMockGossipTracker(ctrl *gomock.Controller) *MockGossipTracker { + mock := &MockGossipTracker{ctrl: ctrl} + mock.recorder = &MockGossipTrackerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockGossipTracker) EXPECT() *MockGossipTrackerMockRecorder { + return m.recorder +} + +// AddKnown mocks base method. 
+func (m *MockGossipTracker) AddKnown(arg0 ids.NodeID, arg1, arg2 []ids.ID) ([]ids.ID, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddKnown", arg0, arg1, arg2) + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// AddKnown indicates an expected call of AddKnown. +func (mr *MockGossipTrackerMockRecorder) AddKnown(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddKnown", reflect.TypeOf((*MockGossipTracker)(nil).AddKnown), arg0, arg1, arg2) +} + +// AddValidator mocks base method. +func (m *MockGossipTracker) AddValidator(arg0 ValidatorID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddValidator", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// AddValidator indicates an expected call of AddValidator. +func (mr *MockGossipTrackerMockRecorder) AddValidator(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddValidator", reflect.TypeOf((*MockGossipTracker)(nil).AddValidator), arg0) +} + +// GetNodeID mocks base method. +func (m *MockGossipTracker) GetNodeID(arg0 ids.ID) (ids.NodeID, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNodeID", arg0) + ret0, _ := ret[0].(ids.NodeID) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetNodeID indicates an expected call of GetNodeID. +func (mr *MockGossipTrackerMockRecorder) GetNodeID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeID", reflect.TypeOf((*MockGossipTracker)(nil).GetNodeID), arg0) +} + +// GetUnknown mocks base method. +func (m *MockGossipTracker) GetUnknown(arg0 ids.NodeID) ([]ValidatorID, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUnknown", arg0) + ret0, _ := ret[0].([]ValidatorID) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetUnknown indicates an expected call of GetUnknown. 
+func (mr *MockGossipTrackerMockRecorder) GetUnknown(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnknown", reflect.TypeOf((*MockGossipTracker)(nil).GetUnknown), arg0) +} + +// RemoveValidator mocks base method. +func (m *MockGossipTracker) RemoveValidator(arg0 ids.NodeID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveValidator", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// RemoveValidator indicates an expected call of RemoveValidator. +func (mr *MockGossipTrackerMockRecorder) RemoveValidator(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveValidator", reflect.TypeOf((*MockGossipTracker)(nil).RemoveValidator), arg0) +} + +// ResetValidator mocks base method. +func (m *MockGossipTracker) ResetValidator(arg0 ids.NodeID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ResetValidator", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// ResetValidator indicates an expected call of ResetValidator. +func (mr *MockGossipTrackerMockRecorder) ResetValidator(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetValidator", reflect.TypeOf((*MockGossipTracker)(nil).ResetValidator), arg0) +} + +// StartTrackingPeer mocks base method. +func (m *MockGossipTracker) StartTrackingPeer(arg0 ids.NodeID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartTrackingPeer", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// StartTrackingPeer indicates an expected call of StartTrackingPeer. +func (mr *MockGossipTrackerMockRecorder) StartTrackingPeer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTrackingPeer", reflect.TypeOf((*MockGossipTracker)(nil).StartTrackingPeer), arg0) +} + +// StopTrackingPeer mocks base method. 
+func (m *MockGossipTracker) StopTrackingPeer(arg0 ids.NodeID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StopTrackingPeer", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// StopTrackingPeer indicates an expected call of StopTrackingPeer. +func (mr *MockGossipTrackerMockRecorder) StopTrackingPeer(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopTrackingPeer", reflect.TypeOf((*MockGossipTracker)(nil).StopTrackingPeer), arg0) +} + +// Tracked mocks base method. +func (m *MockGossipTracker) Tracked(arg0 ids.NodeID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Tracked", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Tracked indicates an expected call of Tracked. +func (mr *MockGossipTrackerMockRecorder) Tracked(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tracked", reflect.TypeOf((*MockGossipTracker)(nil).Tracked), arg0) +} diff --git a/avalanchego/network/peer/msg_length.go b/avalanchego/network/peer/msg_length.go index 3d65b382..2cbcf5cd 100644 --- a/avalanchego/network/peer/msg_length.go +++ b/avalanchego/network/peer/msg_length.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -12,27 +12,35 @@ import ( ) var ( - errInvalidMaxMessageLength = errors.New("invalid maximum message length") - errInvalidMessageLengthBytes = errors.New("invalid message length bytes") - errMaxMessageLengthExceeded = errors.New("maximum message length exceeded") + errInvalidMaxMessageLength = errors.New("invalid maximum message length") + errInvalidMessageLength = errors.New("invalid message length") + errMaxMessageLengthExceeded = errors.New("maximum message length exceeded") ) +// Used to mask the most significant bit to indicate that the message format +// uses protocol buffers. const bitmaskCodec = uint32(1 << 31) // Assumes the specified [msgLen] will never >= 1<<31. -func writeMsgLen(msgLen uint32, isProto bool, maxMsgLen uint32) ([wrappers.IntLen]byte, error) { +func writeMsgLen(msgLen uint32, maxMsgLen uint32) ([wrappers.IntLen]byte, error) { if maxMsgLen >= bitmaskCodec { - return [wrappers.IntLen]byte{}, fmt.Errorf("%w; maximum message length must be <%d to be able to embed codec information at most significant bit", errInvalidMaxMessageLength, bitmaskCodec) + return [wrappers.IntLen]byte{}, fmt.Errorf( + "%w; maximum message length must be <%d to be able to embed codec information at most significant bit", + errInvalidMaxMessageLength, + bitmaskCodec, + ) } if msgLen > maxMsgLen { return [wrappers.IntLen]byte{}, fmt.Errorf("%w; the message length %d exceeds the specified limit %d", errMaxMessageLengthExceeded, msgLen, maxMsgLen) } x := msgLen - if isProto { - // mask most significant bit to denote it's using proto - x |= bitmaskCodec - } + + // Mask the most significant bit to denote it's using proto. This bit isn't + // read anymore, because all the messages use proto. However, it is set for + // backwards compatibility. + // TODO: Once the v1.10 is activated, this mask should be removed. 
+ x |= bitmaskCodec b := [wrappers.IntLen]byte{} binary.BigEndian.PutUint32(b[:], x) @@ -41,23 +49,38 @@ func writeMsgLen(msgLen uint32, isProto bool, maxMsgLen uint32) ([wrappers.IntLe } // Assumes the read [msgLen] will never >= 1<<31. -func readMsgLen(b []byte, maxMsgLen uint32) (uint32, bool, error) { +func readMsgLen(b []byte, maxMsgLen uint32) (uint32, error) { + if maxMsgLen >= bitmaskCodec { + return 0, fmt.Errorf( + "%w; maximum message length must be <%d to be able to embed codec information at most significant bit", + errInvalidMaxMessageLength, + bitmaskCodec, + ) + } if len(b) != wrappers.IntLen { - return 0, false, fmt.Errorf("%w; readMsgLen only supports 4-byte (got %d bytes)", errInvalidMessageLengthBytes, len(b)) + return 0, fmt.Errorf( + "%w; readMsgLen only supports 4 bytes (got %d bytes)", + errInvalidMessageLength, + len(b), + ) } // parse the message length msgLen := binary.BigEndian.Uint32(b) - // handle proto by reading most significant bit - isProto := msgLen&bitmaskCodec != 0 - - // equivalent to "^= iff isProto=true" + // Because we always use proto messages, there's no need to check the most + // significant bit to inspect the message format. So, we just zero the proto + // flag. msgLen &^= bitmaskCodec if msgLen > maxMsgLen { - return 0, false, fmt.Errorf("%w; the message length %d exceeds the specified limit %d", errMaxMessageLengthExceeded, msgLen, maxMsgLen) + return 0, fmt.Errorf( + "%w; the message length %d exceeds the specified limit %d", + errMaxMessageLengthExceeded, + msgLen, + maxMsgLen, + ) } - return msgLen, isProto, nil + return msgLen, nil } diff --git a/avalanchego/network/peer/msg_length_test.go b/avalanchego/network/peer/msg_length_test.go index ad5c6467..9d7a3399 100644 --- a/avalanchego/network/peer/msg_length_test.go +++ b/avalanchego/network/peer/msg_length_test.go @@ -1,90 +1,156 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package peer import ( - "errors" "math" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/utils/constants" ) -func TestMsgLen(t *testing.T) { +func TestWriteMsgLen(t *testing.T) { + require := require.New(t) + tt := []struct { - msgLen uint32 - msgLimit uint32 - isProto bool - expectedWriteErr error - expectedReadErr error + msgLen uint32 + msgLimit uint32 + expectedErr error }{ { - msgLen: math.MaxUint32, - msgLimit: math.MaxUint32, - isProto: false, - expectedWriteErr: errInvalidMaxMessageLength, - expectedReadErr: nil, + msgLen: math.MaxUint32, + msgLimit: math.MaxUint32, + expectedErr: errInvalidMaxMessageLength, }, { - msgLen: 1 << 31, - msgLimit: 1 << 31, - isProto: false, - expectedWriteErr: errInvalidMaxMessageLength, - expectedReadErr: nil, + msgLen: bitmaskCodec, + msgLimit: bitmaskCodec, + expectedErr: errInvalidMaxMessageLength, }, { - msgLen: constants.DefaultMaxMessageSize, - msgLimit: constants.DefaultMaxMessageSize, - isProto: false, - expectedWriteErr: nil, - expectedReadErr: nil, + msgLen: bitmaskCodec - 1, + msgLimit: bitmaskCodec - 1, + expectedErr: nil, }, { - msgLen: constants.DefaultMaxMessageSize, - msgLimit: constants.DefaultMaxMessageSize, - isProto: true, - expectedWriteErr: nil, - expectedReadErr: nil, + msgLen: constants.DefaultMaxMessageSize, + msgLimit: constants.DefaultMaxMessageSize, + expectedErr: nil, }, { - msgLen: 1, - msgLimit: constants.DefaultMaxMessageSize, - isProto: false, - expectedWriteErr: nil, - expectedReadErr: nil, + msgLen: 1, + msgLimit: constants.DefaultMaxMessageSize, + expectedErr: nil, }, { - msgLen: 1, - msgLimit: constants.DefaultMaxMessageSize, - isProto: true, - expectedWriteErr: nil, - expectedReadErr: nil, + msgLen: constants.DefaultMaxMessageSize, + msgLimit: 1, + expectedErr: errMaxMessageLengthExceeded, }, } - for i, tv := range tt { - msgLenBytes, werr := writeMsgLen(tv.msgLen, tv.isProto, tv.msgLimit) - if 
!errors.Is(werr, tv.expectedWriteErr) { - t.Fatalf("#%d: unexpected writeMsgLen error %v, expected %v", i, werr, tv.expectedWriteErr) - } - if tv.expectedWriteErr != nil { + for _, tv := range tt { + msgLenBytes, err := writeMsgLen(tv.msgLen, tv.msgLimit) + require.ErrorIs(err, tv.expectedErr) + if tv.expectedErr != nil { continue } - msgLen, isProto, rerr := readMsgLen(msgLenBytes[:], tv.msgLimit) - if !errors.Is(rerr, tv.expectedReadErr) { - t.Fatalf("#%d: unexpected readMsgLen error %v, expected %v", i, rerr, tv.expectedReadErr) - } - if tv.expectedReadErr != nil { + msgLen, err := readMsgLen(msgLenBytes[:], tv.msgLimit) + require.NoError(err) + require.Equal(tv.msgLen, msgLen) + } +} + +func TestReadMsgLen(t *testing.T) { + require := require.New(t) + + tt := []struct { + msgLenBytes []byte + msgLimit uint32 + expectedErr error + expectedMsgLen uint32 + }{ + { + msgLenBytes: []byte{0xFF, 0xFF, 0xFF, 0xFF}, + msgLimit: math.MaxUint32, + expectedErr: errInvalidMaxMessageLength, + expectedMsgLen: 0, + }, + { + msgLenBytes: []byte{0b11111111, 0xFF}, + msgLimit: math.MaxInt32, + expectedErr: errInvalidMessageLength, + expectedMsgLen: 0, + }, + { + msgLenBytes: []byte{0b11111111, 0xFF, 0xFF, 0xFF}, + msgLimit: constants.DefaultMaxMessageSize, + expectedErr: errMaxMessageLengthExceeded, + expectedMsgLen: 0, + }, + { + msgLenBytes: []byte{0b11111111, 0xFF, 0xFF, 0xFF}, + msgLimit: math.MaxInt32, + expectedErr: nil, + expectedMsgLen: math.MaxInt32, + }, + { + msgLenBytes: []byte{0b10000000, 0x00, 0x00, 0x01}, + msgLimit: math.MaxInt32, + expectedErr: nil, + expectedMsgLen: 1, + }, + { + msgLenBytes: []byte{0b10000000, 0x00, 0x00, 0x01}, + msgLimit: 1, + expectedErr: nil, + expectedMsgLen: 1, + }, + } + for _, tv := range tt { + msgLen, err := readMsgLen(tv.msgLenBytes, tv.msgLimit) + require.ErrorIs(err, tv.expectedErr) + if tv.expectedErr != nil { continue } - t.Logf("#%d: msgLenBytes for %d (isProto %v): %08b\n", i, tv.msgLen, tv.isProto, msgLenBytes) + 
require.Equal(tv.expectedMsgLen, msgLen) - if msgLen != tv.msgLen { - t.Fatalf("#%d: unexpected msg length %v, expected %v", i, msgLen, tv.msgLen) - } - if isProto != tv.isProto { - t.Fatalf("#%d: unexpected isProto %v, expected %v", i, isProto, tv.isProto) - } + msgLenBytes, err := writeMsgLen(msgLen, tv.msgLimit) + require.NoError(err) + require.Equal(tv.msgLenBytes, msgLenBytes[:]) + } +} + +func TestBackwardsCompatibleReadMsgLen(t *testing.T) { + require := require.New(t) + + tt := []struct { + msgLenBytes []byte + msgLimit uint32 + expectedMsgLen uint32 + }{ + { + msgLenBytes: []byte{0b01111111, 0xFF, 0xFF, 0xFF}, + msgLimit: math.MaxInt32, + expectedMsgLen: math.MaxInt32, + }, + { + msgLenBytes: []byte{0b00000000, 0x00, 0x00, 0x01}, + msgLimit: math.MaxInt32, + expectedMsgLen: 1, + }, + { + msgLenBytes: []byte{0b00000000, 0x00, 0x00, 0x01}, + msgLimit: 1, + expectedMsgLen: 1, + }, + } + for _, tv := range tt { + msgLen, err := readMsgLen(tv.msgLenBytes, tv.msgLimit) + require.NoError(err) + require.Equal(tv.expectedMsgLen, msgLen) } } diff --git a/avalanchego/network/peer/network.go b/avalanchego/network/peer/network.go index b38a4827..fc136f0b 100644 --- a/avalanchego/network/peer/network.go +++ b/avalanchego/network/peer/network.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/utils/ips" ) @@ -13,35 +13,28 @@ import ( // connected p2p network. type Network interface { // Connected is called by the peer once the handshake is finished. - Connected(ids.NodeID) + Connected(peerID ids.NodeID) // AllowConnection enables the network is signal to the peer that its // connection is no longer desired and should be terminated. 
- AllowConnection(ids.NodeID) bool + AllowConnection(peerID ids.NodeID) bool // Track allows the peer to notify the network of a potential new peer to - // connect to. + // connect to, given the [ips] of the peers it sent us during the peer + // handshake. // - // Returns false if this call was not "useful". That is, we were already - // connected to this node, we already had this tracking information, the - // signature is invalid or we don't want to connect. - Track(ips.ClaimedIPPort) bool + // Returns which IPs should not be gossipped to this node again. + Track(peerID ids.NodeID, ips []*ips.ClaimedIPPort) ([]*p2p.PeerAck, error) + + // MarkTracked stops sending gossip about [ips] to [peerID]. + MarkTracked(peerID ids.NodeID, ips []*p2p.PeerAck) error // Disconnected is called when the peer finishes shutting down. It is not // guaranteed that [Connected] was called for the provided peer. However, it // is guaranteed that [Connected] will not be called after [Disconnected] // for a given [Peer] object. - Disconnected(ids.NodeID) - - // Version provides the peer with the Version message to send to the peer - // during the handshake. - Version() (message.OutboundMessage, error) - - // Peers provides the peer with the PeerList message to send to the peer - // during the handshake. - Peers() (message.OutboundMessage, error) + Disconnected(peerID ids.NodeID) - // Pong provides the peer with a Pong message to send to the peer in - // response to a Ping message. - Pong(ids.NodeID) (message.OutboundMessage, error) + // Peers returns peers that [peerID] might not know about. + Peers(peerID ids.NodeID) ([]ips.ClaimedIPPort, error) } diff --git a/avalanchego/network/peer/peer.go b/avalanchego/network/peer/peer.go index 9a567468..dfe67084 100644 --- a/avalanchego/network/peer/peer.go +++ b/avalanchego/network/peer/peer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package peer @@ -19,10 +19,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" ) @@ -30,7 +32,7 @@ import ( var ( errClosed = errors.New("closed") - _ Peer = &peer{} + _ Peer = (*peer)(nil) ) // Peer encapsulates all of the functionality required to send and receive @@ -72,18 +74,23 @@ type Peer interface { // TrackedSubnets returns the subnets this peer is running. It should only // be called after [Ready] returns true. - TrackedSubnets() ids.Set + TrackedSubnets() set.Set[ids.ID] - // ObservedUptime returns the local node's uptime according to the peer. The - // value ranges from [0, 100]. It should only be called after [Ready] - // returns true. - ObservedUptime() uint8 + // ObservedUptime returns the local node's subnet uptime according to the + // peer. The value ranges from [0, 100]. It should only be called after + // [Ready] returns true. + ObservedUptime(subnetID ids.ID) (uint32, bool) // Send attempts to send [msg] to the peer. The peer takes ownership of // [msg] for reference counting. This returns false if the message is // guaranteed not to be delivered to the peer. Send(ctx context.Context, msg message.OutboundMessage) bool + // StartSendPeerList attempts to send a PeerList message to this peer on + // this peer's gossip routine. It is not guaranteed that a PeerList will be + // sent. + StartSendPeerList() + // StartClose will begin shutting down the peer. It will not block. 
StartClose() @@ -120,23 +127,24 @@ type peer struct { version *version.Application // trackedSubnets is the subset of subnetIDs the peer sent us in the Version // message that we are also tracking. - trackedSubnets ids.Set + trackedSubnets set.Set[ids.ID] - observedUptimeLock sync.RWMutex - // [observedUptimeLock] must be held while accessing [observedUptime] - observedUptime uint8 + observedUptimesLock sync.RWMutex + // [observedUptimesLock] must be held while accessing [observedUptime] + // Subnet ID --> Our uptime for the given subnet as perceived by the peer + observedUptimes map[ids.ID]uint32 // True if this peer has sent us a valid Version message and // is running a compatible version. // Only modified on the connection's reader routine. - gotVersion utils.AtomicBool + gotVersion utils.Atomic[bool] // True if the peer: // * Has sent us a Version message // * Has sent us a PeerList message // * Is running a compatible version // Only modified on the connection's reader routine. - finishedHandshake utils.AtomicBool + finishedHandshake utils.Atomic[bool] // onFinishHandshake is closed when the peer finishes the p2p handshake. onFinishHandshake chan struct{} @@ -155,6 +163,10 @@ type peer struct { // Unix time of the last message sent and received respectively // Must only be accessed atomically lastSent, lastReceived int64 + + // peerListChan signals that we should attempt to send a PeerList to this + // peer + peerListChan chan struct{} } // Start a new peer instance. 
@@ -180,18 +192,24 @@ func Start( onClosingCtx: onClosingCtx, onClosingCtxCancel: onClosingCtxCancel, onClosed: make(chan struct{}), + observedUptimes: make(map[ids.ID]uint32), + peerListChan: make(chan struct{}, 1), } go p.readMessages() go p.writeMessages() - go p.sendPings() + go p.sendNetworkMessages() return p } -func (p *peer) ID() ids.NodeID { return p.id } +func (p *peer) ID() ids.NodeID { + return p.id +} -func (p *peer) Cert() *x509.Certificate { return p.cert } +func (p *peer) Cert() *x509.Certificate { + return p.cert +} func (p *peer) LastSent() time.Time { return time.Unix( @@ -207,7 +225,9 @@ func (p *peer) LastReceived() time.Time { ) } -func (p *peer) Ready() bool { return p.finishedHandshake.GetValue() } +func (p *peer) Ready() bool { + return p.finishedHandshake.Get() +} func (p *peer) AwaitReady(ctx context.Context) error { select { @@ -222,38 +242,70 @@ func (p *peer) AwaitReady(ctx context.Context) error { func (p *peer) Info() Info { publicIPStr := "" - if !p.ip.IP.IP.IsZero() { - publicIPStr = p.ip.IP.IP.String() + if !p.ip.IsZero() { + publicIPStr = p.ip.IPPort.String() + } + + trackedSubnets := p.trackedSubnets.List() + uptimes := make(map[ids.ID]json.Uint32, len(trackedSubnets)) + + for _, subnetID := range trackedSubnets { + uptime, exist := p.ObservedUptime(subnetID) + if !exist { + continue + } + uptimes[subnetID] = json.Uint32(uptime) } + + primaryUptime, exist := p.ObservedUptime(constants.PrimaryNetworkID) + if !exist { + primaryUptime = 0 + } + return Info{ - IP: p.conn.RemoteAddr().String(), - PublicIP: publicIPStr, - ID: p.id, - Version: p.version.String(), - LastSent: time.Unix(atomic.LoadInt64(&p.lastSent), 0), - LastReceived: time.Unix(atomic.LoadInt64(&p.lastReceived), 0), - ObservedUptime: json.Uint8(p.ObservedUptime()), - TrackedSubnets: p.trackedSubnets.List(), + IP: p.conn.RemoteAddr().String(), + PublicIP: publicIPStr, + ID: p.id, + Version: p.version.String(), + LastSent: p.LastSent(), + LastReceived: p.LastReceived(), 
+ ObservedUptime: json.Uint32(primaryUptime), + ObservedSubnetUptimes: uptimes, + TrackedSubnets: trackedSubnets, } } -func (p *peer) IP() *SignedIP { return p.ip } +func (p *peer) IP() *SignedIP { + return p.ip +} + +func (p *peer) Version() *version.Application { + return p.version +} -func (p *peer) Version() *version.Application { return p.version } +func (p *peer) TrackedSubnets() set.Set[ids.ID] { + return p.trackedSubnets +} -func (p *peer) TrackedSubnets() ids.Set { return p.trackedSubnets } +func (p *peer) ObservedUptime(subnetID ids.ID) (uint32, bool) { + p.observedUptimesLock.RLock() + defer p.observedUptimesLock.RUnlock() -func (p *peer) ObservedUptime() uint8 { - p.observedUptimeLock.RLock() - uptime := p.observedUptime - p.observedUptimeLock.RUnlock() - return uptime + uptime, exist := p.observedUptimes[subnetID] + return uptime, exist } func (p *peer) Send(ctx context.Context, msg message.OutboundMessage) bool { return p.messageQueue.Push(ctx, msg) } +func (p *peer) StartSendPeerList() { + select { + case p.peerListChan <- struct{}{}: + default: + } +} + func (p *peer) StartClose() { p.startClosingOnce.Do(func() { if err := p.conn.Close(); err != nil { @@ -331,7 +383,7 @@ func (p *peer) readMessages() { } // Parse the message length - msgLen, isProto, err := readMsgLen(msgLenBytes, constants.DefaultMaxMessageSize) + msgLen, err := readMsgLen(msgLenBytes, constants.DefaultMaxMessageSize) if err != nil { p.Log.Verbo("error reading message length", zap.Stringer("nodeID", p.id), @@ -360,7 +412,7 @@ func (p *peer) readMessages() { ) // If the peer is shutting down, there's no need to read the message. 
- if p.onClosingCtx.Err() != nil { + if err := p.onClosingCtx.Err(); err != nil { onFinishedHandling() return } @@ -400,12 +452,7 @@ func (p *peer) readMessages() { ) // Parse the message - var msg message.InboundMessage - if isProto { - msg, err = p.MessageCreatorWithProto.Parse(msgBytes, p.id, onFinishedHandling) - } else { - msg, err = p.MessageCreator.Parse(msgBytes, p.id, onFinishedHandling) - } + msg, err := p.MessageCreator.Parse(msgBytes, p.id, onFinishedHandling) if err != nil { p.Log.Verbo("failed to parse message", zap.Stringer("nodeID", p.id), @@ -421,9 +468,8 @@ func (p *peer) readMessages() { continue } - now := p.Clock.Time().Unix() - atomic.StoreInt64(&p.Config.LastReceived, now) - atomic.StoreInt64(&p.lastReceived, now) + now := p.Clock.Time() + p.storeLastReceived(now) p.Metrics.Received(msg, msgLen) // Handle the message. Note that when we are done handling this message, @@ -442,10 +488,26 @@ func (p *peer) writeMessages() { writer := bufio.NewWriterSize(p.conn, p.Config.WriteBufferSize) // Make sure that the version is the first message sent - msg, err := p.Network.Version() + mySignedIP, err := p.IPSigner.GetSignedIP() + if err != nil { + p.Log.Error("failed to get signed IP", + zap.Error(err), + ) + return + } + + msg, err := p.MessageCreator.Version( + p.NetworkID, + p.Clock.Unix(), + mySignedIP.IPPort, + p.VersionCompatibility.Version().String(), + mySignedIP.Timestamp, + mySignedIP.Signature, + p.MySubnets.List(), + ) if err != nil { p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.Version), + zap.Stringer("messageOp", message.VersionOp), zap.Error(err), ) return @@ -492,13 +554,11 @@ func (p *peer) writeMessage(writer io.Writer, msg message.OutboundMessage) { zap.Stringer("nodeID", p.id), zap.Error(err), ) - msg.DecRef() return } msgLen := uint32(len(msgBytes)) - isProto := msg.IsProto() - msgLenBytes, err := writeMsgLen(msgLen, isProto, constants.DefaultMaxMessageSize) + msgLenBytes, err := writeMsgLen(msgLen, 
constants.DefaultMaxMessageSize) if err != nil { p.Log.Verbo("error writing message length", zap.Stringer("nodeID", p.id), @@ -514,17 +574,15 @@ func (p *peer) writeMessage(writer io.Writer, msg message.OutboundMessage) { zap.Stringer("nodeID", p.id), zap.Error(err), ) - msg.DecRef() return } - now := p.Clock.Time().Unix() - atomic.StoreInt64(&p.Config.LastSent, now) - atomic.StoreInt64(&p.lastSent, now) + now := p.Clock.Time() + p.storeLastSent(now) p.Metrics.Sent(msg) } -func (p *peer) sendPings() { +func (p *peer) sendNetworkMessages() { sendPingsTicker := time.NewTicker(p.PingFrequency) defer func() { sendPingsTicker.Stop() @@ -535,6 +593,40 @@ func (p *peer) sendPings() { for { select { + case <-p.peerListChan: + peerIPs, err := p.Config.Network.Peers(p.id) + if err != nil { + p.Log.Error("failed to get peers to gossip", + zap.Stringer("nodeID", p.id), + zap.Error(err), + ) + return + } + + if len(peerIPs) == 0 { + p.Log.Verbo( + "skipping peer gossip as there are no unknown peers", + zap.Stringer("nodeID", p.id), + ) + continue + } + + // Bypass throttling is disabled here to follow the non-handshake + // message sending pattern. 
+ msg, err := p.Config.MessageCreator.PeerList(peerIPs, false /*=bypassThrottling*/) + if err != nil { + p.Log.Error("failed to create peer list message", + zap.Stringer("nodeID", p.id), + zap.Error(err), + ) + continue + } + + if !p.Send(p.onClosingCtx, msg) { + p.Log.Debug("failed to send peer list", + zap.Stringer("nodeID", p.id), + ) + } case <-sendPingsTicker.C: if !p.Network.AllowConnection(p.id) { p.Log.Debug("disconnecting from peer", @@ -544,7 +636,7 @@ func (p *peer) sendPings() { return } - if p.finishedHandshake.GetValue() { + if p.finishedHandshake.Get() { if err := p.VersionCompatibility.Compatible(p.version); err != nil { p.Log.Debug("disconnecting from peer", zap.String("reason", "version not compatible"), @@ -556,10 +648,10 @@ func (p *peer) sendPings() { } } - pingMessage, err := p.Config.GetMessageCreator().Ping() + pingMessage, err := p.Config.MessageCreator.Ping() if err != nil { p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.Ping), + zap.Stringer("messageOp", message.PingOp), zap.Error(err), ) return @@ -573,82 +665,134 @@ func (p *peer) sendPings() { } func (p *peer) handle(msg message.InboundMessage) { - op := msg.Op() - switch op { // Network-related message types - case message.Ping: - p.handlePing(msg) + switch m := msg.Message().(type) { // Network-related message types + case *p2p.Ping: + p.handlePing(m) + msg.OnFinishedHandling() + return + case *p2p.Pong: + p.handlePong(m) msg.OnFinishedHandling() return - case message.Pong: - p.handlePong(msg) + case *p2p.Version: + p.handleVersion(m) msg.OnFinishedHandling() return - case message.Version: - p.handleVersion(msg) + case *p2p.PeerList: + p.handlePeerList(m) msg.OnFinishedHandling() return - case message.PeerList: - p.handlePeerList(msg) + case *p2p.PeerListAck: + p.handlePeerListAck(m) msg.OnFinishedHandling() return } - if !p.finishedHandshake.GetValue() { + if !p.finishedHandshake.Get() { p.Log.Debug( "dropping message", zap.String("reason", "handshake 
isn't finished"), zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", op), + zap.Stringer("messageOp", msg.Op()), ) msg.OnFinishedHandling() return } // Consensus and app-level messages - p.Router.HandleInbound(msg) + p.Router.HandleInbound(context.Background(), msg) } -func (p *peer) handlePing(_ message.InboundMessage) { - msg, err := p.Network.Pong(p.id) +func (p *peer) handlePing(*p2p.Ping) { + primaryUptime, err := p.UptimeCalculator.CalculateUptimePercent( + p.id, + constants.PrimaryNetworkID, + ) if err != nil { - p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.Pong), + p.Log.Debug("failed to get peer primary uptime percentage", + zap.Stringer("nodeID", p.id), + zap.Stringer("subnetID", constants.PrimaryNetworkID), zap.Error(err), ) - return + primaryUptime = 0 } - p.Send(p.onClosingCtx, msg) -} -func (p *peer) handlePong(msg message.InboundMessage) { - uptimeIntf, err := msg.Get(message.Uptime) + subnetUptimes := make([]*p2p.SubnetUptime, 0, p.trackedSubnets.Len()) + for subnetID := range p.trackedSubnets { + subnetUptime, err := p.UptimeCalculator.CalculateUptimePercent(p.id, subnetID) + if err != nil { + p.Log.Debug("failed to get peer uptime percentage", + zap.Stringer("nodeID", p.id), + zap.Stringer("subnetID", subnetID), + zap.Error(err), + ) + continue + } + + subnetID := subnetID + subnetUptimes = append(subnetUptimes, &p2p.SubnetUptime{ + SubnetId: subnetID[:], + Uptime: uint32(subnetUptime * 100), + }) + } + + primaryUptimePercent := uint32(primaryUptime * 100) + msg, err := p.MessageCreator.Pong(primaryUptimePercent, subnetUptimes) if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.Pong), - zap.Stringer("field", message.Uptime), + p.Log.Error("failed to create message", + zap.Stringer("messageOp", message.PongOp), zap.Error(err), ) - p.StartClose() return } + p.Send(p.onClosingCtx, msg) +} - uptime := uptimeIntf.(uint8) - if uptime > 100 
{ +func (p *peer) handlePong(msg *p2p.Pong) { + if msg.Uptime > 100 { p.Log.Debug("dropping pong message with invalid uptime", zap.Stringer("nodeID", p.id), - zap.Uint8("uptime", uptime), + zap.Uint32("uptime", msg.Uptime), ) p.StartClose() return } + p.observeUptime(constants.PrimaryNetworkID, msg.Uptime) - p.observedUptimeLock.Lock() - p.observedUptime = uptime // [0, 100] percentage - p.observedUptimeLock.Unlock() + for _, subnetUptime := range msg.SubnetUptimes { + subnetID, err := ids.ToID(subnetUptime.SubnetId) + if err != nil { + p.Log.Debug("dropping pong message with invalid subnetID", + zap.Stringer("nodeID", p.id), + zap.Error(err), + ) + p.StartClose() + return + } + + uptime := subnetUptime.Uptime + if uptime > 100 { + p.Log.Debug("dropping pong message with invalid uptime", + zap.Stringer("nodeID", p.id), + zap.Stringer("subnetID", subnetID), + zap.Uint32("uptime", uptime), + ) + p.StartClose() + return + } + p.observeUptime(subnetID, uptime) + } +} + +// Record that the given peer perceives our uptime for the given [subnetID] +// to be [uptime]. +func (p *peer) observeUptime(subnetID ids.ID, uptime uint32) { + p.observedUptimesLock.Lock() + p.observedUptimes[subnetID] = uptime // [0, 100] percentage + p.observedUptimesLock.Unlock() } -func (p *peer) handleVersion(msg message.InboundMessage) { - if p.gotVersion.GetValue() { +func (p *peer) handleVersion(msg *p2p.Version) { + if p.gotVersion.Get() { // TODO: this should never happen, should we close the connection here? 
p.Log.Verbo("dropping duplicated version message", zap.Stringer("nodeID", p.id), @@ -656,54 +800,28 @@ func (p *peer) handleVersion(msg message.InboundMessage) { return } - peerNetworkIDIntf, err := msg.Get(message.NetworkID) - if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.Version), - zap.Stringer("field", message.NetworkID), - zap.Error(err), - ) - p.StartClose() - return - } - peerNetworkID := peerNetworkIDIntf.(uint32) - - if peerNetworkID != p.NetworkID { + if msg.NetworkId != p.NetworkID { p.Log.Debug("networkID mismatch", zap.Stringer("nodeID", p.id), - zap.Uint32("peerNetworkID", peerNetworkID), + zap.Uint32("peerNetworkID", msg.NetworkId), zap.Uint32("ourNetworkID", p.NetworkID), ) p.StartClose() return } - peerTimeIntf, err := msg.Get(message.MyTime) - if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.Version), - zap.Stringer("field", message.MyTime), - zap.Error(err), - ) - p.StartClose() - return - } - peerTime := peerTimeIntf.(uint64) - myTime := p.Clock.Unix() - if math.Abs(float64(peerTime)-float64(myTime)) > p.MaxClockDifference.Seconds() { + if math.Abs(float64(msg.MyTime)-float64(myTime)) > p.MaxClockDifference.Seconds() { if p.Beacons.Contains(p.id) { p.Log.Warn("beacon reports out of sync time", zap.Stringer("nodeID", p.id), - zap.Uint64("peerTime", peerTime), + zap.Uint64("peerTime", msg.MyTime), zap.Uint64("myTime", myTime), ) } else { p.Log.Debug("peer reports out of sync time", zap.Stringer("nodeID", p.id), - zap.Uint64("peerTime", peerTime), + zap.Uint64("peerTime", msg.MyTime), zap.Uint64("myTime", myTime), ) } @@ -711,20 +829,7 @@ func (p *peer) handleVersion(msg message.InboundMessage) { return } - peerVersionStrIntf, err := msg.Get(message.VersionStr) - if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", 
message.Version), - zap.Stringer("field", message.VersionStr), - zap.Error(err), - ) - p.StartClose() - return - } - peerVersionStr := peerVersionStrIntf.(string) - - peerVersion, err := version.ParseApplication(peerVersionStr) + peerVersion, err := version.ParseApplication(msg.MyVersion) if err != nil { p.Log.Debug("failed to parse peer version", zap.Stringer("nodeID", p.id), @@ -759,46 +864,20 @@ func (p *peer) handleVersion(msg message.InboundMessage) { return } - versionTimeIntf, err := msg.Get(message.VersionTime) - if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.Version), - zap.Stringer("field", message.VersionTime), - zap.Error(err), - ) - p.StartClose() - return - } - versionTime := versionTimeIntf.(uint64) - // Note that it is expected that the [versionTime] can be in the past. We // are just verifying that the claimed signing time isn't too far in the // future here. - if float64(versionTime)-float64(myTime) > p.MaxClockDifference.Seconds() { + if float64(msg.MyVersionTime)-float64(myTime) > p.MaxClockDifference.Seconds() { p.Log.Debug("peer attempting to connect with version timestamp too far in the future", zap.Stringer("nodeID", p.id), - zap.Uint64("versionTime", versionTime), + zap.Uint64("versionTime", msg.MyVersionTime), ) p.StartClose() return } // handle subnet IDs - subnetIDsBytesIntf, err := msg.Get(message.TrackedSubnets) - if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.Version), - zap.Stringer("field", message.TrackedSubnets), - zap.Error(err), - ) - p.StartClose() - return - } - subnetIDsBytes := subnetIDsBytesIntf.([][]byte) - - for _, subnetIDBytes := range subnetIDsBytes { + for _, subnetIDBytes := range msg.TrackedSubnets { subnetID, err := ids.ToID(subnetIDBytes) if err != nil { p.Log.Debug("failed to parse peer's tracked subnets", @@ -814,38 +893,27 @@ func (p *peer) 
handleVersion(msg message.InboundMessage) { } } - peerIPIntf, err := msg.Get(message.IP) - if err != nil { + // "net.IP" type in Golang is 16-byte + if ipLen := len(msg.IpAddr); ipLen != net.IPv6len { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.Version), - zap.Stringer("field", message.IP), - zap.Error(err), - ) - p.StartClose() - return - } - peerIP := peerIPIntf.(ips.IPPort) - - signatureIntf, err := msg.Get(message.SigBytes) - if err != nil { - p.Log.Debug("message with invalid field", - zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.Version), - zap.Stringer("field", message.SigBytes), - zap.Error(err), + zap.Stringer("messageOp", message.VersionOp), + zap.String("field", "IP"), + zap.Int("ipLen", ipLen), ) p.StartClose() return } - signature := signatureIntf.([]byte) p.ip = &SignedIP{ - IP: UnsignedIP{ - IP: peerIP, - Timestamp: versionTime, + UnsignedIP: UnsignedIP{ + IPPort: ips.IPPort{ + IP: msg.IpAddr, + Port: uint16(msg.IpPort), + }, + Timestamp: msg.MyVersionTime, }, - Signature: signature, + Signature: msg.Sig, } if err := p.ip.Verify(p.cert); err != nil { p.Log.Debug("signature verification failed", @@ -856,50 +924,162 @@ func (p *peer) handleVersion(msg message.InboundMessage) { return } - p.gotVersion.SetValue(true) + p.gotVersion.Set(true) - peerlistMsg, err := p.Network.Peers() + peerIPs, err := p.Network.Peers(p.id) if err != nil { - p.Log.Error("failed to create message", - zap.Stringer("messageOp", message.PeerList), + p.Log.Error("failed to get peers to gossip for handshake", + zap.Stringer("nodeID", p.id), zap.Error(err), ) return } - p.Send(p.onClosingCtx, peerlistMsg) + + // We bypass throttling here to ensure that the version message is + // acknowledged timely. 
+ peerListMsg, err := p.Config.MessageCreator.PeerList(peerIPs, true /*=bypassThrottling*/) + if err != nil { + p.Log.Error("failed to create peer list handshake message", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListOp), + zap.Error(err), + ) + return + } + + if !p.Send(p.onClosingCtx, peerListMsg) { + p.Log.Error("failed to send peer list for handshake", + zap.Stringer("nodeID", p.id), + ) + } } -func (p *peer) handlePeerList(msg message.InboundMessage) { - if !p.finishedHandshake.GetValue() { - if !p.gotVersion.GetValue() { +func (p *peer) handlePeerList(msg *p2p.PeerList) { + if !p.finishedHandshake.Get() { + if !p.gotVersion.Get() { return } p.Network.Connected(p.id) - p.finishedHandshake.SetValue(true) + p.finishedHandshake.Set(true) close(p.onFinishHandshake) } - ipsIntf, err := msg.Get(message.Peers) + // the peers this peer told us about + discoveredIPs := make([]*ips.ClaimedIPPort, len(msg.ClaimedIpPorts)) + for i, claimedIPPort := range msg.ClaimedIpPorts { + tlsCert, err := x509.ParseCertificate(claimedIPPort.X509Certificate) + if err != nil { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListOp), + zap.String("field", "Cert"), + zap.Error(err), + ) + p.StartClose() + return + } + + // "net.IP" type in Golang is 16-byte + if ipLen := len(claimedIPPort.IpAddr); ipLen != net.IPv6len { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.VersionOp), + zap.String("field", "IP"), + zap.Int("ipLen", ipLen), + ) + p.StartClose() + return + } + + // TODO: After the next network upgrade, require txIDs to be populated. 
+ var txID ids.ID + if len(claimedIPPort.TxId) > 0 { + txID, err = ids.ToID(claimedIPPort.TxId) + if err != nil { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListOp), + zap.String("field", "txID"), + zap.Error(err), + ) + p.StartClose() + return + } + } + + discoveredIPs[i] = &ips.ClaimedIPPort{ + Cert: tlsCert, + IPPort: ips.IPPort{ + IP: claimedIPPort.IpAddr, + Port: uint16(claimedIPPort.IpPort), + }, + Timestamp: claimedIPPort.Timestamp, + Signature: claimedIPPort.Signature, + TxID: txID, + } + } + + trackedPeers, err := p.Network.Track(p.id, discoveredIPs) if err != nil { p.Log.Debug("message with invalid field", zap.Stringer("nodeID", p.id), - zap.Stringer("messageOp", message.PeerList), - zap.Stringer("field", message.Peers), + zap.Stringer("messageOp", message.PeerListOp), + zap.String("field", "claimedIP"), zap.Error(err), ) p.StartClose() return } - ips := ipsIntf.([]ips.ClaimedIPPort) + if len(trackedPeers) == 0 { + p.Log.Debug("skipping peerlist ack as there were no tracked peers", + zap.Stringer("nodeID", p.id), + ) + return + } - for _, ip := range ips { - if !p.Network.Track(ip) { - p.Metrics.NumUselessPeerListBytes.Add(float64(ip.BytesLen())) - } + peerListAckMsg, err := p.Config.MessageCreator.PeerListAck(trackedPeers) + if err != nil { + p.Log.Error("failed to create message", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListAckOp), + zap.Error(err), + ) + return + } + + if !p.Send(p.onClosingCtx, peerListAckMsg) { + p.Log.Debug("failed to send peer list ack", + zap.Stringer("nodeID", p.id), + ) + } +} + +func (p *peer) handlePeerListAck(msg *p2p.PeerListAck) { + err := p.Network.MarkTracked(p.id, msg.PeerAcks) + if err != nil { + p.Log.Debug("message with invalid field", + zap.Stringer("nodeID", p.id), + zap.Stringer("messageOp", message.PeerListAckOp), + zap.String("field", "txID"), + zap.Error(err), + ) + p.StartClose() } } func (p *peer) 
nextTimeout() time.Time { return p.Clock.Time().Add(p.PongTimeout) } + +func (p *peer) storeLastSent(time time.Time) { + unixTime := time.Unix() + atomic.StoreInt64(&p.Config.LastSent, unixTime) + atomic.StoreInt64(&p.lastSent, unixTime) +} + +func (p *peer) storeLastReceived(time time.Time) { + unixTime := time.Unix() + atomic.StoreInt64(&p.Config.LastReceived, unixTime) + atomic.StoreInt64(&p.lastReceived, unixTime) +} diff --git a/avalanchego/network/peer/peer_test.go b/avalanchego/network/peer/peer_test.go index a76ec0bc..653a0c61 100644 --- a/avalanchego/network/peer/peer_test.go +++ b/avalanchego/network/peer/peer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -7,7 +7,6 @@ import ( "context" "crypto" "crypto/x509" - "fmt" "net" "testing" "time" @@ -19,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/network/throttling" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" @@ -28,6 +28,7 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -44,26 +45,19 @@ type rawTestPeer struct { inboundMsgChan <-chan message.InboundMessage } -func newMessageCreator(t *testing.T) (message.Creator, message.Creator) { +func newMessageCreator(t *testing.T) message.Creator { t.Helper() mc, err := message.NewCreator( + logging.NoLog{}, prometheus.NewRegistry(), "", - true, - 10*time.Second, - ) - require.NoError(t, err) - - mcProto, err := message.NewCreatorWithProto( - 
prometheus.NewRegistry(), - "", - true, + constants.DefaultNetworkCompressionType, 10*time.Second, ) require.NoError(t, err) - return mc, mcProto + return mc } func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) { @@ -81,7 +75,7 @@ func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) { nodeID0 := ids.NodeIDFromCert(tlsCert0.Leaf) nodeID1 := ids.NodeIDFromCert(tlsCert1.Leaf) - mc, mcProto := newMessageCreator(t) + mc := newMessageCreator(t) metrics, err := NewMetrics( logging.NoLog{}, @@ -90,61 +84,48 @@ func makeRawTestPeers(t *testing.T) (*rawTestPeer, *rawTestPeer) { ) require.NoError(err) - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, 10*time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + 10*time.Second, + ) require.NoError(err) + sharedConfig := Config{ - Metrics: metrics, - MessageCreator: mc, - MessageCreatorWithProto: mcProto, - Log: logging.NoLog{}, - InboundMsgThrottler: throttling.NewNoInboundThrottler(), - VersionCompatibility: version.GetCompatibility(constants.LocalID), - MySubnets: ids.Set{}, - Beacons: validators.NewSet(), - NetworkID: constants.LocalID, - PingFrequency: constants.DefaultPingFrequency, - PongTimeout: constants.DefaultPingPongTimeout, - MaxClockDifference: time.Minute, - ResourceTracker: resourceTracker, + Metrics: metrics, + MessageCreator: mc, + Log: logging.NoLog{}, + InboundMsgThrottler: throttling.NewNoInboundThrottler(), + VersionCompatibility: version.GetCompatibility(constants.LocalID), + MySubnets: set.Set[ids.ID]{}, + Beacons: validators.NewSet(), + NetworkID: constants.LocalID, + PingFrequency: constants.DefaultPingFrequency, + PongTimeout: constants.DefaultPingPongTimeout, + MaxClockDifference: time.Minute, + ResourceTracker: resourceTracker, } peerConfig0 := sharedConfig peerConfig1 := sharedConfig - peerConfig0.Network = 
&testNetwork{ - mc: mc, + ip0 := ips.NewDynamicIPPort(net.IPv6loopback, 0) + tls0 := tlsCert0.PrivateKey.(crypto.Signer) + peerConfig0.IPSigner = NewIPSigner(ip0, tls0) - networkID: constants.LocalID, - ip: ips.IPPort{ - IP: net.IPv6loopback, - Port: 0, - }, - version: version.CurrentApp, - signer: tlsCert0.PrivateKey.(crypto.Signer), - subnets: ids.Set{}, - - uptime: 100, - } + peerConfig0.Network = TestNetwork inboundMsgChan0 := make(chan message.InboundMessage) - peerConfig0.Router = router.InboundHandlerFunc(func(msg message.InboundMessage) { + peerConfig0.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { inboundMsgChan0 <- msg }) - peerConfig1.Network = &testNetwork{ - mc: mc, - - networkID: constants.LocalID, - ip: ips.IPPort{ - IP: net.IPv6loopback, - Port: 1, - }, - version: version.CurrentApp, - signer: tlsCert1.PrivateKey.(crypto.Signer), - subnets: ids.Set{}, + ip1 := ips.NewDynamicIPPort(net.IPv6loopback, 1) + tls1 := tlsCert1.PrivateKey.(crypto.Signer) + peerConfig1.IPSigner = NewIPSigner(ip1, tls1) - uptime: 100, - } + peerConfig1.Network = TestNetwork inboundMsgChan1 := make(chan message.InboundMessage) - peerConfig1.Router = router.InboundHandlerFunc(func(msg message.InboundMessage) { + peerConfig1.Router = router.InboundHandlerFunc(func(_ context.Context, msg message.InboundMessage) { inboundMsgChan1 <- msg }) @@ -223,82 +204,72 @@ func makeReadyTestPeers(t *testing.T) (*testPeer, *testPeer) { func TestReady(t *testing.T) { require := require.New(t) - // TODO: once "NewNetwork" handles proto, add "true" - for _, useProto := range []bool{false} { - t.Run(fmt.Sprintf("use proto buf message creator %v", useProto), func(tt *testing.T) { - rawPeer0, rawPeer1 := makeRawTestPeers(tt) + rawPeer0, rawPeer1 := makeRawTestPeers(t) - peer0 := Start( - rawPeer0.config, - rawPeer0.conn, - rawPeer1.cert, - rawPeer1.nodeID, - NewThrottledMessageQueue( - rawPeer0.config.Metrics, - rawPeer1.nodeID, - logging.NoLog{}, - 
throttling.NewNoOutboundThrottler(), - ), - ) - - isReady := peer0.Ready() - require.False(isReady) - - peer1 := Start( - rawPeer1.config, - rawPeer1.conn, - rawPeer0.cert, - rawPeer0.nodeID, - NewThrottledMessageQueue( - rawPeer1.config.Metrics, - rawPeer0.nodeID, - logging.NoLog{}, - throttling.NewNoOutboundThrottler(), - ), - ) - - err := peer0.AwaitReady(context.Background()) - require.NoError(err) - isReady = peer0.Ready() - require.True(isReady) - - err = peer1.AwaitReady(context.Background()) - require.NoError(err) - isReady = peer1.Ready() - require.True(isReady) - - peer0.StartClose() - err = peer0.AwaitClosed(context.Background()) - require.NoError(err) - err = peer1.AwaitClosed(context.Background()) - require.NoError(err) - }) - } + peer0 := Start( + rawPeer0.config, + rawPeer0.conn, + rawPeer1.cert, + rawPeer1.nodeID, + NewThrottledMessageQueue( + rawPeer0.config.Metrics, + rawPeer1.nodeID, + logging.NoLog{}, + throttling.NewNoOutboundThrottler(), + ), + ) + + isReady := peer0.Ready() + require.False(isReady) + + peer1 := Start( + rawPeer1.config, + rawPeer1.conn, + rawPeer0.cert, + rawPeer0.nodeID, + NewThrottledMessageQueue( + rawPeer1.config.Metrics, + rawPeer0.nodeID, + logging.NoLog{}, + throttling.NewNoOutboundThrottler(), + ), + ) + + err := peer0.AwaitReady(context.Background()) + require.NoError(err) + isReady = peer0.Ready() + require.True(isReady) + + err = peer1.AwaitReady(context.Background()) + require.NoError(err) + isReady = peer1.Ready() + require.True(isReady) + + peer0.StartClose() + err = peer0.AwaitClosed(context.Background()) + require.NoError(err) + err = peer1.AwaitClosed(context.Background()) + require.NoError(err) } func TestSend(t *testing.T) { require := require.New(t) - // TODO: add "true" to test proto - for _, useProto := range []bool{false} { - t.Run(fmt.Sprintf("use proto buf message creator %v", useProto), func(tt *testing.T) { - peer0, peer1 := makeReadyTestPeers(tt) - mc, _ := newMessageCreator(t) + peer0, peer1 := 
makeReadyTestPeers(t) + mc := newMessageCreator(t) - outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty) - require.NoError(err) + outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty, p2p.EngineType_ENGINE_TYPE_SNOWMAN) + require.NoError(err) - sent := peer0.Send(context.Background(), outboundGetMsg) - require.True(sent) + sent := peer0.Send(context.Background(), outboundGetMsg) + require.True(sent) - inboundGetMsg := <-peer1.inboundMsgChan - require.Equal(message.Get, inboundGetMsg.Op()) + inboundGetMsg := <-peer1.inboundMsgChan + require.Equal(message.GetOp, inboundGetMsg.Op()) - peer1.StartClose() - err = peer0.AwaitClosed(context.Background()) - require.NoError(err) - err = peer1.AwaitClosed(context.Background()) - require.NoError(err) - }) - } + peer1.StartClose() + err = peer0.AwaitClosed(context.Background()) + require.NoError(err) + err = peer1.AwaitClosed(context.Background()) + require.NoError(err) } diff --git a/avalanchego/network/peer/set.go b/avalanchego/network/peer/set.go index c3c3904e..a26901f3 100644 --- a/avalanchego/network/peer/set.go +++ b/avalanchego/network/peer/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -8,9 +8,11 @@ import ( "github.com/ava-labs/avalanchego/utils/sampler" ) -var _ Set = &set{} +var _ Set = (*peerSet)(nil) -func NoPrecondition(Peer) bool { return true } +func NoPrecondition(Peer) bool { + return true +} // Set contains a group of peers. type Set interface { @@ -49,7 +51,7 @@ type Set interface { Info(nodeIDs []ids.NodeID) []Info } -type set struct { +type peerSet struct { peersMap map[ids.NodeID]int // nodeID -> peer's index in peersSlice peersSlice []Peer // invariant: len(peersSlice) == len(peersMap) } @@ -59,12 +61,12 @@ type set struct { // Only [Add] and [Remove] require exclusion on the data structure. 
The // remaining methods are safe for concurrent use. func NewSet() Set { - return &set{ + return &peerSet{ peersMap: make(map[ids.NodeID]int), } } -func (s *set) Add(peer Peer) { +func (s *peerSet) Add(peer Peer) { nodeID := peer.ID() index, ok := s.peersMap[nodeID] if !ok { @@ -75,7 +77,7 @@ func (s *set) Add(peer Peer) { } } -func (s *set) GetByID(nodeID ids.NodeID) (Peer, bool) { +func (s *peerSet) GetByID(nodeID ids.NodeID) (Peer, bool) { index, ok := s.peersMap[nodeID] if !ok { return nil, false @@ -83,14 +85,14 @@ func (s *set) GetByID(nodeID ids.NodeID) (Peer, bool) { return s.peersSlice[index], true } -func (s *set) GetByIndex(index int) (Peer, bool) { +func (s *peerSet) GetByIndex(index int) (Peer, bool) { if index < 0 || index >= len(s.peersSlice) { return nil, false } return s.peersSlice[index], true } -func (s *set) Remove(nodeID ids.NodeID) { +func (s *peerSet) Remove(nodeID ids.NodeID) { index, ok := s.peersMap[nodeID] if !ok { return @@ -108,11 +110,11 @@ func (s *set) Remove(nodeID ids.NodeID) { s.peersSlice = s.peersSlice[:lastIndex] } -func (s *set) Len() int { +func (s *peerSet) Len() int { return len(s.peersSlice) } -func (s *set) Sample(n int, precondition func(Peer) bool) []Peer { +func (s *peerSet) Sample(n int, precondition func(Peer) bool) []Peer { if n <= 0 { return nil } @@ -138,7 +140,7 @@ func (s *set) Sample(n int, precondition func(Peer) bool) []Peer { return peers } -func (s *set) AllInfo() []Info { +func (s *peerSet) AllInfo() []Info { peerInfo := make([]Info, len(s.peersSlice)) for i, peer := range s.peersSlice { peerInfo[i] = peer.Info() @@ -146,7 +148,7 @@ func (s *set) AllInfo() []Info { return peerInfo } -func (s *set) Info(nodeIDs []ids.NodeID) []Info { +func (s *peerSet) Info(nodeIDs []ids.NodeID) []Info { peerInfo := make([]Info, 0, len(nodeIDs)) for _, nodeID := range nodeIDs { if peer, ok := s.GetByID(nodeID); ok { diff --git a/avalanchego/network/peer/set_test.go b/avalanchego/network/peer/set_test.go index 
705a96ac..f26b1d19 100644 --- a/avalanchego/network/peer/set_test.go +++ b/avalanchego/network/peer/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" ) func TestSet(t *testing.T) { @@ -17,12 +18,12 @@ func TestSet(t *testing.T) { set := NewSet() peer1 := &peer{ - id: ids.NodeID{0x01}, - observedUptime: 0, + id: ids.NodeID{0x01}, + observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 0}, } updatedPeer1 := &peer{ - id: ids.NodeID{0x01}, - observedUptime: 1, + id: ids.NodeID{0x01}, + observedUptimes: map[ids.ID]uint32{constants.PrimaryNetworkID: 1}, } peer2 := &peer{ id: ids.NodeID{0x02}, @@ -41,21 +42,27 @@ func TestSet(t *testing.T) { set.Add(peer1) retrievedPeer1, peer1Found := set.GetByID(peer1.id) require.True(peer1Found) - require.Equal(peer1.ObservedUptime(), retrievedPeer1.ObservedUptime()) + observed1, _ := peer1.ObservedUptime(constants.PrimaryNetworkID) + observed2, _ := retrievedPeer1.ObservedUptime(constants.PrimaryNetworkID) + require.Equal(observed1, observed2) require.Equal(1, set.Len()) // re-addition of peer works as update set.Add(updatedPeer1) retrievedPeer1, peer1Found = set.GetByID(peer1.id) require.True(peer1Found) - require.Equal(updatedPeer1.ObservedUptime(), retrievedPeer1.ObservedUptime()) + observed1, _ = updatedPeer1.ObservedUptime(constants.PrimaryNetworkID) + observed2, _ = retrievedPeer1.ObservedUptime(constants.PrimaryNetworkID) + require.Equal(observed1, observed2) require.Equal(1, set.Len()) // add of another peer is handled set.Add(peer2) retrievedPeer2, peer2Found := set.GetByID(peer2.id) require.True(peer2Found) - require.Equal(peer2.ObservedUptime(), retrievedPeer2.ObservedUptime()) + observed1, _ = 
peer2.ObservedUptime(constants.PrimaryNetworkID) + observed2, _ = retrievedPeer2.ObservedUptime(constants.PrimaryNetworkID) + require.Equal(observed1, observed2) require.Equal(2, set.Len()) // removal of added peer is handled diff --git a/avalanchego/network/peer/test_network.go b/avalanchego/network/peer/test_network.go index c55b1f00..9bac6260 100644 --- a/avalanchego/network/peer/test_network.go +++ b/avalanchego/network/peer/test_network.go @@ -1,87 +1,34 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer import ( - "crypto" - "time" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/utils/ips" - "github.com/ava-labs/avalanchego/version" ) -var _ Network = &testNetwork{} +var TestNetwork Network = testNetwork{} -// testNetwork is a network definition for a TestPeer -type testNetwork struct { - mc message.Creator +type testNetwork struct{} - networkID uint32 - ip ips.IPPort - version *version.Application - signer crypto.Signer - subnets ids.Set +func (testNetwork) Connected(ids.NodeID) {} - uptime uint8 +func (testNetwork) AllowConnection(ids.NodeID) bool { + return true } -// NewTestNetwork creates and returns a new TestNetwork -func NewTestNetwork( - mc message.Creator, - networkID uint32, - ipPort ips.IPPort, - version *version.Application, - signer crypto.Signer, - subnets ids.Set, - uptime uint8, -) Network { - return &testNetwork{ - mc: mc, - networkID: networkID, - ip: ipPort, - version: version, - signer: signer, - subnets: subnets, - uptime: uptime, - } +func (testNetwork) Track(ids.NodeID, []*ips.ClaimedIPPort) ([]*p2p.PeerAck, error) { + return nil, nil } -func (n *testNetwork) Connected(ids.NodeID) {} - -func (n *testNetwork) AllowConnection(ids.NodeID) bool { return true } - -func (n *testNetwork) 
Track(ips.ClaimedIPPort) bool { return true } - -func (n *testNetwork) Disconnected(ids.NodeID) {} - -func (n *testNetwork) Version() (message.OutboundMessage, error) { - now := uint64(time.Now().Unix()) - unsignedIP := UnsignedIP{ - IP: n.ip, - Timestamp: now, - } - signedIP, err := unsignedIP.Sign(n.signer) - if err != nil { - return nil, err - } - return n.mc.Version( - n.networkID, - now, - n.ip, - n.version.String(), - now, - signedIP.Signature, - n.subnets.List(), - ) +func (testNetwork) MarkTracked(ids.NodeID, []*p2p.PeerAck) error { + return nil } -func (n *testNetwork) Peers() (message.OutboundMessage, error) { - return n.mc.PeerList(nil, true) -} +func (testNetwork) Disconnected(ids.NodeID) {} -func (n *testNetwork) Pong(ids.NodeID) (message.OutboundMessage, error) { - return n.mc.Pong(n.uptime) +func (testNetwork) Peers(ids.NodeID) ([]ips.ClaimedIPPort, error) { + return nil, nil } diff --git a/avalanchego/network/peer/test_peer.go b/avalanchego/network/peer/test_peer.go index 7f45165d..d813a16a 100644 --- a/avalanchego/network/peer/test_peer.go +++ b/avalanchego/network/peer/test_peer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package peer @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/network/throttling" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/constants" @@ -23,6 +24,7 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -68,19 +70,10 @@ func StartTestPeer( } mc, err := message.NewCreator( + logging.NoLog{}, prometheus.NewRegistry(), "", - true, - 10*time.Second, - ) - if err != nil { - return nil, err - } - - mcWithProto, err := message.NewCreatorWithProto( - prometheus.NewRegistry(), - "", - true, + constants.DefaultNetworkCompressionType, 10*time.Second, ) if err != nil { @@ -96,41 +89,37 @@ func StartTestPeer( return nil, err } - ipPort := ips.IPPort{ - IP: net.IPv6zero, - Port: 0, - } - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, 10*time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + 10*time.Second, + ) if err != nil { return nil, err } + signerIP := ips.NewDynamicIPPort(net.IPv6zero, 0) + tls := tlsCert.PrivateKey.(crypto.Signer) + peer := Start( &Config{ - Metrics: metrics, - MessageCreator: mc, - MessageCreatorWithProto: mcWithProto, - BanffTime: version.GetBanffTime(networkID), - Log: logging.NoLog{}, - InboundMsgThrottler: throttling.NewNoInboundThrottler(), - Network: NewTestNetwork( - mc, - networkID, - ipPort, - version.CurrentApp, - tlsCert.PrivateKey.(crypto.Signer), - ids.Set{}, - 100, - ), + Metrics: metrics, + MessageCreator: mc, + Log: 
logging.NoLog{}, + InboundMsgThrottler: throttling.NewNoInboundThrottler(), + Network: TestNetwork, Router: router, VersionCompatibility: version.GetCompatibility(networkID), - MySubnets: ids.Set{}, + MySubnets: set.Set[ids.ID]{}, Beacons: validators.NewSet(), NetworkID: networkID, PingFrequency: constants.DefaultPingFrequency, PongTimeout: constants.DefaultPingPongTimeout, MaxClockDifference: time.Minute, ResourceTracker: resourceTracker, + UptimeCalculator: uptime.NoOpCalculator, + IPSigner: NewIPSigner(signerIP, tls), }, conn, cert, diff --git a/avalanchego/network/peer/tls_config.go b/avalanchego/network/peer/tls_config.go index 06437909..733812db 100644 --- a/avalanchego/network/peer/tls_config.go +++ b/avalanchego/network/peer/tls_config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer diff --git a/avalanchego/network/peer/upgrader.go b/avalanchego/network/peer/upgrader.go index 3428e587..3e892188 100644 --- a/avalanchego/network/peer/upgrader.go +++ b/avalanchego/network/peer/upgrader.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package peer @@ -15,8 +15,8 @@ import ( var ( errNoCert = errors.New("tls handshake finished with no peer certificate") - _ Upgrader = &tlsServerUpgrader{} - _ Upgrader = &tlsClientUpgrader{} + _ Upgrader = (*tlsServerUpgrader)(nil) + _ Upgrader = (*tlsClientUpgrader)(nil) ) type Upgrader interface { diff --git a/avalanchego/network/peer/validator_id.go b/avalanchego/network/peer/validator_id.go new file mode 100644 index 00000000..5471fda2 --- /dev/null +++ b/avalanchego/network/peer/validator_id.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package peer + +import "github.com/ava-labs/avalanchego/ids" + +// ValidatorID represents a validator that we gossip to other peers +type ValidatorID struct { + // The validator's ID + NodeID ids.NodeID + // The Tx that added this into the validator set + TxID ids.ID +} diff --git a/avalanchego/network/test_network.go b/avalanchego/network/test_network.go new file mode 100644 index 00000000..296108b7 --- /dev/null +++ b/avalanchego/network/test_network.go @@ -0,0 +1,263 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "crypto" + "errors" + "math" + "net" + "runtime" + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/network/dialer" + "github.com/ava-labs/avalanchego/network/peer" + "github.com/ava-labs/avalanchego/network/throttling" + "github.com/ava-labs/avalanchego/snow/networking/router" + "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/ips" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math/meter" + "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" +) + +var ( + errClosed = errors.New("closed") + + _ net.Listener = (*noopListener)(nil) + _ subnets.Allower = (*nodeIDConnector)(nil) +) + +type noopListener struct { + once sync.Once + closed chan struct{} +} + +func newNoopListener() net.Listener { + return &noopListener{ + closed: make(chan struct{}), + } +} + +func (l *noopListener) Accept() (net.Conn, error) { 
+ <-l.closed + return nil, errClosed +} + +func (l *noopListener) Close() error { + l.once.Do(func() { + close(l.closed) + }) + return nil +} + +func (*noopListener) Addr() net.Addr { + return &net.TCPAddr{ + IP: net.IPv4zero, + Port: 0, + } +} + +func NewTestNetwork( + log logging.Logger, + networkID uint32, + currentValidators validators.Set, + trackedSubnets set.Set[ids.ID], + router router.ExternalHandler, +) (Network, error) { + metrics := prometheus.NewRegistry() + msgCreator, err := message.NewCreator( + logging.NoLog{}, + metrics, + "", + constants.DefaultNetworkCompressionType, + constants.DefaultNetworkMaximumInboundTimeout, + ) + if err != nil { + return nil, err + } + + networkConfig := Config{ + ThrottlerConfig: ThrottlerConfig{ + InboundConnUpgradeThrottlerConfig: throttling.InboundConnUpgradeThrottlerConfig{ + UpgradeCooldown: constants.DefaultInboundConnUpgradeThrottlerCooldown, + MaxRecentConnsUpgraded: int(math.Ceil(constants.DefaultInboundThrottlerMaxConnsPerSec * constants.DefaultInboundConnUpgradeThrottlerCooldown.Seconds())), + }, + + InboundMsgThrottlerConfig: throttling.InboundMsgThrottlerConfig{ + MsgByteThrottlerConfig: throttling.MsgByteThrottlerConfig{ + VdrAllocSize: constants.DefaultInboundThrottlerVdrAllocSize, + AtLargeAllocSize: constants.DefaultInboundThrottlerAtLargeAllocSize, + NodeMaxAtLargeBytes: constants.DefaultInboundThrottlerNodeMaxAtLargeBytes, + }, + + BandwidthThrottlerConfig: throttling.BandwidthThrottlerConfig{ + RefillRate: constants.DefaultInboundThrottlerBandwidthRefillRate, + MaxBurstSize: constants.DefaultInboundThrottlerBandwidthMaxBurstSize, + }, + + CPUThrottlerConfig: throttling.SystemThrottlerConfig{ + MaxRecheckDelay: constants.DefaultInboundThrottlerCPUMaxRecheckDelay, + }, + + DiskThrottlerConfig: throttling.SystemThrottlerConfig{ + MaxRecheckDelay: constants.DefaultInboundThrottlerDiskMaxRecheckDelay, + }, + + MaxProcessingMsgsPerNode: constants.DefaultInboundThrottlerMaxProcessingMsgsPerNode, + }, + 
OutboundMsgThrottlerConfig: throttling.MsgByteThrottlerConfig{ + VdrAllocSize: constants.DefaultOutboundThrottlerVdrAllocSize, + AtLargeAllocSize: constants.DefaultOutboundThrottlerAtLargeAllocSize, + NodeMaxAtLargeBytes: constants.DefaultOutboundThrottlerNodeMaxAtLargeBytes, + }, + + MaxInboundConnsPerSec: constants.DefaultInboundThrottlerMaxConnsPerSec, + }, + + HealthConfig: HealthConfig{ + Enabled: true, + MinConnectedPeers: constants.DefaultNetworkHealthMinPeers, + MaxTimeSinceMsgReceived: constants.DefaultNetworkHealthMaxTimeSinceMsgReceived, + MaxTimeSinceMsgSent: constants.DefaultNetworkHealthMaxTimeSinceMsgSent, + MaxPortionSendQueueBytesFull: constants.DefaultNetworkHealthMaxPortionSendQueueFill, + MaxSendFailRate: constants.DefaultNetworkHealthMaxSendFailRate, + SendFailRateHalflife: constants.DefaultHealthCheckAveragerHalflife, + }, + + ProxyEnabled: constants.DefaultNetworkTCPProxyEnabled, + ProxyReadHeaderTimeout: constants.DefaultNetworkTCPProxyReadTimeout, + + DialerConfig: dialer.Config{ + ThrottleRps: constants.DefaultOutboundConnectionThrottlingRps, + ConnectionTimeout: constants.DefaultOutboundConnectionTimeout, + }, + + TimeoutConfig: TimeoutConfig{ + PingPongTimeout: constants.DefaultPingPongTimeout, + ReadHandshakeTimeout: constants.DefaultNetworkReadHandshakeTimeout, + }, + + PeerListGossipConfig: PeerListGossipConfig{ + PeerListNumValidatorIPs: constants.DefaultNetworkPeerListNumValidatorIPs, + PeerListValidatorGossipSize: constants.DefaultNetworkPeerListValidatorGossipSize, + PeerListNonValidatorGossipSize: constants.DefaultNetworkPeerListNonValidatorGossipSize, + PeerListPeersGossipSize: constants.DefaultNetworkPeerListPeersGossipSize, + PeerListGossipFreq: constants.DefaultNetworkPeerListGossipFreq, + }, + + DelayConfig: DelayConfig{ + InitialReconnectDelay: constants.DefaultNetworkInitialReconnectDelay, + MaxReconnectDelay: constants.DefaultNetworkMaxReconnectDelay, + }, + + MaxClockDifference: 
constants.DefaultNetworkMaxClockDifference, + CompressionType: constants.DefaultNetworkCompressionType, + PingFrequency: constants.DefaultPingFrequency, + AllowPrivateIPs: constants.DefaultNetworkAllowPrivateIPs, + UptimeMetricFreq: constants.DefaultUptimeMetricFreq, + MaximumInboundMessageTimeout: constants.DefaultNetworkMaximumInboundTimeout, + + RequireValidatorToConnect: constants.DefaultNetworkRequireValidatorToConnect, + PeerReadBufferSize: constants.DefaultNetworkPeerReadBufferSize, + PeerWriteBufferSize: constants.DefaultNetworkPeerWriteBufferSize, + } + + networkConfig.NetworkID = networkID + networkConfig.TrackedSubnets = trackedSubnets + + tlsCert, err := staking.NewTLSCert() + if err != nil { + return nil, err + } + tlsConfig := peer.TLSConfig(*tlsCert, nil) + networkConfig.TLSConfig = tlsConfig + networkConfig.TLSKey = tlsCert.PrivateKey.(crypto.Signer) + + validatorManager := validators.NewManager() + beacons := validators.NewSet() + networkConfig.Validators = validatorManager + networkConfig.Validators.Add(constants.PrimaryNetworkID, currentValidators) + networkConfig.Beacons = beacons + // This never actually does anything because we never initialize the P-chain + networkConfig.UptimeCalculator = uptime.NoOpCalculator + + // TODO actually monitor usage + // TestNetwork doesn't use disk so we don't need to track it, but we should + // still have guardrails around cpu/memory usage. 
+ networkConfig.ResourceTracker, err = tracker.NewResourceTracker( + metrics, + resource.NoUsage, + &meter.ContinuousFactory{}, + constants.DefaultHealthCheckAveragerHalflife, + ) + if err != nil { + return nil, err + } + networkConfig.CPUTargeter = tracker.NewTargeter( + &tracker.TargeterConfig{ + VdrAlloc: float64(runtime.NumCPU()), + MaxNonVdrUsage: .8 * float64(runtime.NumCPU()), + MaxNonVdrNodeUsage: float64(runtime.NumCPU()) / 8, + }, + currentValidators, + networkConfig.ResourceTracker.CPUTracker(), + ) + networkConfig.DiskTargeter = tracker.NewTargeter( + &tracker.TargeterConfig{ + VdrAlloc: 1000 * units.GiB, + MaxNonVdrUsage: 1000 * units.GiB, + MaxNonVdrNodeUsage: 1000 * units.GiB, + }, + currentValidators, + networkConfig.ResourceTracker.DiskTracker(), + ) + + networkConfig.MyIPPort = ips.NewDynamicIPPort(net.IPv4zero, 0) + + networkConfig.GossipTracker, err = peer.NewGossipTracker(metrics, "") + if err != nil { + return nil, err + } + + return NewNetwork( + &networkConfig, + msgCreator, + metrics, + log, + newNoopListener(), + dialer.NewDialer( + constants.NetworkType, + dialer.Config{ + ThrottleRps: constants.DefaultOutboundConnectionThrottlingRps, + ConnectionTimeout: constants.DefaultOutboundConnectionTimeout, + }, + log, + ), + router, + ) +} + +type nodeIDConnector struct { + nodeID ids.NodeID +} + +func newNodeIDConnector(nodeID ids.NodeID) *nodeIDConnector { + return &nodeIDConnector{nodeID: nodeID} +} + +func (f *nodeIDConnector) IsAllowed(nodeID ids.NodeID, _ bool) bool { + return nodeID == f.nodeID +} diff --git a/avalanchego/network/throttling/bandwidth_throttler.go b/avalanchego/network/throttling/bandwidth_throttler.go index f7d2dfa8..5adfcb00 100644 --- a/avalanchego/network/throttling/bandwidth_throttler.go +++ b/avalanchego/network/throttling/bandwidth_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package throttling @@ -20,7 +20,7 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -var _ bandwidthThrottler = &bandwidthThrottlerImpl{} +var _ bandwidthThrottler = (*bandwidthThrottlerImpl)(nil) // Returns a bandwidth throttler that uses a token bucket // model, where each token is 1 byte, to rate-limit bandwidth usage. diff --git a/avalanchego/network/throttling/bandwidth_throttler_test.go b/avalanchego/network/throttling/bandwidth_throttler_test.go index 318d767e..9b4b7eaf 100644 --- a/avalanchego/network/throttling/bandwidth_throttler_test.go +++ b/avalanchego/network/throttling/bandwidth_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/avalanchego/network/throttling/common.go b/avalanchego/network/throttling/common.go index 4cb8d50c..c2a92db3 100644 --- a/avalanchego/network/throttling/common.go +++ b/avalanchego/network/throttling/common.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/avalanchego/network/throttling/dial_throttler.go b/avalanchego/network/throttling/dial_throttler.go index 7740fca5..491c312b 100644 --- a/avalanchego/network/throttling/dial_throttler.go +++ b/avalanchego/network/throttling/dial_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -10,8 +10,8 @@ import ( ) var ( - _ DialThrottler = &dialThrottler{} - _ DialThrottler = &noDialThrottler{} + _ DialThrottler = (*dialThrottler)(nil) + _ DialThrottler = (*noDialThrottler)(nil) ) type DialThrottler interface { @@ -40,6 +40,6 @@ func NewNoDialThrottler() DialThrottler { return noDialThrottler{} } -func (t noDialThrottler) Acquire(context.Context) error { +func (noDialThrottler) Acquire(context.Context) error { return nil } diff --git a/avalanchego/network/throttling/dial_throttler_test.go b/avalanchego/network/throttling/dial_throttler_test.go index 30d6caa8..f3c3f651 100644 --- a/avalanchego/network/throttling/dial_throttler_test.go +++ b/avalanchego/network/throttling/dial_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -43,7 +43,6 @@ func TestDialThrottler(t *testing.T) { select { case <-time.After(25 * time.Millisecond): - break case <-acquiredChan: t.Fatal("should not have been able to acquire immediately") } diff --git a/avalanchego/network/throttling/inbound_conn_throttler.go b/avalanchego/network/throttling/inbound_conn_throttler.go index ec8decc1..7f220639 100644 --- a/avalanchego/network/throttling/inbound_conn_throttler.go +++ b/avalanchego/network/throttling/inbound_conn_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -10,7 +10,7 @@ import ( "golang.org/x/time/rate" ) -var _ net.Listener = &throttledListener{} +var _ net.Listener = (*throttledListener)(nil) // Wraps [listener] and returns a net.Listener that will accept at most // [maxConnsPerSec] connections per second. 
diff --git a/avalanchego/network/throttling/inbound_conn_throttler_test.go b/avalanchego/network/throttling/inbound_conn_throttler_test.go index e3d7b2d7..9e13e32b 100644 --- a/avalanchego/network/throttling/inbound_conn_throttler_test.go +++ b/avalanchego/network/throttling/inbound_conn_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" ) -var _ net.Listener = &MockListener{} +var _ net.Listener = (*MockListener)(nil) type MockListener struct { t *testing.T @@ -46,8 +46,11 @@ func (ml *MockListener) Addr() net.Addr { func TestInboundConnThrottlerClose(t *testing.T) { closed := false l := &MockListener{ - t: t, - OnCloseF: func() error { closed = true; return nil }, + t: t, + OnCloseF: func() error { + closed = true + return nil + }, } wrappedL := NewThrottledListener(l, 1) err := wrappedL.Close() @@ -67,8 +70,11 @@ func TestInboundConnThrottlerClose(t *testing.T) { func TestInboundConnThrottlerAddr(t *testing.T) { addrCalled := false l := &MockListener{ - t: t, - OnAddrF: func() net.Addr { addrCalled = true; return nil }, + t: t, + OnAddrF: func() net.Addr { + addrCalled = true + return nil + }, } wrappedL := NewThrottledListener(l, 1) _ = wrappedL.Addr() diff --git a/avalanchego/network/throttling/inbound_conn_upgrade_throttler.go b/avalanchego/network/throttling/inbound_conn_upgrade_throttler.go index 2150eb83..9d058e29 100644 --- a/avalanchego/network/throttling/inbound_conn_upgrade_throttler.go +++ b/avalanchego/network/throttling/inbound_conn_upgrade_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -9,12 +9,13 @@ import ( "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) var ( - _ InboundConnUpgradeThrottler = &inboundConnUpgradeThrottler{} - _ InboundConnUpgradeThrottler = &noInboundConnUpgradeThrottler{} + _ InboundConnUpgradeThrottler = (*inboundConnUpgradeThrottler)(nil) + _ InboundConnUpgradeThrottler = (*noInboundConnUpgradeThrottler)(nil) ) // InboundConnUpgradeThrottler returns whether we should upgrade an inbound connection from IP [ipStr]. @@ -61,7 +62,6 @@ func NewInboundConnUpgradeThrottler(log logging.Logger, config InboundConnUpgrad InboundConnUpgradeThrottlerConfig: config, log: log, done: make(chan struct{}), - recentIPs: make(map[string]struct{}), recentIPsAndTimes: make(chan ipAndTime, config.MaxRecentConnsUpgraded), } } @@ -69,9 +69,13 @@ func NewInboundConnUpgradeThrottler(log logging.Logger, config InboundConnUpgrad // noInboundConnUpgradeThrottler upgrades all inbound connections type noInboundConnUpgradeThrottler struct{} -func (*noInboundConnUpgradeThrottler) Dispatch() {} -func (*noInboundConnUpgradeThrottler) Stop() {} -func (*noInboundConnUpgradeThrottler) ShouldUpgrade(ips.IPPort) bool { return true } +func (*noInboundConnUpgradeThrottler) Dispatch() {} + +func (*noInboundConnUpgradeThrottler) Stop() {} + +func (*noInboundConnUpgradeThrottler) ShouldUpgrade(ips.IPPort) bool { + return true +} type ipAndTime struct { ip string @@ -88,7 +92,7 @@ type inboundConnUpgradeThrottler struct { done chan struct{} // IP --> Present if ShouldUpgrade(ipStr) returned true // within the last [UpgradeCooldown]. - recentIPs map[string]struct{} + recentIPs set.Set[string] // Sorted in order of increasing time // of last call to ShouldUpgrade that returned true. 
// For each IP in this channel, ShouldUpgrade(ipStr) @@ -108,8 +112,7 @@ func (n *inboundConnUpgradeThrottler) ShouldUpgrade(ip ips.IPPort) bool { n.lock.Lock() defer n.lock.Unlock() - _, recentlyConnected := n.recentIPs[ipStr] - if recentlyConnected { + if n.recentIPs.Contains(ipStr) { // We recently upgraded an inbound connection from this IP return false } @@ -119,7 +122,7 @@ func (n *inboundConnUpgradeThrottler) ShouldUpgrade(ip ips.IPPort) bool { ip: ipStr, cooldownElapsedAt: n.clock.Time().Add(n.UpgradeCooldown), }: - n.recentIPs[ipStr] = struct{}{} + n.recentIPs.Add(ipStr) return true default: return false @@ -143,7 +146,7 @@ func (n *inboundConnUpgradeThrottler) Dispatch() { case <-timer.C: // Remove the next IP (we'd upgrade another inbound connection from it) n.lock.Lock() - delete(n.recentIPs, next.ip) + n.recentIPs.Remove(next.ip) n.lock.Unlock() case <-n.done: return diff --git a/avalanchego/network/throttling/inbound_conn_upgrade_throttler_test.go b/avalanchego/network/throttling/inbound_conn_upgrade_throttler_test.go index e0cb5f5f..03fec7a8 100644 --- a/avalanchego/network/throttling/inbound_conn_upgrade_throttler_test.go +++ b/avalanchego/network/throttling/inbound_conn_upgrade_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/avalanchego/network/throttling/inbound_msg_buffer_throttler.go b/avalanchego/network/throttling/inbound_msg_buffer_throttler.go index 1760a9e0..d0617783 100644 --- a/avalanchego/network/throttling/inbound_msg_buffer_throttler.go +++ b/avalanchego/network/throttling/inbound_msg_buffer_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -57,8 +57,9 @@ type inboundMsgBufferThrottler struct { // buffer so that we can read a message from [nodeID]. // The returned release function must be called (!) when done processing the message // (or when we give up trying to read the message.) +// // invariant: There should be a maximum of 1 blocking call to Acquire for a -// given nodeID. Callers must enforce this invariant. +// given nodeID. Callers must enforce this invariant. func (t *inboundMsgBufferThrottler) Acquire(ctx context.Context, nodeID ids.NodeID) ReleaseFunc { startTime := time.Now() defer func() { @@ -69,7 +70,9 @@ func (t *inboundMsgBufferThrottler) Acquire(ctx context.Context, nodeID ids.Node if t.nodeToNumProcessingMsgs[nodeID] < t.maxProcessingMsgsPerNode { t.nodeToNumProcessingMsgs[nodeID]++ t.lock.Unlock() - return func() { t.release(nodeID) } + return func() { + t.release(nodeID) + } } // We're currently processing the maximum number of @@ -89,7 +92,9 @@ func (t *inboundMsgBufferThrottler) Acquire(ctx context.Context, nodeID ids.Node case <-closeOnAcquireChan: t.lock.Lock() t.nodeToNumProcessingMsgs[nodeID]++ - releaseFunc = func() { t.release(nodeID) } + releaseFunc = func() { + t.release(nodeID) + } case <-ctx.Done(): t.lock.Lock() delete(t.awaitingAcquire, nodeID) diff --git a/avalanchego/network/throttling/inbound_msg_buffer_throttler_test.go b/avalanchego/network/throttling/inbound_msg_buffer_throttler_test.go index 5ead7d46..329957cb 100644 --- a/avalanchego/network/throttling/inbound_msg_buffer_throttler_test.go +++ b/avalanchego/network/throttling/inbound_msg_buffer_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling diff --git a/avalanchego/network/throttling/inbound_msg_byte_throttler.go b/avalanchego/network/throttling/inbound_msg_byte_throttler.go index ec7334d3..66efa79b 100644 --- a/avalanchego/network/throttling/inbound_msg_byte_throttler.go +++ b/avalanchego/network/throttling/inbound_msg_byte_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -107,7 +107,7 @@ func (t *inboundMsgByteThrottler) Acquire(ctx context.Context, msgSize uint64, n } // Take as many bytes as we can from the at-large allocation. - atLargeBytesUsed := math.Min64( + atLargeBytesUsed := math.Min( // only give as many bytes as needed metadata.bytesNeeded, // don't exceed per-node limit @@ -122,15 +122,17 @@ func (t *inboundMsgByteThrottler) Acquire(ctx context.Context, msgSize uint64, n t.nodeToAtLargeBytesUsed[nodeID] += atLargeBytesUsed if metadata.bytesNeeded == 0 { // If we acquired enough bytes, return t.lock.Unlock() - return func() { t.release(metadata, nodeID) } + return func() { + t.release(metadata, nodeID) + } } } // Take as many bytes as we can from [nodeID]'s validator allocation. 
// Calculate [nodeID]'s validator allocation size based on its weight vdrAllocationSize := uint64(0) - weight, isVdr := t.vdrs.GetWeight(nodeID) - if isVdr && weight != 0 { + weight := t.vdrs.GetWeight(nodeID) + if weight != 0 { vdrAllocationSize = uint64(float64(t.maxVdrBytes) * float64(weight) / float64(t.vdrs.Weight())) } vdrBytesAlreadyUsed := t.nodeToVdrBytesUsed[nodeID] @@ -143,7 +145,7 @@ func (t *inboundMsgByteThrottler) Acquire(ctx context.Context, msgSize uint64, n } else { vdrBytesAllowed -= vdrBytesAlreadyUsed } - vdrBytesUsed := math.Min64(t.remainingVdrBytes, metadata.bytesNeeded, vdrBytesAllowed) + vdrBytesUsed := math.Min(t.remainingVdrBytes, metadata.bytesNeeded, vdrBytesAllowed) if vdrBytesUsed > 0 { // Mark that [nodeID] used [vdrBytesUsed] from its validator allocation t.nodeToVdrBytesUsed[nodeID] += vdrBytesUsed @@ -152,7 +154,9 @@ func (t *inboundMsgByteThrottler) Acquire(ctx context.Context, msgSize uint64, n metadata.bytesNeeded -= vdrBytesUsed if metadata.bytesNeeded == 0 { // If we acquired enough bytes, return t.lock.Unlock() - return func() { t.release(metadata, nodeID) } + return func() { + t.release(metadata, nodeID) + } } } @@ -204,7 +208,7 @@ func (t *inboundMsgByteThrottler) release(metadata *msgMetadata, nodeID ids.Node // or messages from [nodeID] currently waiting to acquire bytes. vdrBytesUsed := t.nodeToVdrBytesUsed[nodeID] releasedBytes := metadata.msgSize - metadata.bytesNeeded - vdrBytesToReturn := math.Min64(releasedBytes, vdrBytesUsed) + vdrBytesToReturn := math.Min(releasedBytes, vdrBytesUsed) // [atLargeBytesToReturn] is the number of bytes from [msgSize] // that will be given to the at-large allocation or a message @@ -227,7 +231,7 @@ func (t *inboundMsgByteThrottler) release(metadata *msgMetadata, nodeID ids.Node msg := iter.Value() // From the at-large allocation, take the maximum number of bytes // without exceeding the per-node limit on taking from at-large pool. 
- atLargeBytesGiven := math.Min64( + atLargeBytesGiven := math.Min( // don't give [msg] too many bytes msg.bytesNeeded, // don't exceed per-node limit @@ -260,7 +264,7 @@ func (t *inboundMsgByteThrottler) release(metadata *msgMetadata, nodeID ids.Node msg, exists := t.waitingToAcquire.Get(msgID) if exists { // Give [msg] all the bytes we can - bytesToGive := math.Min64(msg.bytesNeeded, vdrBytesToReturn) + bytesToGive := math.Min(msg.bytesNeeded, vdrBytesToReturn) msg.bytesNeeded -= bytesToGive vdrBytesToReturn -= bytesToGive if msg.bytesNeeded == 0 { diff --git a/avalanchego/network/throttling/inbound_msg_byte_throttler_test.go b/avalanchego/network/throttling/inbound_msg_byte_throttler_test.go index 13aa7f98..6565d0b6 100644 --- a/avalanchego/network/throttling/inbound_msg_byte_throttler_test.go +++ b/avalanchego/network/throttling/inbound_msg_byte_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -26,7 +26,7 @@ func TestInboundMsgByteThrottlerCancelContextDeadlock(t *testing.T) { } vdrs := validators.NewSet() vdr := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr, 1)) + require.NoError(vdrs.Add(vdr, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, @@ -55,8 +55,8 @@ func TestInboundMsgByteThrottlerCancelContext(t *testing.T) { vdrs := validators.NewSet() vdr1ID := ids.GenerateTestNodeID() vdr2ID := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr1ID, 1)) - require.NoError(vdrs.AddWeight(vdr2ID, 1)) + require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.Add(vdr2ID, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, @@ -113,8 +113,8 @@ func TestInboundMsgByteThrottler(t *testing.T) { vdrs := validators.NewSet() vdr1ID := ids.GenerateTestNodeID() vdr2ID := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr1ID, 1)) - require.NoError(vdrs.AddWeight(vdr2ID, 1)) + require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.Add(vdr2ID, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, @@ -330,7 +330,7 @@ func TestSybilMsgThrottlerMaxNonVdr(t *testing.T) { } vdrs := validators.NewSet() vdr1ID := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr1ID, 1)) + require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) throttler, err := newInboundMsgByteThrottler( logging.NoLog{}, "", @@ -377,7 +377,7 @@ func TestMsgThrottlerNextMsg(t *testing.T) { } vdrs := validators.NewSet() vdr1ID := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr1ID, 1)) + require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) nonVdrNodeID := ids.GenerateTestNodeID() maxVdrBytes := config.VdrAllocSize + config.AtLargeAllocSize diff --git a/avalanchego/network/throttling/inbound_msg_throttler.go b/avalanchego/network/throttling/inbound_msg_throttler.go index fded1b3a..b76a7a34 100644 --- 
a/avalanchego/network/throttling/inbound_msg_throttler.go +++ b/avalanchego/network/throttling/inbound_msg_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" ) -var _ InboundMsgThrottler = &inboundMsgThrottler{} +var _ InboundMsgThrottler = (*inboundMsgThrottler)(nil) // InboundMsgThrottler rate-limits inbound messages from the network. type InboundMsgThrottler interface { @@ -93,7 +93,6 @@ func NewInboundMsgThrottler( fmt.Sprintf("%s_cpu", namespace), registerer, throttlerConfig.CPUThrottlerConfig, - vdrs, resourceTracker.CPUTracker(), cpuTargeter, ) @@ -104,7 +103,6 @@ func NewInboundMsgThrottler( fmt.Sprintf("%s_disk", namespace), registerer, throttlerConfig.DiskThrottlerConfig, - vdrs, resourceTracker.DiskTracker(), diskTargeter, ) @@ -121,20 +119,23 @@ func NewInboundMsgThrottler( } // A sybil-safe inbound message throttler. -// Rate-limits reading of inbound messages to prevent peers from -// consuming excess resources. +// Rate-limits reading of inbound messages to prevent peers from consuming +// excess resources. // The three resources considered are: -// 1. An inbound message buffer, where each message that we're currently -// processing takes up 1 unit of space on the buffer. -// 2. An inbound message byte buffer, where a message of length n -// that we're currently processing takes up n units of space on the buffer. -// 3. Bandwidth. The bandwidth rate-limiting is implemented using a token bucket, -// where each token is 1 byte. See BandwidthThrottler. +// +// 1. An inbound message buffer, where each message that we're currently +// processing takes up 1 unit of space on the buffer. +// 2. 
An inbound message byte buffer, where a message of length n +// that we're currently processing takes up n units of space on the buffer. +// 3. Bandwidth. The bandwidth rate-limiting is implemented using a token +// bucket, where each token is 1 byte. See BandwidthThrottler. +// // A call to Acquire([msgSize], [nodeID]) blocks until we've secured -// enough of both these resources to read a message of size [msgSize] from [nodeID]. +// enough of both these resources to read a message of size [msgSize] from +// [nodeID]. type inboundMsgThrottler struct { - // Rate-limits based on number of messages from a given - // node that we're currently processing. + // Rate-limits based on number of messages from a given node that we're + // currently processing. bufferThrottler *inboundMsgBufferThrottler // Rate-limits based on recent bandwidth usage bandwidthThrottler bandwidthThrottler diff --git a/avalanchego/network/throttling/inbound_resource_throttler.go b/avalanchego/network/throttling/inbound_resource_throttler.go index 4d9c9abc..a12e8562 100644 --- a/avalanchego/network/throttling/inbound_resource_throttler.go +++ b/avalanchego/network/throttling/inbound_resource_throttler.go @@ -1,19 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling import ( + "context" "fmt" + "sync" "time" "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/context" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/tracker" - "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -21,7 +20,7 @@ import ( const epsilon = time.Millisecond var ( - _ SystemThrottler = &systemThrottler{} + _ SystemThrottler = (*systemThrottler)(nil) _ SystemThrottler = noSystemThrottler{} ) @@ -53,6 +52,8 @@ type systemThrottler struct { targeter tracker.Targeter // Tells us the utilization of each node. tracker tracker.Tracker + // Invariant: [timerPool] only returns timers that have been stopped and drained. + timerPool sync.Pool } type systemThrottlerMetrics struct { @@ -92,7 +93,6 @@ func NewSystemThrottler( namespace string, reg prometheus.Registerer, config SystemThrottlerConfig, - vdrs validators.Set, tracker tracker.Tracker, targeter tracker.Targeter, ) (SystemThrottler, error) { @@ -105,23 +105,26 @@ func NewSystemThrottler( SystemThrottlerConfig: config, targeter: targeter, tracker: tracker, + timerPool: sync.Pool{ + New: func() interface{} { + // Satisfy invariant that timer is stopped and drained. + timer := time.NewTimer(0) + if !timer.Stop() { + <-timer.C + } + return timer + }, + }, }, nil } func (t *systemThrottler) Acquire(ctx context.Context, nodeID ids.NodeID) { - // Fires when we should re-check whether this node's usage has fallen to an - // acceptable level. - timer := time.NewTimer(0) - if !timer.Stop() { - <-timer.C - } - defer timer.Stop() - - // [waited] is true if we waited for this node's usage to fall to an - // acceptable level before returning - waited := false + // [timer] fires when we should re-check whether this node's + // usage has fallen to an acceptable level. + // Lazily initialize timer only if we actually need to wait. 
+ var timer *time.Timer defer func() { - if waited { + if timer != nil { // We waited at least once for usage to fall. t.metrics.totalWaits.Inc() // Note that [t.metrics.awaitingAcquire.Inc()] was called once if // and only if [waited] is true. @@ -161,11 +164,26 @@ func (t *systemThrottler) Acquire(ctx context.Context, nodeID ids.NodeID) { // acceptable level. waitDuration = t.MaxRecheckDelay } - if !waited { + + // Reset [timer]. + if timer == nil { // Note this is called at most once. t.metrics.awaitingAcquire.Inc() + + timer = t.timerPool.Get().(*time.Timer) + defer func() { + // Satisfy [t.timerPool] invariant. + if !timer.Stop() { + // The default ensures we don't wait forever in the case + // that the channel was already drained. + select { + case <-timer.C: + default: + } + } + t.timerPool.Put(timer) + }() } - waited = true timer.Reset(waitDuration) select { case <-ctx.Done(): diff --git a/avalanchego/network/throttling/inbound_resource_throttler_test.go b/avalanchego/network/throttling/inbound_resource_throttler_test.go index 5db8a622..266dd070 100644 --- a/avalanchego/network/throttling/inbound_resource_throttler_test.go +++ b/avalanchego/network/throttling/inbound_resource_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -16,7 +16,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/tracker" - "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/timer/mockable" @@ -30,7 +29,6 @@ func TestNewSystemThrottler(t *testing.T) { reg := prometheus.NewRegistry() clock := mockable.Clock{} clock.Set(time.Now()) - vdrs := validators.NewSet() resourceTracker, err := tracker.NewResourceTracker(reg, resource.NoUsage, meter.ContinuousFactory{}, time.Second) require.NoError(err) cpuTracker := resourceTracker.CPUTracker() @@ -40,7 +38,7 @@ func TestNewSystemThrottler(t *testing.T) { MaxRecheckDelay: time.Second, } targeter := tracker.NewMockTargeter(ctrl) - throttlerIntf, err := NewSystemThrottler("", reg, config, vdrs, cpuTracker, targeter) + throttlerIntf, err := NewSystemThrottler("", reg, config, cpuTracker, targeter) require.NoError(err) throttler, ok := throttlerIntf.(*systemThrottler) require.True(ok) @@ -62,12 +60,9 @@ func TestSystemThrottler(t *testing.T) { config := SystemThrottlerConfig{ MaxRecheckDelay: maxRecheckDelay, } - vdrs := validators.NewSet() vdrID, nonVdrID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - err := vdrs.AddWeight(vdrID, 1) - require.NoError(err) targeter := tracker.NewMockTargeter(ctrl) - throttler, err := NewSystemThrottler("", prometheus.NewRegistry(), config, vdrs, mockTracker, targeter) + throttler, err := NewSystemThrottler("", prometheus.NewRegistry(), config, mockTracker, targeter) require.NoError(err) // Case: Actual usage <= target usage; should return immediately @@ -148,12 +143,9 @@ func TestSystemThrottlerContextCancel(t *testing.T) { config := SystemThrottlerConfig{ MaxRecheckDelay: maxRecheckDelay, } - vdrs := validators.NewSet() vdrID := ids.GenerateTestNodeID() - err := vdrs.AddWeight(vdrID, 1) - require.NoError(err) targeter := 
tracker.NewMockTargeter(ctrl) - throttler, err := NewSystemThrottler("", prometheus.NewRegistry(), config, vdrs, mockTracker, targeter) + throttler, err := NewSystemThrottler("", prometheus.NewRegistry(), config, mockTracker, targeter) require.NoError(err) // Case: Actual usage > target usage; we should wait. diff --git a/avalanchego/network/throttling/no_inbound_msg_throttler.go b/avalanchego/network/throttling/no_inbound_msg_throttler.go index 046b2d24..de6e03f8 100644 --- a/avalanchego/network/throttling/no_inbound_msg_throttler.go +++ b/avalanchego/network/throttling/no_inbound_msg_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ InboundMsgThrottler = &noInboundMsgThrottler{} +var _ InboundMsgThrottler = (*noInboundMsgThrottler)(nil) // Returns an InboundMsgThrottler where Acquire() always returns immediately. func NewNoInboundThrottler() InboundMsgThrottler { diff --git a/avalanchego/network/throttling/outbound_msg_throttler.go b/avalanchego/network/throttling/outbound_msg_throttler.go index afa69371..8b46cb2c 100644 --- a/avalanchego/network/throttling/outbound_msg_throttler.go +++ b/avalanchego/network/throttling/outbound_msg_throttler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -15,8 +15,8 @@ import ( ) var ( - _ OutboundMsgThrottler = &outboundMsgThrottler{} - _ OutboundMsgThrottler = &noOutboundMsgThrottler{} + _ OutboundMsgThrottler = (*outboundMsgThrottler)(nil) + _ OutboundMsgThrottler = (*noOutboundMsgThrottler)(nil) ) // Rate-limits outgoing messages @@ -72,7 +72,7 @@ func (t *outboundMsgThrottler) Acquire(msg message.OutboundMessage, nodeID ids.N // Take as many bytes as we can from the at-large allocation. bytesNeeded := uint64(len(msg.Bytes())) - atLargeBytesUsed := math.Min64( + atLargeBytesUsed := math.Min( // only give as many bytes as needed bytesNeeded, // don't exceed per-node limit @@ -85,8 +85,8 @@ func (t *outboundMsgThrottler) Acquire(msg message.OutboundMessage, nodeID ids.N // Take as many bytes as we can from [nodeID]'s validator allocation. // Calculate [nodeID]'s validator allocation size based on its weight vdrAllocationSize := uint64(0) - weight, isVdr := t.vdrs.GetWeight(nodeID) - if isVdr && weight != 0 { + weight := t.vdrs.GetWeight(nodeID) + if weight != 0 { vdrAllocationSize = uint64(float64(t.maxVdrBytes) * float64(weight) / float64(t.vdrs.Weight())) } vdrBytesAlreadyUsed := t.nodeToVdrBytesUsed[nodeID] @@ -99,7 +99,7 @@ func (t *outboundMsgThrottler) Acquire(msg message.OutboundMessage, nodeID ids.N } else { vdrBytesAllowed -= vdrBytesAlreadyUsed } - vdrBytesUsed := math.Min64(t.remainingVdrBytes, bytesNeeded, vdrBytesAllowed) + vdrBytesUsed := math.Min(t.remainingVdrBytes, bytesNeeded, vdrBytesAllowed) bytesNeeded -= vdrBytesUsed if bytesNeeded != 0 { // Can't acquire enough bytes to queue this message to be sent @@ -142,7 +142,7 @@ func (t *outboundMsgThrottler) Release(msg message.OutboundMessage, nodeID ids.N // that will be given back to [nodeID]'s validator allocation. 
vdrBytesUsed := t.nodeToVdrBytesUsed[nodeID] msgSize := uint64(len(msg.Bytes())) - vdrBytesToReturn := math.Min64(msgSize, vdrBytesUsed) + vdrBytesToReturn := math.Min(msgSize, vdrBytesUsed) t.nodeToVdrBytesUsed[nodeID] -= vdrBytesToReturn if t.nodeToVdrBytesUsed[nodeID] == 0 { delete(t.nodeToVdrBytesUsed, nodeID) @@ -212,6 +212,8 @@ func NewNoOutboundThrottler() OutboundMsgThrottler { // [Acquire] always returns true. [Release] does nothing. type noOutboundMsgThrottler struct{} -func (*noOutboundMsgThrottler) Acquire(message.OutboundMessage, ids.NodeID) bool { return true } +func (*noOutboundMsgThrottler) Acquire(message.OutboundMessage, ids.NodeID) bool { + return true +} func (*noOutboundMsgThrottler) Release(message.OutboundMessage, ids.NodeID) {} diff --git a/avalanchego/network/throttling/outbound_msg_throttler_test.go b/avalanchego/network/throttling/outbound_msg_throttler_test.go index 9729a873..f260558d 100644 --- a/avalanchego/network/throttling/outbound_msg_throttler_test.go +++ b/avalanchego/network/throttling/outbound_msg_throttler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package throttling @@ -6,6 +6,8 @@ package throttling import ( "testing" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -17,6 +19,9 @@ import ( ) func TestSybilOutboundMsgThrottler(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + require := require.New(t) config := MsgByteThrottlerConfig{ VdrAllocSize: 1024, @@ -26,8 +31,8 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { vdrs := validators.NewSet() vdr1ID := ids.GenerateTestNodeID() vdr2ID := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr1ID, 1)) - require.NoError(vdrs.AddWeight(vdr2ID, 1)) + require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.Add(vdr2ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, "", @@ -47,7 +52,7 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { require.NotNil(throttler.vdrs) // Take from at-large allocation. - msg := testMsgWithSize(1) + msg := testMsgWithSize(ctrl, 1) acquired := throttlerIntf.Acquire(msg, vdr1ID) require.True(acquired) require.EqualValues(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes) @@ -64,7 +69,7 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { require.Len(throttler.nodeToAtLargeBytesUsed, 0) // Use all the at-large allocation bytes and 1 of the validator allocation bytes - msg = testMsgWithSize(config.AtLargeAllocSize + 1) + msg = testMsgWithSize(ctrl, config.AtLargeAllocSize+1) acquired = throttlerIntf.Acquire(msg, vdr1ID) require.True(acquired) // vdr1 at-large bytes used: 1024. Validator bytes used: 1 @@ -76,7 +81,7 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { require.EqualValues(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) // The other validator should be able to acquire half the validator allocation. 
- msg = testMsgWithSize(config.AtLargeAllocSize / 2) + msg = testMsgWithSize(ctrl, config.AtLargeAllocSize/2) acquired = throttlerIntf.Acquire(msg, vdr2ID) require.True(acquired) // vdr2 at-large bytes used: 0. Validator bytes used: 512 @@ -87,7 +92,7 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { require.Len(throttler.nodeToAtLargeBytesUsed, 1) // vdr1 should be able to acquire the rest of the validator allocation - msg = testMsgWithSize(config.VdrAllocSize/2 - 1) + msg = testMsgWithSize(ctrl, config.VdrAllocSize/2-1) acquired = throttlerIntf.Acquire(msg, vdr1ID) require.True(acquired) // vdr1 at-large bytes used: 1024. Validator bytes used: 512 @@ -96,7 +101,7 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { require.EqualValues(config.AtLargeAllocSize, throttler.nodeToAtLargeBytesUsed[vdr1ID]) // Trying to take more bytes for either node should fail - msg = testMsgWithSize(1) + msg = testMsgWithSize(ctrl, 1) acquired = throttlerIntf.Acquire(msg, vdr1ID) require.False(acquired) acquired = throttlerIntf.Acquire(msg, vdr2ID) @@ -109,7 +114,7 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { // When the choice exists, bytes should be given back to the validator allocation // rather than the at-large allocation. // vdr1 at-large bytes used: 511. 
Validator bytes used: 0 - msg = testMsgWithSize(config.AtLargeAllocSize + 1) + msg = testMsgWithSize(ctrl, config.AtLargeAllocSize+1) throttlerIntf.Release(msg, vdr1ID) require.EqualValues(config.NodeMaxAtLargeBytes/2, throttler.remainingVdrBytes) @@ -121,19 +126,19 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { // Non-validator should be able to take the rest of the at-large bytes // nonVdrID at-large bytes used: 513 nonVdrID := ids.GenerateTestNodeID() - msg = testMsgWithSize(config.AtLargeAllocSize/2 + 1) + msg = testMsgWithSize(ctrl, config.AtLargeAllocSize/2+1) acquired = throttlerIntf.Acquire(msg, nonVdrID) require.True(acquired) require.EqualValues(0, throttler.remainingAtLargeBytes) require.EqualValues(config.AtLargeAllocSize/2+1, throttler.nodeToAtLargeBytesUsed[nonVdrID]) // Non-validator shouldn't be able to acquire more since at-large allocation empty - msg = testMsgWithSize(1) + msg = testMsgWithSize(ctrl, 1) acquired = throttlerIntf.Acquire(msg, nonVdrID) require.False(acquired) // Release all of vdr2's messages - msg = testMsgWithSize(config.AtLargeAllocSize / 2) + msg = testMsgWithSize(ctrl, config.AtLargeAllocSize/2) throttlerIntf.Release(msg, vdr2ID) require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[vdr2ID]) require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) @@ -141,7 +146,7 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { require.EqualValues(0, throttler.remainingAtLargeBytes) // Release all of vdr1's messages - msg = testMsgWithSize(config.VdrAllocSize/2 - 1) + msg = testMsgWithSize(ctrl, config.VdrAllocSize/2-1) throttlerIntf.Release(msg, vdr1ID) require.Len(throttler.nodeToVdrBytesUsed, 0) require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) @@ -149,7 +154,7 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[vdr1ID]) // Release nonVdr's messages - msg = testMsgWithSize(config.AtLargeAllocSize/2 + 1) + msg = testMsgWithSize(ctrl, 
config.AtLargeAllocSize/2+1) throttlerIntf.Release(msg, nonVdrID) require.Len(throttler.nodeToVdrBytesUsed, 0) require.EqualValues(config.VdrAllocSize, throttler.remainingVdrBytes) @@ -160,6 +165,9 @@ func TestSybilOutboundMsgThrottler(t *testing.T) { // Ensure that the limit on taking from the at-large allocation is enforced func TestSybilOutboundMsgThrottlerMaxNonVdr(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + require := require.New(t) config := MsgByteThrottlerConfig{ VdrAllocSize: 100, @@ -168,7 +176,7 @@ func TestSybilOutboundMsgThrottlerMaxNonVdr(t *testing.T) { } vdrs := validators.NewSet() vdr1ID := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr1ID, 1)) + require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, "", @@ -179,23 +187,23 @@ func TestSybilOutboundMsgThrottlerMaxNonVdr(t *testing.T) { require.NoError(err) throttler := throttlerIntf.(*outboundMsgThrottler) nonVdrNodeID1 := ids.GenerateTestNodeID() - msg := testMsgWithSize(config.NodeMaxAtLargeBytes) + msg := testMsgWithSize(ctrl, config.NodeMaxAtLargeBytes) acquired := throttlerIntf.Acquire(msg, nonVdrNodeID1) require.True(acquired) // Acquiring more should fail - msg = testMsgWithSize(1) + msg = testMsgWithSize(ctrl, 1) acquired = throttlerIntf.Acquire(msg, nonVdrNodeID1) require.False(acquired) // A different non-validator should be able to acquire nonVdrNodeID2 := ids.GenerateTestNodeID() - msg = testMsgWithSize(config.NodeMaxAtLargeBytes) + msg = testMsgWithSize(ctrl, config.NodeMaxAtLargeBytes) acquired = throttlerIntf.Acquire(msg, nonVdrNodeID2) require.True(acquired) // Validator should only be able to take [MaxAtLargeBytes] - msg = testMsgWithSize(config.NodeMaxAtLargeBytes + 1) + msg = testMsgWithSize(ctrl, config.NodeMaxAtLargeBytes+1) throttlerIntf.Acquire(msg, vdr1ID) require.EqualValues(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[vdr1ID]) 
require.EqualValues(1, throttler.nodeToVdrBytesUsed[vdr1ID]) @@ -206,6 +214,9 @@ func TestSybilOutboundMsgThrottlerMaxNonVdr(t *testing.T) { // Ensure that the throttler honors requested bypasses func TestBypassThrottling(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + require := require.New(t) config := MsgByteThrottlerConfig{ VdrAllocSize: 100, @@ -214,7 +225,7 @@ func TestBypassThrottling(t *testing.T) { } vdrs := validators.NewSet() vdr1ID := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr1ID, 1)) + require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) throttlerIntf, err := NewSybilOutboundMsgThrottler( logging.NoLog{}, "", @@ -225,22 +236,31 @@ func TestBypassThrottling(t *testing.T) { require.NoError(err) throttler := throttlerIntf.(*outboundMsgThrottler) nonVdrNodeID1 := ids.GenerateTestNodeID() - msg := message.NewTestMsg(message.AppGossip, make([]byte, config.NodeMaxAtLargeBytes), true) + msg := message.NewMockOutboundMessage(ctrl) + msg.EXPECT().BypassThrottling().Return(true).AnyTimes() + msg.EXPECT().Op().Return(message.AppGossipOp).AnyTimes() + msg.EXPECT().Bytes().Return(make([]byte, config.NodeMaxAtLargeBytes)).AnyTimes() acquired := throttlerIntf.Acquire(msg, nonVdrNodeID1) require.True(acquired) // Acquiring more should not fail - msg = message.NewTestMsg(message.AppGossip, make([]byte, 1), true) + msg = message.NewMockOutboundMessage(ctrl) + msg.EXPECT().BypassThrottling().Return(true).AnyTimes() + msg.EXPECT().Op().Return(message.AppGossipOp).AnyTimes() + msg.EXPECT().Bytes().Return(make([]byte, 1)).AnyTimes() acquired = throttlerIntf.Acquire(msg, nonVdrNodeID1) require.True(acquired) // Acquiring more should not fail - msg2 := testMsgWithSize(1) + msg2 := testMsgWithSize(ctrl, 1) acquired = throttlerIntf.Acquire(msg2, nonVdrNodeID1) require.True(acquired) // Validator should only be able to take [MaxAtLargeBytes] - msg = message.NewTestMsg(message.AppGossip, make([]byte, config.NodeMaxAtLargeBytes+1), true) 
+ msg = message.NewMockOutboundMessage(ctrl) + msg.EXPECT().BypassThrottling().Return(true).AnyTimes() + msg.EXPECT().Op().Return(message.AppGossipOp).AnyTimes() + msg.EXPECT().Bytes().Return(make([]byte, config.NodeMaxAtLargeBytes+1)).AnyTimes() throttlerIntf.Acquire(msg, vdr1ID) require.EqualValues(0, throttler.nodeToAtLargeBytesUsed[vdr1ID]) require.EqualValues(0, throttler.nodeToVdrBytesUsed[vdr1ID]) @@ -248,6 +268,10 @@ func TestBypassThrottling(t *testing.T) { require.EqualValues(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes) } -func testMsgWithSize(size uint64) message.OutboundMessage { - return message.NewTestMsg(message.AppGossip, make([]byte, size), false) +func testMsgWithSize(ctrl *gomock.Controller, size uint64) message.OutboundMessage { + msg := message.NewMockOutboundMessage(ctrl) + msg.EXPECT().BypassThrottling().Return(false).AnyTimes() + msg.EXPECT().Op().Return(message.AppGossipOp).AnyTimes() + msg.EXPECT().Bytes().Return(make([]byte, size)).AnyTimes() + return msg } diff --git a/avalanchego/network/throttling/release_func.go b/avalanchego/network/throttling/release_func.go index 3ea58391..0abe2bf4 100644 --- a/avalanchego/network/throttling/release_func.go +++ b/avalanchego/network/throttling/release_func.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package throttling diff --git a/avalanchego/network/tracked_ip.go b/avalanchego/network/tracked_ip.go index 93630b46..ca673f76 100644 --- a/avalanchego/network/tracked_ip.go +++ b/avalanchego/network/tracked_ip.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package network @@ -8,29 +8,31 @@ import ( "sync" "time" - "github.com/ava-labs/avalanchego/network/peer" + "github.com/ava-labs/avalanchego/utils/ips" ) -func init() { rand.Seed(time.Now().UnixNano()) } +func init() { + rand.Seed(time.Now().UnixNano()) +} type trackedIP struct { delayLock sync.RWMutex delay time.Duration - ip *peer.UnsignedIP + ip ips.IPPort stopTrackingOnce sync.Once onStopTracking chan struct{} } -func newTrackedIP(ip *peer.UnsignedIP) *trackedIP { +func newTrackedIP(ip ips.IPPort) *trackedIP { return &trackedIP{ ip: ip, onStopTracking: make(chan struct{}), } } -func (ip *trackedIP) trackNewIP(newIP *peer.UnsignedIP) *trackedIP { +func (ip *trackedIP) trackNewIP(newIP ips.IPPort) *trackedIP { ip.stopTracking() return &trackedIP{ delay: ip.getDelay(), diff --git a/avalanchego/network/tracked_ip_test.go b/avalanchego/network/tracked_ip_test.go index f51cea37..bbf6267d 100644 --- a/avalanchego/network/tracked_ip_test.go +++ b/avalanchego/network/tracked_ip_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package network diff --git a/avalanchego/node/beacon_manager.go b/avalanchego/node/beacon_manager.go new file mode 100644 index 00000000..3e198241 --- /dev/null +++ b/avalanchego/node/beacon_manager.go @@ -0,0 +1,41 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package node + +import ( + "sync/atomic" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/networking/router" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/avalanchego/version" +) + +var _ router.Router = (*beaconManager)(nil) + +type beaconManager struct { + router.Router + timer *timer.Timer + beacons validators.Set + requiredConns int64 + numConns int64 +} + +func (b *beaconManager) Connected(nodeID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { + if constants.PrimaryNetworkID == subnetID && + b.beacons.Contains(nodeID) && + atomic.AddInt64(&b.numConns, 1) >= b.requiredConns { + b.timer.Cancel() + } + b.Router.Connected(nodeID, nodeVersion, subnetID) +} + +func (b *beaconManager) Disconnected(nodeID ids.NodeID) { + if b.beacons.Contains(nodeID) { + atomic.AddInt64(&b.numConns, -1) + } + b.Router.Disconnected(nodeID) +} diff --git a/avalanchego/node/beacon_manager_test.go b/avalanchego/node/beacon_manager_test.go new file mode 100644 index 00000000..4e5ce1b4 --- /dev/null +++ b/avalanchego/node/beacon_manager_test.go @@ -0,0 +1,87 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package node + +import ( + "sync" + "testing" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/networking/router" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/timer" + "github.com/ava-labs/avalanchego/version" +) + +const numValidators = 5_000 + +// Tests that reconnects that mutate the beacon manager's current total stake +// weight is consistent. Test is not deterministic. 
+func TestBeaconManager_DataRace(t *testing.T) { + require := require.New(t) + + validatorIDs := make([]ids.NodeID, 0, numValidators) + validatorSet := validators.NewSet() + for i := 0; i < numValidators; i++ { + nodeID := ids.GenerateTestNodeID() + + require.NoError(validatorSet.Add(nodeID, nil, ids.Empty, 1)) + validatorIDs = append(validatorIDs, nodeID) + } + + wg := &sync.WaitGroup{} + + ctrl := gomock.NewController(t) + mockRouter := router.NewMockRouter(ctrl) + + b := beaconManager{ + Router: mockRouter, + timer: timer.NewTimer(nil), + beacons: validatorSet, + requiredConns: numValidators, + } + + // connect numValidators validators, each with a weight of 1 + wg.Add(2 * numValidators) + mockRouter.EXPECT(). + Connected(gomock.Any(), gomock.Any(), gomock.Any()). + Times(2 * numValidators). + Do(func(ids.NodeID, *version.Application, ids.ID) { + wg.Done() + }) + + for _, nodeID := range validatorIDs { + nodeID := nodeID + go func() { + b.Connected(nodeID, version.CurrentApp, constants.PrimaryNetworkID) + b.Connected(nodeID, version.CurrentApp, ids.GenerateTestID()) + }() + } + wg.Wait() + + // we should have a weight of numValidators now + require.EqualValues(numValidators, b.numConns) + + // disconnect numValidators validators + wg.Add(numValidators) + mockRouter.EXPECT(). + Disconnected(gomock.Any()). + Times(numValidators). + Do(func(ids.NodeID) { + wg.Done() + }) + + for _, nodeID := range validatorIDs { + go b.Disconnected(nodeID) + } + wg.Wait() + + // we should a weight of zero now + require.Zero(b.numConns) +} diff --git a/avalanchego/node/config.go b/avalanchego/node/config.go index 3c90dd35..d32a0b07 100644 --- a/avalanchego/node/config.go +++ b/avalanchego/node/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package node @@ -7,23 +7,24 @@ import ( "crypto/tls" "time" + "github.com/ava-labs/avalanchego/api/server" "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/nat" "github.com/ava-labs/avalanchego/network" - "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/networking/router" - "github.com/ava-labs/avalanchego/snow/networking/sender" "github.com/ava-labs/avalanchego/snow/networking/tracker" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/dynamicip" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/profiler" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" - "github.com/ava-labs/avalanchego/vms" ) type IPCConfig struct { @@ -43,6 +44,7 @@ type APIIndexerConfig struct { } type HTTPConfig struct { + server.HTTPConfig APIConfig `json:"apiConfig"` HTTPHost string `json:"httpHost"` HTTPPort uint16 `json:"httpPort"` @@ -155,28 +157,19 @@ type Config struct { // Network configuration NetworkConfig network.Config `json:"networkConfig"` - GossipConfig sender.GossipConfig `json:"gossipConfig"` - AdaptiveTimeoutConfig timer.AdaptiveTimeoutConfig `json:"adaptiveTimeoutConfig"` - // Benchlist Configuration BenchlistConfig benchlist.Config `json:"benchlistConfig"` - // Profiling configurations ProfilerConfig profiler.Config `json:"profilerConfig"` - // Logging configuration LoggingConfig logging.Config `json:"loggingConfig"` - // Plugin directory PluginDir string `json:"pluginDir"` // File Descriptor Limit FdLimit uint64 `json:"fdLimit"` - // Consensus configuration - ConsensusParams avalanche.Parameters 
`json:"consensusParams"` - // Metrics MeterVMEnabled bool `json:"meterVMEnabled"` @@ -186,18 +179,18 @@ type Config struct { ConsensusShutdownTimeout time.Duration `json:"consensusShutdownTimeout"` // Gossip a container in the accepted frontier every [ConsensusGossipFrequency] ConsensusGossipFrequency time.Duration `json:"consensusGossipFreq"` + // ConsensusAppConcurrency defines the maximum number of goroutines to + // handle App messages per chain. + ConsensusAppConcurrency int `json:"consensusAppConcurrency"` - // Subnet Whitelist - WhitelistedSubnets ids.Set `json:"whitelistedSubnets"` + TrackedSubnets set.Set[ids.ID] `json:"trackedSubnets"` - // SubnetConfigs - SubnetConfigs map[ids.ID]chains.SubnetConfig `json:"subnetConfigs"` + SubnetConfigs map[ids.ID]subnets.Config `json:"subnetConfigs"` - // ChainConfigs ChainConfigs map[string]chains.ChainConfig `json:"-"` + ChainAliases map[ids.ID][]string `json:"chainAliases"` - // VM management - VMManager vms.Manager `json:"-"` + VMAliaser ids.Aliaser `json:"-"` // Halflife to use for the processing requests tracker. // Larger halflife --> usage metrics change more slowly. @@ -222,4 +215,20 @@ type Config struct { RequiredAvailableDiskSpace uint64 `json:"requiredAvailableDiskSpace"` WarningThresholdAvailableDiskSpace uint64 `json:"warningThresholdAvailableDiskSpace"` + + TraceConfig trace.Config `json:"traceConfig"` + + // See comment on [MinPercentConnectedStakeHealthy] in platformvm.Config + // TODO: consider moving to subnet config + MinPercentConnectedStakeHealthy map[ids.ID]float64 `json:"minPercentConnectedStakeHealthy"` + + // See comment on [UseCurrentHeight] in platformvm.Config + UseCurrentHeight bool `json:"useCurrentHeight"` + + // ProvidedFlags contains all the flags set by the user + ProvidedFlags map[string]interface{} `json:"-"` + + // ChainDataDir is the root path for per-chain directories where VMs can + // write arbitrary data. 
+ ChainDataDir string `json:"chainDataDir"` } diff --git a/avalanchego/node/insecure_validator_manager.go b/avalanchego/node/insecure_validator_manager.go new file mode 100644 index 00000000..12428ed8 --- /dev/null +++ b/avalanchego/node/insecure_validator_manager.go @@ -0,0 +1,41 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package node + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/networking/router" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/version" +) + +type insecureValidatorManager struct { + router.Router + vdrs validators.Set + weight uint64 +} + +func (i *insecureValidatorManager) Connected(vdrID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { + if constants.PrimaryNetworkID == subnetID { + // Staking is disabled so we don't have a txID that added the peer as a + // validator. Because each validator needs a txID associated with it, we + // hack one together by padding the nodeID with zeroes. + dummyTxID := ids.Empty + copy(dummyTxID[:], vdrID[:]) + + // Add will only error here if the total weight of the set would go over + // [math.MaxUint64]. In this case, we will just not mark this new peer + // as a validator. + _ = i.vdrs.Add(vdrID, nil, dummyTxID, i.weight) + } + i.Router.Connected(vdrID, nodeVersion, subnetID) +} + +func (i *insecureValidatorManager) Disconnected(vdrID ids.NodeID) { + // RemoveWeight will only error here if there was an error reported during + // Add. + _ = i.vdrs.RemoveWeight(vdrID, i.weight) + i.Router.Disconnected(vdrID) +} diff --git a/avalanchego/node/node.go b/avalanchego/node/node.go index 8bebff97..0e258f48 100644 --- a/avalanchego/node/node.go +++ b/avalanchego/node/node.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package node import ( + "context" "crypto" "errors" "fmt" @@ -14,8 +15,6 @@ import ( "sync" "time" - "github.com/hashicorp/go-plugin" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -55,30 +54,35 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/trace" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/filesystem" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/platformvm" - "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/registry" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ipcsapi "github.com/ava-labs/avalanchego/api/ipcs" + avmconfig "github.com/ava-labs/avalanchego/vms/avm/config" + platformconfig 
"github.com/ava-labs/avalanchego/vms/platformvm/config" ) var ( @@ -91,8 +95,9 @@ var ( // Node is an instance of an Avalanche node. type Node struct { - Log logging.Logger - LogFactory logging.Factory + Log logging.Logger + VMFactoryLog logging.Logger + LogFactory logging.Factory // This node's unique ID used when communicating with other nodes // (in consensus, for example) @@ -118,8 +123,7 @@ type Node struct { health health.Health // Build and parse messages, for both network layer and chain manager - msgCreator message.Creator - msgCreatorWithProto message.Creator + msgCreator message.Creator // Manages creation of blockchains and routing messages to them chainManager chains.Manager @@ -130,8 +134,9 @@ type Node struct { uptimeCalculator uptime.LockedCalculator // dispatcher for events as they happen in consensus - DecisionAcceptorGroup snow.AcceptorGroup - ConsensusAcceptorGroup snow.AcceptorGroup + BlockAcceptorGroup snow.AcceptorGroup + TxAcceptorGroup snow.AcceptorGroup + VertexAcceptorGroup snow.AcceptorGroup IPCs *ipcs.ChainIPCs @@ -155,14 +160,16 @@ type Node struct { // This node's configuration Config *Config + tracer trace.Tracer + // ensures that we only close the node once. shutdownOnce sync.Once // True if node is shutting down or is done shutting down - shuttingDown utils.AtomicBool + shuttingDown utils.Atomic[bool] // Sets the exit code - shuttingDownExitCode utils.AtomicInterface + shuttingDownExitCode utils.Atomic[int] // Incremented only once on initialization. // Decremented when node is done shutting down. 
@@ -172,9 +179,14 @@ type Node struct { MetricsRegisterer *prometheus.Registry MetricsGatherer metrics.MultiGatherer + VMManager vms.Manager + // VM endpoint registry VMRegistry registry.VMRegistry + // Manages shutdown of a VM process + runtimeManager runtime.Manager + resourceManager resource.Manager // Tracks the CPU/disk usage caused by processing @@ -249,9 +261,22 @@ func (n *Node) initNetworking(primaryNetVdrs validators.Set) error { consensusRouter := n.Config.ConsensusRouter if !n.Config.EnableStaking { - if err := primaryNetVdrs.AddWeight(n.ID, n.Config.DisabledStakingWeight); err != nil { + // Staking is disabled so we don't have a txID that added us as a + // validator. Because each validator needs a txID associated with it, we + // hack one together by just padding our nodeID with zeroes. + dummyTxID := ids.Empty + copy(dummyTxID[:], n.ID[:]) + + err := primaryNetVdrs.Add( + n.ID, + bls.PublicFromSecretKey(n.Config.StakingSigningKey), + dummyTxID, + n.Config.DisabledStakingWeight, + ) + if err != nil { return err } + consensusRouter = &insecureValidatorManager{ Router: consensusRouter, vdrs: primaryNetVdrs, @@ -259,16 +284,16 @@ func (n *Node) initNetworking(primaryNetVdrs validators.Set) error { } } - bootstrapWeight := n.beacons.Weight() - reqWeight := (3*bootstrapWeight + 3) / 4 + numBeacons := n.beacons.Len() + requiredConns := (3*numBeacons + 3) / 4 - if reqWeight > 0 { + if requiredConns > 0 { // Set a timer that will fire after a given timeout unless we connect - // to a sufficient portion of stake-weighted nodes. If the timeout - // fires, the node will shutdown. + // to a sufficient portion of nodes. If the timeout fires, the node will + // shutdown. timer := timer.NewTimer(func() { // If the timeout fires and we're already shutting down, nothing to do. 
- if !n.shuttingDown.GetValue() { + if !n.shuttingDown.Get() { n.Log.Warn("failed to connect to bootstrap nodes", zap.Stringer("beacons", n.beacons), zap.Duration("duration", n.Config.BootstrapBeaconConnectionTimeout), @@ -280,13 +305,25 @@ func (n *Node) initNetworking(primaryNetVdrs validators.Set) error { timer.SetTimeoutIn(n.Config.BootstrapBeaconConnectionTimeout) consensusRouter = &beaconManager{ - Router: consensusRouter, - timer: timer, - beacons: n.beacons, - requiredWeight: reqWeight, + Router: consensusRouter, + timer: timer, + beacons: n.beacons, + requiredConns: int64(requiredConns), } } + // initialize gossip tracker + gossipTracker, err := peer.NewGossipTracker(n.MetricsRegisterer, n.networkNamespace) + if err != nil { + return err + } + + // keep gossip tracker synchronized with the validator set + primaryNetVdrs.RegisterCallbackListener(&peer.GossipTrackerCallback{ + Log: n.Log, + GossipTracker: gossipTracker, + }) + // add node configs to network config n.Config.NetworkConfig.Namespace = n.networkNamespace n.Config.NetworkConfig.MyNodeID = n.ID @@ -296,18 +333,17 @@ func (n *Node) initNetworking(primaryNetVdrs validators.Set) error { n.Config.NetworkConfig.Beacons = n.beacons n.Config.NetworkConfig.TLSConfig = tlsConfig n.Config.NetworkConfig.TLSKey = tlsKey - n.Config.NetworkConfig.WhitelistedSubnets = n.Config.WhitelistedSubnets + n.Config.NetworkConfig.TrackedSubnets = n.Config.TrackedSubnets n.Config.NetworkConfig.UptimeCalculator = n.uptimeCalculator n.Config.NetworkConfig.UptimeRequirement = n.Config.UptimeRequirement n.Config.NetworkConfig.ResourceTracker = n.resourceTracker n.Config.NetworkConfig.CPUTargeter = n.cpuTargeter n.Config.NetworkConfig.DiskTargeter = n.diskTargeter + n.Config.NetworkConfig.GossipTracker = gossipTracker n.Net, err = network.NewNetwork( &n.Config.NetworkConfig, n.msgCreator, - n.msgCreatorWithProto, - version.GetBanffTime(n.Config.NetworkID), n.MetricsRegisterer, n.Log, listener, @@ -318,69 +354,6 @@ func (n 
*Node) initNetworking(primaryNetVdrs validators.Set) error { return err } -type insecureValidatorManager struct { - router.Router - vdrs validators.Set - weight uint64 -} - -func (i *insecureValidatorManager) Connected(vdrID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { - if constants.PrimaryNetworkID == subnetID { - _ = i.vdrs.AddWeight(vdrID, i.weight) - } - i.Router.Connected(vdrID, nodeVersion, subnetID) -} - -func (i *insecureValidatorManager) Disconnected(vdrID ids.NodeID) { - // Shouldn't error unless the set previously had an error, which should - // never happen as described above - _ = i.vdrs.RemoveWeight(vdrID, i.weight) - i.Router.Disconnected(vdrID) -} - -type beaconManager struct { - router.Router - timer *timer.Timer - beacons validators.Set - requiredWeight uint64 - totalWeight uint64 -} - -func (b *beaconManager) Connected(vdrID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { - if constants.PrimaryNetworkID == subnetID { - // TODO: this is always 1, beacons can be reduced to ShortSet? - weight, ok := b.beacons.GetWeight(vdrID) - if !ok { - b.Router.Connected(vdrID, nodeVersion, subnetID) - return - } - weight, err := math.Add64(weight, b.totalWeight) - if err != nil { - b.timer.Cancel() - b.Router.Connected(vdrID, nodeVersion, subnetID) - return - } - b.totalWeight = weight - if b.totalWeight >= b.requiredWeight { - b.timer.Cancel() - } - } - b.Router.Connected(vdrID, nodeVersion, subnetID) -} - -func (b *beaconManager) Disconnected(vdrID ids.NodeID) { - if weight, ok := b.beacons.GetWeight(vdrID); ok { - // TODO: Account for weight changes in a more robust manner. - - // Sub64 should rarely error since only validators that have added their - // weight can become disconnected. Because it is possible that there are - // changes to the validators set, we utilize that Sub64 returns 0 on - // error. 
- b.totalWeight, _ = math.Sub64(b.totalWeight, weight) - } - b.Router.Disconnected(vdrID) -} - // Dispatch starts the node's servers. // Returns when the node exits. func (n *Node) Dispatch() error { @@ -397,7 +370,7 @@ func (n *Node) Dispatch() error { // When [n].Shutdown() is called, [n.APIServer].Close() is called. // This causes [n.APIServer].Dispatch() to return an error. // If that happened, don't log/return an error here. - if !n.shuttingDown.GetValue() { + if !n.shuttingDown.Get() { n.Log.Fatal("API server dispatch failed", zap.Error(err), ) @@ -511,7 +484,10 @@ func (n *Node) initDatabase() error { func (n *Node) initBeacons() error { n.beacons = validators.NewSet() for _, peerID := range n.Config.BootstrapIDs { - if err := n.beacons.AddWeight(peerID, 1); err != nil { + // Note: The beacon connection manager will treat all beaconIDs as + // equal. + // Invariant: We never use the TxID or BLS keys populated here. + if err := n.beacons.Add(peerID, nil, ids.Empty, 1); err != nil { return err } } @@ -521,8 +497,9 @@ func (n *Node) initBeacons() error { // Create the EventDispatcher used for hooking events // into the general process flow. 
func (n *Node) initEventDispatchers() { - n.DecisionAcceptorGroup = snow.NewAcceptorGroup(n.Log) - n.ConsensusAcceptorGroup = snow.NewAcceptorGroup(n.Log) + n.BlockAcceptorGroup = snow.NewAcceptorGroup(n.Log) + n.TxAcceptorGroup = snow.NewAcceptorGroup(n.Log) + n.VertexAcceptorGroup = snow.NewAcceptorGroup(n.Log) } func (n *Node) initIPCs() error { @@ -536,7 +513,15 @@ func (n *Node) initIPCs() error { } var err error - n.IPCs, err = ipcs.NewChainIPCs(n.Log, n.Config.IPCPath, n.Config.NetworkID, n.ConsensusAcceptorGroup, n.DecisionAcceptorGroup, chainIDs) + n.IPCs, err = ipcs.NewChainIPCs( + n.Log, + n.Config.IPCPath, + n.Config.NetworkID, + n.BlockAcceptorGroup, + n.TxAcceptorGroup, + n.VertexAcceptorGroup, + chainIDs, + ) return err } @@ -548,14 +533,17 @@ func (n *Node) initIndexer() error { txIndexerDB := prefixdb.New(indexerDBPrefix, n.DB) var err error n.indexer, err = indexer.NewIndexer(indexer.Config{ - IndexingEnabled: n.Config.IndexAPIEnabled, - AllowIncompleteIndex: n.Config.IndexAllowIncomplete, - DB: txIndexerDB, - Log: n.Log, - DecisionAcceptorGroup: n.DecisionAcceptorGroup, - ConsensusAcceptorGroup: n.ConsensusAcceptorGroup, - APIServer: n.APIServer, - ShutdownF: func() { n.Shutdown(0) }, // TODO put exit code here + IndexingEnabled: n.Config.IndexAPIEnabled, + AllowIncompleteIndex: n.Config.IndexAllowIncomplete, + DB: txIndexerDB, + Log: n.Log, + BlockAcceptorGroup: n.BlockAcceptorGroup, + TxAcceptorGroup: n.TxAcceptorGroup, + VertexAcceptorGroup: n.VertexAcceptorGroup, + APIServer: n.APIServer, + ShutdownF: func() { + n.Shutdown(0) // TODO put exit code here + }, }) if err != nil { return fmt.Errorf("couldn't create index for txs: %w", err) @@ -569,26 +557,33 @@ func (n *Node) initIndexer() error { // Initializes the Platform chain. // Its genesis data specifies the other chains that should be created. 
-func (n *Node) initChains(genesisBytes []byte) { +func (n *Node) initChains(genesisBytes []byte) error { n.Log.Info("initializing chains") - // Create the Platform Chain - n.chainManager.ForceCreateChain(chains.ChainParameters{ + platformChain := chains.ChainParameters{ ID: constants.PlatformChainID, SubnetID: constants.PrimaryNetworkID, GenesisData: genesisBytes, // Specifies other chains to create VMID: constants.PlatformVMID, CustomBeacons: n.beacons, - }) + } + + // Start the chain creator with the Platform Chain + return n.chainManager.StartChainCreator(platformChain) +} + +func (n *Node) initMetrics() { + n.MetricsRegisterer = prometheus.NewRegistry() + n.MetricsGatherer = metrics.NewMultiGatherer() } // initAPIServer initializes the server that handles HTTP calls func (n *Node) initAPIServer() error { n.Log.Info("initializing API server") - n.APIServer = server.New() if !n.Config.APIRequireAuthToken { - n.APIServer.Initialize( + var err error + n.APIServer, err = server.New( n.Log, n.LogFactory, n.Config.HTTPHost, @@ -596,8 +591,13 @@ func (n *Node) initAPIServer() error { n.Config.APIAllowedOrigins, n.Config.ShutdownTimeout, n.ID, + n.Config.TraceConfig.Enabled, + n.tracer, + "api", + n.MetricsRegisterer, + n.Config.HTTPConfig.HTTPConfig, ) - return nil + return err } a, err := auth.New(n.Log, "auth", n.Config.APIAuthPassword) @@ -605,7 +605,7 @@ func (n *Node) initAPIServer() error { return err } - n.APIServer.Initialize( + n.APIServer, err = server.New( n.Log, n.LogFactory, n.Config.HTTPHost, @@ -613,8 +613,16 @@ func (n *Node) initAPIServer() error { n.Config.APIAllowedOrigins, n.Config.ShutdownTimeout, n.ID, + n.Config.TraceConfig.Enabled, + n.tracer, + "api", + n.MetricsRegisterer, + n.Config.HTTPConfig.HTTPConfig, a, ) + if err != nil { + return err + } // only create auth service if token authorization is required n.Log.Info("API authorization is enabled. 
Auth tokens must be passed in the header of API requests, except requests to the auth service.") @@ -636,7 +644,7 @@ func (n *Node) addDefaultVMAliases() error { for vmID, aliases := range vmAliases { for _, alias := range aliases { - if err := n.Config.VMManager.Alias(vmID, alias); err != nil { + if err := n.Config.VMAliaser.Alias(vmID, alias); err != nil { return err } } @@ -661,7 +669,7 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { cChainID := createEVMTx.ID() // If any of these chains die, the node shuts down - criticalChains := ids.Set{} + criticalChains := set.Set[ids.ID]{} criticalChains.Add( constants.PlatformChainID, xChainID, @@ -684,11 +692,11 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { err = n.Config.ConsensusRouter.Initialize( n.ID, n.Log, - n.msgCreator, timeoutManager, n.Config.ConsensusShutdownTimeout, criticalChains, - n.Config.WhitelistedSubnets, + n.Config.EnableStaking, + n.Config.TrackedSubnets, n.Shutdown, n.Config.RouterHealthConfig, "requests", @@ -701,17 +709,17 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { n.chainManager = chains.New(&chains.ManagerConfig{ StakingEnabled: n.Config.EnableStaking, StakingCert: n.Config.StakingTLSCert, + StakingBLSKey: n.Config.StakingSigningKey, Log: n.Log, LogFactory: n.LogFactory, - VMManager: n.Config.VMManager, - DecisionAcceptorGroup: n.DecisionAcceptorGroup, - ConsensusAcceptorGroup: n.ConsensusAcceptorGroup, + VMManager: n.VMManager, + BlockAcceptorGroup: n.BlockAcceptorGroup, + TxAcceptorGroup: n.TxAcceptorGroup, + VertexAcceptorGroup: n.VertexAcceptorGroup, DBManager: n.DBManager, MsgCreator: n.msgCreator, - MsgCreatorWithProto: n.msgCreatorWithProto, Router: n.Config.ConsensusRouter, Net: n.Net, - ConsensusParams: n.Config.ConsensusParams, Validators: n.vdrs, NodeID: n.ID, NetworkID: n.Config.NetworkID, @@ -720,10 +728,10 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { AtomicMemory: n.sharedMemory, AVAXAssetID: avaxAssetID, 
XChainID: xChainID, + CChainID: cChainID, CriticalChains: criticalChains, TimeoutManager: timeoutManager, Health: n.health, - WhitelistedSubnets: n.Config.WhitelistedSubnets, RetryBootstrap: n.Config.RetryBootstrap, RetryBootstrapWarnFrequency: n.Config.RetryBootstrapWarnFrequency, ShutdownNodeFunc: n.Shutdown, @@ -732,15 +740,17 @@ func (n *Node) initChainManager(avaxAssetID ids.ID) error { SubnetConfigs: n.Config.SubnetConfigs, ChainConfigs: n.Config.ChainConfigs, ConsensusGossipFrequency: n.Config.ConsensusGossipFrequency, - GossipConfig: n.Config.GossipConfig, + ConsensusAppConcurrency: n.Config.ConsensusAppConcurrency, BootstrapMaxTimeGetAncestors: n.Config.BootstrapMaxTimeGetAncestors, BootstrapAncestorsMaxContainersSent: n.Config.BootstrapAncestorsMaxContainersSent, BootstrapAncestorsMaxContainersReceived: n.Config.BootstrapAncestorsMaxContainersReceived, ApricotPhase4Time: version.GetApricotPhase4Time(n.Config.NetworkID), ApricotPhase4MinPChainHeight: version.GetApricotPhase4MinPChainHeight(n.Config.NetworkID), - BanffTime: version.GetBanffTime(n.Config.NetworkID), ResourceTracker: n.resourceTracker, StateSyncBeacons: n.Config.StateSyncIDs, + TracingEnabled: n.Config.TraceConfig.Enabled, + Tracer: n.tracer, + ChainDataDir: n.Config.ChainDataDir, }) // Notify the API server when new chains are created @@ -759,74 +769,84 @@ func (n *Node) initVMs() error { // to its own local validator manager (which isn't used for sampling) if !n.Config.EnableStaking { vdrs = validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) } vmRegisterer := registry.NewVMRegisterer(registry.VMRegistererConfig{ - APIServer: n.APIServer, - Log: n.Log, - VMManager: n.Config.VMManager, + APIServer: n.APIServer, + Log: n.Log, + VMFactoryLog: n.VMFactoryLog, + VMManager: n.VMManager, }) // Register the VMs that Avalanche supports errs := wrappers.Errs{} errs.Add( - vmRegisterer.Register(constants.PlatformVMID, 
&platformvm.Factory{ - Config: config.Config{ - Chains: n.chainManager, - Validators: vdrs, - SubnetTracker: n.Net, - UptimeLockedCalculator: n.uptimeCalculator, - StakingEnabled: n.Config.EnableStaking, - WhitelistedSubnets: n.Config.WhitelistedSubnets, - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, - CreateSubnetTxFee: n.Config.CreateSubnetTxFee, - TransformSubnetTxFee: n.Config.TransformSubnetTxFee, - CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, - AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, - AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, - AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, - AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, - UptimePercentage: n.Config.UptimeRequirement, - MinValidatorStake: n.Config.MinValidatorStake, - MaxValidatorStake: n.Config.MaxValidatorStake, - MinDelegatorStake: n.Config.MinDelegatorStake, - MinDelegationFee: n.Config.MinDelegationFee, - MinStakeDuration: n.Config.MinStakeDuration, - MaxStakeDuration: n.Config.MaxStakeDuration, - RewardConfig: n.Config.RewardConfig, - ApricotPhase3Time: version.GetApricotPhase3Time(n.Config.NetworkID), - ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), - BanffTime: version.GetBanffTime(n.Config.NetworkID), + vmRegisterer.Register(context.TODO(), constants.PlatformVMID, &platformvm.Factory{ + Config: platformconfig.Config{ + Chains: n.chainManager, + Validators: vdrs, + UptimeLockedCalculator: n.uptimeCalculator, + StakingEnabled: n.Config.EnableStaking, + TrackedSubnets: n.Config.TrackedSubnets, + TxFee: n.Config.TxFee, + CreateAssetTxFee: n.Config.CreateAssetTxFee, + CreateSubnetTxFee: n.Config.CreateSubnetTxFee, + TransformSubnetTxFee: n.Config.TransformSubnetTxFee, + CreateBlockchainTxFee: n.Config.CreateBlockchainTxFee, + AddPrimaryNetworkValidatorFee: n.Config.AddPrimaryNetworkValidatorFee, + AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, + 
AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, + AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, + UptimePercentage: n.Config.UptimeRequirement, + MinValidatorStake: n.Config.MinValidatorStake, + MaxValidatorStake: n.Config.MaxValidatorStake, + MinDelegatorStake: n.Config.MinDelegatorStake, + MinDelegationFee: n.Config.MinDelegationFee, + MinStakeDuration: n.Config.MinStakeDuration, + MaxStakeDuration: n.Config.MaxStakeDuration, + RewardConfig: n.Config.RewardConfig, + ApricotPhase3Time: version.GetApricotPhase3Time(n.Config.NetworkID), + ApricotPhase5Time: version.GetApricotPhase5Time(n.Config.NetworkID), + BanffTime: version.GetBanffTime(n.Config.NetworkID), + CortinaTime: version.GetCortinaTime(n.Config.NetworkID), + MinPercentConnectedStakeHealthy: n.Config.MinPercentConnectedStakeHealthy, + UseCurrentHeight: n.Config.UseCurrentHeight, }, }), - vmRegisterer.Register(constants.AVMID, &avm.Factory{ - TxFee: n.Config.TxFee, - CreateAssetTxFee: n.Config.CreateAssetTxFee, - BanffTime: version.GetBanffTime(n.Config.NetworkID), + vmRegisterer.Register(context.TODO(), constants.AVMID, &avm.Factory{ + Config: avmconfig.Config{ + TxFee: n.Config.TxFee, + CreateAssetTxFee: n.Config.CreateAssetTxFee, + }, }), - vmRegisterer.Register(constants.EVMID, &coreth.Factory{}), - n.Config.VMManager.RegisterFactory(secp256k1fx.ID, &secp256k1fx.Factory{}), - n.Config.VMManager.RegisterFactory(nftfx.ID, &nftfx.Factory{}), - n.Config.VMManager.RegisterFactory(propertyfx.ID, &propertyfx.Factory{}), + vmRegisterer.Register(context.TODO(), constants.EVMID, &coreth.Factory{}), + n.VMManager.RegisterFactory(context.TODO(), secp256k1fx.ID, &secp256k1fx.Factory{}), + n.VMManager.RegisterFactory(context.TODO(), nftfx.ID, &nftfx.Factory{}), + n.VMManager.RegisterFactory(context.TODO(), propertyfx.ID, &propertyfx.Factory{}), ) if errs.Errored() { return errs.Err } + // initialize vm runtime manager + n.runtimeManager = runtime.NewManager() + // initialize the vm registry 
n.VMRegistry = registry.NewVMRegistry(registry.VMRegistryConfig{ VMGetter: registry.NewVMGetter(registry.VMGetterConfig{ FileReader: filesystem.NewReader(), - Manager: n.Config.VMManager, + Manager: n.VMManager, PluginDirectory: n.Config.PluginDir, CPUTracker: n.resourceManager, + RuntimeTracker: n.runtimeManager, }), VMRegisterer: vmRegisterer, }) // register any vms that need to be installed as plugins from disk - _, failedVMs, err := n.VMRegistry.Reload() + _, failedVMs, err := n.VMRegistry.Reload(context.TODO()) for failedVM, err := range failedVMs { n.Log.Error("failed to register VM", zap.Stringer("vmID", failedVM), @@ -857,7 +877,7 @@ func (n *Node) initKeystoreAPI() error { n.Log.Info("skipping keystore API initialization because it has been disabled") return nil } - n.Log.Info("initializing keystore API") + n.Log.Warn("initializing deprecated keystore API") handler := &common.HTTPHandler{ LockOptions: common.NoLock, Handler: keystoreHandler, @@ -868,9 +888,6 @@ func (n *Node) initKeystoreAPI() error { // initMetricsAPI initializes the Metrics API // Assumes n.APIServer is already set func (n *Node) initMetricsAPI() error { - n.MetricsRegisterer = prometheus.NewRegistry() - n.MetricsGatherer = metrics.NewMultiGatherer() - if !n.Config.MetricsAPIEnabled { n.Log.Info("skipping metrics API initialization because it has been disabled") return nil @@ -924,7 +941,7 @@ func (n *Node) initAdminAPI() error { ProfileDir: n.Config.ProfilerConfig.Dir, LogFactory: n.LogFactory, NodeConfig: n.Config, - VMManager: n.Config.VMManager, + VMManager: n.VMManager, VMRegistry: n.VMRegistry, }, ) @@ -966,7 +983,7 @@ func (n *Node) initInfoAPI() error { n.Log.Info("initializing info API") - primaryValidators, _ := n.vdrs.GetValidators(constants.PrimaryNetworkID) + primaryValidators, _ := n.vdrs.Get(constants.PrimaryNetworkID) service, err := info.NewService( info.Parameters{ Version: version.CurrentApp, @@ -982,11 +999,11 @@ func (n *Node) initInfoAPI() error { 
AddPrimaryNetworkDelegatorFee: n.Config.AddPrimaryNetworkDelegatorFee, AddSubnetValidatorFee: n.Config.AddSubnetValidatorFee, AddSubnetDelegatorFee: n.Config.AddSubnetDelegatorFee, - VMManager: n.Config.VMManager, + VMManager: n.VMManager, }, n.Log, n.chainManager, - n.Config.VMManager, + n.VMManager, n.Config.NetworkConfig.MyIPPort, n.Net, primaryValidators, @@ -1013,23 +1030,23 @@ func (n *Node) initHealthAPI() error { } n.Log.Info("initializing Health API") - err = healthChecker.RegisterHealthCheck("network", n.Net) + err = healthChecker.RegisterHealthCheck("network", n.Net, health.GlobalTag) if err != nil { return fmt.Errorf("couldn't register network health check: %w", err) } - err = healthChecker.RegisterHealthCheck("router", n.Config.ConsensusRouter) + err = healthChecker.RegisterHealthCheck("router", n.Config.ConsensusRouter, health.GlobalTag) if err != nil { return fmt.Errorf("couldn't register router health check: %w", err) } // TODO: add database health to liveness check - err = healthChecker.RegisterHealthCheck("database", n.DB) + err = healthChecker.RegisterHealthCheck("database", n.DB, health.GlobalTag) if err != nil { return fmt.Errorf("couldn't register database health check: %w", err) } - diskSpaceCheck := health.CheckerFunc(func() (interface{}, error) { + diskSpaceCheck := health.CheckerFunc(func(context.Context) (interface{}, error) { // confirm that the node has enough disk space to continue operating // if there is too little disk space remaining, first report unhealthy and then shutdown the node @@ -1051,7 +1068,7 @@ func (n *Node) initHealthAPI() error { }, err }) - err = n.health.RegisterHealthCheck("diskspace", diskSpaceCheck) + err = n.health.RegisterHealthCheck("diskspace", diskSpaceCheck, health.GlobalTag) if err != nil { return fmt.Errorf("couldn't register resource health check: %w", err) } @@ -1118,7 +1135,7 @@ func (n *Node) initIPCAPI() error { n.Log.Info("skipping ipc API initialization because it has been disabled") return nil } - 
n.Log.Info("initializing ipc API") + n.Log.Warn("initializing deprecated ipc API") service, err := ipcsapi.NewService(n.Log, n.chainManager, n.APIServer, n.IPCs) if err != nil { return err @@ -1141,6 +1158,15 @@ func (n *Node) initChainAliases(genesisBytes []byte) error { } } } + + for chainID, aliases := range n.Config.ChainAliases { + for _, alias := range aliases { + if err := n.chainManager.Alias(chainID, alias); err != nil { + return err + } + } + } + return nil } @@ -1161,13 +1187,11 @@ func (n *Node) initAPIAliases(genesisBytes []byte) error { } // Initializes [n.vdrs] and returns the Primary Network validator set. -func (n *Node) initVdrs() (validators.Set, error) { +func (n *Node) initVdrs() validators.Set { n.vdrs = validators.NewManager() vdrSet := validators.NewSet() - if err := n.vdrs.Set(constants.PrimaryNetworkID, vdrSet); err != nil { - return vdrSet, fmt.Errorf("couldn't set primary network validators: %w", err) - } - return vdrSet, nil + _ = n.vdrs.Add(constants.PrimaryNetworkID, vdrSet) + return vdrSet } // Initialize [n.resourceManager]. 
@@ -1219,7 +1243,6 @@ func (n *Node) Initialize( ) error { n.Log = logger n.Config = config - var err error n.ID = ids.NodeIDFromCert(n.Config.StakingTLSCert.Leaf) n.LogFactory = logFactory n.DoneShuttingDown.Add(1) @@ -1229,13 +1252,34 @@ func (n *Node) Initialize( zap.Stringer("version", version.CurrentApp), zap.Stringer("nodeID", n.ID), zap.Reflect("nodePOP", pop), + zap.Reflect("providedFlags", n.Config.ProvidedFlags), zap.Reflect("config", n.Config), ) - if err = n.initBeacons(); err != nil { // Configure the beacons + var err error + n.VMFactoryLog, err = logFactory.Make("vm-factory") + if err != nil { + return fmt.Errorf("problem creating vm logger: %w", err) + } + + n.VMManager = vms.NewManager(n.VMFactoryLog, config.VMAliaser) + + if err := n.initBeacons(); err != nil { // Configure the beacons return fmt.Errorf("problem initializing node beacons: %w", err) } + // Set up tracer + n.tracer, err = trace.New(n.Config.TraceConfig) + if err != nil { + return fmt.Errorf("couldn't initialize tracer: %w", err) + } + + if n.Config.TraceConfig.Enabled { + n.Config.ConsensusRouter = router.Trace(n.Config.ConsensusRouter, n.tracer) + } + + n.initMetrics() + if err := n.initAPIServer(); err != nil { // Start the API Server return fmt.Errorf("couldn't initialize API server: %w", err) } @@ -1260,34 +1304,23 @@ func (n *Node) Initialize( // message.Creator currently record metrics under network namespace n.networkNamespace = "network" n.msgCreator, err = message.NewCreator( + n.Log, n.MetricsRegisterer, n.networkNamespace, - n.Config.NetworkConfig.CompressionEnabled, + constants.DefaultNetworkCompressionType, n.Config.NetworkConfig.MaximumInboundMessageTimeout, ) if err != nil { return fmt.Errorf("problem initializing message creator: %w", err) } - n.msgCreatorWithProto, err = message.NewCreatorWithProto( - n.MetricsRegisterer, - n.networkNamespace, - n.Config.NetworkConfig.CompressionEnabled, - n.Config.NetworkConfig.MaximumInboundMessageTimeout, - ) - if err != nil { - 
return fmt.Errorf("problem initializing message creator with proto: %w", err) - } - primaryNetVdrs, err := n.initVdrs() - if err != nil { - return fmt.Errorf("problem initializing validators: %w", err) - } + primaryNetVdrs := n.initVdrs() if err := n.initResourceManager(n.MetricsRegisterer); err != nil { return fmt.Errorf("problem initializing resource manager: %w", err) } n.initCPUTargeter(&config.CPUTargeterConfig, primaryNetVdrs) n.initDiskTargeter(&config.DiskTargeterConfig, primaryNetVdrs) - if err = n.initNetworking(primaryNetVdrs); err != nil { // Set up networking layer. + if err := n.initNetworking(primaryNetVdrs); err != nil { // Set up networking layer. return fmt.Errorf("problem initializing networking: %w", err) } @@ -1330,21 +1363,23 @@ func (n *Node) Initialize( return fmt.Errorf("couldn't initialize indexer: %w", err) } - n.health.Start(n.Config.HealthCheckFreq) + n.health.Start(context.TODO(), n.Config.HealthCheckFreq) n.initProfiler() // Start the Platform chain - n.initChains(n.Config.GenesisBytes) + if err := n.initChains(n.Config.GenesisBytes); err != nil { + return fmt.Errorf("couldn't initialize chains: %w", err) + } return nil } // Shutdown this node // May be called multiple times func (n *Node) Shutdown(exitCode int) { - if !n.shuttingDown.GetValue() { // only set the exit code once - n.shuttingDownExitCode.SetValue(exitCode) + if !n.shuttingDown.Get() { // only set the exit code once + n.shuttingDownExitCode.Set(exitCode) } - n.shuttingDown.SetValue(true) + n.shuttingDown.Set(true) n.shutdownOnce.Do(n.shutdown) } @@ -1355,13 +1390,13 @@ func (n *Node) shutdown() { if n.health != nil { // Passes if the node is not shutting down - shuttingDownCheck := health.CheckerFunc(func() (interface{}, error) { + shuttingDownCheck := health.CheckerFunc(func(context.Context) (interface{}, error) { return map[string]interface{}{ "isShuttingDown": true, }, errShuttingDown }) - err := n.health.RegisterHealthCheck("shuttingDown", shuttingDownCheck) + err := 
n.health.RegisterHealthCheck("shuttingDown", shuttingDownCheck, health.GlobalTag) if err != nil { n.Log.Debug("couldn't register shuttingDown health check", zap.Error(err), @@ -1401,9 +1436,9 @@ func (n *Node) shutdown() { ) } - // Make sure all plugin subprocesses are killed - n.Log.Info("cleaning up plugin subprocesses") - plugin.CleanupClients() + // Ensure all runtimes are shutdown + n.Log.Info("cleaning up plugin runtimes") + n.runtimeManager.Stop(context.TODO()) if n.DBManager != nil { if err := n.DBManager.Close(); err != nil { @@ -1413,13 +1448,20 @@ func (n *Node) shutdown() { } } + if n.Config.TraceConfig.Enabled { + n.Log.Info("shutting down tracing") + } + + if err := n.tracer.Close(); err != nil { + n.Log.Warn("error during tracer shutdown", + zap.Error(err), + ) + } + n.DoneShuttingDown.Done() n.Log.Info("finished node shutdown") } func (n *Node) ExitCode() int { - if exitCode, ok := n.shuttingDownExitCode.GetValue().(int); ok { - return exitCode - } - return 0 + return n.shuttingDownExitCode.Get() } diff --git a/avalanchego/proto/Dockerfile.buf b/avalanchego/proto/Dockerfile.buf index b95f4998..3903baf2 100644 --- a/avalanchego/proto/Dockerfile.buf +++ b/avalanchego/proto/Dockerfile.buf @@ -1,4 +1,4 @@ -FROM bufbuild/buf:1.7.0 AS builder +FROM bufbuild/buf:1.11.0 AS builder FROM ubuntu:20.04 @@ -6,7 +6,7 @@ RUN apt-get update && apt -y install bash curl unzip git WORKDIR /opt RUN \ - curl -L https://golang.org/dl/go1.18.5.linux-amd64.tar.gz > golang.tar.gz && \ + curl -L https://golang.org/dl/go1.19.6.linux-amd64.tar.gz > golang.tar.gz && \ mkdir golang && \ tar -zxvf golang.tar.gz -C golang/ @@ -16,7 +16,7 @@ COPY --from=builder /usr/local/bin/buf /usr/local/bin/ # any version changes here should also be bumped in scripts/protobuf_codegen.sh RUN \ - go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.0 && \ + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1 && \ go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2.0 
ENV PATH="${PATH}:/root/go/bin/" diff --git a/avalanchego/proto/README.md b/avalanchego/proto/README.md index b307e072..f42fbf11 100644 --- a/avalanchego/proto/README.md +++ b/avalanchego/proto/README.md @@ -1,6 +1,6 @@ # Avalanche gRPC -Now Serving: **Protocol Version 16** +Now Serving: **Protocol Version 25** Protobuf files are hosted at [https://buf.build/ava-labs/avalanche](https://buf.build/ava-labs/avalanche) and can be used as dependencies in other projects. @@ -14,5 +14,5 @@ Introduction to `buf` [https://docs.buf.build/tour/introduction](https://docs.bu ## Protocol Version Compatibility -The protobuf definitions and generated code are versioned based on the [protocolVersion](../vms/rpcchainvm/vm.go#L21) defined by the rpcchainvm. -Many versions of an Avalanche client can use the same [protocolVersion](../vms/rpcchainvm/vm.go#L21). But each Avalanche client and subnet vm must use the same protocol version to be compatible. +The protobuf definitions and generated code are versioned based on the [RPCChainVMProtocol](../version/version.go#L13) defined for the RPCChainVM. +Many versions of an Avalanche client can use the same [RPCChainVMProtocol](../version/version.go#L13). But each Avalanche client and subnet vm must use the same protocol version to be compatible. 
diff --git a/avalanchego/proto/appsender/appsender.proto b/avalanchego/proto/appsender/appsender.proto index ee98356a..d021bbb5 100644 --- a/avalanchego/proto/appsender/appsender.proto +++ b/avalanchego/proto/appsender/appsender.proto @@ -11,6 +11,9 @@ service AppSender { rpc SendAppResponse(SendAppResponseMsg) returns (google.protobuf.Empty); rpc SendAppGossip(SendAppGossipMsg) returns (google.protobuf.Empty); rpc SendAppGossipSpecific(SendAppGossipSpecificMsg) returns (google.protobuf.Empty); + + rpc SendCrossChainAppRequest(SendCrossChainAppRequestMsg) returns (google.protobuf.Empty); + rpc SendCrossChainAppResponse(SendCrossChainAppResponseMsg) returns (google.protobuf.Empty); } message SendAppRequestMsg { @@ -42,3 +45,21 @@ message SendAppGossipSpecificMsg { // The message body bytes msg = 2; } + +message SendCrossChainAppRequestMsg { + // The chain to send this request to + bytes chain_id = 1; + // the ID of this request + uint32 request_id = 2; + // The request body + bytes request = 3; +} + +message SendCrossChainAppResponseMsg { + // The chain to send this response to + bytes chain_id = 1; + // the ID of this request + uint32 request_id = 2; + // The response body + bytes response = 3; +} diff --git a/avalanchego/proto/io/reader/reader.proto b/avalanchego/proto/io/reader/reader.proto index f4de89b0..ec3e275b 100644 --- a/avalanchego/proto/io/reader/reader.proto +++ b/avalanchego/proto/io/reader/reader.proto @@ -18,7 +18,5 @@ message ReadResponse { // read is the payload in bytes bytes read = 1; // error is an error message - string error = 2; - // errored is true if an error has been set - bool errored = 3; + optional string error = 2; } diff --git a/avalanchego/proto/io/writer/writer.proto b/avalanchego/proto/io/writer/writer.proto index 60364366..1ea73880 100644 --- a/avalanchego/proto/io/writer/writer.proto +++ b/avalanchego/proto/io/writer/writer.proto @@ -19,7 +19,5 @@ message WriteResponse { // written is the length of payload in bytes int32 written 
= 1; // error is an error message - string error = 2; - // errored is true if an error has been set - bool errored = 3; + optional string error = 2; } diff --git a/avalanchego/proto/messenger/messenger.proto b/avalanchego/proto/messenger/messenger.proto index 027fcdeb..36c3f522 100644 --- a/avalanchego/proto/messenger/messenger.proto +++ b/avalanchego/proto/messenger/messenger.proto @@ -8,8 +8,14 @@ service Messenger { rpc Notify(NotifyRequest) returns (NotifyResponse); } +enum Message { + MESSAGE_UNSPECIFIED = 0; + MESSAGE_BUILD_BLOCK = 1; + MESSAGE_STATE_SYNC_FINISHED = 2; +} + message NotifyRequest { - uint32 message = 1; + Message message = 1; } message NotifyResponse {} diff --git a/avalanchego/proto/net/conn/conn.proto b/avalanchego/proto/net/conn/conn.proto index 5839c8a3..07b46df8 100644 --- a/avalanchego/proto/net/conn/conn.proto +++ b/avalanchego/proto/net/conn/conn.proto @@ -34,9 +34,7 @@ message ReadResponse { // read is the payload in bytes bytes read = 1; // error is an error message - string error = 2; - // errored is true if an error has been set - bool errored = 3; + optional string error = 2; } message WriteRequest { @@ -48,9 +46,7 @@ message WriteResponse { // length of the response in bytes int32 length = 1; // error is an error message - string error = 2; - // errored is true if an error has been set - bool errored = 3; + optional string error = 2; } message SetDeadlineRequest { diff --git a/avalanchego/proto/p2p/p2p.proto b/avalanchego/proto/p2p/p2p.proto index 375d5020..d6524a18 100644 --- a/avalanchego/proto/p2p/p2p.proto +++ b/avalanchego/proto/p2p/p2p.proto @@ -17,8 +17,12 @@ message Message { // This field is only set if the message type supports compression. bytes compressed_gzip = 1; + // zstd-compressed bytes of a "p2p.Message" whose "oneof" "message" field is + // NOT compressed_* BUT one of the message types (e.g. ping, pong, etc.). + // This field is only set if the message type supports compression. 
+ bytes compressed_zstd = 2; + // Fields lower than 10 are reserved for other compression algorithms. - // TODO: support COMPRESS_ZSTD // TODO: support COMPRESS_SNAPPY // Network messages: @@ -52,6 +56,8 @@ message Message { AppRequest app_request = 30; AppResponse app_response = 31; AppGossip app_gossip = 32; + + PeerListAck peer_list_ack = 33; } } @@ -62,10 +68,21 @@ message Message { // uptime value of the message sender in "pong" message. message Ping {} -// Contains the uptime of the message receiver (remote peer) +// Contains subnet id and the related observed subnet uptime of the message +// receiver (remote peer). +message SubnetUptime { + bytes subnet_id = 1; + uint32 uptime = 2; +} + +// Contains the uptime percentage of the message receiver (remote peer) // from the sender's point of view, in response to "ping" message. +// Uptimes are expected to be provided as integers ranging in [0, 100]. message Pong { - uint32 uptime_pct = 1; + // uptime is the primary network uptime percentage. + uint32 uptime = 1; + // subnet_uptimes contains subnet uptime percentages. + repeated SubnetUptime subnet_uptimes = 2; } // The first outbound message that the local node sends to its remote peer @@ -93,6 +110,7 @@ message ClaimedIpPort { uint32 ip_port = 3; uint64 timestamp = 4; bytes signature = 5; + bytes tx_id = 6; } // Message that contains a list of peer information (IP, certs, etc.) @@ -106,6 +124,26 @@ message PeerList { repeated ClaimedIpPort claimed_ip_ports = 1; } +// "peer_ack" is sent in response to a "peer_list" message. The "tx_id" should +// correspond to a "tx_id" in the "peer_list" message. The sender should set +// "timestamp" to be the latest known timestamp of a signed IP corresponding to +// the nodeID of "tx_id". +// +// Upon receipt, the "tx_id" and "timestamp" will determine if the receiptent +// can forgo future gossip of the node's IP to the sender of this message. 
+message PeerAck { + bytes tx_id = 1; + uint64 timestamp = 2; +} + +// Message that responds to a peer_list message containing the AddValidatorTxIDs +// from the peer_list message that we currently have in our validator set. +message PeerListAck { + reserved 1; // deprecated; used to be tx_ids + + repeated PeerAck peer_acks = 2; +} + message GetStateSummaryFrontier { bytes chain_id = 1; uint32 request_id = 2; @@ -131,6 +169,12 @@ message AcceptedStateSummary { repeated bytes summary_ids = 3; } +enum EngineType { + ENGINE_TYPE_UNSPECIFIED = 0; + ENGINE_TYPE_AVALANCHE = 1; + ENGINE_TYPE_SNOWMAN = 2; +} + // Message to request for the accepted frontier of the "remote" peer. // For instance, the accepted frontier of X-chain DAG is the set of // accepted vertices that do not have any accepted descendants (i.e., frontier). @@ -144,6 +188,7 @@ message GetAcceptedFrontier { bytes chain_id = 1; uint32 request_id = 2; uint64 deadline = 3; + EngineType engine_type = 4; } // Message that contains the list of accepted frontier in response to @@ -152,6 +197,8 @@ message GetAcceptedFrontier { // // See "snow/engine/common/bootstrapper.go#AcceptedFrontier". message AcceptedFrontier { + reserved 4; // Until Cortina upgrade is activated + bytes chain_id = 1; uint32 request_id = 2; repeated bytes container_ids = 3; @@ -169,6 +216,7 @@ message GetAccepted { uint32 request_id = 2; uint64 deadline = 3; repeated bytes container_ids = 4; + EngineType engine_type = 5; } // Message that contains the list of accepted block/vertex IDs in response to @@ -179,6 +227,8 @@ message GetAccepted { // See "snow/engine/avalanche#GetAccepted" and "SendAccepted". // See "snow/engine/common/bootstrapper.go#Accepted". 
message Accepted { + reserved 4; // Until Cortina upgrade is activated + bytes chain_id = 1; uint32 request_id = 2; repeated bytes container_ids = 3; @@ -195,6 +245,7 @@ message GetAncestors { uint32 request_id = 2; uint64 deadline = 3; bytes container_id = 4; + EngineType engine_type = 5; } // Message that contains the container bytes of the ancestors @@ -203,6 +254,8 @@ message GetAncestors { // On receiving "ancestors", the engine parses the containers and queues them // to be accepted once we've received the entire chain history. message Ancestors { + reserved 4; // Until Cortina upgrade is activated + bytes chain_id = 1; uint32 request_id = 2; repeated bytes containers = 3; @@ -217,6 +270,7 @@ message Get { uint32 request_id = 2; uint64 deadline = 3; bytes container_id = 4; + EngineType engine_type = 5; } // Message that contains the container ID and its bytes in response to "get". @@ -226,8 +280,7 @@ message Put { bytes chain_id = 1; uint32 request_id = 2; bytes container = 3; - -// NOTE: "container_id" is deprecated in packer based serializer + EngineType engine_type = 4; } // Message that contains a preferred container ID and its container bytes @@ -244,8 +297,7 @@ message PushQuery { uint32 request_id = 2; uint64 deadline = 3; bytes container = 4; - -// NOTE: "container_id" is deprecated in packer based serializer + EngineType engine_type = 5; } // Message that contains a preferred container ID to query other peers @@ -258,6 +310,7 @@ message PullQuery { uint32 request_id = 2; uint64 deadline = 3; bytes container_id = 4; + EngineType engine_type = 5; } // Message that contains the votes/preferences of the local node, @@ -267,9 +320,16 @@ message PullQuery { // to the consensus. If the received container is not found, it responds back with // "get" message to fetch the missing container from the remote peer. 
message Chits { + reserved 5; // Until Cortina upgrade is activated + bytes chain_id = 1; uint32 request_id = 2; - repeated bytes container_ids = 3; + // Represents the current preferred frontier. + // TODO: Remove `repeated` once all chains are running Snowman. + repeated bytes preferred_container_ids = 3; + // Represents the current accepted frontier. + // TODO: Remove `repeated` once all chains are running Snowman. + repeated bytes accepted_container_ids = 4; } message AppRequest { diff --git a/avalanchego/proto/pb/aliasreader/aliasreader.pb.go b/avalanchego/proto/pb/aliasreader/aliasreader.pb.go index 347d60a5..56886860 100644 --- a/avalanchego/proto/pb/aliasreader/aliasreader.pb.go +++ b/avalanchego/proto/pb/aliasreader/aliasreader.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: aliasreader/aliasreader.proto diff --git a/avalanchego/proto/pb/appsender/appsender.pb.go b/avalanchego/proto/pb/appsender/appsender.pb.go index d6e8be61..609d30a1 100644 --- a/avalanchego/proto/pb/appsender/appsender.pb.go +++ b/avalanchego/proto/pb/appsender/appsender.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: appsender/appsender.proto @@ -258,6 +258,138 @@ func (x *SendAppGossipSpecificMsg) GetMsg() []byte { return nil } +type SendCrossChainAppRequestMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The chain to send this request to + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // the ID of this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // The request body + Request []byte `protobuf:"bytes,3,opt,name=request,proto3" json:"request,omitempty"` +} + +func (x *SendCrossChainAppRequestMsg) Reset() { + *x = SendCrossChainAppRequestMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_appsender_appsender_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendCrossChainAppRequestMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendCrossChainAppRequestMsg) ProtoMessage() {} + +func (x *SendCrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { + mi := &file_appsender_appsender_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendCrossChainAppRequestMsg.ProtoReflect.Descriptor instead. 
+func (*SendCrossChainAppRequestMsg) Descriptor() ([]byte, []int) { + return file_appsender_appsender_proto_rawDescGZIP(), []int{4} +} + +func (x *SendCrossChainAppRequestMsg) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} + +func (x *SendCrossChainAppRequestMsg) GetRequestId() uint32 { + if x != nil { + return x.RequestId + } + return 0 +} + +func (x *SendCrossChainAppRequestMsg) GetRequest() []byte { + if x != nil { + return x.Request + } + return nil +} + +type SendCrossChainAppResponseMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The chain to send this response to + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // the ID of this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // The response body + Response []byte `protobuf:"bytes,3,opt,name=response,proto3" json:"response,omitempty"` +} + +func (x *SendCrossChainAppResponseMsg) Reset() { + *x = SendCrossChainAppResponseMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_appsender_appsender_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SendCrossChainAppResponseMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SendCrossChainAppResponseMsg) ProtoMessage() {} + +func (x *SendCrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { + mi := &file_appsender_appsender_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SendCrossChainAppResponseMsg.ProtoReflect.Descriptor instead. 
+func (*SendCrossChainAppResponseMsg) Descriptor() ([]byte, []int) { + return file_appsender_appsender_proto_rawDescGZIP(), []int{5} +} + +func (x *SendCrossChainAppResponseMsg) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} + +func (x *SendCrossChainAppResponseMsg) GetRequestId() uint32 { + if x != nil { + return x.RequestId + } + return 0 +} + +func (x *SendCrossChainAppResponseMsg) GetResponse() []byte { + if x != nil { + return x.Response + } + return nil +} + var File_appsender_appsender_proto protoreflect.FileDescriptor var file_appsender_appsender_proto_rawDesc = []byte{ @@ -285,30 +417,57 @@ var file_appsender_appsender_proto_rawDesc = []byte{ 0x63, 0x69, 0x66, 0x69, 0x63, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x03, 0x6d, 0x73, 0x67, 0x32, 0xb9, 0x02, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x53, 0x65, 0x6e, - 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, - 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0f, 0x53, - 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, + 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x71, 0x0a, 0x1b, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, + 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 
0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x18, + 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x74, 0x0a, 0x1c, 0x53, 0x65, 0x6e, 0x64, + 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xf3, + 0x03, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x53, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x0e, + 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, - 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, - 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, - 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 
0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x54, 0x0a, 0x15, 0x53, - 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x53, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x63, 0x12, 0x23, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, - 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x53, 0x70, - 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x61, 0x70, - 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x48, 0x0a, 0x0f, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, + 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, + 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, + 0x1b, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, + 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x54, 0x0a, 0x15, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, 0x70, 0x47, + 0x6f, 0x73, 0x73, 0x69, 0x70, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x12, 0x23, 0x2e, + 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x70, + 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x4d, + 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5a, 0x0a, 0x18, 0x53, 0x65, + 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, + 0x65, 0x72, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x5c, 0x0a, 0x19, 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, + 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x27, 0x2e, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x2e, + 0x53, 0x65, 0x6e, 0x64, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, + 0x2f, 0x61, 0x70, 0x70, 0x73, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -323,25 +482,31 @@ func file_appsender_appsender_proto_rawDescGZIP() []byte { return file_appsender_appsender_proto_rawDescData } -var file_appsender_appsender_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_appsender_appsender_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_appsender_appsender_proto_goTypes = []interface{}{ - (*SendAppRequestMsg)(nil), // 0: appsender.SendAppRequestMsg - (*SendAppResponseMsg)(nil), // 1: appsender.SendAppResponseMsg - (*SendAppGossipMsg)(nil), // 2: appsender.SendAppGossipMsg - (*SendAppGossipSpecificMsg)(nil), // 3: appsender.SendAppGossipSpecificMsg - (*emptypb.Empty)(nil), // 4: google.protobuf.Empty + (*SendAppRequestMsg)(nil), // 0: appsender.SendAppRequestMsg + (*SendAppResponseMsg)(nil), // 1: appsender.SendAppResponseMsg + (*SendAppGossipMsg)(nil), // 2: appsender.SendAppGossipMsg + (*SendAppGossipSpecificMsg)(nil), // 3: appsender.SendAppGossipSpecificMsg + (*SendCrossChainAppRequestMsg)(nil), // 4: appsender.SendCrossChainAppRequestMsg + (*SendCrossChainAppResponseMsg)(nil), // 5: appsender.SendCrossChainAppResponseMsg + (*emptypb.Empty)(nil), // 6: google.protobuf.Empty } var file_appsender_appsender_proto_depIdxs = []int32{ 0, // 0: appsender.AppSender.SendAppRequest:input_type -> appsender.SendAppRequestMsg 1, // 1: appsender.AppSender.SendAppResponse:input_type -> appsender.SendAppResponseMsg 2, // 2: appsender.AppSender.SendAppGossip:input_type -> appsender.SendAppGossipMsg 3, // 3: appsender.AppSender.SendAppGossipSpecific:input_type -> appsender.SendAppGossipSpecificMsg - 4, // 4: appsender.AppSender.SendAppRequest:output_type -> google.protobuf.Empty - 4, // 5: appsender.AppSender.SendAppResponse:output_type -> google.protobuf.Empty - 4, // 6: appsender.AppSender.SendAppGossip:output_type -> google.protobuf.Empty - 4, // 7: 
appsender.AppSender.SendAppGossipSpecific:output_type -> google.protobuf.Empty - 4, // [4:8] is the sub-list for method output_type - 0, // [0:4] is the sub-list for method input_type + 4, // 4: appsender.AppSender.SendCrossChainAppRequest:input_type -> appsender.SendCrossChainAppRequestMsg + 5, // 5: appsender.AppSender.SendCrossChainAppResponse:input_type -> appsender.SendCrossChainAppResponseMsg + 6, // 6: appsender.AppSender.SendAppRequest:output_type -> google.protobuf.Empty + 6, // 7: appsender.AppSender.SendAppResponse:output_type -> google.protobuf.Empty + 6, // 8: appsender.AppSender.SendAppGossip:output_type -> google.protobuf.Empty + 6, // 9: appsender.AppSender.SendAppGossipSpecific:output_type -> google.protobuf.Empty + 6, // 10: appsender.AppSender.SendCrossChainAppRequest:output_type -> google.protobuf.Empty + 6, // 11: appsender.AppSender.SendCrossChainAppResponse:output_type -> google.protobuf.Empty + 6, // [6:12] is the sub-list for method output_type + 0, // [0:6] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -401,6 +566,30 @@ func file_appsender_appsender_proto_init() { return nil } } + file_appsender_appsender_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendCrossChainAppRequestMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_appsender_appsender_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SendCrossChainAppResponseMsg); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -408,7 +597,7 @@ func file_appsender_appsender_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: 
file_appsender_appsender_proto_rawDesc, NumEnums: 0, - NumMessages: 4, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, diff --git a/avalanchego/proto/pb/appsender/appsender_grpc.pb.go b/avalanchego/proto/pb/appsender/appsender_grpc.pb.go index f1636130..4ef890e6 100644 --- a/avalanchego/proto/pb/appsender/appsender_grpc.pb.go +++ b/avalanchego/proto/pb/appsender/appsender_grpc.pb.go @@ -27,6 +27,8 @@ type AppSenderClient interface { SendAppResponse(ctx context.Context, in *SendAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendAppGossip(ctx context.Context, in *SendAppGossipMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) SendAppGossipSpecific(ctx context.Context, in *SendAppGossipSpecificMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + SendCrossChainAppRequest(ctx context.Context, in *SendCrossChainAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + SendCrossChainAppResponse(ctx context.Context, in *SendCrossChainAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) } type appSenderClient struct { @@ -73,6 +75,24 @@ func (c *appSenderClient) SendAppGossipSpecific(ctx context.Context, in *SendApp return out, nil } +func (c *appSenderClient) SendCrossChainAppRequest(ctx context.Context, in *SendCrossChainAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/appsender.AppSender/SendCrossChainAppRequest", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *appSenderClient) SendCrossChainAppResponse(ctx context.Context, in *SendCrossChainAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/appsender.AppSender/SendCrossChainAppResponse", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // AppSenderServer is the server API for AppSender service. 
// All implementations must embed UnimplementedAppSenderServer // for forward compatibility @@ -81,6 +101,8 @@ type AppSenderServer interface { SendAppResponse(context.Context, *SendAppResponseMsg) (*emptypb.Empty, error) SendAppGossip(context.Context, *SendAppGossipMsg) (*emptypb.Empty, error) SendAppGossipSpecific(context.Context, *SendAppGossipSpecificMsg) (*emptypb.Empty, error) + SendCrossChainAppRequest(context.Context, *SendCrossChainAppRequestMsg) (*emptypb.Empty, error) + SendCrossChainAppResponse(context.Context, *SendCrossChainAppResponseMsg) (*emptypb.Empty, error) mustEmbedUnimplementedAppSenderServer() } @@ -100,6 +122,12 @@ func (UnimplementedAppSenderServer) SendAppGossip(context.Context, *SendAppGossi func (UnimplementedAppSenderServer) SendAppGossipSpecific(context.Context, *SendAppGossipSpecificMsg) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method SendAppGossipSpecific not implemented") } +func (UnimplementedAppSenderServer) SendCrossChainAppRequest(context.Context, *SendCrossChainAppRequestMsg) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendCrossChainAppRequest not implemented") +} +func (UnimplementedAppSenderServer) SendCrossChainAppResponse(context.Context, *SendCrossChainAppResponseMsg) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendCrossChainAppResponse not implemented") +} func (UnimplementedAppSenderServer) mustEmbedUnimplementedAppSenderServer() {} // UnsafeAppSenderServer may be embedded to opt out of forward compatibility for this service. 
@@ -185,6 +213,42 @@ func _AppSender_SendAppGossipSpecific_Handler(srv interface{}, ctx context.Conte return interceptor(ctx, in, info, handler) } +func _AppSender_SendCrossChainAppRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendCrossChainAppRequestMsg) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AppSenderServer).SendCrossChainAppRequest(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/appsender.AppSender/SendCrossChainAppRequest", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AppSenderServer).SendCrossChainAppRequest(ctx, req.(*SendCrossChainAppRequestMsg)) + } + return interceptor(ctx, in, info, handler) +} + +func _AppSender_SendCrossChainAppResponse_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SendCrossChainAppResponseMsg) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AppSenderServer).SendCrossChainAppResponse(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/appsender.AppSender/SendCrossChainAppResponse", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AppSenderServer).SendCrossChainAppResponse(ctx, req.(*SendCrossChainAppResponseMsg)) + } + return interceptor(ctx, in, info, handler) +} + // AppSender_ServiceDesc is the grpc.ServiceDesc for AppSender service. 
// It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -208,6 +272,14 @@ var AppSender_ServiceDesc = grpc.ServiceDesc{ MethodName: "SendAppGossipSpecific", Handler: _AppSender_SendAppGossipSpecific_Handler, }, + { + MethodName: "SendCrossChainAppRequest", + Handler: _AppSender_SendCrossChainAppRequest_Handler, + }, + { + MethodName: "SendCrossChainAppResponse", + Handler: _AppSender_SendCrossChainAppResponse_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "appsender/appsender.proto", diff --git a/avalanchego/proto/pb/http/http.pb.go b/avalanchego/proto/pb/http/http.pb.go index e845b2ce..fca44132 100644 --- a/avalanchego/proto/pb/http/http.pb.go +++ b/avalanchego/proto/pb/http/http.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: http/http.proto diff --git a/avalanchego/proto/pb/http/responsewriter/responsewriter.pb.go b/avalanchego/proto/pb/http/responsewriter/responsewriter.pb.go index e7df856e..348a1086 100644 --- a/avalanchego/proto/pb/http/responsewriter/responsewriter.pb.go +++ b/avalanchego/proto/pb/http/responsewriter/responsewriter.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: http/responsewriter/responsewriter.proto diff --git a/avalanchego/proto/pb/io/reader/reader.pb.go b/avalanchego/proto/pb/io/reader/reader.pb.go index 46836304..fc1d0d12 100644 --- a/avalanchego/proto/pb/io/reader/reader.pb.go +++ b/avalanchego/proto/pb/io/reader/reader.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: io/reader/reader.proto @@ -76,9 +76,7 @@ type ReadResponse struct { // read is the payload in bytes Read []byte `protobuf:"bytes,1,opt,name=read,proto3" json:"read,omitempty"` // error is an error message - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - // errored is true if an error has been set - Errored bool `protobuf:"varint,3,opt,name=errored,proto3" json:"errored,omitempty"` + Error *string `protobuf:"bytes,2,opt,name=error,proto3,oneof" json:"error,omitempty"` } func (x *ReadResponse) Reset() { @@ -121,19 +119,12 @@ func (x *ReadResponse) GetRead() []byte { } func (x *ReadResponse) GetError() string { - if x != nil { - return x.Error + if x != nil && x.Error != nil { + return *x.Error } return "" } -func (x *ReadResponse) GetErrored() bool { - if x != nil { - return x.Errored - } - return false -} - var File_io_reader_reader_proto protoreflect.FileDescriptor var file_io_reader_reader_proto_rawDesc = []byte{ @@ -141,20 +132,20 @@ var file_io_reader_reader_proto_rawDesc = []byte{ 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x6f, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0x25, 0x0a, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x52, 0x0a, 0x0c, 0x52, 0x65, + 0x28, 0x05, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x47, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x65, - 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x65, 0x61, 0x64, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 
0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x32, 0x41, - 0x0a, 0x06, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x04, 0x52, 0x65, 0x61, 0x64, - 0x12, 0x16, 0x2e, 0x69, 0x6f, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x61, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x69, 0x6f, 0x2e, 0x72, 0x65, - 0x61, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, - 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x69, 0x6f, - 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x72, 0x65, 0x61, 0x64, 0x12, 0x19, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x32, 0x41, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x37, 0x0a, + 0x04, 0x52, 0x65, 0x61, 0x64, 0x12, 0x16, 0x2e, 0x69, 0x6f, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, + 0x72, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, + 0x69, 0x6f, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, + 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x70, 0x62, 0x2f, 0x69, 0x6f, 0x2f, 0x72, 0x65, 0x61, 0x64, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( @@ 
-215,6 +206,7 @@ func file_io_reader_reader_proto_init() { } } } + file_io_reader_reader_proto_msgTypes[1].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/avalanchego/proto/pb/io/writer/writer.pb.go b/avalanchego/proto/pb/io/writer/writer.pb.go index abd5c866..afd1092f 100644 --- a/avalanchego/proto/pb/io/writer/writer.pb.go +++ b/avalanchego/proto/pb/io/writer/writer.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: io/writer/writer.proto @@ -76,9 +76,7 @@ type WriteResponse struct { // written is the length of payload in bytes Written int32 `protobuf:"varint,1,opt,name=written,proto3" json:"written,omitempty"` // error is an error message - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - // errored is true if an error has been set - Errored bool `protobuf:"varint,3,opt,name=errored,proto3" json:"errored,omitempty"` + Error *string `protobuf:"bytes,2,opt,name=error,proto3,oneof" json:"error,omitempty"` } func (x *WriteResponse) Reset() { @@ -121,19 +119,12 @@ func (x *WriteResponse) GetWritten() int32 { } func (x *WriteResponse) GetError() string { - if x != nil { - return x.Error + if x != nil && x.Error != nil { + return *x.Error } return "" } -func (x *WriteResponse) GetErrored() bool { - if x != nil { - return x.Errored - } - return false -} - var File_io_writer_writer_proto protoreflect.FileDescriptor var file_io_writer_writer_proto_rawDesc = []byte{ @@ -141,21 +132,21 @@ var file_io_writer_writer_proto_rawDesc = []byte{ 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x69, 0x6f, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x72, 0x22, 0x28, 0x0a, 0x0c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 
0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x59, 0x0a, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x4e, 0x0a, 0x0d, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x07, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, - 0x0a, 0x07, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x32, 0x44, 0x0a, 0x06, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x17, 0x2e, 0x69, 0x6f, - 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x72, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x69, 0x6f, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x72, - 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x34, - 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, - 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, - 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x69, 0x6f, 0x2f, 0x77, 0x72, - 0x69, 0x74, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x07, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x32, 0x44, 0x0a, + 0x06, 0x57, 0x72, 0x69, 0x74, 0x65, 0x72, 0x12, 0x3a, 0x0a, 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x12, 0x17, 0x2e, 0x69, 0x6f, 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x72, 0x2e, 0x57, 0x72, 0x69, + 0x74, 
0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x69, 0x6f, 0x2e, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x72, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, + 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, + 0x69, 0x6f, 0x2f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -216,6 +207,7 @@ func file_io_writer_writer_proto_init() { } } } + file_io_writer_writer_proto_msgTypes[1].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/avalanchego/proto/pb/keystore/keystore.pb.go b/avalanchego/proto/pb/keystore/keystore.pb.go index a418fa2f..d9d75e73 100644 --- a/avalanchego/proto/pb/keystore/keystore.pb.go +++ b/avalanchego/proto/pb/keystore/keystore.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: keystore/keystore.proto diff --git a/avalanchego/proto/pb/messenger/messenger.pb.go b/avalanchego/proto/pb/messenger/messenger.pb.go index d0cb2a26..15c43300 100644 --- a/avalanchego/proto/pb/messenger/messenger.pb.go +++ b/avalanchego/proto/pb/messenger/messenger.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: messenger/messenger.proto @@ -20,12 +20,61 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type Message int32 + +const ( + Message_MESSAGE_UNSPECIFIED Message = 0 + Message_MESSAGE_BUILD_BLOCK Message = 1 + Message_MESSAGE_STATE_SYNC_FINISHED Message = 2 +) + +// Enum value maps for Message. 
+var ( + Message_name = map[int32]string{ + 0: "MESSAGE_UNSPECIFIED", + 1: "MESSAGE_BUILD_BLOCK", + 2: "MESSAGE_STATE_SYNC_FINISHED", + } + Message_value = map[string]int32{ + "MESSAGE_UNSPECIFIED": 0, + "MESSAGE_BUILD_BLOCK": 1, + "MESSAGE_STATE_SYNC_FINISHED": 2, + } +) + +func (x Message) Enum() *Message { + p := new(Message) + *p = x + return p +} + +func (x Message) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Message) Descriptor() protoreflect.EnumDescriptor { + return file_messenger_messenger_proto_enumTypes[0].Descriptor() +} + +func (Message) Type() protoreflect.EnumType { + return &file_messenger_messenger_proto_enumTypes[0] +} + +func (x Message) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Message.Descriptor instead. +func (Message) EnumDescriptor() ([]byte, []int) { + return file_messenger_messenger_proto_rawDescGZIP(), []int{0} +} + type NotifyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Message uint32 `protobuf:"varint,1,opt,name=message,proto3" json:"message,omitempty"` + Message Message `protobuf:"varint,1,opt,name=message,proto3,enum=messenger.Message" json:"message,omitempty"` } func (x *NotifyRequest) Reset() { @@ -60,11 +109,11 @@ func (*NotifyRequest) Descriptor() ([]byte, []int) { return file_messenger_messenger_proto_rawDescGZIP(), []int{0} } -func (x *NotifyRequest) GetMessage() uint32 { +func (x *NotifyRequest) GetMessage() Message { if x != nil { return x.Message } - return 0 + return Message_MESSAGE_UNSPECIFIED } type NotifyResponse struct { @@ -110,19 +159,26 @@ var File_messenger_messenger_proto protoreflect.FileDescriptor var file_messenger_messenger_proto_rawDesc = []byte{ 0x0a, 0x19, 0x6d, 0x65, 0x73, 0x73, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09, 0x6d, 
0x65, 0x73, - 0x73, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x22, 0x29, 0x0a, 0x0d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x22, 0x10, 0x0a, 0x0e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x32, 0x4a, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x73, 0x65, 0x6e, 0x67, 0x65, 0x72, - 0x12, 0x3d, 0x0a, 0x06, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x12, 0x18, 0x2e, 0x6d, 0x65, 0x73, - 0x73, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x65, 0x6e, 0x67, 0x65, 0x72, - 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, - 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, - 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, - 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x65, 0x73, 0x73, - 0x65, 0x6e, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x73, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x22, 0x3d, 0x0a, 0x0d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x65, + 0x6e, 0x67, 0x65, 0x72, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x10, 0x0a, 0x0e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2a, 0x5c, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 
0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x4d, + 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x42, 0x55, 0x49, 0x4c, 0x44, 0x5f, 0x42, 0x4c, 0x4f, + 0x43, 0x4b, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x46, 0x49, 0x4e, 0x49, 0x53, + 0x48, 0x45, 0x44, 0x10, 0x02, 0x32, 0x4a, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x73, 0x65, 0x6e, 0x67, + 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x06, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x12, 0x18, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x65, 0x6e, 0x67, + 0x65, 0x72, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x65, + 0x73, 0x73, 0x65, 0x6e, 0x67, 0x65, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -137,19 +193,22 @@ func file_messenger_messenger_proto_rawDescGZIP() []byte { return file_messenger_messenger_proto_rawDescData } +var file_messenger_messenger_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_messenger_messenger_proto_msgTypes = make([]protoimpl.MessageInfo, 2) var file_messenger_messenger_proto_goTypes = []interface{}{ - (*NotifyRequest)(nil), // 0: messenger.NotifyRequest - (*NotifyResponse)(nil), // 1: messenger.NotifyResponse + (Message)(0), // 0: messenger.Message + (*NotifyRequest)(nil), // 1: messenger.NotifyRequest + (*NotifyResponse)(nil), // 2: messenger.NotifyResponse } var file_messenger_messenger_proto_depIdxs = []int32{ - 0, // 0: messenger.Messenger.Notify:input_type -> messenger.NotifyRequest - 1, // 1: 
messenger.Messenger.Notify:output_type -> messenger.NotifyResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 0, // 0: messenger.NotifyRequest.message:type_name -> messenger.Message + 1, // 1: messenger.Messenger.Notify:input_type -> messenger.NotifyRequest + 2, // 2: messenger.Messenger.Notify:output_type -> messenger.NotifyResponse + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } func init() { file_messenger_messenger_proto_init() } @@ -188,13 +247,14 @@ func file_messenger_messenger_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_messenger_messenger_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 2, NumExtensions: 0, NumServices: 1, }, GoTypes: file_messenger_messenger_proto_goTypes, DependencyIndexes: file_messenger_messenger_proto_depIdxs, + EnumInfos: file_messenger_messenger_proto_enumTypes, MessageInfos: file_messenger_messenger_proto_msgTypes, }.Build() File_messenger_messenger_proto = out.File diff --git a/avalanchego/proto/pb/net/conn/conn.pb.go b/avalanchego/proto/pb/net/conn/conn.pb.go index 8481a022..b40c1608 100644 --- a/avalanchego/proto/pb/net/conn/conn.pb.go +++ b/avalanchego/proto/pb/net/conn/conn.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: net/conn/conn.proto @@ -77,9 +77,7 @@ type ReadResponse struct { // read is the payload in bytes Read []byte `protobuf:"bytes,1,opt,name=read,proto3" json:"read,omitempty"` // error is an error message - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - // errored is true if an error has been set - Errored bool `protobuf:"varint,3,opt,name=errored,proto3" json:"errored,omitempty"` + Error *string `protobuf:"bytes,2,opt,name=error,proto3,oneof" json:"error,omitempty"` } func (x *ReadResponse) Reset() { @@ -122,19 +120,12 @@ func (x *ReadResponse) GetRead() []byte { } func (x *ReadResponse) GetError() string { - if x != nil { - return x.Error + if x != nil && x.Error != nil { + return *x.Error } return "" } -func (x *ReadResponse) GetErrored() bool { - if x != nil { - return x.Errored - } - return false -} - type WriteRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -191,9 +182,7 @@ type WriteResponse struct { // length of the response in bytes Length int32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` // error is an error message - Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - // errored is true if an error has been set - Errored bool `protobuf:"varint,3,opt,name=errored,proto3" json:"errored,omitempty"` + Error *string `protobuf:"bytes,2,opt,name=error,proto3,oneof" json:"error,omitempty"` } func (x *WriteResponse) Reset() { @@ -236,19 +225,12 @@ func (x *WriteResponse) GetLength() int32 { } func (x *WriteResponse) GetError() string { - if x != nil { - return x.Error + if x != nil && x.Error != nil { + return *x.Error } return "" } -func (x *WriteResponse) GetErrored() bool { - if x != nil { - return x.Errored - } - return false -} - type SetDeadlineRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -306,51 +288,50 @@ var 
file_net_conn_conn_proto_rawDesc = []byte{ 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x25, 0x0a, 0x0b, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6c, 0x65, 0x6e, - 0x67, 0x74, 0x68, 0x22, 0x52, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x67, 0x74, 0x68, 0x22, 0x47, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x65, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x04, 0x72, 0x65, 0x61, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x18, 0x0a, - 0x07, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x22, 0x28, 0x0a, 0x0c, 0x57, 0x72, 0x69, 0x74, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x22, 0x57, 0x0a, 0x0d, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x18, 0x0a, 0x07, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x07, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x65, 0x64, 0x22, 0x28, 0x0a, 0x12, 0x53, 0x65, - 0x74, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 
0x74, 0x69, 0x6d, 0x65, 0x32, 0x88, 0x03, 0x0a, 0x04, 0x43, 0x6f, 0x6e, 0x6e, 0x12, 0x35, 0x0a, - 0x04, 0x52, 0x65, 0x61, 0x64, 0x12, 0x15, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x6e, - 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x16, 0x2e, - 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, - 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, - 0x0a, 0x05, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x44, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, - 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x47, 0x0a, 0x0f, - 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, 0x64, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, - 0x1c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x44, 0x65, - 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 
0x48, 0x0a, 0x10, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, - 0x65, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, + 0x0c, 0x52, 0x04, 0x72, 0x65, 0x61, 0x64, 0x12, 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, + 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x28, 0x0a, 0x0c, + 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x4c, 0x0a, 0x0d, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, + 0x19, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x88, 0x01, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x28, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x44, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x32, 0x88, + 0x03, 0x0a, 0x04, 0x43, 0x6f, 0x6e, 0x6e, 0x12, 0x35, 0x0a, 0x04, 0x52, 0x65, 0x61, 0x64, 0x12, + 0x15, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, + 0x6e, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, + 0x0a, 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x16, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, + 0x6e, 0x6e, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x1a, + 0x17, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x43, 0x6c, 0x6f, 0x73, + 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x12, 0x43, 0x0a, 0x0b, 0x53, 0x65, 0x74, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, + 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x44, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x47, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x52, 0x65, 0x61, + 0x64, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x53, 0x65, 0x74, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, - 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, - 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, - 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x6e, 0x65, 0x74, 0x2f, - 0x63, 0x6f, 0x6e, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x48, 0x0a, 0x10, 0x53, 0x65, 0x74, 0x57, 0x72, 0x69, 0x74, 0x65, 0x44, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x2e, 0x6e, 
0x65, 0x74, 0x2e, 0x63, 0x6f, 0x6e, 0x6e, 0x2e, 0x53, + 0x65, 0x74, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, + 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x6e, 0x65, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -461,6 +442,8 @@ func file_net_conn_conn_proto_init() { } } } + file_net_conn_conn_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_net_conn_conn_proto_msgTypes[3].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/avalanchego/proto/pb/p2p/p2p.pb.go b/avalanchego/proto/pb/p2p/p2p.pb.go index 03a8adf7..ff5127f4 100644 --- a/avalanchego/proto/pb/p2p/p2p.pb.go +++ b/avalanchego/proto/pb/p2p/p2p.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: p2p/p2p.proto @@ -20,6 +20,55 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type EngineType int32 + +const ( + EngineType_ENGINE_TYPE_UNSPECIFIED EngineType = 0 + EngineType_ENGINE_TYPE_AVALANCHE EngineType = 1 + EngineType_ENGINE_TYPE_SNOWMAN EngineType = 2 +) + +// Enum value maps for EngineType. 
+var ( + EngineType_name = map[int32]string{ + 0: "ENGINE_TYPE_UNSPECIFIED", + 1: "ENGINE_TYPE_AVALANCHE", + 2: "ENGINE_TYPE_SNOWMAN", + } + EngineType_value = map[string]int32{ + "ENGINE_TYPE_UNSPECIFIED": 0, + "ENGINE_TYPE_AVALANCHE": 1, + "ENGINE_TYPE_SNOWMAN": 2, + } +) + +func (x EngineType) Enum() *EngineType { + p := new(EngineType) + *p = x + return p +} + +func (x EngineType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (EngineType) Descriptor() protoreflect.EnumDescriptor { + return file_p2p_p2p_proto_enumTypes[0].Descriptor() +} + +func (EngineType) Type() protoreflect.EnumType { + return &file_p2p_p2p_proto_enumTypes[0] +} + +func (x EngineType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use EngineType.Descriptor instead. +func (EngineType) EnumDescriptor() ([]byte, []int) { + return file_p2p_p2p_proto_rawDescGZIP(), []int{0} +} + // Represents peer-to-peer messages. // Only one type can be non-null. type Message struct { @@ -32,7 +81,9 @@ type Message struct { // That is because when the compression is enabled, we don't want to include uncompressed fields. 
// // Types that are assignable to Message: + // // *Message_CompressedGzip + // *Message_CompressedZstd // *Message_Ping // *Message_Pong // *Message_Version @@ -55,6 +106,7 @@ type Message struct { // *Message_AppRequest // *Message_AppResponse // *Message_AppGossip + // *Message_PeerListAck Message isMessage_Message `protobuf_oneof:"message"` } @@ -104,6 +156,13 @@ func (x *Message) GetCompressedGzip() []byte { return nil } +func (x *Message) GetCompressedZstd() []byte { + if x, ok := x.GetMessage().(*Message_CompressedZstd); ok { + return x.CompressedZstd + } + return nil +} + func (x *Message) GetPing() *Ping { if x, ok := x.GetMessage().(*Message_Ping); ok { return x.Ping @@ -258,6 +317,13 @@ func (x *Message) GetAppGossip() *AppGossip { return nil } +func (x *Message) GetPeerListAck() *PeerListAck { + if x, ok := x.GetMessage().(*Message_PeerListAck); ok { + return x.PeerListAck + } + return nil +} + type isMessage_Message interface { isMessage_Message() } @@ -269,6 +335,13 @@ type Message_CompressedGzip struct { CompressedGzip []byte `protobuf:"bytes,1,opt,name=compressed_gzip,json=compressedGzip,proto3,oneof"` } +type Message_CompressedZstd struct { + // zstd-compressed bytes of a "p2p.Message" whose "oneof" "message" field is + // NOT compressed_* BUT one of the message types (e.g. ping, pong, etc.). + // This field is only set if the message type supports compression. 
+ CompressedZstd []byte `protobuf:"bytes,2,opt,name=compressed_zstd,json=compressedZstd,proto3,oneof"` +} + type Message_Ping struct { // Network messages: Ping *Ping `protobuf:"bytes,11,opt,name=ping,proto3,oneof"` @@ -362,8 +435,14 @@ type Message_AppGossip struct { AppGossip *AppGossip `protobuf:"bytes,32,opt,name=app_gossip,json=appGossip,proto3,oneof"` } +type Message_PeerListAck struct { + PeerListAck *PeerListAck `protobuf:"bytes,33,opt,name=peer_list_ack,json=peerListAck,proto3,oneof"` +} + func (*Message_CompressedGzip) isMessage_Message() {} +func (*Message_CompressedZstd) isMessage_Message() {} + func (*Message_Ping) isMessage_Message() {} func (*Message_Pong) isMessage_Message() {} @@ -408,6 +487,8 @@ func (*Message_AppResponse) isMessage_Message() {} func (*Message_AppGossip) isMessage_Message() {} +func (*Message_PeerListAck) isMessage_Message() {} + // Message that the local node sends to its remote peers, // in order to periodically check its uptime. // @@ -451,20 +532,81 @@ func (*Ping) Descriptor() ([]byte, []int) { return file_p2p_p2p_proto_rawDescGZIP(), []int{1} } -// Contains the uptime of the message receiver (remote peer) +// Contains subnet id and the related observed subnet uptime of the message +// receiver (remote peer). 
+type SubnetUptime struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubnetId []byte `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + Uptime uint32 `protobuf:"varint,2,opt,name=uptime,proto3" json:"uptime,omitempty"` +} + +func (x *SubnetUptime) Reset() { + *x = SubnetUptime{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_p2p_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SubnetUptime) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SubnetUptime) ProtoMessage() {} + +func (x *SubnetUptime) ProtoReflect() protoreflect.Message { + mi := &file_p2p_p2p_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SubnetUptime.ProtoReflect.Descriptor instead. +func (*SubnetUptime) Descriptor() ([]byte, []int) { + return file_p2p_p2p_proto_rawDescGZIP(), []int{2} +} + +func (x *SubnetUptime) GetSubnetId() []byte { + if x != nil { + return x.SubnetId + } + return nil +} + +func (x *SubnetUptime) GetUptime() uint32 { + if x != nil { + return x.Uptime + } + return 0 +} + +// Contains the uptime percentage of the message receiver (remote peer) // from the sender's point of view, in response to "ping" message. +// Uptimes are expected to be provided as integers ranging in [0, 100]. type Pong struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - UptimePct uint32 `protobuf:"varint,1,opt,name=uptime_pct,json=uptimePct,proto3" json:"uptime_pct,omitempty"` + // uptime is the primary network uptime percentage. 
+ Uptime uint32 `protobuf:"varint,1,opt,name=uptime,proto3" json:"uptime,omitempty"` + // subnet_uptimes contains subnet uptime percentages. + SubnetUptimes []*SubnetUptime `protobuf:"bytes,2,rep,name=subnet_uptimes,json=subnetUptimes,proto3" json:"subnet_uptimes,omitempty"` } func (x *Pong) Reset() { *x = Pong{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[2] + mi := &file_p2p_p2p_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -477,7 +619,7 @@ func (x *Pong) String() string { func (*Pong) ProtoMessage() {} func (x *Pong) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[2] + mi := &file_p2p_p2p_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -490,16 +632,23 @@ func (x *Pong) ProtoReflect() protoreflect.Message { // Deprecated: Use Pong.ProtoReflect.Descriptor instead. func (*Pong) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{2} + return file_p2p_p2p_proto_rawDescGZIP(), []int{3} } -func (x *Pong) GetUptimePct() uint32 { +func (x *Pong) GetUptime() uint32 { if x != nil { - return x.UptimePct + return x.Uptime } return 0 } +func (x *Pong) GetSubnetUptimes() []*SubnetUptime { + if x != nil { + return x.SubnetUptimes + } + return nil +} + // The first outbound message that the local node sends to its remote peer // when the connection is established. In order for the local node to be // tracked as a valid peer by the remote peer, the fields must be valid. 
@@ -525,7 +674,7 @@ type Version struct { func (x *Version) Reset() { *x = Version{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[3] + mi := &file_p2p_p2p_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -538,7 +687,7 @@ func (x *Version) String() string { func (*Version) ProtoMessage() {} func (x *Version) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[3] + mi := &file_p2p_p2p_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -551,7 +700,7 @@ func (x *Version) ProtoReflect() protoreflect.Message { // Deprecated: Use Version.ProtoReflect.Descriptor instead. func (*Version) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{3} + return file_p2p_p2p_proto_rawDescGZIP(), []int{4} } func (x *Version) GetNetworkId() uint32 { @@ -621,12 +770,13 @@ type ClaimedIpPort struct { IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` Timestamp uint64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` Signature []byte `protobuf:"bytes,5,opt,name=signature,proto3" json:"signature,omitempty"` + TxId []byte `protobuf:"bytes,6,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` } func (x *ClaimedIpPort) Reset() { *x = ClaimedIpPort{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[4] + mi := &file_p2p_p2p_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -639,7 +789,7 @@ func (x *ClaimedIpPort) String() string { func (*ClaimedIpPort) ProtoMessage() {} func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[4] + mi := &file_p2p_p2p_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil 
{ @@ -652,7 +802,7 @@ func (x *ClaimedIpPort) ProtoReflect() protoreflect.Message { // Deprecated: Use ClaimedIpPort.ProtoReflect.Descriptor instead. func (*ClaimedIpPort) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{4} + return file_p2p_p2p_proto_rawDescGZIP(), []int{5} } func (x *ClaimedIpPort) GetX509Certificate() []byte { @@ -690,6 +840,13 @@ func (x *ClaimedIpPort) GetSignature() []byte { return nil } +func (x *ClaimedIpPort) GetTxId() []byte { + if x != nil { + return x.TxId + } + return nil +} + // Message that contains a list of peer information (IP, certs, etc.) // in response to "version" message, and sent periodically to a set of // validators. @@ -708,7 +865,7 @@ type PeerList struct { func (x *PeerList) Reset() { *x = PeerList{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[5] + mi := &file_p2p_p2p_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -721,7 +878,7 @@ func (x *PeerList) String() string { func (*PeerList) ProtoMessage() {} func (x *PeerList) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[5] + mi := &file_p2p_p2p_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -734,7 +891,7 @@ func (x *PeerList) ProtoReflect() protoreflect.Message { // Deprecated: Use PeerList.ProtoReflect.Descriptor instead. func (*PeerList) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{5} + return file_p2p_p2p_proto_rawDescGZIP(), []int{6} } func (x *PeerList) GetClaimedIpPorts() []*ClaimedIpPort { @@ -744,6 +901,117 @@ func (x *PeerList) GetClaimedIpPorts() []*ClaimedIpPort { return nil } +// "peer_ack" is sent in response to a "peer_list" message. The "tx_id" should +// correspond to a "tx_id" in the "peer_list" message. 
The sender should set +// "timestamp" to be the latest known timestamp of a signed IP corresponding to +// the nodeID of "tx_id". +// +// Upon receipt, the "tx_id" and "timestamp" will determine if the receiptent +// can forgo future gossip of the node's IP to the sender of this message. +type PeerAck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + TxId []byte `protobuf:"bytes,1,opt,name=tx_id,json=txId,proto3" json:"tx_id,omitempty"` + Timestamp uint64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *PeerAck) Reset() { + *x = PeerAck{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_p2p_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerAck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerAck) ProtoMessage() {} + +func (x *PeerAck) ProtoReflect() protoreflect.Message { + mi := &file_p2p_p2p_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerAck.ProtoReflect.Descriptor instead. +func (*PeerAck) Descriptor() ([]byte, []int) { + return file_p2p_p2p_proto_rawDescGZIP(), []int{7} +} + +func (x *PeerAck) GetTxId() []byte { + if x != nil { + return x.TxId + } + return nil +} + +func (x *PeerAck) GetTimestamp() uint64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +// Message that responds to a peer_list message containing the AddValidatorTxIDs +// from the peer_list message that we currently have in our validator set. 
+type PeerListAck struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PeerAcks []*PeerAck `protobuf:"bytes,2,rep,name=peer_acks,json=peerAcks,proto3" json:"peer_acks,omitempty"` +} + +func (x *PeerListAck) Reset() { + *x = PeerListAck{} + if protoimpl.UnsafeEnabled { + mi := &file_p2p_p2p_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PeerListAck) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PeerListAck) ProtoMessage() {} + +func (x *PeerListAck) ProtoReflect() protoreflect.Message { + mi := &file_p2p_p2p_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PeerListAck.ProtoReflect.Descriptor instead. +func (*PeerListAck) Descriptor() ([]byte, []int) { + return file_p2p_p2p_proto_rawDescGZIP(), []int{8} +} + +func (x *PeerListAck) GetPeerAcks() []*PeerAck { + if x != nil { + return x.PeerAcks + } + return nil +} + type GetStateSummaryFrontier struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -757,7 +1025,7 @@ type GetStateSummaryFrontier struct { func (x *GetStateSummaryFrontier) Reset() { *x = GetStateSummaryFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[6] + mi := &file_p2p_p2p_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -770,7 +1038,7 @@ func (x *GetStateSummaryFrontier) String() string { func (*GetStateSummaryFrontier) ProtoMessage() {} func (x *GetStateSummaryFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[6] + mi := &file_p2p_p2p_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -783,7 +1051,7 @@ func (x *GetStateSummaryFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryFrontier.ProtoReflect.Descriptor instead. func (*GetStateSummaryFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{6} + return file_p2p_p2p_proto_rawDescGZIP(), []int{9} } func (x *GetStateSummaryFrontier) GetChainId() []byte { @@ -820,7 +1088,7 @@ type StateSummaryFrontier struct { func (x *StateSummaryFrontier) Reset() { *x = StateSummaryFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[7] + mi := &file_p2p_p2p_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -833,7 +1101,7 @@ func (x *StateSummaryFrontier) String() string { func (*StateSummaryFrontier) ProtoMessage() {} func (x *StateSummaryFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[7] + mi := &file_p2p_p2p_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -846,7 +1114,7 @@ func (x *StateSummaryFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryFrontier.ProtoReflect.Descriptor instead. 
func (*StateSummaryFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{7} + return file_p2p_p2p_proto_rawDescGZIP(), []int{10} } func (x *StateSummaryFrontier) GetChainId() []byte { @@ -884,7 +1152,7 @@ type GetAcceptedStateSummary struct { func (x *GetAcceptedStateSummary) Reset() { *x = GetAcceptedStateSummary{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[8] + mi := &file_p2p_p2p_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -897,7 +1165,7 @@ func (x *GetAcceptedStateSummary) String() string { func (*GetAcceptedStateSummary) ProtoMessage() {} func (x *GetAcceptedStateSummary) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[8] + mi := &file_p2p_p2p_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -910,7 +1178,7 @@ func (x *GetAcceptedStateSummary) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAcceptedStateSummary.ProtoReflect.Descriptor instead. 
func (*GetAcceptedStateSummary) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{8} + return file_p2p_p2p_proto_rawDescGZIP(), []int{11} } func (x *GetAcceptedStateSummary) GetChainId() []byte { @@ -954,7 +1222,7 @@ type AcceptedStateSummary struct { func (x *AcceptedStateSummary) Reset() { *x = AcceptedStateSummary{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[9] + mi := &file_p2p_p2p_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -967,7 +1235,7 @@ func (x *AcceptedStateSummary) String() string { func (*AcceptedStateSummary) ProtoMessage() {} func (x *AcceptedStateSummary) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[9] + mi := &file_p2p_p2p_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -980,7 +1248,7 @@ func (x *AcceptedStateSummary) ProtoReflect() protoreflect.Message { // Deprecated: Use AcceptedStateSummary.ProtoReflect.Descriptor instead. 
func (*AcceptedStateSummary) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{9} + return file_p2p_p2p_proto_rawDescGZIP(), []int{12} } func (x *AcceptedStateSummary) GetChainId() []byte { @@ -1018,15 +1286,16 @@ type GetAcceptedFrontier struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + EngineType EngineType `protobuf:"varint,4,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *GetAcceptedFrontier) Reset() { *x = GetAcceptedFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[10] + mi := &file_p2p_p2p_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1039,7 +1308,7 @@ func (x *GetAcceptedFrontier) String() string { func (*GetAcceptedFrontier) ProtoMessage() {} func (x *GetAcceptedFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[10] + mi := &file_p2p_p2p_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1052,7 +1321,7 @@ func (x *GetAcceptedFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAcceptedFrontier.ProtoReflect.Descriptor instead. 
func (*GetAcceptedFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{10} + return file_p2p_p2p_proto_rawDescGZIP(), []int{13} } func (x *GetAcceptedFrontier) GetChainId() []byte { @@ -1076,6 +1345,13 @@ func (x *GetAcceptedFrontier) GetDeadline() uint64 { return 0 } +func (x *GetAcceptedFrontier) GetEngineType() EngineType { + if x != nil { + return x.EngineType + } + return EngineType_ENGINE_TYPE_UNSPECIFIED +} + // Message that contains the list of accepted frontier in response to // "get_accepted_frontier". For instance, on receiving "get_accepted_frontier", // the X-chain engine responds with the accepted frontier of X-chain DAG. @@ -1094,7 +1370,7 @@ type AcceptedFrontier struct { func (x *AcceptedFrontier) Reset() { *x = AcceptedFrontier{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[11] + mi := &file_p2p_p2p_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1107,7 +1383,7 @@ func (x *AcceptedFrontier) String() string { func (*AcceptedFrontier) ProtoMessage() {} func (x *AcceptedFrontier) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[11] + mi := &file_p2p_p2p_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1120,7 +1396,7 @@ func (x *AcceptedFrontier) ProtoReflect() protoreflect.Message { // Deprecated: Use AcceptedFrontier.ProtoReflect.Descriptor instead. 
func (*AcceptedFrontier) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{11} + return file_p2p_p2p_proto_rawDescGZIP(), []int{14} } func (x *AcceptedFrontier) GetChainId() []byte { @@ -1156,16 +1432,17 @@ type GetAccepted struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - ContainerIds [][]byte `protobuf:"bytes,4,rep,name=container_ids,json=containerIds,proto3" json:"container_ids,omitempty"` + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + ContainerIds [][]byte `protobuf:"bytes,4,rep,name=container_ids,json=containerIds,proto3" json:"container_ids,omitempty"` + EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *GetAccepted) Reset() { *x = GetAccepted{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[12] + mi := &file_p2p_p2p_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1178,7 +1455,7 @@ func (x *GetAccepted) String() string { func (*GetAccepted) ProtoMessage() {} func (x *GetAccepted) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[12] + mi := &file_p2p_p2p_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1191,7 +1468,7 @@ func (x *GetAccepted) 
ProtoReflect() protoreflect.Message { // Deprecated: Use GetAccepted.ProtoReflect.Descriptor instead. func (*GetAccepted) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{12} + return file_p2p_p2p_proto_rawDescGZIP(), []int{15} } func (x *GetAccepted) GetChainId() []byte { @@ -1222,6 +1499,13 @@ func (x *GetAccepted) GetContainerIds() [][]byte { return nil } +func (x *GetAccepted) GetEngineType() EngineType { + if x != nil { + return x.EngineType + } + return EngineType_ENGINE_TYPE_UNSPECIFIED +} + // Message that contains the list of accepted block/vertex IDs in response to // "get_accepted". For instance, on receiving "get_accepted" that contains // the sender's accepted frontier IDs, the X-chain engine responds only with @@ -1242,7 +1526,7 @@ type Accepted struct { func (x *Accepted) Reset() { *x = Accepted{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[13] + mi := &file_p2p_p2p_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1255,7 +1539,7 @@ func (x *Accepted) String() string { func (*Accepted) ProtoMessage() {} func (x *Accepted) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[13] + mi := &file_p2p_p2p_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1268,7 +1552,7 @@ func (x *Accepted) ProtoReflect() protoreflect.Message { // Deprecated: Use Accepted.ProtoReflect.Descriptor instead. 
func (*Accepted) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{13} + return file_p2p_p2p_proto_rawDescGZIP(), []int{16} } func (x *Accepted) GetChainId() []byte { @@ -1303,16 +1587,17 @@ type GetAncestors struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *GetAncestors) Reset() { *x = GetAncestors{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[14] + mi := &file_p2p_p2p_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1325,7 +1610,7 @@ func (x *GetAncestors) String() string { func (*GetAncestors) ProtoMessage() {} func (x *GetAncestors) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[14] + mi := &file_p2p_p2p_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1338,7 +1623,7 @@ func (x *GetAncestors) ProtoReflect() 
protoreflect.Message { // Deprecated: Use GetAncestors.ProtoReflect.Descriptor instead. func (*GetAncestors) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{14} + return file_p2p_p2p_proto_rawDescGZIP(), []int{17} } func (x *GetAncestors) GetChainId() []byte { @@ -1369,6 +1654,13 @@ func (x *GetAncestors) GetContainerId() []byte { return nil } +func (x *GetAncestors) GetEngineType() EngineType { + if x != nil { + return x.EngineType + } + return EngineType_ENGINE_TYPE_UNSPECIFIED +} + // Message that contains the container bytes of the ancestors // in response to "get_ancestors". // @@ -1387,7 +1679,7 @@ type Ancestors struct { func (x *Ancestors) Reset() { *x = Ancestors{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[15] + mi := &file_p2p_p2p_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1400,7 +1692,7 @@ func (x *Ancestors) String() string { func (*Ancestors) ProtoMessage() {} func (x *Ancestors) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[15] + mi := &file_p2p_p2p_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1413,7 +1705,7 @@ func (x *Ancestors) ProtoReflect() protoreflect.Message { // Deprecated: Use Ancestors.ProtoReflect.Descriptor instead. 
func (*Ancestors) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{15} + return file_p2p_p2p_proto_rawDescGZIP(), []int{18} } func (x *Ancestors) GetChainId() []byte { @@ -1446,16 +1738,17 @@ type Get struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *Get) Reset() { *x = Get{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[16] + mi := &file_p2p_p2p_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1468,7 +1761,7 @@ func (x *Get) String() string { func (*Get) ProtoMessage() {} func (x *Get) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[16] + mi := &file_p2p_p2p_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1481,7 +1774,7 @@ func (x *Get) ProtoReflect() protoreflect.Message { // Deprecated: Use Get.ProtoReflect.Descriptor instead. 
func (*Get) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{16} + return file_p2p_p2p_proto_rawDescGZIP(), []int{19} } func (x *Get) GetChainId() []byte { @@ -1512,6 +1805,13 @@ func (x *Get) GetContainerId() []byte { return nil } +func (x *Get) GetEngineType() EngineType { + if x != nil { + return x.EngineType + } + return EngineType_ENGINE_TYPE_UNSPECIFIED +} + // Message that contains the container ID and its bytes in response to "get". // // On receiving "put", the engine parses the container and tries to issue it to consensus. @@ -1520,15 +1820,16 @@ type Put struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Container []byte `protobuf:"bytes,3,opt,name=container,proto3" json:"container,omitempty"` + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Container []byte `protobuf:"bytes,3,opt,name=container,proto3" json:"container,omitempty"` + EngineType EngineType `protobuf:"varint,4,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *Put) Reset() { *x = Put{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[17] + mi := &file_p2p_p2p_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1541,7 +1842,7 @@ func (x *Put) String() string { func (*Put) ProtoMessage() {} func (x *Put) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[17] + mi := &file_p2p_p2p_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() 
== nil { @@ -1554,7 +1855,7 @@ func (x *Put) ProtoReflect() protoreflect.Message { // Deprecated: Use Put.ProtoReflect.Descriptor instead. func (*Put) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{17} + return file_p2p_p2p_proto_rawDescGZIP(), []int{20} } func (x *Put) GetChainId() []byte { @@ -1578,6 +1879,13 @@ func (x *Put) GetContainer() []byte { return nil } +func (x *Put) GetEngineType() EngineType { + if x != nil { + return x.EngineType + } + return EngineType_ENGINE_TYPE_UNSPECIFIED +} + // Message that contains a preferred container ID and its container bytes // in order to query other peers for their preferences of the container. // For example, when a new container is issued, the engine sends out @@ -1592,16 +1900,17 @@ type PushQuery struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - Container []byte `protobuf:"bytes,4,opt,name=container,proto3" json:"container,omitempty"` + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + Container []byte `protobuf:"bytes,4,opt,name=container,proto3" json:"container,omitempty"` + EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *PushQuery) Reset() { *x = PushQuery{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[18] + mi := &file_p2p_p2p_proto_msgTypes[21] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1614,7 +1923,7 @@ func (x *PushQuery) String() string { func (*PushQuery) ProtoMessage() {} func (x *PushQuery) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[18] + mi := &file_p2p_p2p_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1627,7 +1936,7 @@ func (x *PushQuery) ProtoReflect() protoreflect.Message { // Deprecated: Use PushQuery.ProtoReflect.Descriptor instead. func (*PushQuery) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{18} + return file_p2p_p2p_proto_rawDescGZIP(), []int{21} } func (x *PushQuery) GetChainId() []byte { @@ -1658,6 +1967,13 @@ func (x *PushQuery) GetContainer() []byte { return nil } +func (x *PushQuery) GetEngineType() EngineType { + if x != nil { + return x.EngineType + } + return EngineType_ENGINE_TYPE_UNSPECIFIED +} + // Message that contains a preferred container ID to query other peers // for their preferences of the container. 
// For example, when a new container is issued, the engine sends out @@ -1668,16 +1984,17 @@ type PullQuery struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` - ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + Deadline uint64 `protobuf:"varint,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + ContainerId []byte `protobuf:"bytes,4,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` + EngineType EngineType `protobuf:"varint,5,opt,name=engine_type,json=engineType,proto3,enum=p2p.EngineType" json:"engine_type,omitempty"` } func (x *PullQuery) Reset() { *x = PullQuery{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[19] + mi := &file_p2p_p2p_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1690,7 +2007,7 @@ func (x *PullQuery) String() string { func (*PullQuery) ProtoMessage() {} func (x *PullQuery) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[19] + mi := &file_p2p_p2p_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1703,7 +2020,7 @@ func (x *PullQuery) ProtoReflect() protoreflect.Message { // Deprecated: Use PullQuery.ProtoReflect.Descriptor instead. 
func (*PullQuery) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{19} + return file_p2p_p2p_proto_rawDescGZIP(), []int{22} } func (x *PullQuery) GetChainId() []byte { @@ -1734,6 +2051,13 @@ func (x *PullQuery) GetContainerId() []byte { return nil } +func (x *PullQuery) GetEngineType() EngineType { + if x != nil { + return x.EngineType + } + return EngineType_ENGINE_TYPE_UNSPECIFIED +} + // Message that contains the votes/preferences of the local node, // in response to "push_query" or "pull_query" (e.g., preferred frontier). // @@ -1745,15 +2069,20 @@ type Chits struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - ContainerIds [][]byte `protobuf:"bytes,3,rep,name=container_ids,json=containerIds,proto3" json:"container_ids,omitempty"` + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // Represents the current preferred frontier. + // TODO: Remove `repeated` once all chains are running Snowman. + PreferredContainerIds [][]byte `protobuf:"bytes,3,rep,name=preferred_container_ids,json=preferredContainerIds,proto3" json:"preferred_container_ids,omitempty"` + // Represents the current accepted frontier. + // TODO: Remove `repeated` once all chains are running Snowman. 
+ AcceptedContainerIds [][]byte `protobuf:"bytes,4,rep,name=accepted_container_ids,json=acceptedContainerIds,proto3" json:"accepted_container_ids,omitempty"` } func (x *Chits) Reset() { *x = Chits{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[20] + mi := &file_p2p_p2p_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1766,7 +2095,7 @@ func (x *Chits) String() string { func (*Chits) ProtoMessage() {} func (x *Chits) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[20] + mi := &file_p2p_p2p_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1779,7 +2108,7 @@ func (x *Chits) ProtoReflect() protoreflect.Message { // Deprecated: Use Chits.ProtoReflect.Descriptor instead. func (*Chits) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{20} + return file_p2p_p2p_proto_rawDescGZIP(), []int{23} } func (x *Chits) GetChainId() []byte { @@ -1796,9 +2125,16 @@ func (x *Chits) GetRequestId() uint32 { return 0 } -func (x *Chits) GetContainerIds() [][]byte { +func (x *Chits) GetPreferredContainerIds() [][]byte { if x != nil { - return x.ContainerIds + return x.PreferredContainerIds + } + return nil +} + +func (x *Chits) GetAcceptedContainerIds() [][]byte { + if x != nil { + return x.AcceptedContainerIds } return nil } @@ -1817,7 +2153,7 @@ type AppRequest struct { func (x *AppRequest) Reset() { *x = AppRequest{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[21] + mi := &file_p2p_p2p_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1830,7 +2166,7 @@ func (x *AppRequest) String() string { func (*AppRequest) ProtoMessage() {} func (x *AppRequest) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[21] + mi := &file_p2p_p2p_proto_msgTypes[24] if 
protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1843,7 +2179,7 @@ func (x *AppRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequest.ProtoReflect.Descriptor instead. func (*AppRequest) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{21} + return file_p2p_p2p_proto_rawDescGZIP(), []int{24} } func (x *AppRequest) GetChainId() []byte { @@ -1887,7 +2223,7 @@ type AppResponse struct { func (x *AppResponse) Reset() { *x = AppResponse{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[22] + mi := &file_p2p_p2p_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1900,7 +2236,7 @@ func (x *AppResponse) String() string { func (*AppResponse) ProtoMessage() {} func (x *AppResponse) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[22] + mi := &file_p2p_p2p_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1913,7 +2249,7 @@ func (x *AppResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use AppResponse.ProtoReflect.Descriptor instead. 
func (*AppResponse) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{22} + return file_p2p_p2p_proto_rawDescGZIP(), []int{25} } func (x *AppResponse) GetChainId() []byte { @@ -1949,7 +2285,7 @@ type AppGossip struct { func (x *AppGossip) Reset() { *x = AppGossip{} if protoimpl.UnsafeEnabled { - mi := &file_p2p_p2p_proto_msgTypes[23] + mi := &file_p2p_p2p_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1962,7 +2298,7 @@ func (x *AppGossip) String() string { func (*AppGossip) ProtoMessage() {} func (x *AppGossip) ProtoReflect() protoreflect.Message { - mi := &file_p2p_p2p_proto_msgTypes[23] + mi := &file_p2p_p2p_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1975,7 +2311,7 @@ func (x *AppGossip) ProtoReflect() protoreflect.Message { // Deprecated: Use AppGossip.ProtoReflect.Descriptor instead. func (*AppGossip) Descriptor() ([]byte, []int) { - return file_p2p_p2p_proto_rawDescGZIP(), []int{23} + return file_p2p_p2p_proto_rawDescGZIP(), []int{26} } func (x *AppGossip) GetChainId() []byte { @@ -1996,217 +2332,260 @@ var File_p2p_p2p_proto protoreflect.FileDescriptor var file_p2p_p2p_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x70, 0x32, 0x70, 0x2f, 0x70, 0x32, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x03, 0x70, 0x32, 0x70, 0x22, 0xfb, 0x09, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x03, 0x70, 0x32, 0x70, 0x22, 0xde, 0x0a, 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x67, 0x7a, 0x69, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x47, 0x7a, 0x69, 0x70, 0x12, 0x1f, 0x0a, 0x04, 0x70, - 0x69, 0x6e, 0x67, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x50, 
0x69, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x04, - 0x70, 0x6f, 0x6e, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, - 0x2e, 0x50, 0x6f, 0x6e, 0x67, 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x12, 0x28, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, - 0x6c, 0x69, 0x73, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, - 0x2e, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x65, 0x65, - 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, - 0x69, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x53, 0x74, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x47, 0x7a, 0x69, 0x70, 0x12, 0x29, 0x0a, 0x0f, 0x63, + 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x5f, 0x7a, 0x73, 0x74, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x65, 0x64, 0x5a, 0x73, 0x74, 0x64, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x48, + 0x00, 0x52, 0x04, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x04, 0x70, 0x6f, 0x6e, 0x67, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x6f, 0x6e, 0x67, + 0x48, 0x00, 0x52, 0x04, 0x70, 0x6f, 0x6e, 
0x67, 0x12, 0x28, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x32, 0x70, 0x2e, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x09, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, + 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, + 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, - 0x65, 0x72, 0x12, 0x51, 0x0a, 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, - 0x14, 0x73, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, - 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 
0x53, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x51, 0x0a, + 0x16, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x66, + 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x14, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, + 0x12, 0x5b, 0x0a, 0x1a, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x11, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x12, 0x51, 0x0a, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, - 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x4e, 0x0a, 0x15, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x13, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, - 0x52, 0x13, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, - 0x6e, 0x74, 0x69, 0x65, 
0x72, 0x12, 0x44, 0x0a, 0x11, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x61, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, 0x67, - 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, - 0x65, 0x64, 0x12, 0x2b, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x16, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, - 0x38, 0x0a, 0x0d, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, - 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, - 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x09, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, - 0x32, 0x70, 0x2e, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x09, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74, - 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, - 0x48, 0x00, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x1a, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x32, 0x70, 
0x2e, 0x50, 0x75, 0x74, 0x48, 0x00, - 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, - 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x73, - 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x6c, 0x6c, 0x5f, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, - 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, - 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x22, 0x0a, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, - 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x68, 0x69, - 0x74, 0x73, 0x48, 0x00, 0x52, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x61, - 0x70, 0x70, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x35, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x67, 0x6f, - 0x73, 0x73, 0x69, 0x70, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, - 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, - 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x06, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x22, 0x25, 0x0a, 0x04, 0x50, 0x6f, - 
0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x70, 0x63, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x63, - 0x74, 0x22, 0xf5, 0x01, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, - 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, - 0x6d, 0x79, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, - 0x79, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, - 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x79, 0x5f, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x79, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0d, 0x6d, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x73, 0x69, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, - 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6e, - 0x65, 0x74, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, - 0x65, 0x64, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x22, 0xa8, 0x01, 0x0a, 0x0d, 0x43, 0x6c, - 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x78, - 0x35, 0x30, 0x39, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 
0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, - 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, - 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x06, 0x69, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, + 0x72, 0x79, 0x48, 0x00, 0x52, 0x17, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x51, 0x0a, + 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x48, 0x00, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x12, 0x4e, 0x0a, 0x15, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, + 0x5f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x48, 0x00, 0x52, 0x13, 0x67, 0x65, 0x74, + 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 
0x74, 0x69, 0x65, 0x72, + 0x12, 0x44, 0x0a, 0x11, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x72, 0x6f, + 0x6e, 0x74, 0x69, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, + 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x0c, 0x67, 0x65, 0x74, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, + 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, + 0x52, 0x0b, 0x67, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x2b, 0x0a, + 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0d, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x48, 0x00, + 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x0d, 0x67, 0x65, + 0x74, 0x5f, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x73, 0x12, 0x2e, 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x6e, + 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x48, 0x00, 0x52, 0x09, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x74, 0x6f, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x00, 0x52, 0x03, 0x67, + 0x65, 0x74, 0x12, 0x1c, 0x0a, 0x03, 0x70, 0x75, 0x74, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x08, 0x2e, 
0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x75, 0x74, + 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x1b, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x2f, 0x0a, 0x0a, 0x70, 0x75, 0x6c, 0x6c, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, + 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x75, 0x6c, 0x6c, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x48, 0x00, 0x52, 0x09, 0x70, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x12, 0x22, 0x0a, 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x68, 0x69, 0x74, 0x73, 0x48, 0x00, 0x52, + 0x05, 0x63, 0x68, 0x69, 0x74, 0x73, 0x12, 0x32, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, + 0x61, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x35, 0x0a, 0x0c, 0x61, 0x70, + 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x61, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x2f, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x67, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x18, + 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x41, 0x70, 0x70, 0x47, + 0x6f, 0x73, 0x73, 0x69, 0x70, 0x48, 0x00, 0x52, 0x09, 0x61, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, + 0x69, 0x70, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x5f, + 0x61, 0x63, 0x6b, 0x18, 0x21, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x10, 0x2e, 0x70, 0x32, 0x70, 0x2e, + 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x48, 0x00, 0x52, 0x0b, 0x70, + 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x06, 0x0a, 0x04, 0x50, 0x69, 0x6e, 0x67, 0x22, 0x43, 0x0a, + 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, + 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x22, 0x58, 0x0a, 0x04, 0x50, 0x6f, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x75, 0x70, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x75, 0x70, 0x74, 0x69, + 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0e, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x75, 0x70, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x32, 0x70, + 0x2e, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0d, 0x73, + 0x75, 0x62, 0x6e, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x22, 0xf5, 0x01, 0x0a, + 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, + 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, + 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6d, 0x79, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6d, 0x79, 0x54, 0x69, 0x6d, 0x65, + 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, + 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 
0x50, 0x6f, + 0x72, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x79, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x79, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x79, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x69, 0x67, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x73, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x74, + 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x73, 0x18, 0x08, + 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x53, 0x75, 0x62, + 0x6e, 0x65, 0x74, 0x73, 0x22, 0xbd, 0x01, 0x0a, 0x0d, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, + 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x78, 0x35, 0x30, 0x39, 0x5f, 0x63, + 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0f, 0x78, 0x35, 0x30, 0x39, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x69, 0x70, + 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x69, 0x70, 0x50, + 0x6f, 0x72, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, + 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x74, 0x78, 0x49, 0x64, 
0x22, 0x48, 0x0a, 0x08, 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x10, 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x69, 0x70, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x43, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x0e, - 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x6f, - 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, - 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, - 0x6a, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, + 0x63, 0x6c, 0x61, 0x69, 0x6d, 0x65, 0x64, 0x49, 0x70, 0x50, 0x6f, 0x72, 0x74, 0x73, 0x22, 0x3c, + 0x0a, 0x07, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 0x6b, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x78, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x78, 0x49, 0x64, 0x12, 0x1c, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3e, 0x0a, 0x0b, + 0x50, 0x65, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x6b, 0x12, 0x29, 0x0a, 0x09, 0x70, + 0x65, 0x65, 0x72, 0x5f, 0x61, 0x63, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x41, 0x63, 
0x6b, 0x52, 0x08, 0x70, 0x65, + 0x65, 0x72, 0x41, 0x63, 0x6b, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x22, 0x6f, 0x0a, 0x17, + 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x17, - 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x07, - 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, 0x14, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, - 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x71, 0x75, 0x65, 
0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, - 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, 0x22, 0x6b, 0x0a, 0x13, 0x47, 0x65, - 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, - 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x6a, 0x0a, + 0x14, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x46, 0x72, 0x6f, + 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0x89, 0x01, 0x0a, 0x17, 0x47, 0x65, + 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, + 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, + 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 
0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x68, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x73, 0x22, 0x71, 0x0a, 0x14, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x19, 0x0a, + 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x75, + 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x49, 0x64, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, + 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, + 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, + 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x77, 0x0a, 0x10, 0x41, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 
0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, + 0x05, 0x22, 0xba, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x22, 0x71, 0x0a, 0x10, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x65, 0x64, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x69, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x08, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x0b, 0x47, - 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 
0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x30, 0x0a, 0x0b, + 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6f, + 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x49, 0x64, 0x73, 0x22, 0x69, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, - 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 
0x03, 0x20, 0x03, - 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, - 0x22, 0x87, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, - 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x65, 0x0a, 0x09, 0x41, 0x6e, - 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x73, 0x22, 0x7e, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, - 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 
0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x64, 0x22, 0x5d, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, + 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, + 0xb9, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, + 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x6b, 0x0a, 0x09, 0x41, + 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 
0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x22, 0x7f, 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, - 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x22, 0x84, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, + 0x49, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xb0, 0x01, 0x0a, 0x03, 0x47, 0x65, 0x74, + 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, + 
0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, + 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, + 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, + 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0x8f, 0x01, 0x0a, 0x03, + 0x50, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1c, 0x0a, + 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x0b, 0x65, + 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0xb1, 0x01, + 0x0a, 0x09, 0x50, 0x75, 0x73, 0x68, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, + 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, + 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, + 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, + 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x09, 0x50, 0x75, 0x6c, 0x6c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, @@ -2214,36 +2593,50 @@ var file_p2p_p2p_proto_rawDesc = []byte{ 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x66, 0x0a, 0x05, 0x43, 0x68, 0x69, 0x74, - 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x63, - 0x6f, 0x6e, 0x74, 
0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0c, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, - 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, - 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, - 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, - 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, + 0x6e, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0f, 0x2e, + 0x70, 0x32, 0x70, 0x2e, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, + 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x22, 0xb5, 0x01, 0x0a, 0x05, 0x43, + 0x68, 0x69, 0x74, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, + 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, + 0x0a, 0x17, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, 
0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, + 0x15, 0x70, 0x72, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x14, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x73, 0x4a, 0x04, 0x08, 0x05, + 0x10, 0x06, 0x22, 0x7f, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, - 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, - 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, - 0x73, 0x73, 0x69, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, - 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x42, 0x2e, 0x5a, 0x2c, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, - 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 
0x0a, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x64, 0x65, + 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, + 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, + 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x09, 0x41, 0x70, 0x70, + 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x61, 0x70, 0x70, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x61, 0x70, 0x70, 0x42, 0x79, 0x74, 0x65, 0x73, 0x2a, 0x5d, + 0x0a, 0x0a, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x17, + 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, + 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x45, 0x4e, 0x47, + 0x49, 0x4e, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x56, 0x41, 0x4c, 0x41, 0x4e, 0x43, + 0x48, 0x45, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x4e, 0x47, 0x49, 0x4e, 0x45, 0x5f, 0x54, + 0x59, 0x50, 0x45, 0x5f, 0x53, 0x4e, 0x4f, 0x57, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x42, 0x2e, 0x5a, + 0x2c, 0x67, 
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, + 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x32, 0x70, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2258,62 +2651,77 @@ func file_p2p_p2p_proto_rawDescGZIP() []byte { return file_p2p_p2p_proto_rawDescData } -var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 24) +var file_p2p_p2p_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_p2p_p2p_proto_msgTypes = make([]protoimpl.MessageInfo, 27) var file_p2p_p2p_proto_goTypes = []interface{}{ - (*Message)(nil), // 0: p2p.Message - (*Ping)(nil), // 1: p2p.Ping - (*Pong)(nil), // 2: p2p.Pong - (*Version)(nil), // 3: p2p.Version - (*ClaimedIpPort)(nil), // 4: p2p.ClaimedIpPort - (*PeerList)(nil), // 5: p2p.PeerList - (*GetStateSummaryFrontier)(nil), // 6: p2p.GetStateSummaryFrontier - (*StateSummaryFrontier)(nil), // 7: p2p.StateSummaryFrontier - (*GetAcceptedStateSummary)(nil), // 8: p2p.GetAcceptedStateSummary - (*AcceptedStateSummary)(nil), // 9: p2p.AcceptedStateSummary - (*GetAcceptedFrontier)(nil), // 10: p2p.GetAcceptedFrontier - (*AcceptedFrontier)(nil), // 11: p2p.AcceptedFrontier - (*GetAccepted)(nil), // 12: p2p.GetAccepted - (*Accepted)(nil), // 13: p2p.Accepted - (*GetAncestors)(nil), // 14: p2p.GetAncestors - (*Ancestors)(nil), // 15: p2p.Ancestors - (*Get)(nil), // 16: p2p.Get - (*Put)(nil), // 17: p2p.Put - (*PushQuery)(nil), // 18: p2p.PushQuery - (*PullQuery)(nil), // 19: p2p.PullQuery - (*Chits)(nil), // 20: p2p.Chits - (*AppRequest)(nil), // 21: p2p.AppRequest - (*AppResponse)(nil), // 22: p2p.AppResponse - (*AppGossip)(nil), // 23: p2p.AppGossip + (EngineType)(0), // 0: p2p.EngineType + (*Message)(nil), // 1: p2p.Message + (*Ping)(nil), // 2: p2p.Ping + (*SubnetUptime)(nil), // 3: p2p.SubnetUptime + (*Pong)(nil), // 4: p2p.Pong + (*Version)(nil), // 5: p2p.Version + 
(*ClaimedIpPort)(nil), // 6: p2p.ClaimedIpPort + (*PeerList)(nil), // 7: p2p.PeerList + (*PeerAck)(nil), // 8: p2p.PeerAck + (*PeerListAck)(nil), // 9: p2p.PeerListAck + (*GetStateSummaryFrontier)(nil), // 10: p2p.GetStateSummaryFrontier + (*StateSummaryFrontier)(nil), // 11: p2p.StateSummaryFrontier + (*GetAcceptedStateSummary)(nil), // 12: p2p.GetAcceptedStateSummary + (*AcceptedStateSummary)(nil), // 13: p2p.AcceptedStateSummary + (*GetAcceptedFrontier)(nil), // 14: p2p.GetAcceptedFrontier + (*AcceptedFrontier)(nil), // 15: p2p.AcceptedFrontier + (*GetAccepted)(nil), // 16: p2p.GetAccepted + (*Accepted)(nil), // 17: p2p.Accepted + (*GetAncestors)(nil), // 18: p2p.GetAncestors + (*Ancestors)(nil), // 19: p2p.Ancestors + (*Get)(nil), // 20: p2p.Get + (*Put)(nil), // 21: p2p.Put + (*PushQuery)(nil), // 22: p2p.PushQuery + (*PullQuery)(nil), // 23: p2p.PullQuery + (*Chits)(nil), // 24: p2p.Chits + (*AppRequest)(nil), // 25: p2p.AppRequest + (*AppResponse)(nil), // 26: p2p.AppResponse + (*AppGossip)(nil), // 27: p2p.AppGossip } var file_p2p_p2p_proto_depIdxs = []int32{ - 1, // 0: p2p.Message.ping:type_name -> p2p.Ping - 2, // 1: p2p.Message.pong:type_name -> p2p.Pong - 3, // 2: p2p.Message.version:type_name -> p2p.Version - 5, // 3: p2p.Message.peer_list:type_name -> p2p.PeerList - 6, // 4: p2p.Message.get_state_summary_frontier:type_name -> p2p.GetStateSummaryFrontier - 7, // 5: p2p.Message.state_summary_frontier:type_name -> p2p.StateSummaryFrontier - 8, // 6: p2p.Message.get_accepted_state_summary:type_name -> p2p.GetAcceptedStateSummary - 9, // 7: p2p.Message.accepted_state_summary:type_name -> p2p.AcceptedStateSummary - 10, // 8: p2p.Message.get_accepted_frontier:type_name -> p2p.GetAcceptedFrontier - 11, // 9: p2p.Message.accepted_frontier:type_name -> p2p.AcceptedFrontier - 12, // 10: p2p.Message.get_accepted:type_name -> p2p.GetAccepted - 13, // 11: p2p.Message.accepted:type_name -> p2p.Accepted - 14, // 12: p2p.Message.get_ancestors:type_name -> 
p2p.GetAncestors - 15, // 13: p2p.Message.ancestors:type_name -> p2p.Ancestors - 16, // 14: p2p.Message.get:type_name -> p2p.Get - 17, // 15: p2p.Message.put:type_name -> p2p.Put - 18, // 16: p2p.Message.push_query:type_name -> p2p.PushQuery - 19, // 17: p2p.Message.pull_query:type_name -> p2p.PullQuery - 20, // 18: p2p.Message.chits:type_name -> p2p.Chits - 21, // 19: p2p.Message.app_request:type_name -> p2p.AppRequest - 22, // 20: p2p.Message.app_response:type_name -> p2p.AppResponse - 23, // 21: p2p.Message.app_gossip:type_name -> p2p.AppGossip - 4, // 22: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort - 23, // [23:23] is the sub-list for method output_type - 23, // [23:23] is the sub-list for method input_type - 23, // [23:23] is the sub-list for extension type_name - 23, // [23:23] is the sub-list for extension extendee - 0, // [0:23] is the sub-list for field type_name + 2, // 0: p2p.Message.ping:type_name -> p2p.Ping + 4, // 1: p2p.Message.pong:type_name -> p2p.Pong + 5, // 2: p2p.Message.version:type_name -> p2p.Version + 7, // 3: p2p.Message.peer_list:type_name -> p2p.PeerList + 10, // 4: p2p.Message.get_state_summary_frontier:type_name -> p2p.GetStateSummaryFrontier + 11, // 5: p2p.Message.state_summary_frontier:type_name -> p2p.StateSummaryFrontier + 12, // 6: p2p.Message.get_accepted_state_summary:type_name -> p2p.GetAcceptedStateSummary + 13, // 7: p2p.Message.accepted_state_summary:type_name -> p2p.AcceptedStateSummary + 14, // 8: p2p.Message.get_accepted_frontier:type_name -> p2p.GetAcceptedFrontier + 15, // 9: p2p.Message.accepted_frontier:type_name -> p2p.AcceptedFrontier + 16, // 10: p2p.Message.get_accepted:type_name -> p2p.GetAccepted + 17, // 11: p2p.Message.accepted:type_name -> p2p.Accepted + 18, // 12: p2p.Message.get_ancestors:type_name -> p2p.GetAncestors + 19, // 13: p2p.Message.ancestors:type_name -> p2p.Ancestors + 20, // 14: p2p.Message.get:type_name -> p2p.Get + 21, // 15: p2p.Message.put:type_name -> p2p.Put + 22, // 
16: p2p.Message.push_query:type_name -> p2p.PushQuery + 23, // 17: p2p.Message.pull_query:type_name -> p2p.PullQuery + 24, // 18: p2p.Message.chits:type_name -> p2p.Chits + 25, // 19: p2p.Message.app_request:type_name -> p2p.AppRequest + 26, // 20: p2p.Message.app_response:type_name -> p2p.AppResponse + 27, // 21: p2p.Message.app_gossip:type_name -> p2p.AppGossip + 9, // 22: p2p.Message.peer_list_ack:type_name -> p2p.PeerListAck + 3, // 23: p2p.Pong.subnet_uptimes:type_name -> p2p.SubnetUptime + 6, // 24: p2p.PeerList.claimed_ip_ports:type_name -> p2p.ClaimedIpPort + 8, // 25: p2p.PeerListAck.peer_acks:type_name -> p2p.PeerAck + 0, // 26: p2p.GetAcceptedFrontier.engine_type:type_name -> p2p.EngineType + 0, // 27: p2p.GetAccepted.engine_type:type_name -> p2p.EngineType + 0, // 28: p2p.GetAncestors.engine_type:type_name -> p2p.EngineType + 0, // 29: p2p.Get.engine_type:type_name -> p2p.EngineType + 0, // 30: p2p.Put.engine_type:type_name -> p2p.EngineType + 0, // 31: p2p.PushQuery.engine_type:type_name -> p2p.EngineType + 0, // 32: p2p.PullQuery.engine_type:type_name -> p2p.EngineType + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name } func init() { file_p2p_p2p_proto_init() } @@ -2347,7 +2755,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Pong); i { + switch v := v.(*SubnetUptime); i { case 0: return &v.state case 1: @@ -2359,7 +2767,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Version); i { + switch v := v.(*Pong); i { case 0: return &v.state case 1: @@ -2371,7 +2779,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[4].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*ClaimedIpPort); i { + switch v := v.(*Version); i { case 0: return &v.state case 1: @@ -2383,7 +2791,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PeerList); i { + switch v := v.(*ClaimedIpPort); i { case 0: return &v.state case 1: @@ -2395,7 +2803,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetStateSummaryFrontier); i { + switch v := v.(*PeerList); i { case 0: return &v.state case 1: @@ -2407,7 +2815,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateSummaryFrontier); i { + switch v := v.(*PeerAck); i { case 0: return &v.state case 1: @@ -2419,7 +2827,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAcceptedStateSummary); i { + switch v := v.(*PeerListAck); i { case 0: return &v.state case 1: @@ -2431,7 +2839,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AcceptedStateSummary); i { + switch v := v.(*GetStateSummaryFrontier); i { case 0: return &v.state case 1: @@ -2443,7 +2851,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAcceptedFrontier); i { + switch v := v.(*StateSummaryFrontier); i { case 0: return &v.state case 1: @@ -2455,7 +2863,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AcceptedFrontier); i { + switch v := v.(*GetAcceptedStateSummary); i { case 0: return &v.state case 1: @@ -2467,7 +2875,7 @@ func file_p2p_p2p_proto_init() 
{ } } file_p2p_p2p_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAccepted); i { + switch v := v.(*AcceptedStateSummary); i { case 0: return &v.state case 1: @@ -2479,7 +2887,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Accepted); i { + switch v := v.(*GetAcceptedFrontier); i { case 0: return &v.state case 1: @@ -2491,7 +2899,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAncestors); i { + switch v := v.(*AcceptedFrontier); i { case 0: return &v.state case 1: @@ -2503,7 +2911,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Ancestors); i { + switch v := v.(*GetAccepted); i { case 0: return &v.state case 1: @@ -2515,7 +2923,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Get); i { + switch v := v.(*Accepted); i { case 0: return &v.state case 1: @@ -2527,7 +2935,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Put); i { + switch v := v.(*GetAncestors); i { case 0: return &v.state case 1: @@ -2539,7 +2947,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PushQuery); i { + switch v := v.(*Ancestors); i { case 0: return &v.state case 1: @@ -2551,7 +2959,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PullQuery); i { + switch v := v.(*Get); i { case 0: return &v.state case 1: @@ -2563,7 +2971,7 @@ func file_p2p_p2p_proto_init() { } } 
file_p2p_p2p_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Chits); i { + switch v := v.(*Put); i { case 0: return &v.state case 1: @@ -2575,7 +2983,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppRequest); i { + switch v := v.(*PushQuery); i { case 0: return &v.state case 1: @@ -2587,7 +2995,7 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppResponse); i { + switch v := v.(*PullQuery); i { case 0: return &v.state case 1: @@ -2599,6 +3007,42 @@ func file_p2p_p2p_proto_init() { } } file_p2p_p2p_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Chits); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_p2p_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_p2p_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AppResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_p2p_p2p_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*AppGossip); i { case 0: return &v.state @@ -2613,6 +3057,7 @@ func file_p2p_p2p_proto_init() { } file_p2p_p2p_proto_msgTypes[0].OneofWrappers = []interface{}{ (*Message_CompressedGzip)(nil), + (*Message_CompressedZstd)(nil), (*Message_Ping)(nil), (*Message_Pong)(nil), (*Message_Version)(nil), @@ -2635,19 +3080,21 @@ func file_p2p_p2p_proto_init() { (*Message_AppRequest)(nil), (*Message_AppResponse)(nil), 
(*Message_AppGossip)(nil), + (*Message_PeerListAck)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_p2p_p2p_proto_rawDesc, - NumEnums: 0, - NumMessages: 24, + NumEnums: 1, + NumMessages: 27, NumExtensions: 0, NumServices: 0, }, GoTypes: file_p2p_p2p_proto_goTypes, DependencyIndexes: file_p2p_p2p_proto_depIdxs, + EnumInfos: file_p2p_p2p_proto_enumTypes, MessageInfos: file_p2p_p2p_proto_msgTypes, }.Build() File_p2p_p2p_proto = out.File diff --git a/avalanchego/proto/pb/plugin/plugin.pb.go b/avalanchego/proto/pb/plugin/plugin.pb.go deleted file mode 100644 index bf382a99..00000000 --- a/avalanchego/proto/pb/plugin/plugin.pb.go +++ /dev/null @@ -1,166 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.0 -// protoc (unknown) -// source: plugin/plugin.proto - -package plugin - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - emptypb "google.golang.org/protobuf/types/known/emptypb" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type ExitCodeResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ExitCode int32 `protobuf:"varint,1,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"` -} - -func (x *ExitCodeResponse) Reset() { - *x = ExitCodeResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_plugin_plugin_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExitCodeResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExitCodeResponse) ProtoMessage() {} - -func (x *ExitCodeResponse) ProtoReflect() protoreflect.Message { - mi := &file_plugin_plugin_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExitCodeResponse.ProtoReflect.Descriptor instead. 
-func (*ExitCodeResponse) Descriptor() ([]byte, []int) { - return file_plugin_plugin_proto_rawDescGZIP(), []int{0} -} - -func (x *ExitCodeResponse) GetExitCode() int32 { - if x != nil { - return x.ExitCode - } - return 0 -} - -var File_plugin_plugin_proto protoreflect.FileDescriptor - -var file_plugin_plugin_proto_rawDesc = []byte{ - 0x0a, 0x13, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x1b, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, - 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2f, 0x0a, 0x10, 0x45, 0x78, - 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x32, 0xb5, 0x01, 0x0a, 0x04, - 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3c, 0x0a, - 0x08, 0x45, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x18, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x69, 0x74, 0x43, - 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x53, - 0x74, 0x6f, 0x70, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x42, 0x31, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, - 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_plugin_plugin_proto_rawDescOnce sync.Once - file_plugin_plugin_proto_rawDescData = file_plugin_plugin_proto_rawDesc -) - -func file_plugin_plugin_proto_rawDescGZIP() []byte { - file_plugin_plugin_proto_rawDescOnce.Do(func() { - file_plugin_plugin_proto_rawDescData = protoimpl.X.CompressGZIP(file_plugin_plugin_proto_rawDescData) - }) - return file_plugin_plugin_proto_rawDescData -} - -var file_plugin_plugin_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_plugin_plugin_proto_goTypes = []interface{}{ - (*ExitCodeResponse)(nil), // 0: plugin.ExitCodeResponse - (*emptypb.Empty)(nil), // 1: google.protobuf.Empty -} -var file_plugin_plugin_proto_depIdxs = []int32{ - 1, // 0: plugin.Node.Start:input_type -> google.protobuf.Empty - 1, // 1: plugin.Node.ExitCode:input_type -> google.protobuf.Empty - 1, // 2: plugin.Node.Stop:input_type -> google.protobuf.Empty - 1, // 3: plugin.Node.Start:output_type -> google.protobuf.Empty - 0, // 4: plugin.Node.ExitCode:output_type -> plugin.ExitCodeResponse - 1, // 5: plugin.Node.Stop:output_type -> google.protobuf.Empty - 3, // [3:6] is the sub-list for method output_type - 0, // [0:3] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_plugin_plugin_proto_init() } -func file_plugin_plugin_proto_init() { - if File_plugin_plugin_proto != nil { - return - } - if 
!protoimpl.UnsafeEnabled { - file_plugin_plugin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExitCodeResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_plugin_plugin_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_plugin_plugin_proto_goTypes, - DependencyIndexes: file_plugin_plugin_proto_depIdxs, - MessageInfos: file_plugin_plugin_proto_msgTypes, - }.Build() - File_plugin_plugin_proto = out.File - file_plugin_plugin_proto_rawDesc = nil - file_plugin_plugin_proto_goTypes = nil - file_plugin_plugin_proto_depIdxs = nil -} diff --git a/avalanchego/proto/pb/plugin/plugin_grpc.pb.go b/avalanchego/proto/pb/plugin/plugin_grpc.pb.go deleted file mode 100644 index 7056dc0d..00000000 --- a/avalanchego/proto/pb/plugin/plugin_grpc.pb.go +++ /dev/null @@ -1,178 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc (unknown) -// source: plugin/plugin.proto - -package plugin - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - emptypb "google.golang.org/protobuf/types/known/emptypb" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// NodeClient is the client API for Node service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type NodeClient interface { - Start(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) - ExitCode(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ExitCodeResponse, error) - Stop(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) -} - -type nodeClient struct { - cc grpc.ClientConnInterface -} - -func NewNodeClient(cc grpc.ClientConnInterface) NodeClient { - return &nodeClient{cc} -} - -func (c *nodeClient) Start(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/plugin.Node/Start", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) ExitCode(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*ExitCodeResponse, error) { - out := new(ExitCodeResponse) - err := c.cc.Invoke(ctx, "/plugin.Node/ExitCode", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *nodeClient) Stop(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) { - out := new(emptypb.Empty) - err := c.cc.Invoke(ctx, "/plugin.Node/Stop", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// NodeServer is the server API for Node service. -// All implementations must embed UnimplementedNodeServer -// for forward compatibility -type NodeServer interface { - Start(context.Context, *emptypb.Empty) (*emptypb.Empty, error) - ExitCode(context.Context, *emptypb.Empty) (*ExitCodeResponse, error) - Stop(context.Context, *emptypb.Empty) (*emptypb.Empty, error) - mustEmbedUnimplementedNodeServer() -} - -// UnimplementedNodeServer must be embedded to have forward compatible implementations. 
-type UnimplementedNodeServer struct { -} - -func (UnimplementedNodeServer) Start(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") -} -func (UnimplementedNodeServer) ExitCode(context.Context, *emptypb.Empty) (*ExitCodeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ExitCode not implemented") -} -func (UnimplementedNodeServer) Stop(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") -} -func (UnimplementedNodeServer) mustEmbedUnimplementedNodeServer() {} - -// UnsafeNodeServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to NodeServer will -// result in compilation errors. -type UnsafeNodeServer interface { - mustEmbedUnimplementedNodeServer() -} - -func RegisterNodeServer(s grpc.ServiceRegistrar, srv NodeServer) { - s.RegisterService(&Node_ServiceDesc, srv) -} - -func _Node_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).Start(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/plugin.Node/Start", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).Start(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_ExitCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).ExitCode(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/plugin.Node/ExitCode", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).ExitCode(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _Node_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(NodeServer).Stop(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/plugin.Node/Stop", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(NodeServer).Stop(ctx, req.(*emptypb.Empty)) - } - return interceptor(ctx, in, info, handler) -} - -// Node_ServiceDesc is the grpc.ServiceDesc for Node service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Node_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.Node", - HandlerType: (*NodeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Start", - Handler: _Node_Start_Handler, - }, - { - MethodName: "ExitCode", - Handler: _Node_ExitCode_Handler, - }, - { - MethodName: "Stop", - Handler: _Node_Stop_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "plugin/plugin.proto", -} diff --git a/avalanchego/proto/pb/rpcdb/rpcdb.pb.go b/avalanchego/proto/pb/rpcdb/rpcdb.pb.go index d25537cd..d7cfa7b6 100644 --- a/avalanchego/proto/pb/rpcdb/rpcdb.pb.go +++ b/avalanchego/proto/pb/rpcdb/rpcdb.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: rpcdb/rpcdb.proto @@ -21,6 +21,56 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type Error int32 + +const ( + // ERROR_UNSPECIFIED is used to indicate that no error occurred. + Error_ERROR_UNSPECIFIED Error = 0 + Error_ERROR_CLOSED Error = 1 + Error_ERROR_NOT_FOUND Error = 2 +) + +// Enum value maps for Error. +var ( + Error_name = map[int32]string{ + 0: "ERROR_UNSPECIFIED", + 1: "ERROR_CLOSED", + 2: "ERROR_NOT_FOUND", + } + Error_value = map[string]int32{ + "ERROR_UNSPECIFIED": 0, + "ERROR_CLOSED": 1, + "ERROR_NOT_FOUND": 2, + } +) + +func (x Error) Enum() *Error { + p := new(Error) + *p = x + return p +} + +func (x Error) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Error) Descriptor() protoreflect.EnumDescriptor { + return file_rpcdb_rpcdb_proto_enumTypes[0].Descriptor() +} + +func (Error) Type() protoreflect.EnumType { + return &file_rpcdb_rpcdb_proto_enumTypes[0] +} + +func (x Error) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Error.Descriptor instead. 
+func (Error) EnumDescriptor() ([]byte, []int) { + return file_rpcdb_rpcdb_proto_rawDescGZIP(), []int{0} +} + type HasRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -73,8 +123,8 @@ type HasResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Has bool `protobuf:"varint,1,opt,name=has,proto3" json:"has,omitempty"` - Err uint32 `protobuf:"varint,2,opt,name=err,proto3" json:"err,omitempty"` + Has bool `protobuf:"varint,1,opt,name=has,proto3" json:"has,omitempty"` + Err Error `protobuf:"varint,2,opt,name=err,proto3,enum=rpcdb.Error" json:"err,omitempty"` } func (x *HasResponse) Reset() { @@ -116,11 +166,11 @@ func (x *HasResponse) GetHas() bool { return false } -func (x *HasResponse) GetErr() uint32 { +func (x *HasResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type GetRequest struct { @@ -176,7 +226,7 @@ type GetResponse struct { unknownFields protoimpl.UnknownFields Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - Err uint32 `protobuf:"varint,2,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,2,opt,name=err,proto3,enum=rpcdb.Error" json:"err,omitempty"` } func (x *GetResponse) Reset() { @@ -218,11 +268,11 @@ func (x *GetResponse) GetValue() []byte { return nil } -func (x *GetResponse) GetErr() uint32 { +func (x *GetResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type PutRequest struct { @@ -285,7 +335,7 @@ type PutResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Err uint32 `protobuf:"varint,1,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,1,opt,name=err,proto3,enum=rpcdb.Error" json:"err,omitempty"` } func (x *PutResponse) Reset() { @@ -320,11 +370,11 @@ func (*PutResponse) Descriptor() ([]byte, []int) { return file_rpcdb_rpcdb_proto_rawDescGZIP(), []int{5} } -func (x 
*PutResponse) GetErr() uint32 { +func (x *PutResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type DeleteRequest struct { @@ -379,7 +429,7 @@ type DeleteResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Err uint32 `protobuf:"varint,1,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,1,opt,name=err,proto3,enum=rpcdb.Error" json:"err,omitempty"` } func (x *DeleteResponse) Reset() { @@ -414,11 +464,11 @@ func (*DeleteResponse) Descriptor() ([]byte, []int) { return file_rpcdb_rpcdb_proto_rawDescGZIP(), []int{7} } -func (x *DeleteResponse) GetErr() uint32 { +func (x *DeleteResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type CompactRequest struct { @@ -481,7 +531,7 @@ type CompactResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Err uint32 `protobuf:"varint,1,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,1,opt,name=err,proto3,enum=rpcdb.Error" json:"err,omitempty"` } func (x *CompactResponse) Reset() { @@ -516,11 +566,11 @@ func (*CompactResponse) Descriptor() ([]byte, []int) { return file_rpcdb_rpcdb_proto_rawDescGZIP(), []int{9} } -func (x *CompactResponse) GetErr() uint32 { +func (x *CompactResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type CloseRequest struct { @@ -566,7 +616,7 @@ type CloseResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Err uint32 `protobuf:"varint,1,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,1,opt,name=err,proto3,enum=rpcdb.Error" json:"err,omitempty"` } func (x *CloseResponse) Reset() { @@ -601,11 +651,11 @@ func (*CloseResponse) Descriptor() ([]byte, []int) { return file_rpcdb_rpcdb_proto_rawDescGZIP(), []int{11} } -func (x *CloseResponse) GetErr() uint32 { +func (x *CloseResponse) GetErr() 
Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type WriteBatchRequest struct { @@ -613,10 +663,8 @@ type WriteBatchRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Puts []*PutRequest `protobuf:"bytes,1,rep,name=puts,proto3" json:"puts,omitempty"` - Deletes []*DeleteRequest `protobuf:"bytes,2,rep,name=deletes,proto3" json:"deletes,omitempty"` - Id int64 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` - Continues bool `protobuf:"varint,4,opt,name=continues,proto3" json:"continues,omitempty"` + Puts []*PutRequest `protobuf:"bytes,1,rep,name=puts,proto3" json:"puts,omitempty"` + Deletes []*DeleteRequest `protobuf:"bytes,2,rep,name=deletes,proto3" json:"deletes,omitempty"` } func (x *WriteBatchRequest) Reset() { @@ -665,26 +713,12 @@ func (x *WriteBatchRequest) GetDeletes() []*DeleteRequest { return nil } -func (x *WriteBatchRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *WriteBatchRequest) GetContinues() bool { - if x != nil { - return x.Continues - } - return false -} - type WriteBatchResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Err uint32 `protobuf:"varint,1,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,1,opt,name=err,proto3,enum=rpcdb.Error" json:"err,omitempty"` } func (x *WriteBatchResponse) Reset() { @@ -719,11 +753,11 @@ func (*WriteBatchResponse) Descriptor() ([]byte, []int) { return file_rpcdb_rpcdb_proto_rawDescGZIP(), []int{13} } -func (x *WriteBatchResponse) GetErr() uint32 { +func (x *WriteBatchResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type NewIteratorRequest struct { @@ -1012,7 +1046,7 @@ type IteratorErrorResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Err uint32 `protobuf:"varint,1,opt,name=err,proto3" json:"err,omitempty"` + Err Error 
`protobuf:"varint,1,opt,name=err,proto3,enum=rpcdb.Error" json:"err,omitempty"` } func (x *IteratorErrorResponse) Reset() { @@ -1047,11 +1081,11 @@ func (*IteratorErrorResponse) Descriptor() ([]byte, []int) { return file_rpcdb_rpcdb_proto_rawDescGZIP(), []int{20} } -func (x *IteratorErrorResponse) GetErr() uint32 { +func (x *IteratorErrorResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type IteratorReleaseRequest struct { @@ -1106,7 +1140,7 @@ type IteratorReleaseResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Err uint32 `protobuf:"varint,1,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,1,opt,name=err,proto3,enum=rpcdb.Error" json:"err,omitempty"` } func (x *IteratorReleaseResponse) Reset() { @@ -1141,11 +1175,11 @@ func (*IteratorReleaseResponse) Descriptor() ([]byte, []int) { return file_rpcdb_rpcdb_proto_rawDescGZIP(), []int{22} } -func (x *IteratorReleaseResponse) GetErr() uint32 { +func (x *IteratorReleaseResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type HealthCheckResponse struct { @@ -1203,131 +1237,141 @@ var file_rpcdb_rpcdb_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x1e, 0x0a, 0x0a, 0x48, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x31, 0x0a, 0x0b, 0x48, 0x61, 0x73, 0x52, 0x65, + 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x3f, 0x0a, 0x0b, 0x48, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x68, 0x61, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x03, 0x68, 0x61, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 
0x1e, 0x0a, 0x0a, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x35, 0x0a, 0x0b, 0x47, 0x65, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, - 0x72, 0x22, 0x34, 0x0a, 0x0a, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1f, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x21, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x22, 0x0a, 0x0e, 0x44, - 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, - 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, - 0x3c, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x23, 0x0a, - 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 
0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, - 0x72, 0x72, 0x22, 0x0e, 0x0a, 0x0c, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0x21, 0x0a, 0x0d, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x98, 0x01, 0x0a, 0x11, 0x57, 0x72, 0x69, 0x74, 0x65, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x04, 0x70, - 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x70, 0x63, 0x64, - 0x62, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x04, 0x70, 0x75, - 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, - 0x22, 0x26, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x14, 0x0a, 0x12, 0x4e, 0x65, 0x77, 0x49, - 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x54, - 0x0a, 0x24, 0x4e, 0x65, 0x77, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x57, 0x69, 0x74, - 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x22, 0x37, 0x0a, 0x25, 0x4e, 0x65, 0x77, 0x49, 0x74, 0x65, 0x72, 0x61, - 0x74, 0x6f, 0x72, 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x6e, 0x64, 0x50, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0x25, 0x0a, - 0x13, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x02, 0x69, 0x64, 0x22, 0x3d, 0x0a, 0x14, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, - 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x70, 0x63, - 0x64, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x22, 0x26, 0x0a, 0x14, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0x29, 0x0a, 0x15, 0x49, - 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x28, 0x0a, 0x16, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x01, 0x28, 0x08, 0x52, 0x03, 0x68, 0x61, 0x73, 0x12, 0x1e, 0x0a, 0x03, 0x65, 
0x72, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x43, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1e, 0x0a, + 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x72, 0x70, 0x63, + 0x64, 0x62, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x34, 0x0a, + 0x0a, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x22, 0x2d, 0x0a, 0x0b, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, + 0x72, 0x72, 0x22, 0x21, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x30, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x3c, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x63, 0x74, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x31, 0x0a, 0x0f, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x0e, 0x0a, 0x0c, 0x43, 0x6c, 0x6f, 0x73, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x2f, 0x0a, 0x0d, 0x43, 0x6c, 0x6f, 0x73, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x03, 0x65, 0x72, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x6a, 0x0a, 0x11, 0x57, 0x72, 0x69, + 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, + 0x0a, 0x04, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, + 0x70, 0x63, 0x64, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x04, 0x70, 0x75, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x07, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65, 0x42, 0x61, + 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x03, 0x65, + 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, + 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 
0x72, 0x72, 0x22, 0x14, 0x0a, 0x12, 0x4e, + 0x65, 0x77, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x22, 0x54, 0x0a, 0x24, 0x4e, 0x65, 0x77, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, + 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x37, 0x0a, 0x25, 0x4e, 0x65, 0x77, 0x49, 0x74, + 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, + 0x6e, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, - 0x22, 0x2b, 0x0a, 0x17, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x6c, 0x65, - 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, - 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x2f, 0x0a, - 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x32, 0xa2, - 0x06, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x03, 0x48, - 0x61, 0x73, 0x12, 0x11, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x48, 0x61, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x48, 0x61, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x03, 0x47, 0x65, 0x74, 
- 0x12, 0x11, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x11, - 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x12, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, - 0x14, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x15, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, - 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, - 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, - 0x13, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x43, 0x6c, 0x6f, - 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x1a, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, - 0x0a, 0x57, 0x72, 0x69, 0x74, 0x65, 
0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x18, 0x2e, 0x72, 0x70, + 0x22, 0x25, 0x0a, 0x13, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4e, 0x65, 0x78, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0x3d, 0x0a, 0x14, 0x49, 0x74, 0x65, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x25, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, + 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x50, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x26, 0x0a, 0x14, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x22, 0x37, + 0x0a, 0x15, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x28, 0x0a, 0x16, 0x49, 0x74, 0x65, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, + 0x64, 0x22, 0x39, 0x0a, 0x17, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1e, 0x0a, 0x03, + 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0c, 0x2e, 0x72, 0x70, 0x63, 0x64, + 0x62, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x2f, 0x0a, 0x13, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 
0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2a, 0x45, 0x0a, + 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, + 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x01, 0x12, + 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, + 0x4e, 0x44, 0x10, 0x02, 0x32, 0xa2, 0x06, 0x0a, 0x08, 0x44, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, + 0x65, 0x12, 0x2c, 0x0a, 0x03, 0x48, 0x61, 0x73, 0x12, 0x11, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, + 0x2e, 0x48, 0x61, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x72, 0x70, + 0x63, 0x64, 0x62, 0x2e, 0x48, 0x61, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2c, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x11, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x47, + 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x72, 0x70, 0x63, 0x64, + 0x62, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, + 0x03, 0x50, 0x75, 0x74, 0x12, 0x11, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x50, 0x75, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, + 0x50, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x14, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x72, 0x70, + 0x63, 0x64, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x12, 0x15, 0x2e, + 0x72, 0x70, 
0x63, 0x64, 0x62, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, + 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x13, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x43, 0x6c, + 0x6f, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x72, 0x70, 0x63, + 0x64, 0x62, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x41, 0x0a, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0a, 0x57, 0x72, 0x69, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x12, 0x18, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x42, + 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x7a, 0x0a, 0x1d, 0x4e, 0x65, 0x77, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x57, - 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x12, 0x2b, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x4e, 0x65, 0x77, 0x49, 0x74, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7a, 0x0a, 0x1d, 0x4e, 0x65, 0x77, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x57, 0x69, 0x74, 0x68, 
0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x6e, - 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, - 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x4e, 0x65, 0x77, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, - 0x6f, 0x72, 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 0x72, 0x74, 0x41, 0x6e, 0x64, 0x50, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0c, - 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4e, 0x65, 0x78, 0x74, 0x12, 0x1a, 0x2e, 0x72, - 0x70, 0x63, 0x64, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4e, 0x65, 0x78, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, - 0x2e, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, - 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1b, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x49, - 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x72, - 0x61, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x50, 0x0a, 0x0f, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x6c, - 0x65, 0x61, 0x73, 0x65, 0x12, 0x1d, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x49, 0x74, 0x65, - 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x72, - 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 
0x61, - 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, - 0x72, 0x70, 0x63, 0x64, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2b, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, + 0x4e, 0x65, 0x77, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x57, 0x69, 0x74, 0x68, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x4e, 0x65, 0x77, + 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x57, 0x69, 0x74, 0x68, 0x53, 0x74, 0x61, 0x72, + 0x74, 0x41, 0x6e, 0x64, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x47, 0x0a, 0x0c, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4e, 0x65, + 0x78, 0x74, 0x12, 0x1a, 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x4e, 0x65, 0x78, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, + 0x2e, 0x72, 0x70, 0x63, 0x64, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x4e, + 0x65, 0x78, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0d, 0x49, + 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1b, 0x2e, 0x72, + 0x70, 0x63, 0x64, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x72, 0x70, 0x63, 0x64, + 0x62, 0x2e, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x0f, 0x49, 0x74, 0x65, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x1d, 0x2e, 0x72, 0x70, 0x63, + 0x64, 0x62, 0x2e, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x6c, 0x65, 0x61, + 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, 0x1e, 0x2e, 0x72, 0x70, 0x63, 0x64, + 0x62, 0x2e, 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, + 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x72, 0x70, 0x63, 0x64, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( @@ -1342,67 +1386,78 @@ func file_rpcdb_rpcdb_proto_rawDescGZIP() []byte { return file_rpcdb_rpcdb_proto_rawDescData } +var file_rpcdb_rpcdb_proto_enumTypes = make([]protoimpl.EnumInfo, 1) var file_rpcdb_rpcdb_proto_msgTypes = make([]protoimpl.MessageInfo, 24) var file_rpcdb_rpcdb_proto_goTypes = []interface{}{ - (*HasRequest)(nil), // 0: rpcdb.HasRequest - (*HasResponse)(nil), // 1: rpcdb.HasResponse - (*GetRequest)(nil), // 2: rpcdb.GetRequest - (*GetResponse)(nil), // 3: rpcdb.GetResponse - (*PutRequest)(nil), // 4: rpcdb.PutRequest - (*PutResponse)(nil), // 5: rpcdb.PutResponse - (*DeleteRequest)(nil), // 6: rpcdb.DeleteRequest - (*DeleteResponse)(nil), // 7: rpcdb.DeleteResponse - (*CompactRequest)(nil), // 8: rpcdb.CompactRequest - (*CompactResponse)(nil), // 9: rpcdb.CompactResponse - (*CloseRequest)(nil), // 10: rpcdb.CloseRequest - (*CloseResponse)(nil), // 11: rpcdb.CloseResponse - (*WriteBatchRequest)(nil), // 12: rpcdb.WriteBatchRequest - (*WriteBatchResponse)(nil), // 13: rpcdb.WriteBatchResponse - (*NewIteratorRequest)(nil), // 14: rpcdb.NewIteratorRequest - (*NewIteratorWithStartAndPrefixRequest)(nil), // 15: rpcdb.NewIteratorWithStartAndPrefixRequest - (*NewIteratorWithStartAndPrefixResponse)(nil), // 16: rpcdb.NewIteratorWithStartAndPrefixResponse - (*IteratorNextRequest)(nil), // 17: rpcdb.IteratorNextRequest - (*IteratorNextResponse)(nil), // 18: rpcdb.IteratorNextResponse - 
(*IteratorErrorRequest)(nil), // 19: rpcdb.IteratorErrorRequest - (*IteratorErrorResponse)(nil), // 20: rpcdb.IteratorErrorResponse - (*IteratorReleaseRequest)(nil), // 21: rpcdb.IteratorReleaseRequest - (*IteratorReleaseResponse)(nil), // 22: rpcdb.IteratorReleaseResponse - (*HealthCheckResponse)(nil), // 23: rpcdb.HealthCheckResponse - (*emptypb.Empty)(nil), // 24: google.protobuf.Empty + (Error)(0), // 0: rpcdb.Error + (*HasRequest)(nil), // 1: rpcdb.HasRequest + (*HasResponse)(nil), // 2: rpcdb.HasResponse + (*GetRequest)(nil), // 3: rpcdb.GetRequest + (*GetResponse)(nil), // 4: rpcdb.GetResponse + (*PutRequest)(nil), // 5: rpcdb.PutRequest + (*PutResponse)(nil), // 6: rpcdb.PutResponse + (*DeleteRequest)(nil), // 7: rpcdb.DeleteRequest + (*DeleteResponse)(nil), // 8: rpcdb.DeleteResponse + (*CompactRequest)(nil), // 9: rpcdb.CompactRequest + (*CompactResponse)(nil), // 10: rpcdb.CompactResponse + (*CloseRequest)(nil), // 11: rpcdb.CloseRequest + (*CloseResponse)(nil), // 12: rpcdb.CloseResponse + (*WriteBatchRequest)(nil), // 13: rpcdb.WriteBatchRequest + (*WriteBatchResponse)(nil), // 14: rpcdb.WriteBatchResponse + (*NewIteratorRequest)(nil), // 15: rpcdb.NewIteratorRequest + (*NewIteratorWithStartAndPrefixRequest)(nil), // 16: rpcdb.NewIteratorWithStartAndPrefixRequest + (*NewIteratorWithStartAndPrefixResponse)(nil), // 17: rpcdb.NewIteratorWithStartAndPrefixResponse + (*IteratorNextRequest)(nil), // 18: rpcdb.IteratorNextRequest + (*IteratorNextResponse)(nil), // 19: rpcdb.IteratorNextResponse + (*IteratorErrorRequest)(nil), // 20: rpcdb.IteratorErrorRequest + (*IteratorErrorResponse)(nil), // 21: rpcdb.IteratorErrorResponse + (*IteratorReleaseRequest)(nil), // 22: rpcdb.IteratorReleaseRequest + (*IteratorReleaseResponse)(nil), // 23: rpcdb.IteratorReleaseResponse + (*HealthCheckResponse)(nil), // 24: rpcdb.HealthCheckResponse + (*emptypb.Empty)(nil), // 25: google.protobuf.Empty } var file_rpcdb_rpcdb_proto_depIdxs = []int32{ - 4, // 0: 
rpcdb.WriteBatchRequest.puts:type_name -> rpcdb.PutRequest - 6, // 1: rpcdb.WriteBatchRequest.deletes:type_name -> rpcdb.DeleteRequest - 4, // 2: rpcdb.IteratorNextResponse.data:type_name -> rpcdb.PutRequest - 0, // 3: rpcdb.Database.Has:input_type -> rpcdb.HasRequest - 2, // 4: rpcdb.Database.Get:input_type -> rpcdb.GetRequest - 4, // 5: rpcdb.Database.Put:input_type -> rpcdb.PutRequest - 6, // 6: rpcdb.Database.Delete:input_type -> rpcdb.DeleteRequest - 8, // 7: rpcdb.Database.Compact:input_type -> rpcdb.CompactRequest - 10, // 8: rpcdb.Database.Close:input_type -> rpcdb.CloseRequest - 24, // 9: rpcdb.Database.HealthCheck:input_type -> google.protobuf.Empty - 12, // 10: rpcdb.Database.WriteBatch:input_type -> rpcdb.WriteBatchRequest - 15, // 11: rpcdb.Database.NewIteratorWithStartAndPrefix:input_type -> rpcdb.NewIteratorWithStartAndPrefixRequest - 17, // 12: rpcdb.Database.IteratorNext:input_type -> rpcdb.IteratorNextRequest - 19, // 13: rpcdb.Database.IteratorError:input_type -> rpcdb.IteratorErrorRequest - 21, // 14: rpcdb.Database.IteratorRelease:input_type -> rpcdb.IteratorReleaseRequest - 1, // 15: rpcdb.Database.Has:output_type -> rpcdb.HasResponse - 3, // 16: rpcdb.Database.Get:output_type -> rpcdb.GetResponse - 5, // 17: rpcdb.Database.Put:output_type -> rpcdb.PutResponse - 7, // 18: rpcdb.Database.Delete:output_type -> rpcdb.DeleteResponse - 9, // 19: rpcdb.Database.Compact:output_type -> rpcdb.CompactResponse - 11, // 20: rpcdb.Database.Close:output_type -> rpcdb.CloseResponse - 23, // 21: rpcdb.Database.HealthCheck:output_type -> rpcdb.HealthCheckResponse - 13, // 22: rpcdb.Database.WriteBatch:output_type -> rpcdb.WriteBatchResponse - 16, // 23: rpcdb.Database.NewIteratorWithStartAndPrefix:output_type -> rpcdb.NewIteratorWithStartAndPrefixResponse - 18, // 24: rpcdb.Database.IteratorNext:output_type -> rpcdb.IteratorNextResponse - 20, // 25: rpcdb.Database.IteratorError:output_type -> rpcdb.IteratorErrorResponse - 22, // 26: 
rpcdb.Database.IteratorRelease:output_type -> rpcdb.IteratorReleaseResponse - 15, // [15:27] is the sub-list for method output_type - 3, // [3:15] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 0, // 0: rpcdb.HasResponse.err:type_name -> rpcdb.Error + 0, // 1: rpcdb.GetResponse.err:type_name -> rpcdb.Error + 0, // 2: rpcdb.PutResponse.err:type_name -> rpcdb.Error + 0, // 3: rpcdb.DeleteResponse.err:type_name -> rpcdb.Error + 0, // 4: rpcdb.CompactResponse.err:type_name -> rpcdb.Error + 0, // 5: rpcdb.CloseResponse.err:type_name -> rpcdb.Error + 5, // 6: rpcdb.WriteBatchRequest.puts:type_name -> rpcdb.PutRequest + 7, // 7: rpcdb.WriteBatchRequest.deletes:type_name -> rpcdb.DeleteRequest + 0, // 8: rpcdb.WriteBatchResponse.err:type_name -> rpcdb.Error + 5, // 9: rpcdb.IteratorNextResponse.data:type_name -> rpcdb.PutRequest + 0, // 10: rpcdb.IteratorErrorResponse.err:type_name -> rpcdb.Error + 0, // 11: rpcdb.IteratorReleaseResponse.err:type_name -> rpcdb.Error + 1, // 12: rpcdb.Database.Has:input_type -> rpcdb.HasRequest + 3, // 13: rpcdb.Database.Get:input_type -> rpcdb.GetRequest + 5, // 14: rpcdb.Database.Put:input_type -> rpcdb.PutRequest + 7, // 15: rpcdb.Database.Delete:input_type -> rpcdb.DeleteRequest + 9, // 16: rpcdb.Database.Compact:input_type -> rpcdb.CompactRequest + 11, // 17: rpcdb.Database.Close:input_type -> rpcdb.CloseRequest + 25, // 18: rpcdb.Database.HealthCheck:input_type -> google.protobuf.Empty + 13, // 19: rpcdb.Database.WriteBatch:input_type -> rpcdb.WriteBatchRequest + 16, // 20: rpcdb.Database.NewIteratorWithStartAndPrefix:input_type -> rpcdb.NewIteratorWithStartAndPrefixRequest + 18, // 21: rpcdb.Database.IteratorNext:input_type -> rpcdb.IteratorNextRequest + 20, // 22: rpcdb.Database.IteratorError:input_type -> rpcdb.IteratorErrorRequest + 22, // 23: 
rpcdb.Database.IteratorRelease:input_type -> rpcdb.IteratorReleaseRequest + 2, // 24: rpcdb.Database.Has:output_type -> rpcdb.HasResponse + 4, // 25: rpcdb.Database.Get:output_type -> rpcdb.GetResponse + 6, // 26: rpcdb.Database.Put:output_type -> rpcdb.PutResponse + 8, // 27: rpcdb.Database.Delete:output_type -> rpcdb.DeleteResponse + 10, // 28: rpcdb.Database.Compact:output_type -> rpcdb.CompactResponse + 12, // 29: rpcdb.Database.Close:output_type -> rpcdb.CloseResponse + 24, // 30: rpcdb.Database.HealthCheck:output_type -> rpcdb.HealthCheckResponse + 14, // 31: rpcdb.Database.WriteBatch:output_type -> rpcdb.WriteBatchResponse + 17, // 32: rpcdb.Database.NewIteratorWithStartAndPrefix:output_type -> rpcdb.NewIteratorWithStartAndPrefixResponse + 19, // 33: rpcdb.Database.IteratorNext:output_type -> rpcdb.IteratorNextResponse + 21, // 34: rpcdb.Database.IteratorError:output_type -> rpcdb.IteratorErrorResponse + 23, // 35: rpcdb.Database.IteratorRelease:output_type -> rpcdb.IteratorReleaseResponse + 24, // [24:36] is the sub-list for method output_type + 12, // [12:24] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name } func init() { file_rpcdb_rpcdb_proto_init() } @@ -1705,13 +1760,14 @@ func file_rpcdb_rpcdb_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_rpcdb_rpcdb_proto_rawDesc, - NumEnums: 0, + NumEnums: 1, NumMessages: 24, NumExtensions: 0, NumServices: 1, }, GoTypes: file_rpcdb_rpcdb_proto_goTypes, DependencyIndexes: file_rpcdb_rpcdb_proto_depIdxs, + EnumInfos: file_rpcdb_rpcdb_proto_enumTypes, MessageInfos: file_rpcdb_rpcdb_proto_msgTypes, }.Build() File_rpcdb_rpcdb_proto = out.File diff --git a/avalanchego/proto/pb/sharedmemory/sharedmemory.pb.go b/avalanchego/proto/pb/sharedmemory/sharedmemory.pb.go index a0be0c43..02d4fbb6 100644 --- 
a/avalanchego/proto/pb/sharedmemory/sharedmemory.pb.go +++ b/avalanchego/proto/pb/sharedmemory/sharedmemory.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: sharedmemory/sharedmemory.proto @@ -129,7 +129,6 @@ type Batch struct { Puts []*BatchPut `protobuf:"bytes,1,rep,name=puts,proto3" json:"puts,omitempty"` Deletes []*BatchDelete `protobuf:"bytes,2,rep,name=deletes,proto3" json:"deletes,omitempty"` - Id int64 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` } func (x *Batch) Reset() { @@ -178,13 +177,6 @@ func (x *Batch) GetDeletes() []*BatchDelete { return nil } -func (x *Batch) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - type AtomicRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -318,8 +310,6 @@ type GetRequest struct { PeerChainId []byte `protobuf:"bytes,1,opt,name=peer_chain_id,json=peerChainId,proto3" json:"peer_chain_id,omitempty"` Keys [][]byte `protobuf:"bytes,2,rep,name=keys,proto3" json:"keys,omitempty"` - Id int64 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` - Continues bool `protobuf:"varint,4,opt,name=continues,proto3" json:"continues,omitempty"` } func (x *GetRequest) Reset() { @@ -368,27 +358,12 @@ func (x *GetRequest) GetKeys() [][]byte { return nil } -func (x *GetRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *GetRequest) GetContinues() bool { - if x != nil { - return x.Continues - } - return false -} - type GetResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Values [][]byte `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` - Continues bool `protobuf:"varint,2,opt,name=continues,proto3" json:"continues,omitempty"` + Values [][]byte `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` } func (x *GetResponse) Reset() { 
@@ -430,13 +405,6 @@ func (x *GetResponse) GetValues() [][]byte { return nil } -func (x *GetResponse) GetContinues() bool { - if x != nil { - return x.Continues - } - return false -} - type IndexedRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -447,8 +415,6 @@ type IndexedRequest struct { StartTrait []byte `protobuf:"bytes,3,opt,name=start_trait,json=startTrait,proto3" json:"start_trait,omitempty"` StartKey []byte `protobuf:"bytes,4,opt,name=start_key,json=startKey,proto3" json:"start_key,omitempty"` Limit int32 `protobuf:"varint,5,opt,name=limit,proto3" json:"limit,omitempty"` - Id int64 `protobuf:"varint,6,opt,name=id,proto3" json:"id,omitempty"` - Continues bool `protobuf:"varint,7,opt,name=continues,proto3" json:"continues,omitempty"` } func (x *IndexedRequest) Reset() { @@ -518,20 +484,6 @@ func (x *IndexedRequest) GetLimit() int32 { return 0 } -func (x *IndexedRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *IndexedRequest) GetContinues() bool { - if x != nil { - return x.Continues - } - return false -} - type IndexedResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -540,7 +492,6 @@ type IndexedResponse struct { Values [][]byte `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` LastTrait []byte `protobuf:"bytes,2,opt,name=last_trait,json=lastTrait,proto3" json:"last_trait,omitempty"` LastKey []byte `protobuf:"bytes,3,opt,name=last_key,json=lastKey,proto3" json:"last_key,omitempty"` - Continues bool `protobuf:"varint,4,opt,name=continues,proto3" json:"continues,omitempty"` } func (x *IndexedResponse) Reset() { @@ -596,22 +547,13 @@ func (x *IndexedResponse) GetLastKey() []byte { return nil } -func (x *IndexedResponse) GetContinues() bool { - if x != nil { - return x.Continues - } - return false -} - type ApplyRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Requests 
[]*AtomicRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - Batches []*Batch `protobuf:"bytes,2,rep,name=batches,proto3" json:"batches,omitempty"` - Id int64 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` - Continues bool `protobuf:"varint,4,opt,name=continues,proto3" json:"continues,omitempty"` + Requests []*AtomicRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` + Batches []*Batch `protobuf:"bytes,2,rep,name=batches,proto3" json:"batches,omitempty"` } func (x *ApplyRequest) Reset() { @@ -660,20 +602,6 @@ func (x *ApplyRequest) GetBatches() []*Batch { return nil } -func (x *ApplyRequest) GetId() int64 { - if x != nil { - return x.Id - } - return 0 -} - -func (x *ApplyRequest) GetContinues() bool { - if x != nil { - return x.Continues - } - return false -} - type ApplyResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -723,14 +651,13 @@ var file_sharedmemory_sharedmemory_proto_rawDesc = []byte{ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1f, 0x0a, 0x0b, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x22, 0x78, 0x0a, 0x05, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2a, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x22, 0x68, 0x0a, 0x05, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x2a, 0x0a, 0x04, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x50, 0x75, 0x74, 0x52, 0x04, 0x70, 0x75, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x44, - 0x65, 0x6c, 0x65, 
0x74, 0x65, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x73, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x22, 0x96, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x73, 0x22, 0x96, 0x01, 0x0a, 0x0d, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0e, 0x72, 0x65, 0x6d, 0x6f, 0x76, @@ -745,68 +672,56 @@ var file_sharedmemory_sharedmemory_proto_rawDesc = []byte{ 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x72, 0x61, 0x69, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x72, 0x61, 0x69, - 0x74, 0x73, 0x22, 0x72, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x74, 0x73, 0x22, 0x44, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0c, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, - 0x69, 0x6e, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, - 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, 0x22, 0x43, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, + 0x28, 0x0c, 0x52, 0x04, 0x6b, 0x65, 0x79, 0x73, 0x22, 0x25, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 
0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, + 0xa0, 0x01, 0x0a, 0x0e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x22, 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x72, 0x61, 0x69, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x74, 0x72, 0x61, 0x69, 0x74, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x72, 0x61, 0x69, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x08, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x22, 0x63, 0x0a, 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x1c, 0x0a, - 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, 0x22, 0xce, 0x01, 0x0a, 0x0e, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x22, - 0x0a, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x70, 0x65, 0x65, 0x72, 0x43, 0x68, 0x61, 0x69, 0x6e, - 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x72, 0x61, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0c, 0x52, 0x06, 0x74, 
0x72, 0x61, 0x69, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x72, 0x61, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, - 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, 0x22, 0x81, 0x01, 0x0a, - 0x0f, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x16, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, - 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x73, 0x74, - 0x5f, 0x74, 0x72, 0x61, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6c, 0x61, - 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x5f, - 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6c, 0x61, 0x73, 0x74, 0x4b, - 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, - 0x22, 0xa4, 0x01, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, - 0x72, 0x79, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, - 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x12, 0x2d, 0x0a, 0x07, 0x62, 0x61, - 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x68, - 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, - 0x52, 0x07, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6e, - 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, - 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x73, 0x22, 0x0f, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x6c, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xd4, 0x01, 0x0a, 0x0c, 0x53, 0x68, 0x61, - 0x72, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x03, 0x47, 0x65, 0x74, - 0x12, 0x18, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, - 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x68, 0x61, - 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x07, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, - 0x12, 0x1c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, - 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, - 0x05, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, - 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, - 0x79, 
0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, - 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, - 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, - 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x68, 0x61, 0x72, - 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x1d, 0x0a, + 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x54, 0x72, 0x61, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, + 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x6c, 0x61, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x22, 0x76, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x6c, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x68, 0x61, 0x72, + 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, + 0x12, 0x2d, 0x0a, 0x07, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x13, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x07, 0x62, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x22, + 0x0f, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x32, 0xd4, 0x01, 0x0a, 0x0c, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x12, 0x3a, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x18, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, + 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 
0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, + 0x07, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x12, 0x1c, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, + 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x05, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x12, 0x1a, + 0x2e, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x41, 0x70, + 0x70, 0x6c, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x73, 0x68, 0x61, + 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x37, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, + 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/avalanchego/proto/pb/subnetlookup/subnetlookup.pb.go b/avalanchego/proto/pb/subnetlookup/subnetlookup.pb.go deleted file mode 100644 index fe99b5a6..00000000 --- a/avalanchego/proto/pb/subnetlookup/subnetlookup.pb.go +++ /dev/null @@ -1,216 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.28.0 -// protoc (unknown) -// source: subnetlookup/subnetlookup.proto - -package subnetlookup - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SubnetIDRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` -} - -func (x *SubnetIDRequest) Reset() { - *x = SubnetIDRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_subnetlookup_subnetlookup_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SubnetIDRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubnetIDRequest) ProtoMessage() {} - -func (x *SubnetIDRequest) ProtoReflect() protoreflect.Message { - mi := &file_subnetlookup_subnetlookup_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubnetIDRequest.ProtoReflect.Descriptor instead. 
-func (*SubnetIDRequest) Descriptor() ([]byte, []int) { - return file_subnetlookup_subnetlookup_proto_rawDescGZIP(), []int{0} -} - -func (x *SubnetIDRequest) GetChainId() []byte { - if x != nil { - return x.ChainId - } - return nil -} - -type SubnetIDResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` -} - -func (x *SubnetIDResponse) Reset() { - *x = SubnetIDResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_subnetlookup_subnetlookup_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SubnetIDResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SubnetIDResponse) ProtoMessage() {} - -func (x *SubnetIDResponse) ProtoReflect() protoreflect.Message { - mi := &file_subnetlookup_subnetlookup_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SubnetIDResponse.ProtoReflect.Descriptor instead. 
-func (*SubnetIDResponse) Descriptor() ([]byte, []int) { - return file_subnetlookup_subnetlookup_proto_rawDescGZIP(), []int{1} -} - -func (x *SubnetIDResponse) GetId() []byte { - if x != nil { - return x.Id - } - return nil -} - -var File_subnetlookup_subnetlookup_proto protoreflect.FileDescriptor - -var file_subnetlookup_subnetlookup_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x73, - 0x75, 0x62, 0x6e, 0x65, 0x74, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0c, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x22, - 0x2c, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x22, 0x22, 0x0a, - 0x10, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, - 0x64, 0x32, 0x59, 0x0a, 0x0c, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, - 0x70, 0x12, 0x49, 0x0a, 0x08, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x44, 0x12, 0x1d, 0x2e, - 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x53, 0x75, 0x62, - 0x6e, 0x65, 0x74, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x73, - 0x75, 0x62, 0x6e, 0x65, 0x74, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x53, 0x75, 0x62, 0x6e, - 0x65, 0x74, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x37, 0x5a, 0x35, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, - 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x75, 
0x62, 0x6e, 0x65, 0x74, 0x6c, - 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_subnetlookup_subnetlookup_proto_rawDescOnce sync.Once - file_subnetlookup_subnetlookup_proto_rawDescData = file_subnetlookup_subnetlookup_proto_rawDesc -) - -func file_subnetlookup_subnetlookup_proto_rawDescGZIP() []byte { - file_subnetlookup_subnetlookup_proto_rawDescOnce.Do(func() { - file_subnetlookup_subnetlookup_proto_rawDescData = protoimpl.X.CompressGZIP(file_subnetlookup_subnetlookup_proto_rawDescData) - }) - return file_subnetlookup_subnetlookup_proto_rawDescData -} - -var file_subnetlookup_subnetlookup_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_subnetlookup_subnetlookup_proto_goTypes = []interface{}{ - (*SubnetIDRequest)(nil), // 0: subnetlookup.SubnetIDRequest - (*SubnetIDResponse)(nil), // 1: subnetlookup.SubnetIDResponse -} -var file_subnetlookup_subnetlookup_proto_depIdxs = []int32{ - 0, // 0: subnetlookup.SubnetLookup.SubnetID:input_type -> subnetlookup.SubnetIDRequest - 1, // 1: subnetlookup.SubnetLookup.SubnetID:output_type -> subnetlookup.SubnetIDResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_subnetlookup_subnetlookup_proto_init() } -func file_subnetlookup_subnetlookup_proto_init() { - if File_subnetlookup_subnetlookup_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_subnetlookup_subnetlookup_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SubnetIDRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_subnetlookup_subnetlookup_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*SubnetIDResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_subnetlookup_subnetlookup_proto_rawDesc, - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_subnetlookup_subnetlookup_proto_goTypes, - DependencyIndexes: file_subnetlookup_subnetlookup_proto_depIdxs, - MessageInfos: file_subnetlookup_subnetlookup_proto_msgTypes, - }.Build() - File_subnetlookup_subnetlookup_proto = out.File - file_subnetlookup_subnetlookup_proto_rawDesc = nil - file_subnetlookup_subnetlookup_proto_goTypes = nil - file_subnetlookup_subnetlookup_proto_depIdxs = nil -} diff --git a/avalanchego/proto/pb/subnetlookup/subnetlookup_grpc.pb.go b/avalanchego/proto/pb/subnetlookup/subnetlookup_grpc.pb.go deleted file mode 100644 index bdf2ac04..00000000 --- a/avalanchego/proto/pb/subnetlookup/subnetlookup_grpc.pb.go +++ /dev/null @@ -1,105 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc (unknown) -// source: subnetlookup/subnetlookup.proto - -package subnetlookup - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// SubnetLookupClient is the client API for SubnetLookup service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type SubnetLookupClient interface { - SubnetID(ctx context.Context, in *SubnetIDRequest, opts ...grpc.CallOption) (*SubnetIDResponse, error) -} - -type subnetLookupClient struct { - cc grpc.ClientConnInterface -} - -func NewSubnetLookupClient(cc grpc.ClientConnInterface) SubnetLookupClient { - return &subnetLookupClient{cc} -} - -func (c *subnetLookupClient) SubnetID(ctx context.Context, in *SubnetIDRequest, opts ...grpc.CallOption) (*SubnetIDResponse, error) { - out := new(SubnetIDResponse) - err := c.cc.Invoke(ctx, "/subnetlookup.SubnetLookup/SubnetID", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SubnetLookupServer is the server API for SubnetLookup service. -// All implementations must embed UnimplementedSubnetLookupServer -// for forward compatibility -type SubnetLookupServer interface { - SubnetID(context.Context, *SubnetIDRequest) (*SubnetIDResponse, error) - mustEmbedUnimplementedSubnetLookupServer() -} - -// UnimplementedSubnetLookupServer must be embedded to have forward compatible implementations. -type UnimplementedSubnetLookupServer struct { -} - -func (UnimplementedSubnetLookupServer) SubnetID(context.Context, *SubnetIDRequest) (*SubnetIDResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SubnetID not implemented") -} -func (UnimplementedSubnetLookupServer) mustEmbedUnimplementedSubnetLookupServer() {} - -// UnsafeSubnetLookupServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to SubnetLookupServer will -// result in compilation errors. 
-type UnsafeSubnetLookupServer interface { - mustEmbedUnimplementedSubnetLookupServer() -} - -func RegisterSubnetLookupServer(s grpc.ServiceRegistrar, srv SubnetLookupServer) { - s.RegisterService(&SubnetLookup_ServiceDesc, srv) -} - -func _SubnetLookup_SubnetID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SubnetIDRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SubnetLookupServer).SubnetID(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/subnetlookup.SubnetLookup/SubnetID", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SubnetLookupServer).SubnetID(ctx, req.(*SubnetIDRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// SubnetLookup_ServiceDesc is the grpc.ServiceDesc for SubnetLookup service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var SubnetLookup_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "subnetlookup.SubnetLookup", - HandlerType: (*SubnetLookupServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SubnetID", - Handler: _SubnetLookup_SubnetID_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "subnetlookup/subnetlookup.proto", -} diff --git a/avalanchego/proto/pb/validatorstate/validator_state.pb.go b/avalanchego/proto/pb/validatorstate/validator_state.pb.go new file mode 100644 index 00000000..7f89e25a --- /dev/null +++ b/avalanchego/proto/pb/validatorstate/validator_state.pb.go @@ -0,0 +1,593 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc (unknown) +// source: validatorstate/validator_state.proto + +package validatorstate + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetMinimumHeightResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (x *GetMinimumHeightResponse) Reset() { + *x = GetMinimumHeightResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_validatorstate_validator_state_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMinimumHeightResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMinimumHeightResponse) ProtoMessage() {} + +func (x *GetMinimumHeightResponse) ProtoReflect() protoreflect.Message { + mi := &file_validatorstate_validator_state_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMinimumHeightResponse.ProtoReflect.Descriptor instead. 
+func (*GetMinimumHeightResponse) Descriptor() ([]byte, []int) { + return file_validatorstate_validator_state_proto_rawDescGZIP(), []int{0} +} + +func (x *GetMinimumHeightResponse) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +type GetCurrentHeightResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (x *GetCurrentHeightResponse) Reset() { + *x = GetCurrentHeightResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_validatorstate_validator_state_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCurrentHeightResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCurrentHeightResponse) ProtoMessage() {} + +func (x *GetCurrentHeightResponse) ProtoReflect() protoreflect.Message { + mi := &file_validatorstate_validator_state_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCurrentHeightResponse.ProtoReflect.Descriptor instead. 
+func (*GetCurrentHeightResponse) Descriptor() ([]byte, []int) { + return file_validatorstate_validator_state_proto_rawDescGZIP(), []int{1} +} + +func (x *GetCurrentHeightResponse) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +type GetSubnetIDRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (x *GetSubnetIDRequest) Reset() { + *x = GetSubnetIDRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_validatorstate_validator_state_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSubnetIDRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubnetIDRequest) ProtoMessage() {} + +func (x *GetSubnetIDRequest) ProtoReflect() protoreflect.Message { + mi := &file_validatorstate_validator_state_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubnetIDRequest.ProtoReflect.Descriptor instead. 
+func (*GetSubnetIDRequest) Descriptor() ([]byte, []int) { + return file_validatorstate_validator_state_proto_rawDescGZIP(), []int{2} +} + +func (x *GetSubnetIDRequest) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} + +type GetSubnetIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SubnetId []byte `protobuf:"bytes,1,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` +} + +func (x *GetSubnetIDResponse) Reset() { + *x = GetSubnetIDResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_validatorstate_validator_state_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSubnetIDResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubnetIDResponse) ProtoMessage() {} + +func (x *GetSubnetIDResponse) ProtoReflect() protoreflect.Message { + mi := &file_validatorstate_validator_state_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubnetIDResponse.ProtoReflect.Descriptor instead. 
+func (*GetSubnetIDResponse) Descriptor() ([]byte, []int) { + return file_validatorstate_validator_state_proto_rawDescGZIP(), []int{3} +} + +func (x *GetSubnetIDResponse) GetSubnetId() []byte { + if x != nil { + return x.SubnetId + } + return nil +} + +type GetValidatorSetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + SubnetId []byte `protobuf:"bytes,2,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` +} + +func (x *GetValidatorSetRequest) Reset() { + *x = GetValidatorSetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_validatorstate_validator_state_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetValidatorSetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetValidatorSetRequest) ProtoMessage() {} + +func (x *GetValidatorSetRequest) ProtoReflect() protoreflect.Message { + mi := &file_validatorstate_validator_state_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetValidatorSetRequest.ProtoReflect.Descriptor instead. 
+func (*GetValidatorSetRequest) Descriptor() ([]byte, []int) { + return file_validatorstate_validator_state_proto_rawDescGZIP(), []int{4} +} + +func (x *GetValidatorSetRequest) GetHeight() uint64 { + if x != nil { + return x.Height + } + return 0 +} + +func (x *GetValidatorSetRequest) GetSubnetId() []byte { + if x != nil { + return x.SubnetId + } + return nil +} + +type Validator struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Weight uint64 `protobuf:"varint,2,opt,name=weight,proto3" json:"weight,omitempty"` + PublicKey []byte `protobuf:"bytes,3,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` +} + +func (x *Validator) Reset() { + *x = Validator{} + if protoimpl.UnsafeEnabled { + mi := &file_validatorstate_validator_state_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Validator) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Validator) ProtoMessage() {} + +func (x *Validator) ProtoReflect() protoreflect.Message { + mi := &file_validatorstate_validator_state_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Validator.ProtoReflect.Descriptor instead. 
+func (*Validator) Descriptor() ([]byte, []int) { + return file_validatorstate_validator_state_proto_rawDescGZIP(), []int{5} +} + +func (x *Validator) GetNodeId() []byte { + if x != nil { + return x.NodeId + } + return nil +} + +func (x *Validator) GetWeight() uint64 { + if x != nil { + return x.Weight + } + return 0 +} + +func (x *Validator) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + +type GetValidatorSetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Validators []*Validator `protobuf:"bytes,1,rep,name=validators,proto3" json:"validators,omitempty"` +} + +func (x *GetValidatorSetResponse) Reset() { + *x = GetValidatorSetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_validatorstate_validator_state_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetValidatorSetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetValidatorSetResponse) ProtoMessage() {} + +func (x *GetValidatorSetResponse) ProtoReflect() protoreflect.Message { + mi := &file_validatorstate_validator_state_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetValidatorSetResponse.ProtoReflect.Descriptor instead. 
+func (*GetValidatorSetResponse) Descriptor() ([]byte, []int) { + return file_validatorstate_validator_state_proto_rawDescGZIP(), []int{6} +} + +func (x *GetValidatorSetResponse) GetValidators() []*Validator { + if x != nil { + return x.Validators + } + return nil +} + +var File_validatorstate_validator_state_proto protoreflect.FileDescriptor + +var file_validatorstate_validator_state_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, + 0x72, 0x73, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x32, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x32, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x2f, 0x0a, 0x12, 0x47, + 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x44, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x22, 0x32, 0x0a, 0x13, + 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 
0x12, 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x64, + 0x22, 0x4d, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x64, 0x22, + 0x5b, 0x0a, 0x09, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x17, 0x0a, 0x07, + 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, + 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x77, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x54, 0x0a, 0x17, + 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x76, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, + 0x72, 0x73, 0x32, 0xf8, 0x02, 0x0a, 0x0e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x54, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 
0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x28, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x54, 0x0a, 0x10, 0x47, + 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x28, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x6f, 0x72, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x75, 0x72, 0x72, + 0x65, 0x6e, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x56, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x44, + 0x12, 0x22, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, 0x44, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x6e, 0x65, 0x74, 0x49, + 0x44, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x0f, 0x47, 0x65, 0x74, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x47, 0x65, + 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 
0x74, + 0x6f, 0x72, 0x53, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x39, 0x5a, + 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, + 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x6f, 0x72, 0x73, 0x74, 0x61, 0x74, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_validatorstate_validator_state_proto_rawDescOnce sync.Once + file_validatorstate_validator_state_proto_rawDescData = file_validatorstate_validator_state_proto_rawDesc +) + +func file_validatorstate_validator_state_proto_rawDescGZIP() []byte { + file_validatorstate_validator_state_proto_rawDescOnce.Do(func() { + file_validatorstate_validator_state_proto_rawDescData = protoimpl.X.CompressGZIP(file_validatorstate_validator_state_proto_rawDescData) + }) + return file_validatorstate_validator_state_proto_rawDescData +} + +var file_validatorstate_validator_state_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_validatorstate_validator_state_proto_goTypes = []interface{}{ + (*GetMinimumHeightResponse)(nil), // 0: validatorstate.GetMinimumHeightResponse + (*GetCurrentHeightResponse)(nil), // 1: validatorstate.GetCurrentHeightResponse + (*GetSubnetIDRequest)(nil), // 2: validatorstate.GetSubnetIDRequest + (*GetSubnetIDResponse)(nil), // 3: validatorstate.GetSubnetIDResponse + (*GetValidatorSetRequest)(nil), // 4: validatorstate.GetValidatorSetRequest + (*Validator)(nil), // 5: validatorstate.Validator + (*GetValidatorSetResponse)(nil), // 6: validatorstate.GetValidatorSetResponse + (*emptypb.Empty)(nil), // 7: google.protobuf.Empty +} +var file_validatorstate_validator_state_proto_depIdxs = []int32{ + 5, // 0: validatorstate.GetValidatorSetResponse.validators:type_name -> validatorstate.Validator + 7, // 1: 
validatorstate.ValidatorState.GetMinimumHeight:input_type -> google.protobuf.Empty + 7, // 2: validatorstate.ValidatorState.GetCurrentHeight:input_type -> google.protobuf.Empty + 2, // 3: validatorstate.ValidatorState.GetSubnetID:input_type -> validatorstate.GetSubnetIDRequest + 4, // 4: validatorstate.ValidatorState.GetValidatorSet:input_type -> validatorstate.GetValidatorSetRequest + 0, // 5: validatorstate.ValidatorState.GetMinimumHeight:output_type -> validatorstate.GetMinimumHeightResponse + 1, // 6: validatorstate.ValidatorState.GetCurrentHeight:output_type -> validatorstate.GetCurrentHeightResponse + 3, // 7: validatorstate.ValidatorState.GetSubnetID:output_type -> validatorstate.GetSubnetIDResponse + 6, // 8: validatorstate.ValidatorState.GetValidatorSet:output_type -> validatorstate.GetValidatorSetResponse + 5, // [5:9] is the sub-list for method output_type + 1, // [1:5] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_validatorstate_validator_state_proto_init() } +func file_validatorstate_validator_state_proto_init() { + if File_validatorstate_validator_state_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_validatorstate_validator_state_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMinimumHeightResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validatorstate_validator_state_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCurrentHeightResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validatorstate_validator_state_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + 
switch v := v.(*GetSubnetIDRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validatorstate_validator_state_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubnetIDResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validatorstate_validator_state_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetValidatorSetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validatorstate_validator_state_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Validator); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_validatorstate_validator_state_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetValidatorSetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_validatorstate_validator_state_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_validatorstate_validator_state_proto_goTypes, + DependencyIndexes: file_validatorstate_validator_state_proto_depIdxs, + MessageInfos: file_validatorstate_validator_state_proto_msgTypes, + }.Build() + File_validatorstate_validator_state_proto = out.File + file_validatorstate_validator_state_proto_rawDesc = nil + file_validatorstate_validator_state_proto_goTypes = nil + file_validatorstate_validator_state_proto_depIdxs 
= nil +} diff --git a/avalanchego/proto/pb/validatorstate/validator_state_grpc.pb.go b/avalanchego/proto/pb/validatorstate/validator_state_grpc.pb.go new file mode 100644 index 00000000..1b328a7f --- /dev/null +++ b/avalanchego/proto/pb/validatorstate/validator_state_grpc.pb.go @@ -0,0 +1,226 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc (unknown) +// source: validatorstate/validator_state.proto + +package validatorstate + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ValidatorStateClient is the client API for ValidatorState service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ValidatorStateClient interface { + // GetMinimumHeight returns the minimum height of the blocks in the optimal + // proposal window. + GetMinimumHeight(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMinimumHeightResponse, error) + // GetCurrentHeight returns the current height of the P-chain. + GetCurrentHeight(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetCurrentHeightResponse, error) + // GetSubnetID returns the subnetID of the provided chain. + GetSubnetID(ctx context.Context, in *GetSubnetIDRequest, opts ...grpc.CallOption) (*GetSubnetIDResponse, error) + // GetValidatorSet returns the weights of the nodeIDs for the provided + // subnet at the requested P-chain height. 
+ GetValidatorSet(ctx context.Context, in *GetValidatorSetRequest, opts ...grpc.CallOption) (*GetValidatorSetResponse, error) +} + +type validatorStateClient struct { + cc grpc.ClientConnInterface +} + +func NewValidatorStateClient(cc grpc.ClientConnInterface) ValidatorStateClient { + return &validatorStateClient{cc} +} + +func (c *validatorStateClient) GetMinimumHeight(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetMinimumHeightResponse, error) { + out := new(GetMinimumHeightResponse) + err := c.cc.Invoke(ctx, "/validatorstate.ValidatorState/GetMinimumHeight", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *validatorStateClient) GetCurrentHeight(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetCurrentHeightResponse, error) { + out := new(GetCurrentHeightResponse) + err := c.cc.Invoke(ctx, "/validatorstate.ValidatorState/GetCurrentHeight", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *validatorStateClient) GetSubnetID(ctx context.Context, in *GetSubnetIDRequest, opts ...grpc.CallOption) (*GetSubnetIDResponse, error) { + out := new(GetSubnetIDResponse) + err := c.cc.Invoke(ctx, "/validatorstate.ValidatorState/GetSubnetID", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *validatorStateClient) GetValidatorSet(ctx context.Context, in *GetValidatorSetRequest, opts ...grpc.CallOption) (*GetValidatorSetResponse, error) { + out := new(GetValidatorSetResponse) + err := c.cc.Invoke(ctx, "/validatorstate.ValidatorState/GetValidatorSet", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ValidatorStateServer is the server API for ValidatorState service. 
+// All implementations must embed UnimplementedValidatorStateServer +// for forward compatibility +type ValidatorStateServer interface { + // GetMinimumHeight returns the minimum height of the blocks in the optimal + // proposal window. + GetMinimumHeight(context.Context, *emptypb.Empty) (*GetMinimumHeightResponse, error) + // GetCurrentHeight returns the current height of the P-chain. + GetCurrentHeight(context.Context, *emptypb.Empty) (*GetCurrentHeightResponse, error) + // GetSubnetID returns the subnetID of the provided chain. + GetSubnetID(context.Context, *GetSubnetIDRequest) (*GetSubnetIDResponse, error) + // GetValidatorSet returns the weights of the nodeIDs for the provided + // subnet at the requested P-chain height. + GetValidatorSet(context.Context, *GetValidatorSetRequest) (*GetValidatorSetResponse, error) + mustEmbedUnimplementedValidatorStateServer() +} + +// UnimplementedValidatorStateServer must be embedded to have forward compatible implementations. +type UnimplementedValidatorStateServer struct { +} + +func (UnimplementedValidatorStateServer) GetMinimumHeight(context.Context, *emptypb.Empty) (*GetMinimumHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetMinimumHeight not implemented") +} +func (UnimplementedValidatorStateServer) GetCurrentHeight(context.Context, *emptypb.Empty) (*GetCurrentHeightResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetCurrentHeight not implemented") +} +func (UnimplementedValidatorStateServer) GetSubnetID(context.Context, *GetSubnetIDRequest) (*GetSubnetIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetSubnetID not implemented") +} +func (UnimplementedValidatorStateServer) GetValidatorSet(context.Context, *GetValidatorSetRequest) (*GetValidatorSetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetValidatorSet not implemented") +} +func (UnimplementedValidatorStateServer) 
mustEmbedUnimplementedValidatorStateServer() {} + +// UnsafeValidatorStateServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ValidatorStateServer will +// result in compilation errors. +type UnsafeValidatorStateServer interface { + mustEmbedUnimplementedValidatorStateServer() +} + +func RegisterValidatorStateServer(s grpc.ServiceRegistrar, srv ValidatorStateServer) { + s.RegisterService(&ValidatorState_ServiceDesc, srv) +} + +func _ValidatorState_GetMinimumHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValidatorStateServer).GetMinimumHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/validatorstate.ValidatorState/GetMinimumHeight", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValidatorStateServer).GetMinimumHeight(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ValidatorState_GetCurrentHeight_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValidatorStateServer).GetCurrentHeight(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/validatorstate.ValidatorState/GetCurrentHeight", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValidatorStateServer).GetCurrentHeight(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ValidatorState_GetSubnetID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, 
interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSubnetIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValidatorStateServer).GetSubnetID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/validatorstate.ValidatorState/GetSubnetID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValidatorStateServer).GetSubnetID(ctx, req.(*GetSubnetIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ValidatorState_GetValidatorSet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetValidatorSetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ValidatorStateServer).GetValidatorSet(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/validatorstate.ValidatorState/GetValidatorSet", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ValidatorStateServer).GetValidatorSet(ctx, req.(*GetValidatorSetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// ValidatorState_ServiceDesc is the grpc.ServiceDesc for ValidatorState service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ValidatorState_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "validatorstate.ValidatorState", + HandlerType: (*ValidatorStateServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetMinimumHeight", + Handler: _ValidatorState_GetMinimumHeight_Handler, + }, + { + MethodName: "GetCurrentHeight", + Handler: _ValidatorState_GetCurrentHeight_Handler, + }, + { + MethodName: "GetSubnetID", + Handler: _ValidatorState_GetSubnetID_Handler, + }, + { + MethodName: "GetValidatorSet", + Handler: _ValidatorState_GetValidatorSet_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "validatorstate/validator_state.proto", +} diff --git a/avalanchego/proto/pb/vm/runtime/runtime.pb.go b/avalanchego/proto/pb/vm/runtime/runtime.pb.go new file mode 100644 index 00000000..c8a90fc7 --- /dev/null +++ b/avalanchego/proto/pb/vm/runtime/runtime.pb.go @@ -0,0 +1,170 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc (unknown) +// source: vm/runtime/runtime.proto + +package manager + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type InitializeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ProtocolVersion is used to identify incompatibilities with AvalancheGo and a VM. 
+ ProtocolVersion uint32 `protobuf:"varint,1,opt,name=protocol_version,json=protocolVersion,proto3" json:"protocol_version,omitempty"` + // Address of the gRPC server endpoint serving the handshake logic. + // Example: 127.0.0.1:50001 + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (x *InitializeRequest) Reset() { + *x = InitializeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vm_runtime_runtime_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InitializeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InitializeRequest) ProtoMessage() {} + +func (x *InitializeRequest) ProtoReflect() protoreflect.Message { + mi := &file_vm_runtime_runtime_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InitializeRequest.ProtoReflect.Descriptor instead. 
+func (*InitializeRequest) Descriptor() ([]byte, []int) { + return file_vm_runtime_runtime_proto_rawDescGZIP(), []int{0} +} + +func (x *InitializeRequest) GetProtocolVersion() uint32 { + if x != nil { + return x.ProtocolVersion + } + return 0 +} + +func (x *InitializeRequest) GetAddr() string { + if x != nil { + return x.Addr + } + return "" +} + +var File_vm_runtime_runtime_proto protoreflect.FileDescriptor + +var file_vm_runtime_runtime_proto_rawDesc = []byte{ + 0x0a, 0x18, 0x76, 0x6d, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x76, 0x6d, 0x2e, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x22, 0x52, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x63, 0x6f, 0x6c, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0d, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x32, 0x4e, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, + 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x49, 0x6e, + 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x35, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 
0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, + 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6d, 0x2f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_vm_runtime_runtime_proto_rawDescOnce sync.Once + file_vm_runtime_runtime_proto_rawDescData = file_vm_runtime_runtime_proto_rawDesc +) + +func file_vm_runtime_runtime_proto_rawDescGZIP() []byte { + file_vm_runtime_runtime_proto_rawDescOnce.Do(func() { + file_vm_runtime_runtime_proto_rawDescData = protoimpl.X.CompressGZIP(file_vm_runtime_runtime_proto_rawDescData) + }) + return file_vm_runtime_runtime_proto_rawDescData +} + +var file_vm_runtime_runtime_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_vm_runtime_runtime_proto_goTypes = []interface{}{ + (*InitializeRequest)(nil), // 0: vm.runtime.InitializeRequest + (*emptypb.Empty)(nil), // 1: google.protobuf.Empty +} +var file_vm_runtime_runtime_proto_depIdxs = []int32{ + 0, // 0: vm.runtime.Runtime.Initialize:input_type -> vm.runtime.InitializeRequest + 1, // 1: vm.runtime.Runtime.Initialize:output_type -> google.protobuf.Empty + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_vm_runtime_runtime_proto_init() } +func file_vm_runtime_runtime_proto_init() { + if File_vm_runtime_runtime_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_vm_runtime_runtime_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InitializeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + 
GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_vm_runtime_runtime_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_vm_runtime_runtime_proto_goTypes, + DependencyIndexes: file_vm_runtime_runtime_proto_depIdxs, + MessageInfos: file_vm_runtime_runtime_proto_msgTypes, + }.Build() + File_vm_runtime_runtime_proto = out.File + file_vm_runtime_runtime_proto_rawDesc = nil + file_vm_runtime_runtime_proto_goTypes = nil + file_vm_runtime_runtime_proto_depIdxs = nil +} diff --git a/avalanchego/proto/pb/vm/runtime/runtime_grpc.pb.go b/avalanchego/proto/pb/vm/runtime/runtime_grpc.pb.go new file mode 100644 index 00000000..be32d678 --- /dev/null +++ b/avalanchego/proto/pb/vm/runtime/runtime_grpc.pb.go @@ -0,0 +1,108 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc (unknown) +// source: vm/runtime/runtime.proto + +package manager + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// RuntimeClient is the client API for Runtime service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type RuntimeClient interface { + // Initialize a VM Runtime. 
+ Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) +} + +type runtimeClient struct { + cc grpc.ClientConnInterface +} + +func NewRuntimeClient(cc grpc.ClientConnInterface) RuntimeClient { + return &runtimeClient{cc} +} + +func (c *runtimeClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/vm.runtime.Runtime/Initialize", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// RuntimeServer is the server API for Runtime service. +// All implementations must embed UnimplementedRuntimeServer +// for forward compatibility +type RuntimeServer interface { + // Initialize a VM Runtime. + Initialize(context.Context, *InitializeRequest) (*emptypb.Empty, error) + mustEmbedUnimplementedRuntimeServer() +} + +// UnimplementedRuntimeServer must be embedded to have forward compatible implementations. +type UnimplementedRuntimeServer struct { +} + +func (UnimplementedRuntimeServer) Initialize(context.Context, *InitializeRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented") +} +func (UnimplementedRuntimeServer) mustEmbedUnimplementedRuntimeServer() {} + +// UnsafeRuntimeServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RuntimeServer will +// result in compilation errors. 
+type UnsafeRuntimeServer interface { + mustEmbedUnimplementedRuntimeServer() +} + +func RegisterRuntimeServer(s grpc.ServiceRegistrar, srv RuntimeServer) { + s.RegisterService(&Runtime_ServiceDesc, srv) +} + +func _Runtime_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitializeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RuntimeServer).Initialize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vm.runtime.Runtime/Initialize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RuntimeServer).Initialize(ctx, req.(*InitializeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Runtime_ServiceDesc is the grpc.ServiceDesc for Runtime service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Runtime_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "vm.runtime.Runtime", + HandlerType: (*RuntimeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Initialize", + Handler: _Runtime_Initialize_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "vm/runtime/runtime.proto", +} diff --git a/avalanchego/proto/pb/vm/vm.pb.go b/avalanchego/proto/pb/vm/vm.pb.go index 0d4f5eb6..bf9ae4d0 100644 --- a/avalanchego/proto/pb/vm/vm.pb.go +++ b/avalanchego/proto/pb/vm/vm.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.28.0 +// protoc-gen-go v1.28.1 // protoc (unknown) // source: vm/vm.proto @@ -23,25 +23,245 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type State int32 + +const ( + State_STATE_UNSPECIFIED State = 0 + State_STATE_STATE_SYNCING State = 1 + State_STATE_BOOTSTRAPPING State = 2 + State_STATE_NORMAL_OP State = 3 +) + +// Enum value maps for State. +var ( + State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "STATE_STATE_SYNCING", + 2: "STATE_BOOTSTRAPPING", + 3: "STATE_NORMAL_OP", + } + State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "STATE_STATE_SYNCING": 1, + "STATE_BOOTSTRAPPING": 2, + "STATE_NORMAL_OP": 3, + } +) + +func (x State) Enum() *State { + p := new(State) + *p = x + return p +} + +func (x State) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (State) Descriptor() protoreflect.EnumDescriptor { + return file_vm_vm_proto_enumTypes[0].Descriptor() +} + +func (State) Type() protoreflect.EnumType { + return &file_vm_vm_proto_enumTypes[0] +} + +func (x State) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use State.Descriptor instead. +func (State) EnumDescriptor() ([]byte, []int) { + return file_vm_vm_proto_rawDescGZIP(), []int{0} +} + +type Status int32 + +const ( + Status_STATUS_UNSPECIFIED Status = 0 + Status_STATUS_PROCESSING Status = 1 + Status_STATUS_REJECTED Status = 2 + Status_STATUS_ACCEPTED Status = 3 +) + +// Enum value maps for Status. 
+var ( + Status_name = map[int32]string{ + 0: "STATUS_UNSPECIFIED", + 1: "STATUS_PROCESSING", + 2: "STATUS_REJECTED", + 3: "STATUS_ACCEPTED", + } + Status_value = map[string]int32{ + "STATUS_UNSPECIFIED": 0, + "STATUS_PROCESSING": 1, + "STATUS_REJECTED": 2, + "STATUS_ACCEPTED": 3, + } +) + +func (x Status) Enum() *Status { + p := new(Status) + *p = x + return p +} + +func (x Status) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status) Descriptor() protoreflect.EnumDescriptor { + return file_vm_vm_proto_enumTypes[1].Descriptor() +} + +func (Status) Type() protoreflect.EnumType { + return &file_vm_vm_proto_enumTypes[1] +} + +func (x Status) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status.Descriptor instead. +func (Status) EnumDescriptor() ([]byte, []int) { + return file_vm_vm_proto_rawDescGZIP(), []int{1} +} + +type Error int32 + +const ( + // ERROR_UNSPECIFIED is used to indicate that no error occurred. + Error_ERROR_UNSPECIFIED Error = 0 + Error_ERROR_CLOSED Error = 1 + Error_ERROR_NOT_FOUND Error = 2 + Error_ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED Error = 3 + Error_ERROR_HEIGHT_INDEX_INCOMPLETE Error = 4 + Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED Error = 5 +) + +// Enum value maps for Error. 
+var ( + Error_name = map[int32]string{ + 0: "ERROR_UNSPECIFIED", + 1: "ERROR_CLOSED", + 2: "ERROR_NOT_FOUND", + 3: "ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED", + 4: "ERROR_HEIGHT_INDEX_INCOMPLETE", + 5: "ERROR_STATE_SYNC_NOT_IMPLEMENTED", + } + Error_value = map[string]int32{ + "ERROR_UNSPECIFIED": 0, + "ERROR_CLOSED": 1, + "ERROR_NOT_FOUND": 2, + "ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED": 3, + "ERROR_HEIGHT_INDEX_INCOMPLETE": 4, + "ERROR_STATE_SYNC_NOT_IMPLEMENTED": 5, + } +) + +func (x Error) Enum() *Error { + p := new(Error) + *p = x + return p +} + +func (x Error) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Error) Descriptor() protoreflect.EnumDescriptor { + return file_vm_vm_proto_enumTypes[2].Descriptor() +} + +func (Error) Type() protoreflect.EnumType { + return &file_vm_vm_proto_enumTypes[2] +} + +func (x Error) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Error.Descriptor instead. +func (Error) EnumDescriptor() ([]byte, []int) { + return file_vm_vm_proto_rawDescGZIP(), []int{2} +} + +type StateSummaryAcceptResponse_Mode int32 + +const ( + StateSummaryAcceptResponse_MODE_UNSPECIFIED StateSummaryAcceptResponse_Mode = 0 + StateSummaryAcceptResponse_MODE_SKIPPED StateSummaryAcceptResponse_Mode = 1 + StateSummaryAcceptResponse_MODE_STATIC StateSummaryAcceptResponse_Mode = 2 + StateSummaryAcceptResponse_MODE_DYNAMIC StateSummaryAcceptResponse_Mode = 3 +) + +// Enum value maps for StateSummaryAcceptResponse_Mode. 
+var ( + StateSummaryAcceptResponse_Mode_name = map[int32]string{ + 0: "MODE_UNSPECIFIED", + 1: "MODE_SKIPPED", + 2: "MODE_STATIC", + 3: "MODE_DYNAMIC", + } + StateSummaryAcceptResponse_Mode_value = map[string]int32{ + "MODE_UNSPECIFIED": 0, + "MODE_SKIPPED": 1, + "MODE_STATIC": 2, + "MODE_DYNAMIC": 3, + } +) + +func (x StateSummaryAcceptResponse_Mode) Enum() *StateSummaryAcceptResponse_Mode { + p := new(StateSummaryAcceptResponse_Mode) + *p = x + return p +} + +func (x StateSummaryAcceptResponse_Mode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (StateSummaryAcceptResponse_Mode) Descriptor() protoreflect.EnumDescriptor { + return file_vm_vm_proto_enumTypes[3].Descriptor() +} + +func (StateSummaryAcceptResponse_Mode) Type() protoreflect.EnumType { + return &file_vm_vm_proto_enumTypes[3] +} + +func (x StateSummaryAcceptResponse_Mode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use StateSummaryAcceptResponse_Mode.Descriptor instead. 
+func (StateSummaryAcceptResponse_Mode) EnumDescriptor() ([]byte, []int) { + return file_vm_vm_proto_rawDescGZIP(), []int{46, 0} +} + type InitializeRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - NetworkId uint32 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` - SubnetId []byte `protobuf:"bytes,2,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` - ChainId []byte `protobuf:"bytes,3,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` - NodeId []byte `protobuf:"bytes,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` - XChainId []byte `protobuf:"bytes,5,opt,name=x_chain_id,json=xChainId,proto3" json:"x_chain_id,omitempty"` - AvaxAssetId []byte `protobuf:"bytes,6,opt,name=avax_asset_id,json=avaxAssetId,proto3" json:"avax_asset_id,omitempty"` - GenesisBytes []byte `protobuf:"bytes,7,opt,name=genesis_bytes,json=genesisBytes,proto3" json:"genesis_bytes,omitempty"` - UpgradeBytes []byte `protobuf:"bytes,8,opt,name=upgrade_bytes,json=upgradeBytes,proto3" json:"upgrade_bytes,omitempty"` - ConfigBytes []byte `protobuf:"bytes,9,opt,name=config_bytes,json=configBytes,proto3" json:"config_bytes,omitempty"` - DbServers []*VersionedDBServer `protobuf:"bytes,10,rep,name=db_servers,json=dbServers,proto3" json:"db_servers,omitempty"` + NetworkId uint32 `protobuf:"varint,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + SubnetId []byte `protobuf:"bytes,2,opt,name=subnet_id,json=subnetId,proto3" json:"subnet_id,omitempty"` + ChainId []byte `protobuf:"bytes,3,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + NodeId []byte `protobuf:"bytes,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // public_key is the BLS public key that would correspond with any signatures + // produced by the warp messaging signer + PublicKey []byte 
`protobuf:"bytes,5,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"` + XChainId []byte `protobuf:"bytes,6,opt,name=x_chain_id,json=xChainId,proto3" json:"x_chain_id,omitempty"` + CChainId []byte `protobuf:"bytes,7,opt,name=c_chain_id,json=cChainId,proto3" json:"c_chain_id,omitempty"` + AvaxAssetId []byte `protobuf:"bytes,8,opt,name=avax_asset_id,json=avaxAssetId,proto3" json:"avax_asset_id,omitempty"` + ChainDataDir string `protobuf:"bytes,9,opt,name=chain_data_dir,json=chainDataDir,proto3" json:"chain_data_dir,omitempty"` + GenesisBytes []byte `protobuf:"bytes,10,opt,name=genesis_bytes,json=genesisBytes,proto3" json:"genesis_bytes,omitempty"` + UpgradeBytes []byte `protobuf:"bytes,11,opt,name=upgrade_bytes,json=upgradeBytes,proto3" json:"upgrade_bytes,omitempty"` + ConfigBytes []byte `protobuf:"bytes,12,opt,name=config_bytes,json=configBytes,proto3" json:"config_bytes,omitempty"` + DbServers []*VersionedDBServer `protobuf:"bytes,13,rep,name=db_servers,json=dbServers,proto3" json:"db_servers,omitempty"` // server_addr is the address of the gRPC server which serves // the messenger, keystore, shared memory, blockchain alias, // subnet alias, and appSender services - ServerAddr string `protobuf:"bytes,11,opt,name=server_addr,json=serverAddr,proto3" json:"server_addr,omitempty"` + ServerAddr string `protobuf:"bytes,14,opt,name=server_addr,json=serverAddr,proto3" json:"server_addr,omitempty"` } func (x *InitializeRequest) Reset() { @@ -104,6 +324,13 @@ func (x *InitializeRequest) GetNodeId() []byte { return nil } +func (x *InitializeRequest) GetPublicKey() []byte { + if x != nil { + return x.PublicKey + } + return nil +} + func (x *InitializeRequest) GetXChainId() []byte { if x != nil { return x.XChainId @@ -111,6 +338,13 @@ func (x *InitializeRequest) GetXChainId() []byte { return nil } +func (x *InitializeRequest) GetCChainId() []byte { + if x != nil { + return x.CChainId + } + return nil +} + func (x *InitializeRequest) GetAvaxAssetId() []byte 
{ if x != nil { return x.AvaxAssetId @@ -118,6 +352,13 @@ func (x *InitializeRequest) GetAvaxAssetId() []byte { return nil } +func (x *InitializeRequest) GetChainDataDir() string { + if x != nil { + return x.ChainDataDir + } + return "" +} + func (x *InitializeRequest) GetGenesisBytes() []byte { if x != nil { return x.GenesisBytes @@ -294,7 +535,7 @@ type SetStateRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - State uint32 `protobuf:"varint,1,opt,name=state,proto3" json:"state,omitempty"` + State State `protobuf:"varint,1,opt,name=state,proto3,enum=vm.State" json:"state,omitempty"` } func (x *SetStateRequest) Reset() { @@ -329,11 +570,11 @@ func (*SetStateRequest) Descriptor() ([]byte, []int) { return file_vm_vm_proto_rawDescGZIP(), []int{3} } -func (x *SetStateRequest) GetState() uint32 { +func (x *SetStateRequest) GetState() State { if x != nil { return x.State } - return 0 + return State_STATE_UNSPECIFIED } type SetStateResponse struct { @@ -574,22 +815,71 @@ func (x *Handler) GetServerAddr() string { return "" } +type BuildBlockRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + PChainHeight *uint64 `protobuf:"varint,1,opt,name=p_chain_height,json=pChainHeight,proto3,oneof" json:"p_chain_height,omitempty"` +} + +func (x *BuildBlockRequest) Reset() { + *x = BuildBlockRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_vm_vm_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BuildBlockRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BuildBlockRequest) ProtoMessage() {} + +func (x *BuildBlockRequest) ProtoReflect() protoreflect.Message { + mi := &file_vm_vm_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use BuildBlockRequest.ProtoReflect.Descriptor instead. +func (*BuildBlockRequest) Descriptor() ([]byte, []int) { + return file_vm_vm_proto_rawDescGZIP(), []int{8} +} + +func (x *BuildBlockRequest) GetPChainHeight() uint64 { + if x != nil && x.PChainHeight != nil { + return *x.PChainHeight + } + return 0 +} + +// Note: The status of a freshly built block is assumed to be Processing. type BuildBlockResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ParentId []byte `protobuf:"bytes,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` - Bytes []byte `protobuf:"bytes,3,opt,name=bytes,proto3" json:"bytes,omitempty"` - Height uint64 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` - Timestamp *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // status is always processing + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ParentId []byte `protobuf:"bytes,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` + Bytes []byte `protobuf:"bytes,3,opt,name=bytes,proto3" json:"bytes,omitempty"` + Height uint64 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + VerifyWithContext bool `protobuf:"varint,6,opt,name=verify_with_context,json=verifyWithContext,proto3" json:"verify_with_context,omitempty"` } func (x *BuildBlockResponse) Reset() { *x = BuildBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[8] + mi := &file_vm_vm_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -602,7 +892,7 @@ func (x *BuildBlockResponse) String() string { func (*BuildBlockResponse) 
ProtoMessage() {} func (x *BuildBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[8] + mi := &file_vm_vm_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -615,7 +905,7 @@ func (x *BuildBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BuildBlockResponse.ProtoReflect.Descriptor instead. func (*BuildBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{8} + return file_vm_vm_proto_rawDescGZIP(), []int{9} } func (x *BuildBlockResponse) GetId() []byte { @@ -653,6 +943,13 @@ func (x *BuildBlockResponse) GetTimestamp() *timestamppb.Timestamp { return nil } +func (x *BuildBlockResponse) GetVerifyWithContext() bool { + if x != nil { + return x.VerifyWithContext + } + return false +} + type ParseBlockRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -664,7 +961,7 @@ type ParseBlockRequest struct { func (x *ParseBlockRequest) Reset() { *x = ParseBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[9] + mi := &file_vm_vm_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -677,7 +974,7 @@ func (x *ParseBlockRequest) String() string { func (*ParseBlockRequest) ProtoMessage() {} func (x *ParseBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[9] + mi := &file_vm_vm_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -690,7 +987,7 @@ func (x *ParseBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseBlockRequest.ProtoReflect.Descriptor instead. 
func (*ParseBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{9} + return file_vm_vm_proto_rawDescGZIP(), []int{10} } func (x *ParseBlockRequest) GetBytes() []byte { @@ -705,17 +1002,18 @@ type ParseBlockResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - ParentId []byte `protobuf:"bytes,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` - Status uint32 `protobuf:"varint,3,opt,name=status,proto3" json:"status,omitempty"` - Height uint64 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` - Timestamp *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ParentId []byte `protobuf:"bytes,2,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` + Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=vm.Status" json:"status,omitempty"` + Height uint64 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` + Timestamp *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + VerifyWithContext bool `protobuf:"varint,6,opt,name=verify_with_context,json=verifyWithContext,proto3" json:"verify_with_context,omitempty"` } func (x *ParseBlockResponse) Reset() { *x = ParseBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[10] + mi := &file_vm_vm_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -728,7 +1026,7 @@ func (x *ParseBlockResponse) String() string { func (*ParseBlockResponse) ProtoMessage() {} func (x *ParseBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[10] + mi := &file_vm_vm_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -741,7 +1039,7 @@ func (x *ParseBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseBlockResponse.ProtoReflect.Descriptor instead. func (*ParseBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{10} + return file_vm_vm_proto_rawDescGZIP(), []int{11} } func (x *ParseBlockResponse) GetId() []byte { @@ -758,11 +1056,11 @@ func (x *ParseBlockResponse) GetParentId() []byte { return nil } -func (x *ParseBlockResponse) GetStatus() uint32 { +func (x *ParseBlockResponse) GetStatus() Status { if x != nil { return x.Status } - return 0 + return Status_STATUS_UNSPECIFIED } func (x *ParseBlockResponse) GetHeight() uint64 { @@ -779,6 +1077,13 @@ func (x *ParseBlockResponse) GetTimestamp() *timestamppb.Timestamp { return nil } +func (x *ParseBlockResponse) GetVerifyWithContext() bool { + if x != nil { + return x.VerifyWithContext + } + return false +} + type GetBlockRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -790,7 +1095,7 @@ type GetBlockRequest struct { func (x *GetBlockRequest) Reset() { *x = GetBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[11] + mi := &file_vm_vm_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -803,7 +1108,7 @@ func (x *GetBlockRequest) String() string { func (*GetBlockRequest) ProtoMessage() {} func (x *GetBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[11] + mi := &file_vm_vm_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -816,7 +1121,7 @@ func (x *GetBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockRequest.ProtoReflect.Descriptor instead. 
func (*GetBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{11} + return file_vm_vm_proto_rawDescGZIP(), []int{12} } func (x *GetBlockRequest) GetId() []byte { @@ -833,17 +1138,18 @@ type GetBlockResponse struct { ParentId []byte `protobuf:"bytes,1,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"` Bytes []byte `protobuf:"bytes,2,opt,name=bytes,proto3" json:"bytes,omitempty"` - Status uint32 `protobuf:"varint,3,opt,name=status,proto3" json:"status,omitempty"` + Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=vm.Status" json:"status,omitempty"` Height uint64 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"` Timestamp *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // used to propagate database.ErrNotFound through RPC - Err uint32 `protobuf:"varint,6,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,6,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` + VerifyWithContext bool `protobuf:"varint,7,opt,name=verify_with_context,json=verifyWithContext,proto3" json:"verify_with_context,omitempty"` } func (x *GetBlockResponse) Reset() { *x = GetBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[12] + mi := &file_vm_vm_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -856,7 +1162,7 @@ func (x *GetBlockResponse) String() string { func (*GetBlockResponse) ProtoMessage() {} func (x *GetBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[12] + mi := &file_vm_vm_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -869,7 +1175,7 @@ func (x *GetBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockResponse.ProtoReflect.Descriptor instead. 
func (*GetBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{12} + return file_vm_vm_proto_rawDescGZIP(), []int{13} } func (x *GetBlockResponse) GetParentId() []byte { @@ -886,11 +1192,11 @@ func (x *GetBlockResponse) GetBytes() []byte { return nil } -func (x *GetBlockResponse) GetStatus() uint32 { +func (x *GetBlockResponse) GetStatus() Status { if x != nil { return x.Status } - return 0 + return Status_STATUS_UNSPECIFIED } func (x *GetBlockResponse) GetHeight() uint64 { @@ -907,11 +1213,18 @@ func (x *GetBlockResponse) GetTimestamp() *timestamppb.Timestamp { return nil } -func (x *GetBlockResponse) GetErr() uint32 { +func (x *GetBlockResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED +} + +func (x *GetBlockResponse) GetVerifyWithContext() bool { + if x != nil { + return x.VerifyWithContext + } + return false } type SetPreferenceRequest struct { @@ -925,7 +1238,7 @@ type SetPreferenceRequest struct { func (x *SetPreferenceRequest) Reset() { *x = SetPreferenceRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[13] + mi := &file_vm_vm_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -938,7 +1251,7 @@ func (x *SetPreferenceRequest) String() string { func (*SetPreferenceRequest) ProtoMessage() {} func (x *SetPreferenceRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[13] + mi := &file_vm_vm_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -951,7 +1264,7 @@ func (x *SetPreferenceRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SetPreferenceRequest.ProtoReflect.Descriptor instead. 
func (*SetPreferenceRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{13} + return file_vm_vm_proto_rawDescGZIP(), []int{14} } func (x *SetPreferenceRequest) GetId() []byte { @@ -967,12 +1280,15 @@ type BlockVerifyRequest struct { unknownFields protoimpl.UnknownFields Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"` + // If set, the VM server casts the block to a [block.WithVerifyContext] and + // calls [VerifyWithContext] instead of [Verify]. + PChainHeight *uint64 `protobuf:"varint,2,opt,name=p_chain_height,json=pChainHeight,proto3,oneof" json:"p_chain_height,omitempty"` } func (x *BlockVerifyRequest) Reset() { *x = BlockVerifyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[14] + mi := &file_vm_vm_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -985,7 +1301,7 @@ func (x *BlockVerifyRequest) String() string { func (*BlockVerifyRequest) ProtoMessage() {} func (x *BlockVerifyRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[14] + mi := &file_vm_vm_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -998,7 +1314,7 @@ func (x *BlockVerifyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockVerifyRequest.ProtoReflect.Descriptor instead. 
func (*BlockVerifyRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{14} + return file_vm_vm_proto_rawDescGZIP(), []int{15} } func (x *BlockVerifyRequest) GetBytes() []byte { @@ -1008,6 +1324,13 @@ func (x *BlockVerifyRequest) GetBytes() []byte { return nil } +func (x *BlockVerifyRequest) GetPChainHeight() uint64 { + if x != nil && x.PChainHeight != nil { + return *x.PChainHeight + } + return 0 +} + type BlockVerifyResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1019,7 +1342,7 @@ type BlockVerifyResponse struct { func (x *BlockVerifyResponse) Reset() { *x = BlockVerifyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[15] + mi := &file_vm_vm_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1032,7 +1355,7 @@ func (x *BlockVerifyResponse) String() string { func (*BlockVerifyResponse) ProtoMessage() {} func (x *BlockVerifyResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[15] + mi := &file_vm_vm_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1045,7 +1368,7 @@ func (x *BlockVerifyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockVerifyResponse.ProtoReflect.Descriptor instead. 
func (*BlockVerifyResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{15} + return file_vm_vm_proto_rawDescGZIP(), []int{16} } func (x *BlockVerifyResponse) GetTimestamp() *timestamppb.Timestamp { @@ -1066,7 +1389,7 @@ type BlockAcceptRequest struct { func (x *BlockAcceptRequest) Reset() { *x = BlockAcceptRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[16] + mi := &file_vm_vm_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1079,7 +1402,7 @@ func (x *BlockAcceptRequest) String() string { func (*BlockAcceptRequest) ProtoMessage() {} func (x *BlockAcceptRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[16] + mi := &file_vm_vm_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1092,7 +1415,7 @@ func (x *BlockAcceptRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockAcceptRequest.ProtoReflect.Descriptor instead. 
func (*BlockAcceptRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{16} + return file_vm_vm_proto_rawDescGZIP(), []int{17} } func (x *BlockAcceptRequest) GetId() []byte { @@ -1113,7 +1436,7 @@ type BlockRejectRequest struct { func (x *BlockRejectRequest) Reset() { *x = BlockRejectRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[17] + mi := &file_vm_vm_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1126,7 +1449,7 @@ func (x *BlockRejectRequest) String() string { func (*BlockRejectRequest) ProtoMessage() {} func (x *BlockRejectRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[17] + mi := &file_vm_vm_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1139,7 +1462,7 @@ func (x *BlockRejectRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BlockRejectRequest.ProtoReflect.Descriptor instead. 
func (*BlockRejectRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{17} + return file_vm_vm_proto_rawDescGZIP(), []int{18} } func (x *BlockRejectRequest) GetId() []byte { @@ -1160,7 +1483,7 @@ type HealthResponse struct { func (x *HealthResponse) Reset() { *x = HealthResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[18] + mi := &file_vm_vm_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1173,7 +1496,7 @@ func (x *HealthResponse) String() string { func (*HealthResponse) ProtoMessage() {} func (x *HealthResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[18] + mi := &file_vm_vm_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1186,7 +1509,7 @@ func (x *HealthResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead. 
func (*HealthResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{18} + return file_vm_vm_proto_rawDescGZIP(), []int{19} } func (x *HealthResponse) GetDetails() []byte { @@ -1207,7 +1530,7 @@ type VersionResponse struct { func (x *VersionResponse) Reset() { *x = VersionResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[19] + mi := &file_vm_vm_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1220,7 +1543,7 @@ func (x *VersionResponse) String() string { func (*VersionResponse) ProtoMessage() {} func (x *VersionResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[19] + mi := &file_vm_vm_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1233,7 +1556,7 @@ func (x *VersionResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VersionResponse.ProtoReflect.Descriptor instead. 
func (*VersionResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{19} + return file_vm_vm_proto_rawDescGZIP(), []int{20} } func (x *VersionResponse) GetVersion() string { @@ -1261,7 +1584,7 @@ type AppRequestMsg struct { func (x *AppRequestMsg) Reset() { *x = AppRequestMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[20] + mi := &file_vm_vm_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1274,7 +1597,7 @@ func (x *AppRequestMsg) String() string { func (*AppRequestMsg) ProtoMessage() {} func (x *AppRequestMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[20] + mi := &file_vm_vm_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1287,7 +1610,7 @@ func (x *AppRequestMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequestMsg.ProtoReflect.Descriptor instead. 
func (*AppRequestMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{20} + return file_vm_vm_proto_rawDescGZIP(), []int{21} } func (x *AppRequestMsg) GetNodeId() []byte { @@ -1332,7 +1655,7 @@ type AppRequestFailedMsg struct { func (x *AppRequestFailedMsg) Reset() { *x = AppRequestFailedMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[21] + mi := &file_vm_vm_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1345,7 +1668,7 @@ func (x *AppRequestFailedMsg) String() string { func (*AppRequestFailedMsg) ProtoMessage() {} func (x *AppRequestFailedMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[21] + mi := &file_vm_vm_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1358,7 +1681,7 @@ func (x *AppRequestFailedMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppRequestFailedMsg.ProtoReflect.Descriptor instead. 
func (*AppRequestFailedMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{21} + return file_vm_vm_proto_rawDescGZIP(), []int{22} } func (x *AppRequestFailedMsg) GetNodeId() []byte { @@ -1391,7 +1714,7 @@ type AppResponseMsg struct { func (x *AppResponseMsg) Reset() { *x = AppResponseMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[22] + mi := &file_vm_vm_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1404,7 +1727,7 @@ func (x *AppResponseMsg) String() string { func (*AppResponseMsg) ProtoMessage() {} func (x *AppResponseMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[22] + mi := &file_vm_vm_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1417,7 +1740,7 @@ func (x *AppResponseMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppResponseMsg.ProtoReflect.Descriptor instead. 
func (*AppResponseMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{22} + return file_vm_vm_proto_rawDescGZIP(), []int{23} } func (x *AppResponseMsg) GetNodeId() []byte { @@ -1455,7 +1778,7 @@ type AppGossipMsg struct { func (x *AppGossipMsg) Reset() { *x = AppGossipMsg{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[23] + mi := &file_vm_vm_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1468,7 +1791,7 @@ func (x *AppGossipMsg) String() string { func (*AppGossipMsg) ProtoMessage() {} func (x *AppGossipMsg) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[23] + mi := &file_vm_vm_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1481,19 +1804,217 @@ func (x *AppGossipMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use AppGossipMsg.ProtoReflect.Descriptor instead. 
func (*AppGossipMsg) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{23} + return file_vm_vm_proto_rawDescGZIP(), []int{24} } func (x *AppGossipMsg) GetNodeId() []byte { if x != nil { return x.NodeId } - return nil + return nil +} + +func (x *AppGossipMsg) GetMsg() []byte { + if x != nil { + return x.Msg + } + return nil +} + +type CrossChainAppRequestMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The chain that sent us this request + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // The ID of this request + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // deadline for this request + Deadline *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=deadline,proto3" json:"deadline,omitempty"` + // The request body + Request []byte `protobuf:"bytes,4,opt,name=request,proto3" json:"request,omitempty"` +} + +func (x *CrossChainAppRequestMsg) Reset() { + *x = CrossChainAppRequestMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_vm_vm_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CrossChainAppRequestMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CrossChainAppRequestMsg) ProtoMessage() {} + +func (x *CrossChainAppRequestMsg) ProtoReflect() protoreflect.Message { + mi := &file_vm_vm_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CrossChainAppRequestMsg.ProtoReflect.Descriptor instead. 
+func (*CrossChainAppRequestMsg) Descriptor() ([]byte, []int) { + return file_vm_vm_proto_rawDescGZIP(), []int{25} +} + +func (x *CrossChainAppRequestMsg) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} + +func (x *CrossChainAppRequestMsg) GetRequestId() uint32 { + if x != nil { + return x.RequestId + } + return 0 +} + +func (x *CrossChainAppRequestMsg) GetDeadline() *timestamppb.Timestamp { + if x != nil { + return x.Deadline + } + return nil +} + +func (x *CrossChainAppRequestMsg) GetRequest() []byte { + if x != nil { + return x.Request + } + return nil +} + +type CrossChainAppRequestFailedMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The chain that we failed to get a response from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // The ID of the request we sent and didn't get a response to + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` +} + +func (x *CrossChainAppRequestFailedMsg) Reset() { + *x = CrossChainAppRequestFailedMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_vm_vm_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CrossChainAppRequestFailedMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CrossChainAppRequestFailedMsg) ProtoMessage() {} + +func (x *CrossChainAppRequestFailedMsg) ProtoReflect() protoreflect.Message { + mi := &file_vm_vm_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CrossChainAppRequestFailedMsg.ProtoReflect.Descriptor instead. 
+func (*CrossChainAppRequestFailedMsg) Descriptor() ([]byte, []int) { + return file_vm_vm_proto_rawDescGZIP(), []int{26} +} + +func (x *CrossChainAppRequestFailedMsg) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} + +func (x *CrossChainAppRequestFailedMsg) GetRequestId() uint32 { + if x != nil { + return x.RequestId + } + return 0 +} + +type CrossChainAppResponseMsg struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The chain that we got a response from + ChainId []byte `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // Request ID of request that this is in response to + RequestId uint32 `protobuf:"varint,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + // The response body + Response []byte `protobuf:"bytes,3,opt,name=response,proto3" json:"response,omitempty"` +} + +func (x *CrossChainAppResponseMsg) Reset() { + *x = CrossChainAppResponseMsg{} + if protoimpl.UnsafeEnabled { + mi := &file_vm_vm_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CrossChainAppResponseMsg) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CrossChainAppResponseMsg) ProtoMessage() {} + +func (x *CrossChainAppResponseMsg) ProtoReflect() protoreflect.Message { + mi := &file_vm_vm_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CrossChainAppResponseMsg.ProtoReflect.Descriptor instead. 
+func (*CrossChainAppResponseMsg) Descriptor() ([]byte, []int) { + return file_vm_vm_proto_rawDescGZIP(), []int{27} +} + +func (x *CrossChainAppResponseMsg) GetChainId() []byte { + if x != nil { + return x.ChainId + } + return nil +} + +func (x *CrossChainAppResponseMsg) GetRequestId() uint32 { + if x != nil { + return x.RequestId + } + return 0 } -func (x *AppGossipMsg) GetMsg() []byte { +func (x *CrossChainAppResponseMsg) GetResponse() []byte { if x != nil { - return x.Msg + return x.Response } return nil } @@ -1510,7 +2031,7 @@ type ConnectedRequest struct { func (x *ConnectedRequest) Reset() { *x = ConnectedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[24] + mi := &file_vm_vm_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1523,7 +2044,7 @@ func (x *ConnectedRequest) String() string { func (*ConnectedRequest) ProtoMessage() {} func (x *ConnectedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[24] + mi := &file_vm_vm_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1536,7 +2057,7 @@ func (x *ConnectedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ConnectedRequest.ProtoReflect.Descriptor instead. 
func (*ConnectedRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{24} + return file_vm_vm_proto_rawDescGZIP(), []int{28} } func (x *ConnectedRequest) GetNodeId() []byte { @@ -1564,7 +2085,7 @@ type DisconnectedRequest struct { func (x *DisconnectedRequest) Reset() { *x = DisconnectedRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[25] + mi := &file_vm_vm_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1577,7 +2098,7 @@ func (x *DisconnectedRequest) String() string { func (*DisconnectedRequest) ProtoMessage() {} func (x *DisconnectedRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[25] + mi := &file_vm_vm_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1590,7 +2111,7 @@ func (x *DisconnectedRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DisconnectedRequest.ProtoReflect.Descriptor instead. 
func (*DisconnectedRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{25} + return file_vm_vm_proto_rawDescGZIP(), []int{29} } func (x *DisconnectedRequest) GetNodeId() []byte { @@ -1614,7 +2135,7 @@ type GetAncestorsRequest struct { func (x *GetAncestorsRequest) Reset() { *x = GetAncestorsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[26] + mi := &file_vm_vm_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1627,7 +2148,7 @@ func (x *GetAncestorsRequest) String() string { func (*GetAncestorsRequest) ProtoMessage() {} func (x *GetAncestorsRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[26] + mi := &file_vm_vm_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1640,7 +2161,7 @@ func (x *GetAncestorsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestorsRequest.ProtoReflect.Descriptor instead. 
func (*GetAncestorsRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{26} + return file_vm_vm_proto_rawDescGZIP(), []int{30} } func (x *GetAncestorsRequest) GetBlkId() []byte { @@ -1682,7 +2203,7 @@ type GetAncestorsResponse struct { func (x *GetAncestorsResponse) Reset() { *x = GetAncestorsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[27] + mi := &file_vm_vm_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1695,7 +2216,7 @@ func (x *GetAncestorsResponse) String() string { func (*GetAncestorsResponse) ProtoMessage() {} func (x *GetAncestorsResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[27] + mi := &file_vm_vm_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1708,7 +2229,7 @@ func (x *GetAncestorsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetAncestorsResponse.ProtoReflect.Descriptor instead. 
func (*GetAncestorsResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{27} + return file_vm_vm_proto_rawDescGZIP(), []int{31} } func (x *GetAncestorsResponse) GetBlksBytes() [][]byte { @@ -1729,7 +2250,7 @@ type BatchedParseBlockRequest struct { func (x *BatchedParseBlockRequest) Reset() { *x = BatchedParseBlockRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[28] + mi := &file_vm_vm_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1742,7 +2263,7 @@ func (x *BatchedParseBlockRequest) String() string { func (*BatchedParseBlockRequest) ProtoMessage() {} func (x *BatchedParseBlockRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[28] + mi := &file_vm_vm_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1755,7 +2276,7 @@ func (x *BatchedParseBlockRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchedParseBlockRequest.ProtoReflect.Descriptor instead. 
func (*BatchedParseBlockRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{28} + return file_vm_vm_proto_rawDescGZIP(), []int{32} } func (x *BatchedParseBlockRequest) GetRequest() [][]byte { @@ -1776,7 +2297,7 @@ type BatchedParseBlockResponse struct { func (x *BatchedParseBlockResponse) Reset() { *x = BatchedParseBlockResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[29] + mi := &file_vm_vm_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1789,7 +2310,7 @@ func (x *BatchedParseBlockResponse) String() string { func (*BatchedParseBlockResponse) ProtoMessage() {} func (x *BatchedParseBlockResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[29] + mi := &file_vm_vm_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1802,7 +2323,7 @@ func (x *BatchedParseBlockResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use BatchedParseBlockResponse.ProtoReflect.Descriptor instead. 
func (*BatchedParseBlockResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{29} + return file_vm_vm_proto_rawDescGZIP(), []int{33} } func (x *BatchedParseBlockResponse) GetResponse() []*ParseBlockResponse { @@ -1817,13 +2338,13 @@ type VerifyHeightIndexResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Err uint32 `protobuf:"varint,1,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,1,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` } func (x *VerifyHeightIndexResponse) Reset() { *x = VerifyHeightIndexResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[30] + mi := &file_vm_vm_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1836,7 +2357,7 @@ func (x *VerifyHeightIndexResponse) String() string { func (*VerifyHeightIndexResponse) ProtoMessage() {} func (x *VerifyHeightIndexResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[30] + mi := &file_vm_vm_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1849,14 +2370,14 @@ func (x *VerifyHeightIndexResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use VerifyHeightIndexResponse.ProtoReflect.Descriptor instead. 
func (*VerifyHeightIndexResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{30} + return file_vm_vm_proto_rawDescGZIP(), []int{34} } -func (x *VerifyHeightIndexResponse) GetErr() uint32 { +func (x *VerifyHeightIndexResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type GetBlockIDAtHeightRequest struct { @@ -1870,7 +2391,7 @@ type GetBlockIDAtHeightRequest struct { func (x *GetBlockIDAtHeightRequest) Reset() { *x = GetBlockIDAtHeightRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[31] + mi := &file_vm_vm_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1883,7 +2404,7 @@ func (x *GetBlockIDAtHeightRequest) String() string { func (*GetBlockIDAtHeightRequest) ProtoMessage() {} func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[31] + mi := &file_vm_vm_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1896,7 +2417,7 @@ func (x *GetBlockIDAtHeightRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightRequest.ProtoReflect.Descriptor instead. 
func (*GetBlockIDAtHeightRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{31} + return file_vm_vm_proto_rawDescGZIP(), []int{35} } func (x *GetBlockIDAtHeightRequest) GetHeight() uint64 { @@ -1912,13 +2433,13 @@ type GetBlockIDAtHeightResponse struct { unknownFields protoimpl.UnknownFields BlkId []byte `protobuf:"bytes,1,opt,name=blk_id,json=blkId,proto3" json:"blk_id,omitempty"` - Err uint32 `protobuf:"varint,2,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,2,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` } func (x *GetBlockIDAtHeightResponse) Reset() { *x = GetBlockIDAtHeightResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[32] + mi := &file_vm_vm_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1931,7 +2452,7 @@ func (x *GetBlockIDAtHeightResponse) String() string { func (*GetBlockIDAtHeightResponse) ProtoMessage() {} func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[32] + mi := &file_vm_vm_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1944,7 +2465,7 @@ func (x *GetBlockIDAtHeightResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBlockIDAtHeightResponse.ProtoReflect.Descriptor instead. 
func (*GetBlockIDAtHeightResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{32} + return file_vm_vm_proto_rawDescGZIP(), []int{36} } func (x *GetBlockIDAtHeightResponse) GetBlkId() []byte { @@ -1954,11 +2475,11 @@ func (x *GetBlockIDAtHeightResponse) GetBlkId() []byte { return nil } -func (x *GetBlockIDAtHeightResponse) GetErr() uint32 { +func (x *GetBlockIDAtHeightResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type GatherResponse struct { @@ -1972,7 +2493,7 @@ type GatherResponse struct { func (x *GatherResponse) Reset() { *x = GatherResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1985,7 +2506,7 @@ func (x *GatherResponse) String() string { func (*GatherResponse) ProtoMessage() {} func (x *GatherResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[33] + mi := &file_vm_vm_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1998,7 +2519,7 @@ func (x *GatherResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GatherResponse.ProtoReflect.Descriptor instead. 
func (*GatherResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{33} + return file_vm_vm_proto_rawDescGZIP(), []int{37} } func (x *GatherResponse) GetMetricFamilies() []*_go.MetricFamily { @@ -2013,14 +2534,14 @@ type StateSyncEnabledResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - Err uint32 `protobuf:"varint,2,opt,name=err,proto3" json:"err,omitempty"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + Err Error `protobuf:"varint,2,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` } func (x *StateSyncEnabledResponse) Reset() { *x = StateSyncEnabledResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2033,7 +2554,7 @@ func (x *StateSyncEnabledResponse) String() string { func (*StateSyncEnabledResponse) ProtoMessage() {} func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[34] + mi := &file_vm_vm_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2046,7 +2567,7 @@ func (x *StateSyncEnabledResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSyncEnabledResponse.ProtoReflect.Descriptor instead. 
func (*StateSyncEnabledResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{34} + return file_vm_vm_proto_rawDescGZIP(), []int{38} } func (x *StateSyncEnabledResponse) GetEnabled() bool { @@ -2056,11 +2577,11 @@ func (x *StateSyncEnabledResponse) GetEnabled() bool { return false } -func (x *StateSyncEnabledResponse) GetErr() uint32 { +func (x *StateSyncEnabledResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type GetOngoingSyncStateSummaryResponse struct { @@ -2071,13 +2592,13 @@ type GetOngoingSyncStateSummaryResponse struct { Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` Bytes []byte `protobuf:"bytes,3,opt,name=bytes,proto3" json:"bytes,omitempty"` - Err uint32 `protobuf:"varint,4,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,4,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` } func (x *GetOngoingSyncStateSummaryResponse) Reset() { *x = GetOngoingSyncStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2090,7 +2611,7 @@ func (x *GetOngoingSyncStateSummaryResponse) String() string { func (*GetOngoingSyncStateSummaryResponse) ProtoMessage() {} func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[35] + mi := &file_vm_vm_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2103,7 +2624,7 @@ func (x *GetOngoingSyncStateSummaryResponse) ProtoReflect() protoreflect.Message // Deprecated: Use GetOngoingSyncStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetOngoingSyncStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{35} + return file_vm_vm_proto_rawDescGZIP(), []int{39} } func (x *GetOngoingSyncStateSummaryResponse) GetId() []byte { @@ -2127,11 +2648,11 @@ func (x *GetOngoingSyncStateSummaryResponse) GetBytes() []byte { return nil } -func (x *GetOngoingSyncStateSummaryResponse) GetErr() uint32 { +func (x *GetOngoingSyncStateSummaryResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type GetLastStateSummaryResponse struct { @@ -2142,13 +2663,13 @@ type GetLastStateSummaryResponse struct { Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` Bytes []byte `protobuf:"bytes,3,opt,name=bytes,proto3" json:"bytes,omitempty"` - Err uint32 `protobuf:"varint,4,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,4,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` } func (x *GetLastStateSummaryResponse) Reset() { *x = GetLastStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2161,7 +2682,7 @@ func (x *GetLastStateSummaryResponse) String() string { func (*GetLastStateSummaryResponse) ProtoMessage() {} func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[36] + mi := &file_vm_vm_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2174,7 +2695,7 @@ func (x *GetLastStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetLastStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetLastStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{36} + return file_vm_vm_proto_rawDescGZIP(), []int{40} } func (x *GetLastStateSummaryResponse) GetId() []byte { @@ -2198,11 +2719,11 @@ func (x *GetLastStateSummaryResponse) GetBytes() []byte { return nil } -func (x *GetLastStateSummaryResponse) GetErr() uint32 { +func (x *GetLastStateSummaryResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type ParseStateSummaryRequest struct { @@ -2216,7 +2737,7 @@ type ParseStateSummaryRequest struct { func (x *ParseStateSummaryRequest) Reset() { *x = ParseStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2229,7 +2750,7 @@ func (x *ParseStateSummaryRequest) String() string { func (*ParseStateSummaryRequest) ProtoMessage() {} func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[37] + mi := &file_vm_vm_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2242,7 +2763,7 @@ func (x *ParseStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryRequest.ProtoReflect.Descriptor instead. 
func (*ParseStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{37} + return file_vm_vm_proto_rawDescGZIP(), []int{41} } func (x *ParseStateSummaryRequest) GetBytes() []byte { @@ -2259,13 +2780,13 @@ type ParseStateSummaryResponse struct { Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` - Err uint32 `protobuf:"varint,3,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,3,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` } func (x *ParseStateSummaryResponse) Reset() { *x = ParseStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2278,7 +2799,7 @@ func (x *ParseStateSummaryResponse) String() string { func (*ParseStateSummaryResponse) ProtoMessage() {} func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[38] + mi := &file_vm_vm_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2291,7 +2812,7 @@ func (x *ParseStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ParseStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*ParseStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{38} + return file_vm_vm_proto_rawDescGZIP(), []int{42} } func (x *ParseStateSummaryResponse) GetId() []byte { @@ -2308,11 +2829,11 @@ func (x *ParseStateSummaryResponse) GetHeight() uint64 { return 0 } -func (x *ParseStateSummaryResponse) GetErr() uint32 { +func (x *ParseStateSummaryResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type GetStateSummaryRequest struct { @@ -2326,7 +2847,7 @@ type GetStateSummaryRequest struct { func (x *GetStateSummaryRequest) Reset() { *x = GetStateSummaryRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2339,7 +2860,7 @@ func (x *GetStateSummaryRequest) String() string { func (*GetStateSummaryRequest) ProtoMessage() {} func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[39] + mi := &file_vm_vm_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2352,7 +2873,7 @@ func (x *GetStateSummaryRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryRequest.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{39} + return file_vm_vm_proto_rawDescGZIP(), []int{43} } func (x *GetStateSummaryRequest) GetHeight() uint64 { @@ -2369,13 +2890,13 @@ type GetStateSummaryResponse struct { Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` Bytes []byte `protobuf:"bytes,2,opt,name=bytes,proto3" json:"bytes,omitempty"` - Err uint32 `protobuf:"varint,3,opt,name=err,proto3" json:"err,omitempty"` + Err Error `protobuf:"varint,3,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` } func (x *GetStateSummaryResponse) Reset() { *x = GetStateSummaryResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2388,7 +2909,7 @@ func (x *GetStateSummaryResponse) String() string { func (*GetStateSummaryResponse) ProtoMessage() {} func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[40] + mi := &file_vm_vm_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2401,7 +2922,7 @@ func (x *GetStateSummaryResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateSummaryResponse.ProtoReflect.Descriptor instead. 
func (*GetStateSummaryResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{40} + return file_vm_vm_proto_rawDescGZIP(), []int{44} } func (x *GetStateSummaryResponse) GetId() []byte { @@ -2418,11 +2939,11 @@ func (x *GetStateSummaryResponse) GetBytes() []byte { return nil } -func (x *GetStateSummaryResponse) GetErr() uint32 { +func (x *GetStateSummaryResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } type StateSummaryAcceptRequest struct { @@ -2436,7 +2957,7 @@ type StateSummaryAcceptRequest struct { func (x *StateSummaryAcceptRequest) Reset() { *x = StateSummaryAcceptRequest{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2449,7 +2970,7 @@ func (x *StateSummaryAcceptRequest) String() string { func (*StateSummaryAcceptRequest) ProtoMessage() {} func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[41] + mi := &file_vm_vm_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2462,7 +2983,7 @@ func (x *StateSummaryAcceptRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptRequest.ProtoReflect.Descriptor instead. 
func (*StateSummaryAcceptRequest) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{41} + return file_vm_vm_proto_rawDescGZIP(), []int{45} } func (x *StateSummaryAcceptRequest) GetBytes() []byte { @@ -2477,14 +2998,14 @@ type StateSummaryAcceptResponse struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Accepted bool `protobuf:"varint,1,opt,name=accepted,proto3" json:"accepted,omitempty"` - Err uint32 `protobuf:"varint,2,opt,name=err,proto3" json:"err,omitempty"` + Mode StateSummaryAcceptResponse_Mode `protobuf:"varint,1,opt,name=mode,proto3,enum=vm.StateSummaryAcceptResponse_Mode" json:"mode,omitempty"` + Err Error `protobuf:"varint,2,opt,name=err,proto3,enum=vm.Error" json:"err,omitempty"` } func (x *StateSummaryAcceptResponse) Reset() { *x = StateSummaryAcceptResponse{} if protoimpl.UnsafeEnabled { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2497,7 +3018,7 @@ func (x *StateSummaryAcceptResponse) String() string { func (*StateSummaryAcceptResponse) ProtoMessage() {} func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { - mi := &file_vm_vm_proto_msgTypes[42] + mi := &file_vm_vm_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2510,21 +3031,21 @@ func (x *StateSummaryAcceptResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StateSummaryAcceptResponse.ProtoReflect.Descriptor instead. 
func (*StateSummaryAcceptResponse) Descriptor() ([]byte, []int) { - return file_vm_vm_proto_rawDescGZIP(), []int{42} + return file_vm_vm_proto_rawDescGZIP(), []int{46} } -func (x *StateSummaryAcceptResponse) GetAccepted() bool { +func (x *StateSummaryAcceptResponse) GetMode() StateSummaryAcceptResponse_Mode { if x != nil { - return x.Accepted + return x.Mode } - return false + return StateSummaryAcceptResponse_MODE_UNSPECIFIED } -func (x *StateSummaryAcceptResponse) GetErr() uint32 { +func (x *StateSummaryAcceptResponse) GetErr() Error { if x != nil { return x.Err } - return 0 + return Error_ERROR_UNSPECIFIED } var File_vm_vm_proto protoreflect.FileDescriptor @@ -2537,7 +3058,7 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0x89, 0x03, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x6f, 0x74, 0x6f, 0x22, 0xec, 0x03, 0x0a, 0x11, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x75, 0x62, 0x6e, @@ -2545,384 +3066,486 @@ var file_vm_vm_proto_rawDesc = []byte{ 0x6e, 0x65, 0x74, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x78, 0x5f, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 
0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x78, - 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x76, 0x61, 0x78, 0x5f, - 0x61, 0x73, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, - 0x61, 0x76, 0x61, 0x78, 0x41, 0x73, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x67, - 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x75, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, - 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x0a, 0x64, 0x62, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x76, - 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, 0x42, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x52, 0x09, 0x64, 0x62, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, - 0xdd, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x49, 0x64, - 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 
0x70, 0x74, 0x65, - 0x64, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x50, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, - 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, - 0x4e, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, 0x42, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, - 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, - 0x27, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, - 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, - 0x65, 0x70, 0x74, 
0x65, 0x64, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, - 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, + 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x1c, 0x0a, 0x0a, 0x78, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x78, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1c, 0x0a, 0x0a, 0x63, 0x5f, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x63, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x76, 0x61, 0x78, 0x5f, 0x61, 0x73, 0x73, + 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x61, 0x76, 0x61, + 0x78, 0x41, 0x73, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x64, 0x69, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x44, 0x69, 0x72, 0x12, 0x23, + 0x0a, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x42, 0x79, + 0x74, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x70, 
0x67, 0x72, 0x61, 0x64, 0x65, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x75, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x0a, 0x64, + 0x62, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, 0x42, + 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x09, 0x64, 0x62, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, + 0x64, 0x72, 0x22, 0xdd, 0x01, 0x0a, 0x12, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, 0x61, 0x73, + 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, + 0x64, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 
0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x22, 0x4e, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x44, + 0x42, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, + 0x64, 0x72, 0x22, 0x32, 0x0a, 0x0f, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x22, 0xdb, 0x01, 0x0a, 0x10, 0x53, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x6c, + 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x65, 0x64, 0x49, 0x64, 0x12, 0x35, 0x0a, 0x17, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, 0x63, + 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x14, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x63, 0x63, 0x65, + 0x70, 0x74, 0x65, 0x64, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, + 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x12, 
0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x22, 0x41, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, + 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x22, 0x47, 0x0a, 0x1c, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, + 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, + 0x22, 0x65, 0x0a, 0x07, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6c, 0x6f, 0x63, 0x6b, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, 0x51, 0x0a, 0x11, 
0x42, 0x75, 0x69, 0x6c, 0x64, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x29, 0x0a, 0x0e, + 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, 0x63, 0x68, + 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0xd9, 0x01, 0x0a, 0x12, 0x42, + 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, + 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x41, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, - 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x22, 0x47, 0x0a, 0x1c, 0x43, 0x72, 0x65, - 0x61, 0x74, 
0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x08, 0x68, 0x61, 0x6e, - 0x64, 0x6c, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x76, 0x6d, - 0x2e, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x08, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, - 0x72, 0x73, 0x22, 0x65, 0x0a, 0x07, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x12, 0x16, 0x0a, - 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, - 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x0c, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x6c, 0x6f, 0x63, - 0x6b, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, - 0x65, 0x72, 0x76, 0x65, 0x72, 0x41, 0x64, 0x64, 0x72, 0x22, 0xa9, 0x01, 0x0a, 0x12, 0x42, 0x75, - 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, - 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 
0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x29, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, - 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, - 0x22, 0xab, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, - 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x21, - 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, 0x74, 0x68, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x29, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 
0x05, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x22, 0xe7, 0x01, 0x0a, 0x12, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x13, 0x76, + 0x65, 0x72, 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x57, 0x69, 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x21, 0x0a, 0x0f, 0x47, + 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x88, + 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0a, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x03, + 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x5f, 0x77, 0x69, 0x74, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x57, 0x69, + 0x74, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x22, 0x26, 0x0a, 0x14, 0x53, 0x65, 0x74, + 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, - 0x64, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 
0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x26, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, - 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, - 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x4f, 0x0a, 0x13, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 
0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, - 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x0e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, - 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, - 0x99, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, - 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, - 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4d, 0x0a, 0x13, 0x41, - 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, + 0x64, 0x22, 0x68, 0x0a, 0x12, 0x42, 0x6c, 
0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x29, 0x0a, + 0x0e, 0x70, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0c, 0x70, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x48, + 0x65, 0x69, 0x67, 0x68, 0x74, 0x88, 0x01, 0x01, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x70, 0x5f, 0x63, + 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x4f, 0x0a, 0x13, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x24, 0x0a, 0x12, + 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, + 0x69, 0x64, 0x22, 0x24, 0x0a, 0x12, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x22, 0x2a, 0x0a, 0x0e, 0x48, 0x65, 0x61, 0x6c, + 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, + 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x64, 0x65, 0x74, + 0x61, 0x69, 0x6c, 0x73, 0x22, 0x2b, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 
0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x99, 0x01, 0x0a, 0x0d, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4d, 0x0a, + 0x13, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, + 0x64, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, + 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x22, 0x64, 0x0a, 0x0e, + 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x17, + 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 
0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x39, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x22, 0x64, 0x0a, 0x0e, 0x41, 0x70, - 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x17, 0x0a, 0x07, - 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, - 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x39, 0x0a, 0x0c, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, + 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, + 0x73, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0xa5, 0x01, + 0x0a, 0x17, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, + 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 
0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x49, 0x64, 0x12, 0x36, 0x0a, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x52, 0x08, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x59, 0x0a, 0x1d, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, + 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, + 0x22, 0x70, 0x0a, 0x18, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x12, 0x19, 0x0a, 0x08, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x45, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, + 
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, + 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x2e, 0x0a, 0x13, 0x44, 0x69, 0x73, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x73, 0x67, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, 0x22, 0x45, 0x0a, 0x10, 0x43, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x2e, 0x0a, 0x13, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, - 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, - 0x49, 0x64, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, - 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, + 0x0c, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0xb3, 0x01, 0x0a, 0x13, 0x47, 0x65, + 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 
0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x4e, 0x75, 0x6d, 0x12, 0x26, + 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x73, 0x52, 0x65, 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, + 0x35, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6b, 0x73, 0x5f, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x09, 0x62, 0x6c, 0x6b, + 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x34, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, + 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x19, + 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x6d, + 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x0a, + 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 
0x49, 0x6e, 0x64, + 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x50, 0x0a, 0x1a, + 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, - 0x64, 0x12, 0x24, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, - 0x6e, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x73, 0x4e, 0x75, 0x6d, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x53, 0x69, 0x7a, 0x65, 0x12, - 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x5f, 0x72, 0x65, - 0x74, 0x72, 0x69, 0x76, 0x61, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x73, 0x52, 0x65, 0x74, 0x72, - 0x69, 0x76, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x35, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x41, - 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6c, 0x6b, 0x73, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 
0x0c, 0x52, 0x09, 0x62, 0x6c, 0x6b, 0x73, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, - 0x34, 0x0a, 0x18, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x07, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x4f, 0x0a, 0x19, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, - 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x08, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2d, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, - 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x33, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x45, 0x0a, 0x1a, 0x47, 0x65, - 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x6c, 0x6b, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x49, 0x64, 0x12, - 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, - 0x72, 0x22, 0x5d, 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x68, 
0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, - 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, - 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, - 0x22, 0x46, 0x0a, 0x18, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x74, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x4f, - 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, - 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, - 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, - 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x6d, - 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, - 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 
0x0a, - 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, - 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, - 0x18, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, - 0x55, 0x0a, 0x19, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, + 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, + 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x5d, + 0x0a, 0x0e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x4b, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x61, 0x6d, 0x69, 0x6c, + 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, + 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x52, 0x0e, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x22, 0x51, 0x0a, + 0x18, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, + 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x09, 0x2e, 0x76, 0x6d, 
0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, + 0x22, 0x7f, 0x0a, 0x22, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, + 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, + 0x72, 0x22, 0x78, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1b, + 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, + 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x18, 0x50, + 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x60, 0x0a, + 0x19, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 
0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, + 0x68, 0x74, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, + 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, + 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, + 0x74, 0x22, 0x5c, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a, 0x06, - 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x68, 0x65, - 0x69, 0x67, 0x68, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x30, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x06, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x22, 0x51, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 
0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x31, 0x0a, 0x19, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x79, 0x74, 0x65, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, 0x65, 0x73, 0x22, 0x4a, - 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, - 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x65, 0x72, 0x72, 0x32, 0xb0, 0x10, 0x0a, 0x02, 0x56, - 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, - 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, - 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, - 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x2e, 0x76, 0x6d, 0x2e, - 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, - 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, - 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, 0x2e, 0x76, 0x6d, - 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x20, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x09, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, + 0x31, 0x0a, 0x19, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x62, 0x79, 0x74, + 0x65, 0x73, 0x22, 0xc5, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 
0x75, 0x6d, 0x6d, + 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x37, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x23, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, + 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x03, 0x65, 0x72, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x09, 0x2e, 0x76, 0x6d, 0x2e, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x51, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, + 0x14, 0x0a, 0x10, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x4b, + 0x49, 0x50, 0x50, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x4f, 0x44, 0x45, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x49, 0x43, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4d, 0x4f, 0x44, 0x45, + 0x5f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x10, 0x03, 0x2a, 0x65, 0x0a, 0x05, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, + 0x41, 0x54, 0x45, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x49, 0x4e, + 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x4f, + 0x54, 0x53, 0x54, 0x52, 0x41, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x5f, 0x4f, 0x50, 0x10, + 0x03, 0x2a, 0x61, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, + 0x44, 0x10, 0x00, 
0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x52, + 0x4f, 0x43, 0x45, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, + 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x45, 0x4a, 0x45, 0x43, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, + 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x50, 0x54, + 0x45, 0x44, 0x10, 0x03, 0x2a, 0xb6, 0x01, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x15, + 0x0a, 0x11, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, + 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x01, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x02, 0x12, 0x26, 0x0a, 0x22, + 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x48, 0x45, 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, + 0x45, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, + 0x45, 0x44, 0x10, 0x03, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x48, 0x45, + 0x49, 0x47, 0x48, 0x54, 0x5f, 0x49, 0x4e, 0x44, 0x45, 0x58, 0x5f, 0x49, 0x4e, 0x43, 0x4f, 0x4d, + 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x04, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, + 0x49, 0x4d, 0x50, 0x4c, 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x05, 0x32, 0xa4, 0x12, + 0x0a, 0x02, 0x56, 0x4d, 0x12, 0x3b, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, + 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x49, + 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x35, 0x0a, 0x08, 0x53, 0x65, 0x74, 0x53, 
0x74, 0x61, 0x74, 0x65, 0x12, 0x13, 0x2e, + 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x53, 0x68, 0x75, 0x74, + 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x44, 0x69, 0x73, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x6d, 0x70, 0x74, 0x79, 0x12, 0x44, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, + 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x14, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, + 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x20, 0x2e, 0x76, 0x6d, 0x2e, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x48, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x09, + 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 
+ 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x0c, 0x44, 0x69, 0x73, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x44, 0x69, 0x73, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3b, 0x0a, 0x0a, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, + 0x76, 0x6d, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, + 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x13, + 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x53, 0x65, 0x74, + 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x6d, 0x2e, + 0x53, 0x65, 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, + 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, + 0x2e, 0x76, 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x16, 0x2e, 0x76, 0x6d, - 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x12, 0x15, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, - 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x35, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x13, 0x2e, 0x76, - 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x14, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x50, 0x72, - 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x65, - 0x74, 0x50, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 
0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x48, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x76, - 0x6d, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x36, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, + 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x11, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x12, 0x43, 0x0a, 0x10, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, - 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x70, 0x74, 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, + 0x12, 0x10, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, + 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x47, 0x61, + 0x74, 0x68, 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x76, + 0x6d, 0x2e, 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x4b, 0x0a, 0x14, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, + 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, + 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x57, 0x0a, + 0x1a, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x12, 0x21, 0x2e, 0x76, 0x6d, + 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 
0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x39, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x12, 0x35, 0x0a, 0x09, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x12, 0x10, - 0x2e, 0x76, 0x6d, 0x2e, 0x41, 0x70, 0x70, 0x47, 0x6f, 0x73, 0x73, 0x69, 0x70, 0x4d, 0x73, 0x67, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x34, 0x0a, 0x06, 0x47, 0x61, 0x74, 0x68, - 0x65, 0x72, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x12, 0x2e, 0x76, 0x6d, 0x2e, - 0x47, 0x61, 0x74, 0x68, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, - 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x4d, 0x0a, 0x15, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, + 0x68, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x43, 0x72, 0x6f, 0x73, 0x73, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x41, + 0x70, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x4d, 0x73, 0x67, 0x1a, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x41, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, + 
0x73, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, + 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, - 0x41, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, - 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, - 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, - 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x53, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, - 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 
0x10, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, - 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, - 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, - 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, - 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x42, 0x61, 0x74, 0x63, + 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1c, 0x2e, + 0x76, 0x6d, 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, + 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, + 0x2e, 0x42, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x61, 0x72, 0x73, 0x65, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x11, 0x56, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 
0x56, 0x65, 0x72, + 0x69, 0x66, 0x79, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x1d, 0x2e, 0x76, + 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, + 0x69, 0x67, 0x68, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, + 0x2e, 0x47, 0x65, 0x74, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x44, 0x41, 0x74, 0x48, 0x65, 0x69, + 0x67, 0x68, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x10, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, + 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, - 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, - 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, - 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x76, 
0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, - 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x12, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, - 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, - 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, - 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 
0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, - 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, - 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, - 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, - 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x26, 0x2e, 0x76, 0x6d, + 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x6e, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x53, 0x79, 0x6e, 0x63, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x1f, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x61, 0x73, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x11, 0x50, 0x61, 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1c, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, + 0x72, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x50, 0x61, 0x72, 0x73, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x47, 0x65, 
0x74, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1a, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x76, 0x6d, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3e, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, + 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, + 0x12, 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x41, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x12, 0x3d, 0x0a, 0x0b, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x16, 0x2e, 0x76, 0x6d, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x65, 0x6a, 0x65, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x12, + 0x53, 0x0a, 0x12, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, + 0x63, 0x63, 0x65, 0x70, 0x74, 0x12, 0x1d, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x76, 0x6d, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x53, + 0x75, 
0x6d, 0x6d, 0x61, 0x72, 0x79, 0x41, 0x63, 0x63, 0x65, 0x70, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, 0x6c, + 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, + 0x2f, 0x76, 0x6d, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2937,135 +3560,164 @@ func file_vm_vm_proto_rawDescGZIP() []byte { return file_vm_vm_proto_rawDescData } -var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 43) +var file_vm_vm_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_vm_vm_proto_msgTypes = make([]protoimpl.MessageInfo, 47) var file_vm_vm_proto_goTypes = []interface{}{ - (*InitializeRequest)(nil), // 0: vm.InitializeRequest - (*InitializeResponse)(nil), // 1: vm.InitializeResponse - (*VersionedDBServer)(nil), // 2: vm.VersionedDBServer - (*SetStateRequest)(nil), // 3: vm.SetStateRequest - (*SetStateResponse)(nil), // 4: vm.SetStateResponse - (*CreateHandlersResponse)(nil), // 5: vm.CreateHandlersResponse - (*CreateStaticHandlersResponse)(nil), // 6: vm.CreateStaticHandlersResponse - (*Handler)(nil), // 7: vm.Handler - (*BuildBlockResponse)(nil), // 8: vm.BuildBlockResponse - (*ParseBlockRequest)(nil), // 9: vm.ParseBlockRequest - (*ParseBlockResponse)(nil), // 10: vm.ParseBlockResponse - (*GetBlockRequest)(nil), // 11: vm.GetBlockRequest - (*GetBlockResponse)(nil), // 12: vm.GetBlockResponse - (*SetPreferenceRequest)(nil), // 13: vm.SetPreferenceRequest - (*BlockVerifyRequest)(nil), // 14: vm.BlockVerifyRequest - (*BlockVerifyResponse)(nil), // 15: vm.BlockVerifyResponse - (*BlockAcceptRequest)(nil), // 16: vm.BlockAcceptRequest - (*BlockRejectRequest)(nil), // 17: vm.BlockRejectRequest - (*HealthResponse)(nil), // 18: vm.HealthResponse - (*VersionResponse)(nil), // 19: vm.VersionResponse - (*AppRequestMsg)(nil), // 20: 
vm.AppRequestMsg - (*AppRequestFailedMsg)(nil), // 21: vm.AppRequestFailedMsg - (*AppResponseMsg)(nil), // 22: vm.AppResponseMsg - (*AppGossipMsg)(nil), // 23: vm.AppGossipMsg - (*ConnectedRequest)(nil), // 24: vm.ConnectedRequest - (*DisconnectedRequest)(nil), // 25: vm.DisconnectedRequest - (*GetAncestorsRequest)(nil), // 26: vm.GetAncestorsRequest - (*GetAncestorsResponse)(nil), // 27: vm.GetAncestorsResponse - (*BatchedParseBlockRequest)(nil), // 28: vm.BatchedParseBlockRequest - (*BatchedParseBlockResponse)(nil), // 29: vm.BatchedParseBlockResponse - (*VerifyHeightIndexResponse)(nil), // 30: vm.VerifyHeightIndexResponse - (*GetBlockIDAtHeightRequest)(nil), // 31: vm.GetBlockIDAtHeightRequest - (*GetBlockIDAtHeightResponse)(nil), // 32: vm.GetBlockIDAtHeightResponse - (*GatherResponse)(nil), // 33: vm.GatherResponse - (*StateSyncEnabledResponse)(nil), // 34: vm.StateSyncEnabledResponse - (*GetOngoingSyncStateSummaryResponse)(nil), // 35: vm.GetOngoingSyncStateSummaryResponse - (*GetLastStateSummaryResponse)(nil), // 36: vm.GetLastStateSummaryResponse - (*ParseStateSummaryRequest)(nil), // 37: vm.ParseStateSummaryRequest - (*ParseStateSummaryResponse)(nil), // 38: vm.ParseStateSummaryResponse - (*GetStateSummaryRequest)(nil), // 39: vm.GetStateSummaryRequest - (*GetStateSummaryResponse)(nil), // 40: vm.GetStateSummaryResponse - (*StateSummaryAcceptRequest)(nil), // 41: vm.StateSummaryAcceptRequest - (*StateSummaryAcceptResponse)(nil), // 42: vm.StateSummaryAcceptResponse - (*timestamppb.Timestamp)(nil), // 43: google.protobuf.Timestamp - (*_go.MetricFamily)(nil), // 44: io.prometheus.client.MetricFamily - (*emptypb.Empty)(nil), // 45: google.protobuf.Empty + (State)(0), // 0: vm.State + (Status)(0), // 1: vm.Status + (Error)(0), // 2: vm.Error + (StateSummaryAcceptResponse_Mode)(0), // 3: vm.StateSummaryAcceptResponse.Mode + (*InitializeRequest)(nil), // 4: vm.InitializeRequest + (*InitializeResponse)(nil), // 5: vm.InitializeResponse + 
(*VersionedDBServer)(nil), // 6: vm.VersionedDBServer + (*SetStateRequest)(nil), // 7: vm.SetStateRequest + (*SetStateResponse)(nil), // 8: vm.SetStateResponse + (*CreateHandlersResponse)(nil), // 9: vm.CreateHandlersResponse + (*CreateStaticHandlersResponse)(nil), // 10: vm.CreateStaticHandlersResponse + (*Handler)(nil), // 11: vm.Handler + (*BuildBlockRequest)(nil), // 12: vm.BuildBlockRequest + (*BuildBlockResponse)(nil), // 13: vm.BuildBlockResponse + (*ParseBlockRequest)(nil), // 14: vm.ParseBlockRequest + (*ParseBlockResponse)(nil), // 15: vm.ParseBlockResponse + (*GetBlockRequest)(nil), // 16: vm.GetBlockRequest + (*GetBlockResponse)(nil), // 17: vm.GetBlockResponse + (*SetPreferenceRequest)(nil), // 18: vm.SetPreferenceRequest + (*BlockVerifyRequest)(nil), // 19: vm.BlockVerifyRequest + (*BlockVerifyResponse)(nil), // 20: vm.BlockVerifyResponse + (*BlockAcceptRequest)(nil), // 21: vm.BlockAcceptRequest + (*BlockRejectRequest)(nil), // 22: vm.BlockRejectRequest + (*HealthResponse)(nil), // 23: vm.HealthResponse + (*VersionResponse)(nil), // 24: vm.VersionResponse + (*AppRequestMsg)(nil), // 25: vm.AppRequestMsg + (*AppRequestFailedMsg)(nil), // 26: vm.AppRequestFailedMsg + (*AppResponseMsg)(nil), // 27: vm.AppResponseMsg + (*AppGossipMsg)(nil), // 28: vm.AppGossipMsg + (*CrossChainAppRequestMsg)(nil), // 29: vm.CrossChainAppRequestMsg + (*CrossChainAppRequestFailedMsg)(nil), // 30: vm.CrossChainAppRequestFailedMsg + (*CrossChainAppResponseMsg)(nil), // 31: vm.CrossChainAppResponseMsg + (*ConnectedRequest)(nil), // 32: vm.ConnectedRequest + (*DisconnectedRequest)(nil), // 33: vm.DisconnectedRequest + (*GetAncestorsRequest)(nil), // 34: vm.GetAncestorsRequest + (*GetAncestorsResponse)(nil), // 35: vm.GetAncestorsResponse + (*BatchedParseBlockRequest)(nil), // 36: vm.BatchedParseBlockRequest + (*BatchedParseBlockResponse)(nil), // 37: vm.BatchedParseBlockResponse + (*VerifyHeightIndexResponse)(nil), // 38: vm.VerifyHeightIndexResponse + 
(*GetBlockIDAtHeightRequest)(nil), // 39: vm.GetBlockIDAtHeightRequest + (*GetBlockIDAtHeightResponse)(nil), // 40: vm.GetBlockIDAtHeightResponse + (*GatherResponse)(nil), // 41: vm.GatherResponse + (*StateSyncEnabledResponse)(nil), // 42: vm.StateSyncEnabledResponse + (*GetOngoingSyncStateSummaryResponse)(nil), // 43: vm.GetOngoingSyncStateSummaryResponse + (*GetLastStateSummaryResponse)(nil), // 44: vm.GetLastStateSummaryResponse + (*ParseStateSummaryRequest)(nil), // 45: vm.ParseStateSummaryRequest + (*ParseStateSummaryResponse)(nil), // 46: vm.ParseStateSummaryResponse + (*GetStateSummaryRequest)(nil), // 47: vm.GetStateSummaryRequest + (*GetStateSummaryResponse)(nil), // 48: vm.GetStateSummaryResponse + (*StateSummaryAcceptRequest)(nil), // 49: vm.StateSummaryAcceptRequest + (*StateSummaryAcceptResponse)(nil), // 50: vm.StateSummaryAcceptResponse + (*timestamppb.Timestamp)(nil), // 51: google.protobuf.Timestamp + (*_go.MetricFamily)(nil), // 52: io.prometheus.client.MetricFamily + (*emptypb.Empty)(nil), // 53: google.protobuf.Empty } var file_vm_vm_proto_depIdxs = []int32{ - 2, // 0: vm.InitializeRequest.db_servers:type_name -> vm.VersionedDBServer - 43, // 1: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp - 43, // 2: vm.SetStateResponse.timestamp:type_name -> google.protobuf.Timestamp - 7, // 3: vm.CreateHandlersResponse.handlers:type_name -> vm.Handler - 7, // 4: vm.CreateStaticHandlersResponse.handlers:type_name -> vm.Handler - 43, // 5: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 43, // 6: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 43, // 7: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp - 43, // 8: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp - 43, // 9: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp - 10, // 10: vm.BatchedParseBlockResponse.response:type_name -> vm.ParseBlockResponse - 44, // 11: 
vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily - 0, // 12: vm.VM.Initialize:input_type -> vm.InitializeRequest - 3, // 13: vm.VM.SetState:input_type -> vm.SetStateRequest - 45, // 14: vm.VM.Shutdown:input_type -> google.protobuf.Empty - 45, // 15: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty - 45, // 16: vm.VM.CreateStaticHandlers:input_type -> google.protobuf.Empty - 24, // 17: vm.VM.Connected:input_type -> vm.ConnectedRequest - 25, // 18: vm.VM.Disconnected:input_type -> vm.DisconnectedRequest - 45, // 19: vm.VM.BuildBlock:input_type -> google.protobuf.Empty - 9, // 20: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest - 11, // 21: vm.VM.GetBlock:input_type -> vm.GetBlockRequest - 13, // 22: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest - 45, // 23: vm.VM.Health:input_type -> google.protobuf.Empty - 45, // 24: vm.VM.Version:input_type -> google.protobuf.Empty - 20, // 25: vm.VM.AppRequest:input_type -> vm.AppRequestMsg - 21, // 26: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg - 22, // 27: vm.VM.AppResponse:input_type -> vm.AppResponseMsg - 23, // 28: vm.VM.AppGossip:input_type -> vm.AppGossipMsg - 45, // 29: vm.VM.Gather:input_type -> google.protobuf.Empty - 26, // 30: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest - 28, // 31: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest - 45, // 32: vm.VM.VerifyHeightIndex:input_type -> google.protobuf.Empty - 31, // 33: vm.VM.GetBlockIDAtHeight:input_type -> vm.GetBlockIDAtHeightRequest - 45, // 34: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty - 45, // 35: vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty - 45, // 36: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty - 37, // 37: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest - 39, // 38: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest - 14, // 39: vm.VM.BlockVerify:input_type -> 
vm.BlockVerifyRequest - 16, // 40: vm.VM.BlockAccept:input_type -> vm.BlockAcceptRequest - 17, // 41: vm.VM.BlockReject:input_type -> vm.BlockRejectRequest - 41, // 42: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest - 1, // 43: vm.VM.Initialize:output_type -> vm.InitializeResponse - 4, // 44: vm.VM.SetState:output_type -> vm.SetStateResponse - 45, // 45: vm.VM.Shutdown:output_type -> google.protobuf.Empty - 5, // 46: vm.VM.CreateHandlers:output_type -> vm.CreateHandlersResponse - 6, // 47: vm.VM.CreateStaticHandlers:output_type -> vm.CreateStaticHandlersResponse - 45, // 48: vm.VM.Connected:output_type -> google.protobuf.Empty - 45, // 49: vm.VM.Disconnected:output_type -> google.protobuf.Empty - 8, // 50: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse - 10, // 51: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse - 12, // 52: vm.VM.GetBlock:output_type -> vm.GetBlockResponse - 45, // 53: vm.VM.SetPreference:output_type -> google.protobuf.Empty - 18, // 54: vm.VM.Health:output_type -> vm.HealthResponse - 19, // 55: vm.VM.Version:output_type -> vm.VersionResponse - 45, // 56: vm.VM.AppRequest:output_type -> google.protobuf.Empty - 45, // 57: vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty - 45, // 58: vm.VM.AppResponse:output_type -> google.protobuf.Empty - 45, // 59: vm.VM.AppGossip:output_type -> google.protobuf.Empty - 33, // 60: vm.VM.Gather:output_type -> vm.GatherResponse - 27, // 61: vm.VM.GetAncestors:output_type -> vm.GetAncestorsResponse - 29, // 62: vm.VM.BatchedParseBlock:output_type -> vm.BatchedParseBlockResponse - 30, // 63: vm.VM.VerifyHeightIndex:output_type -> vm.VerifyHeightIndexResponse - 32, // 64: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse - 34, // 65: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse - 35, // 66: vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse - 36, // 67: vm.VM.GetLastStateSummary:output_type -> 
vm.GetLastStateSummaryResponse - 38, // 68: vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse - 40, // 69: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse - 15, // 70: vm.VM.BlockVerify:output_type -> vm.BlockVerifyResponse - 45, // 71: vm.VM.BlockAccept:output_type -> google.protobuf.Empty - 45, // 72: vm.VM.BlockReject:output_type -> google.protobuf.Empty - 42, // 73: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse - 43, // [43:74] is the sub-list for method output_type - 12, // [12:43] is the sub-list for method input_type - 12, // [12:12] is the sub-list for extension type_name - 12, // [12:12] is the sub-list for extension extendee - 0, // [0:12] is the sub-list for field type_name + 6, // 0: vm.InitializeRequest.db_servers:type_name -> vm.VersionedDBServer + 51, // 1: vm.InitializeResponse.timestamp:type_name -> google.protobuf.Timestamp + 0, // 2: vm.SetStateRequest.state:type_name -> vm.State + 51, // 3: vm.SetStateResponse.timestamp:type_name -> google.protobuf.Timestamp + 11, // 4: vm.CreateHandlersResponse.handlers:type_name -> vm.Handler + 11, // 5: vm.CreateStaticHandlersResponse.handlers:type_name -> vm.Handler + 51, // 6: vm.BuildBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 1, // 7: vm.ParseBlockResponse.status:type_name -> vm.Status + 51, // 8: vm.ParseBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 1, // 9: vm.GetBlockResponse.status:type_name -> vm.Status + 51, // 10: vm.GetBlockResponse.timestamp:type_name -> google.protobuf.Timestamp + 2, // 11: vm.GetBlockResponse.err:type_name -> vm.Error + 51, // 12: vm.BlockVerifyResponse.timestamp:type_name -> google.protobuf.Timestamp + 51, // 13: vm.AppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 51, // 14: vm.CrossChainAppRequestMsg.deadline:type_name -> google.protobuf.Timestamp + 15, // 15: vm.BatchedParseBlockResponse.response:type_name -> vm.ParseBlockResponse + 2, // 16: 
vm.VerifyHeightIndexResponse.err:type_name -> vm.Error + 2, // 17: vm.GetBlockIDAtHeightResponse.err:type_name -> vm.Error + 52, // 18: vm.GatherResponse.metric_families:type_name -> io.prometheus.client.MetricFamily + 2, // 19: vm.StateSyncEnabledResponse.err:type_name -> vm.Error + 2, // 20: vm.GetOngoingSyncStateSummaryResponse.err:type_name -> vm.Error + 2, // 21: vm.GetLastStateSummaryResponse.err:type_name -> vm.Error + 2, // 22: vm.ParseStateSummaryResponse.err:type_name -> vm.Error + 2, // 23: vm.GetStateSummaryResponse.err:type_name -> vm.Error + 3, // 24: vm.StateSummaryAcceptResponse.mode:type_name -> vm.StateSummaryAcceptResponse.Mode + 2, // 25: vm.StateSummaryAcceptResponse.err:type_name -> vm.Error + 4, // 26: vm.VM.Initialize:input_type -> vm.InitializeRequest + 7, // 27: vm.VM.SetState:input_type -> vm.SetStateRequest + 53, // 28: vm.VM.Shutdown:input_type -> google.protobuf.Empty + 53, // 29: vm.VM.CreateHandlers:input_type -> google.protobuf.Empty + 53, // 30: vm.VM.CreateStaticHandlers:input_type -> google.protobuf.Empty + 32, // 31: vm.VM.Connected:input_type -> vm.ConnectedRequest + 33, // 32: vm.VM.Disconnected:input_type -> vm.DisconnectedRequest + 12, // 33: vm.VM.BuildBlock:input_type -> vm.BuildBlockRequest + 14, // 34: vm.VM.ParseBlock:input_type -> vm.ParseBlockRequest + 16, // 35: vm.VM.GetBlock:input_type -> vm.GetBlockRequest + 18, // 36: vm.VM.SetPreference:input_type -> vm.SetPreferenceRequest + 53, // 37: vm.VM.Health:input_type -> google.protobuf.Empty + 53, // 38: vm.VM.Version:input_type -> google.protobuf.Empty + 25, // 39: vm.VM.AppRequest:input_type -> vm.AppRequestMsg + 26, // 40: vm.VM.AppRequestFailed:input_type -> vm.AppRequestFailedMsg + 27, // 41: vm.VM.AppResponse:input_type -> vm.AppResponseMsg + 28, // 42: vm.VM.AppGossip:input_type -> vm.AppGossipMsg + 53, // 43: vm.VM.Gather:input_type -> google.protobuf.Empty + 29, // 44: vm.VM.CrossChainAppRequest:input_type -> vm.CrossChainAppRequestMsg + 30, // 45: 
vm.VM.CrossChainAppRequestFailed:input_type -> vm.CrossChainAppRequestFailedMsg + 31, // 46: vm.VM.CrossChainAppResponse:input_type -> vm.CrossChainAppResponseMsg + 34, // 47: vm.VM.GetAncestors:input_type -> vm.GetAncestorsRequest + 36, // 48: vm.VM.BatchedParseBlock:input_type -> vm.BatchedParseBlockRequest + 53, // 49: vm.VM.VerifyHeightIndex:input_type -> google.protobuf.Empty + 39, // 50: vm.VM.GetBlockIDAtHeight:input_type -> vm.GetBlockIDAtHeightRequest + 53, // 51: vm.VM.StateSyncEnabled:input_type -> google.protobuf.Empty + 53, // 52: vm.VM.GetOngoingSyncStateSummary:input_type -> google.protobuf.Empty + 53, // 53: vm.VM.GetLastStateSummary:input_type -> google.protobuf.Empty + 45, // 54: vm.VM.ParseStateSummary:input_type -> vm.ParseStateSummaryRequest + 47, // 55: vm.VM.GetStateSummary:input_type -> vm.GetStateSummaryRequest + 19, // 56: vm.VM.BlockVerify:input_type -> vm.BlockVerifyRequest + 21, // 57: vm.VM.BlockAccept:input_type -> vm.BlockAcceptRequest + 22, // 58: vm.VM.BlockReject:input_type -> vm.BlockRejectRequest + 49, // 59: vm.VM.StateSummaryAccept:input_type -> vm.StateSummaryAcceptRequest + 5, // 60: vm.VM.Initialize:output_type -> vm.InitializeResponse + 8, // 61: vm.VM.SetState:output_type -> vm.SetStateResponse + 53, // 62: vm.VM.Shutdown:output_type -> google.protobuf.Empty + 9, // 63: vm.VM.CreateHandlers:output_type -> vm.CreateHandlersResponse + 10, // 64: vm.VM.CreateStaticHandlers:output_type -> vm.CreateStaticHandlersResponse + 53, // 65: vm.VM.Connected:output_type -> google.protobuf.Empty + 53, // 66: vm.VM.Disconnected:output_type -> google.protobuf.Empty + 13, // 67: vm.VM.BuildBlock:output_type -> vm.BuildBlockResponse + 15, // 68: vm.VM.ParseBlock:output_type -> vm.ParseBlockResponse + 17, // 69: vm.VM.GetBlock:output_type -> vm.GetBlockResponse + 53, // 70: vm.VM.SetPreference:output_type -> google.protobuf.Empty + 23, // 71: vm.VM.Health:output_type -> vm.HealthResponse + 24, // 72: vm.VM.Version:output_type -> 
vm.VersionResponse + 53, // 73: vm.VM.AppRequest:output_type -> google.protobuf.Empty + 53, // 74: vm.VM.AppRequestFailed:output_type -> google.protobuf.Empty + 53, // 75: vm.VM.AppResponse:output_type -> google.protobuf.Empty + 53, // 76: vm.VM.AppGossip:output_type -> google.protobuf.Empty + 41, // 77: vm.VM.Gather:output_type -> vm.GatherResponse + 53, // 78: vm.VM.CrossChainAppRequest:output_type -> google.protobuf.Empty + 53, // 79: vm.VM.CrossChainAppRequestFailed:output_type -> google.protobuf.Empty + 53, // 80: vm.VM.CrossChainAppResponse:output_type -> google.protobuf.Empty + 35, // 81: vm.VM.GetAncestors:output_type -> vm.GetAncestorsResponse + 37, // 82: vm.VM.BatchedParseBlock:output_type -> vm.BatchedParseBlockResponse + 38, // 83: vm.VM.VerifyHeightIndex:output_type -> vm.VerifyHeightIndexResponse + 40, // 84: vm.VM.GetBlockIDAtHeight:output_type -> vm.GetBlockIDAtHeightResponse + 42, // 85: vm.VM.StateSyncEnabled:output_type -> vm.StateSyncEnabledResponse + 43, // 86: vm.VM.GetOngoingSyncStateSummary:output_type -> vm.GetOngoingSyncStateSummaryResponse + 44, // 87: vm.VM.GetLastStateSummary:output_type -> vm.GetLastStateSummaryResponse + 46, // 88: vm.VM.ParseStateSummary:output_type -> vm.ParseStateSummaryResponse + 48, // 89: vm.VM.GetStateSummary:output_type -> vm.GetStateSummaryResponse + 20, // 90: vm.VM.BlockVerify:output_type -> vm.BlockVerifyResponse + 53, // 91: vm.VM.BlockAccept:output_type -> google.protobuf.Empty + 53, // 92: vm.VM.BlockReject:output_type -> google.protobuf.Empty + 50, // 93: vm.VM.StateSummaryAccept:output_type -> vm.StateSummaryAcceptResponse + 60, // [60:94] is the sub-list for method output_type + 26, // [26:60] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name } func init() { file_vm_vm_proto_init() } @@ -3171,7 +3823,7 @@ func file_vm_vm_proto_init() { } } 
file_vm_vm_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BuildBlockResponse); i { + switch v := v.(*BuildBlockRequest); i { case 0: return &v.state case 1: @@ -3183,7 +3835,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParseBlockRequest); i { + switch v := v.(*BuildBlockResponse); i { case 0: return &v.state case 1: @@ -3195,7 +3847,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParseBlockResponse); i { + switch v := v.(*ParseBlockRequest); i { case 0: return &v.state case 1: @@ -3207,7 +3859,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBlockRequest); i { + switch v := v.(*ParseBlockResponse); i { case 0: return &v.state case 1: @@ -3219,7 +3871,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBlockResponse); i { + switch v := v.(*GetBlockRequest); i { case 0: return &v.state case 1: @@ -3231,7 +3883,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetPreferenceRequest); i { + switch v := v.(*GetBlockResponse); i { case 0: return &v.state case 1: @@ -3243,7 +3895,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BlockVerifyRequest); i { + switch v := v.(*SetPreferenceRequest); i { case 0: return &v.state case 1: @@ -3255,7 +3907,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BlockVerifyResponse); i { + switch v := v.(*BlockVerifyRequest); i { case 0: return &v.state case 1: @@ -3267,7 
+3919,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BlockAcceptRequest); i { + switch v := v.(*BlockVerifyResponse); i { case 0: return &v.state case 1: @@ -3279,7 +3931,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BlockRejectRequest); i { + switch v := v.(*BlockAcceptRequest); i { case 0: return &v.state case 1: @@ -3291,7 +3943,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthResponse); i { + switch v := v.(*BlockRejectRequest); i { case 0: return &v.state case 1: @@ -3303,7 +3955,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VersionResponse); i { + switch v := v.(*HealthResponse); i { case 0: return &v.state case 1: @@ -3315,7 +3967,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppRequestMsg); i { + switch v := v.(*VersionResponse); i { case 0: return &v.state case 1: @@ -3327,7 +3979,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppRequestFailedMsg); i { + switch v := v.(*AppRequestMsg); i { case 0: return &v.state case 1: @@ -3339,7 +3991,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppResponseMsg); i { + switch v := v.(*AppRequestFailedMsg); i { case 0: return &v.state case 1: @@ -3351,7 +4003,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AppGossipMsg); i { + switch v := v.(*AppResponseMsg); i { case 0: return 
&v.state case 1: @@ -3363,7 +4015,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConnectedRequest); i { + switch v := v.(*AppGossipMsg); i { case 0: return &v.state case 1: @@ -3375,7 +4027,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DisconnectedRequest); i { + switch v := v.(*CrossChainAppRequestMsg); i { case 0: return &v.state case 1: @@ -3387,7 +4039,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAncestorsRequest); i { + switch v := v.(*CrossChainAppRequestFailedMsg); i { case 0: return &v.state case 1: @@ -3399,7 +4051,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAncestorsResponse); i { + switch v := v.(*CrossChainAppResponseMsg); i { case 0: return &v.state case 1: @@ -3411,7 +4063,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BatchedParseBlockRequest); i { + switch v := v.(*ConnectedRequest); i { case 0: return &v.state case 1: @@ -3423,7 +4075,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BatchedParseBlockResponse); i { + switch v := v.(*DisconnectedRequest); i { case 0: return &v.state case 1: @@ -3435,7 +4087,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VerifyHeightIndexResponse); i { + switch v := v.(*GetAncestorsRequest); i { case 0: return &v.state case 1: @@ -3447,7 +4099,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - 
switch v := v.(*GetBlockIDAtHeightRequest); i { + switch v := v.(*GetAncestorsResponse); i { case 0: return &v.state case 1: @@ -3459,7 +4111,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBlockIDAtHeightResponse); i { + switch v := v.(*BatchedParseBlockRequest); i { case 0: return &v.state case 1: @@ -3471,7 +4123,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GatherResponse); i { + switch v := v.(*BatchedParseBlockResponse); i { case 0: return &v.state case 1: @@ -3483,7 +4135,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateSyncEnabledResponse); i { + switch v := v.(*VerifyHeightIndexResponse); i { case 0: return &v.state case 1: @@ -3495,7 +4147,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetOngoingSyncStateSummaryResponse); i { + switch v := v.(*GetBlockIDAtHeightRequest); i { case 0: return &v.state case 1: @@ -3507,7 +4159,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetLastStateSummaryResponse); i { + switch v := v.(*GetBlockIDAtHeightResponse); i { case 0: return &v.state case 1: @@ -3519,7 +4171,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParseStateSummaryRequest); i { + switch v := v.(*GatherResponse); i { case 0: return &v.state case 1: @@ -3531,7 +4183,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParseStateSummaryResponse); i { + switch v := v.(*StateSyncEnabledResponse); i { case 0: return 
&v.state case 1: @@ -3543,7 +4195,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetStateSummaryRequest); i { + switch v := v.(*GetOngoingSyncStateSummaryResponse); i { case 0: return &v.state case 1: @@ -3555,7 +4207,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetStateSummaryResponse); i { + switch v := v.(*GetLastStateSummaryResponse); i { case 0: return &v.state case 1: @@ -3567,7 +4219,7 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StateSummaryAcceptRequest); i { + switch v := v.(*ParseStateSummaryRequest); i { case 0: return &v.state case 1: @@ -3579,6 +4231,54 @@ func file_vm_vm_proto_init() { } } file_vm_vm_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParseStateSummaryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vm_vm_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetStateSummaryRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vm_vm_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetStateSummaryResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vm_vm_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StateSummaryAcceptRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_vm_vm_proto_msgTypes[46].Exporter = func(v interface{}, i 
int) interface{} { switch v := v.(*StateSummaryAcceptResponse); i { case 0: return &v.state @@ -3591,18 +4291,21 @@ func file_vm_vm_proto_init() { } } } + file_vm_vm_proto_msgTypes[8].OneofWrappers = []interface{}{} + file_vm_vm_proto_msgTypes[15].OneofWrappers = []interface{}{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_vm_vm_proto_rawDesc, - NumEnums: 0, - NumMessages: 43, + NumEnums: 4, + NumMessages: 47, NumExtensions: 0, NumServices: 1, }, GoTypes: file_vm_vm_proto_goTypes, DependencyIndexes: file_vm_vm_proto_depIdxs, + EnumInfos: file_vm_vm_proto_enumTypes, MessageInfos: file_vm_vm_proto_msgTypes, }.Build() File_vm_vm_proto = out.File diff --git a/avalanchego/proto/pb/vm/vm_grpc.pb.go b/avalanchego/proto/pb/vm/vm_grpc.pb.go index 7bcf064e..25c3859b 100644 --- a/avalanchego/proto/pb/vm/vm_grpc.pb.go +++ b/avalanchego/proto/pb/vm/vm_grpc.pb.go @@ -24,24 +24,51 @@ const _ = grpc.SupportPackageIsVersion7 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type VMClient interface { // ChainVM + // + // Initialize this VM. Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error) + // SetState communicates to VM its next state it starts SetState(ctx context.Context, in *SetStateRequest, opts ...grpc.CallOption) (*SetStateResponse, error) + // Shutdown is called when the node is shutting down. Shutdown(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Creates the HTTP handlers for custom chain network calls. CreateHandlers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CreateHandlersResponse, error) + // Creates the HTTP handlers for custom VM network calls. 
+ // + // Note: RPC Chain VM Factory will start a new instance of the VM in a + // seperate process which will populate the static handlers. After this + // process is created other processes will be created to populate blockchains, + // but they will not have the static handlers be called again. CreateStaticHandlers(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*CreateStaticHandlersResponse, error) Connected(ctx context.Context, in *ConnectedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) Disconnected(ctx context.Context, in *DisconnectedRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - BuildBlock(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*BuildBlockResponse, error) + // Attempt to create a new block from data contained in the VM. + BuildBlock(ctx context.Context, in *BuildBlockRequest, opts ...grpc.CallOption) (*BuildBlockResponse, error) + // Attempt to create a block from a stream of bytes. ParseBlock(ctx context.Context, in *ParseBlockRequest, opts ...grpc.CallOption) (*ParseBlockResponse, error) + // Attempt to load a block. GetBlock(ctx context.Context, in *GetBlockRequest, opts ...grpc.CallOption) (*GetBlockResponse, error) + // Notify the VM of the currently preferred block. SetPreference(ctx context.Context, in *SetPreferenceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Attempt to verify the health of the VM. Health(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HealthResponse, error) + // Version returns the version of the VM. Version(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VersionResponse, error) + // Notify this engine of a request for data from [nodeID]. AppRequest(ctx context.Context, in *AppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Notify this engine that an AppRequest message it sent to [nodeID] with + // request ID [requestID] failed. 
AppRequestFailed(ctx context.Context, in *AppRequestFailedMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Notify this engine of a response to the AppRequest message it sent to + // [nodeID] with request ID [requestID]. AppResponse(ctx context.Context, in *AppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Notify this engine of a gossip message from [nodeID]. AppGossip(ctx context.Context, in *AppGossipMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Attempts to gather metrics from a VM. Gather(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GatherResponse, error) + CrossChainAppRequest(ctx context.Context, in *CrossChainAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + CrossChainAppRequestFailed(ctx context.Context, in *CrossChainAppRequestFailedMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) + CrossChainAppResponse(ctx context.Context, in *CrossChainAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) // BatchedChainVM GetAncestors(ctx context.Context, in *GetAncestorsRequest, opts ...grpc.CallOption) (*GetAncestorsResponse, error) BatchedParseBlock(ctx context.Context, in *BatchedParseBlockRequest, opts ...grpc.CallOption) (*BatchedParseBlockResponse, error) @@ -49,10 +76,17 @@ type VMClient interface { VerifyHeightIndex(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*VerifyHeightIndexResponse, error) GetBlockIDAtHeight(ctx context.Context, in *GetBlockIDAtHeightRequest, opts ...grpc.CallOption) (*GetBlockIDAtHeightResponse, error) // StateSyncableVM + // + // StateSyncEnabled indicates whether the state sync is enabled for this VM. StateSyncEnabled(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*StateSyncEnabledResponse, error) + // GetOngoingSyncStateSummary returns an in-progress state summary if it exists. 
GetOngoingSyncStateSummary(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetOngoingSyncStateSummaryResponse, error) + // GetLastStateSummary returns the latest state summary. GetLastStateSummary(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetLastStateSummaryResponse, error) + // ParseStateSummary parses a state summary out of [summaryBytes]. ParseStateSummary(ctx context.Context, in *ParseStateSummaryRequest, opts ...grpc.CallOption) (*ParseStateSummaryResponse, error) + // GetStateSummary retrieves the state summary that was generated at height + // [summaryHeight]. GetStateSummary(ctx context.Context, in *GetStateSummaryRequest, opts ...grpc.CallOption) (*GetStateSummaryResponse, error) // Block BlockVerify(ctx context.Context, in *BlockVerifyRequest, opts ...grpc.CallOption) (*BlockVerifyResponse, error) @@ -133,7 +167,7 @@ func (c *vMClient) Disconnected(ctx context.Context, in *DisconnectedRequest, op return out, nil } -func (c *vMClient) BuildBlock(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*BuildBlockResponse, error) { +func (c *vMClient) BuildBlock(ctx context.Context, in *BuildBlockRequest, opts ...grpc.CallOption) (*BuildBlockResponse, error) { out := new(BuildBlockResponse) err := c.cc.Invoke(ctx, "/vm.VM/BuildBlock", in, out, opts...) if err != nil { @@ -232,6 +266,33 @@ func (c *vMClient) Gather(ctx context.Context, in *emptypb.Empty, opts ...grpc.C return out, nil } +func (c *vMClient) CrossChainAppRequest(ctx context.Context, in *CrossChainAppRequestMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/vm.VM/CrossChainAppRequest", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *vMClient) CrossChainAppRequestFailed(ctx context.Context, in *CrossChainAppRequestFailedMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/vm.VM/CrossChainAppRequestFailed", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *vMClient) CrossChainAppResponse(ctx context.Context, in *CrossChainAppResponseMsg, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/vm.VM/CrossChainAppResponse", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *vMClient) GetAncestors(ctx context.Context, in *GetAncestorsRequest, opts ...grpc.CallOption) (*GetAncestorsResponse, error) { out := new(GetAncestorsResponse) err := c.cc.Invoke(ctx, "/vm.VM/GetAncestors", in, out, opts...) @@ -354,24 +415,51 @@ func (c *vMClient) StateSummaryAccept(ctx context.Context, in *StateSummaryAccep // for forward compatibility type VMServer interface { // ChainVM + // + // Initialize this VM. Initialize(context.Context, *InitializeRequest) (*InitializeResponse, error) + // SetState communicates to VM its next state it starts SetState(context.Context, *SetStateRequest) (*SetStateResponse, error) + // Shutdown is called when the node is shutting down. Shutdown(context.Context, *emptypb.Empty) (*emptypb.Empty, error) + // Creates the HTTP handlers for custom chain network calls. CreateHandlers(context.Context, *emptypb.Empty) (*CreateHandlersResponse, error) + // Creates the HTTP handlers for custom VM network calls. + // + // Note: RPC Chain VM Factory will start a new instance of the VM in a + // seperate process which will populate the static handlers. After this + // process is created other processes will be created to populate blockchains, + // but they will not have the static handlers be called again. 
CreateStaticHandlers(context.Context, *emptypb.Empty) (*CreateStaticHandlersResponse, error) Connected(context.Context, *ConnectedRequest) (*emptypb.Empty, error) Disconnected(context.Context, *DisconnectedRequest) (*emptypb.Empty, error) - BuildBlock(context.Context, *emptypb.Empty) (*BuildBlockResponse, error) + // Attempt to create a new block from data contained in the VM. + BuildBlock(context.Context, *BuildBlockRequest) (*BuildBlockResponse, error) + // Attempt to create a block from a stream of bytes. ParseBlock(context.Context, *ParseBlockRequest) (*ParseBlockResponse, error) + // Attempt to load a block. GetBlock(context.Context, *GetBlockRequest) (*GetBlockResponse, error) + // Notify the VM of the currently preferred block. SetPreference(context.Context, *SetPreferenceRequest) (*emptypb.Empty, error) + // Attempt to verify the health of the VM. Health(context.Context, *emptypb.Empty) (*HealthResponse, error) + // Version returns the version of the VM. Version(context.Context, *emptypb.Empty) (*VersionResponse, error) + // Notify this engine of a request for data from [nodeID]. AppRequest(context.Context, *AppRequestMsg) (*emptypb.Empty, error) + // Notify this engine that an AppRequest message it sent to [nodeID] with + // request ID [requestID] failed. AppRequestFailed(context.Context, *AppRequestFailedMsg) (*emptypb.Empty, error) + // Notify this engine of a response to the AppRequest message it sent to + // [nodeID] with request ID [requestID]. AppResponse(context.Context, *AppResponseMsg) (*emptypb.Empty, error) + // Notify this engine of a gossip message from [nodeID]. AppGossip(context.Context, *AppGossipMsg) (*emptypb.Empty, error) + // Attempts to gather metrics from a VM. 
Gather(context.Context, *emptypb.Empty) (*GatherResponse, error) + CrossChainAppRequest(context.Context, *CrossChainAppRequestMsg) (*emptypb.Empty, error) + CrossChainAppRequestFailed(context.Context, *CrossChainAppRequestFailedMsg) (*emptypb.Empty, error) + CrossChainAppResponse(context.Context, *CrossChainAppResponseMsg) (*emptypb.Empty, error) // BatchedChainVM GetAncestors(context.Context, *GetAncestorsRequest) (*GetAncestorsResponse, error) BatchedParseBlock(context.Context, *BatchedParseBlockRequest) (*BatchedParseBlockResponse, error) @@ -379,10 +467,17 @@ type VMServer interface { VerifyHeightIndex(context.Context, *emptypb.Empty) (*VerifyHeightIndexResponse, error) GetBlockIDAtHeight(context.Context, *GetBlockIDAtHeightRequest) (*GetBlockIDAtHeightResponse, error) // StateSyncableVM + // + // StateSyncEnabled indicates whether the state sync is enabled for this VM. StateSyncEnabled(context.Context, *emptypb.Empty) (*StateSyncEnabledResponse, error) + // GetOngoingSyncStateSummary returns an in-progress state summary if it exists. GetOngoingSyncStateSummary(context.Context, *emptypb.Empty) (*GetOngoingSyncStateSummaryResponse, error) + // GetLastStateSummary returns the latest state summary. GetLastStateSummary(context.Context, *emptypb.Empty) (*GetLastStateSummaryResponse, error) + // ParseStateSummary parses a state summary out of [summaryBytes]. ParseStateSummary(context.Context, *ParseStateSummaryRequest) (*ParseStateSummaryResponse, error) + // GetStateSummary retrieves the state summary that was generated at height + // [summaryHeight]. 
GetStateSummary(context.Context, *GetStateSummaryRequest) (*GetStateSummaryResponse, error) // Block BlockVerify(context.Context, *BlockVerifyRequest) (*BlockVerifyResponse, error) @@ -418,7 +513,7 @@ func (UnimplementedVMServer) Connected(context.Context, *ConnectedRequest) (*emp func (UnimplementedVMServer) Disconnected(context.Context, *DisconnectedRequest) (*emptypb.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method Disconnected not implemented") } -func (UnimplementedVMServer) BuildBlock(context.Context, *emptypb.Empty) (*BuildBlockResponse, error) { +func (UnimplementedVMServer) BuildBlock(context.Context, *BuildBlockRequest) (*BuildBlockResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BuildBlock not implemented") } func (UnimplementedVMServer) ParseBlock(context.Context, *ParseBlockRequest) (*ParseBlockResponse, error) { @@ -451,6 +546,15 @@ func (UnimplementedVMServer) AppGossip(context.Context, *AppGossipMsg) (*emptypb func (UnimplementedVMServer) Gather(context.Context, *emptypb.Empty) (*GatherResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Gather not implemented") } +func (UnimplementedVMServer) CrossChainAppRequest(context.Context, *CrossChainAppRequestMsg) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CrossChainAppRequest not implemented") +} +func (UnimplementedVMServer) CrossChainAppRequestFailed(context.Context, *CrossChainAppRequestFailedMsg) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CrossChainAppRequestFailed not implemented") +} +func (UnimplementedVMServer) CrossChainAppResponse(context.Context, *CrossChainAppResponseMsg) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method CrossChainAppResponse not implemented") +} func (UnimplementedVMServer) GetAncestors(context.Context, *GetAncestorsRequest) (*GetAncestorsResponse, error) { return nil, 
status.Errorf(codes.Unimplemented, "method GetAncestors not implemented") } @@ -630,7 +734,7 @@ func _VM_Disconnected_Handler(srv interface{}, ctx context.Context, dec func(int } func _VM_BuildBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(emptypb.Empty) + in := new(BuildBlockRequest) if err := dec(in); err != nil { return nil, err } @@ -642,7 +746,7 @@ func _VM_BuildBlock_Handler(srv interface{}, ctx context.Context, dec func(inter FullMethod: "/vm.VM/BuildBlock", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(VMServer).BuildBlock(ctx, req.(*emptypb.Empty)) + return srv.(VMServer).BuildBlock(ctx, req.(*BuildBlockRequest)) } return interceptor(ctx, in, info, handler) } @@ -827,6 +931,60 @@ func _VM_Gather_Handler(srv interface{}, ctx context.Context, dec func(interface return interceptor(ctx, in, info, handler) } +func _VM_CrossChainAppRequest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CrossChainAppRequestMsg) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).CrossChainAppRequest(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vm.VM/CrossChainAppRequest", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).CrossChainAppRequest(ctx, req.(*CrossChainAppRequestMsg)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_CrossChainAppRequestFailed_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CrossChainAppRequestFailedMsg) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).CrossChainAppRequestFailed(ctx, 
in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vm.VM/CrossChainAppRequestFailed", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).CrossChainAppRequestFailed(ctx, req.(*CrossChainAppRequestFailedMsg)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_CrossChainAppResponse_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CrossChainAppResponseMsg) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).CrossChainAppResponse(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vm.VM/CrossChainAppResponse", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).CrossChainAppResponse(ctx, req.(*CrossChainAppResponseMsg)) + } + return interceptor(ctx, in, info, handler) +} + func _VM_GetAncestors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetAncestorsRequest) if err := dec(in); err != nil { @@ -1140,6 +1298,18 @@ var VM_ServiceDesc = grpc.ServiceDesc{ MethodName: "Gather", Handler: _VM_Gather_Handler, }, + { + MethodName: "CrossChainAppRequest", + Handler: _VM_CrossChainAppRequest_Handler, + }, + { + MethodName: "CrossChainAppRequestFailed", + Handler: _VM_CrossChainAppRequestFailed_Handler, + }, + { + MethodName: "CrossChainAppResponse", + Handler: _VM_CrossChainAppResponse_Handler, + }, { MethodName: "GetAncestors", Handler: _VM_GetAncestors_Handler, diff --git a/avalanchego/proto/pb/warp/message.pb.go b/avalanchego/proto/pb/warp/message.pb.go new file mode 100644 index 00000000..cfc355c6 --- /dev/null +++ b/avalanchego/proto/pb/warp/message.pb.go @@ -0,0 +1,234 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.28.1 +// protoc (unknown) +// source: warp/message.proto + +package warp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type SignRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourceChainId []byte `protobuf:"bytes,1,opt,name=source_chain_id,json=sourceChainId,proto3" json:"source_chain_id,omitempty"` + DestinationChainId []byte `protobuf:"bytes,2,opt,name=destination_chain_id,json=destinationChainId,proto3" json:"destination_chain_id,omitempty"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` +} + +func (x *SignRequest) Reset() { + *x = SignRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_warp_message_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignRequest) ProtoMessage() {} + +func (x *SignRequest) ProtoReflect() protoreflect.Message { + mi := &file_warp_message_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignRequest.ProtoReflect.Descriptor instead. 
+func (*SignRequest) Descriptor() ([]byte, []int) { + return file_warp_message_proto_rawDescGZIP(), []int{0} +} + +func (x *SignRequest) GetSourceChainId() []byte { + if x != nil { + return x.SourceChainId + } + return nil +} + +func (x *SignRequest) GetDestinationChainId() []byte { + if x != nil { + return x.DestinationChainId + } + return nil +} + +func (x *SignRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +type SignResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Signature []byte `protobuf:"bytes,1,opt,name=signature,proto3" json:"signature,omitempty"` +} + +func (x *SignResponse) Reset() { + *x = SignResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_warp_message_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SignResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SignResponse) ProtoMessage() {} + +func (x *SignResponse) ProtoReflect() protoreflect.Message { + mi := &file_warp_message_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SignResponse.ProtoReflect.Descriptor instead. 
+func (*SignResponse) Descriptor() ([]byte, []int) { + return file_warp_message_proto_rawDescGZIP(), []int{1} +} + +func (x *SignResponse) GetSignature() []byte { + if x != nil { + return x.Signature + } + return nil +} + +var File_warp_message_proto protoreflect.FileDescriptor + +var file_warp_message_proto_rawDesc = []byte{ + 0x0a, 0x12, 0x77, 0x61, 0x72, 0x70, 0x2f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x04, 0x77, 0x61, 0x72, 0x70, 0x22, 0x81, 0x01, 0x0a, 0x0b, 0x53, + 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x69, 0x6e, + 0x49, 0x64, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, + 0x69, 0x6e, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x2c, + 0x0a, 0x0c, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, + 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x32, 0x37, 0x0a, 0x06, + 0x53, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x04, 0x53, 0x69, 0x67, 0x6e, 0x12, 0x11, + 0x2e, 0x77, 0x61, 0x72, 0x70, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x12, 0x2e, 0x77, 0x61, 0x72, 0x70, 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 
0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x76, 0x61, 0x2d, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x61, 0x76, 0x61, + 0x6c, 0x61, 0x6e, 0x63, 0x68, 0x65, 0x67, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, + 0x62, 0x2f, 0x77, 0x61, 0x72, 0x70, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_warp_message_proto_rawDescOnce sync.Once + file_warp_message_proto_rawDescData = file_warp_message_proto_rawDesc +) + +func file_warp_message_proto_rawDescGZIP() []byte { + file_warp_message_proto_rawDescOnce.Do(func() { + file_warp_message_proto_rawDescData = protoimpl.X.CompressGZIP(file_warp_message_proto_rawDescData) + }) + return file_warp_message_proto_rawDescData +} + +var file_warp_message_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_warp_message_proto_goTypes = []interface{}{ + (*SignRequest)(nil), // 0: warp.SignRequest + (*SignResponse)(nil), // 1: warp.SignResponse +} +var file_warp_message_proto_depIdxs = []int32{ + 0, // 0: warp.Signer.Sign:input_type -> warp.SignRequest + 1, // 1: warp.Signer.Sign:output_type -> warp.SignResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_warp_message_proto_init() } +func file_warp_message_proto_init() { + if File_warp_message_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_warp_message_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_warp_message_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SignResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + 
return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_warp_message_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_warp_message_proto_goTypes, + DependencyIndexes: file_warp_message_proto_depIdxs, + MessageInfos: file_warp_message_proto_msgTypes, + }.Build() + File_warp_message_proto = out.File + file_warp_message_proto_rawDesc = nil + file_warp_message_proto_goTypes = nil + file_warp_message_proto_depIdxs = nil +} diff --git a/avalanchego/proto/pb/warp/message_grpc.pb.go b/avalanchego/proto/pb/warp/message_grpc.pb.go new file mode 100644 index 00000000..fa092303 --- /dev/null +++ b/avalanchego/proto/pb/warp/message_grpc.pb.go @@ -0,0 +1,105 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc (unknown) +// source: warp/message.proto + +package warp + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// SignerClient is the client API for Signer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type SignerClient interface { + Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) +} + +type signerClient struct { + cc grpc.ClientConnInterface +} + +func NewSignerClient(cc grpc.ClientConnInterface) SignerClient { + return &signerClient{cc} +} + +func (c *signerClient) Sign(ctx context.Context, in *SignRequest, opts ...grpc.CallOption) (*SignResponse, error) { + out := new(SignResponse) + err := c.cc.Invoke(ctx, "/warp.Signer/Sign", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SignerServer is the server API for Signer service. +// All implementations must embed UnimplementedSignerServer +// for forward compatibility +type SignerServer interface { + Sign(context.Context, *SignRequest) (*SignResponse, error) + mustEmbedUnimplementedSignerServer() +} + +// UnimplementedSignerServer must be embedded to have forward compatible implementations. +type UnimplementedSignerServer struct { +} + +func (UnimplementedSignerServer) Sign(context.Context, *SignRequest) (*SignResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Sign not implemented") +} +func (UnimplementedSignerServer) mustEmbedUnimplementedSignerServer() {} + +// UnsafeSignerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to SignerServer will +// result in compilation errors. 
+type UnsafeSignerServer interface { + mustEmbedUnimplementedSignerServer() +} + +func RegisterSignerServer(s grpc.ServiceRegistrar, srv SignerServer) { + s.RegisterService(&Signer_ServiceDesc, srv) +} + +func _Signer_Sign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SignRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(SignerServer).Sign(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/warp.Signer/Sign", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(SignerServer).Sign(ctx, req.(*SignRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// Signer_ServiceDesc is the grpc.ServiceDesc for Signer service. +// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Signer_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "warp.Signer", + HandlerType: (*SignerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Sign", + Handler: _Signer_Sign_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "warp/message.proto", +} diff --git a/avalanchego/proto/plugin/plugin.proto b/avalanchego/proto/plugin/plugin.proto deleted file mode 100644 index 8849a1d4..00000000 --- a/avalanchego/proto/plugin/plugin.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package plugin; - -import "google/protobuf/empty.proto"; - -option go_package = "github.com/ava-labs/avalanchego/proto/pb/plugin"; - -service Node { - rpc Start(google.protobuf.Empty) returns (google.protobuf.Empty); - rpc ExitCode(google.protobuf.Empty) returns (ExitCodeResponse); - rpc Stop(google.protobuf.Empty) returns (google.protobuf.Empty); -} - -message ExitCodeResponse { - int32 exit_code = 1; -} diff --git a/avalanchego/proto/rpcdb/rpcdb.proto b/avalanchego/proto/rpcdb/rpcdb.proto 
index c1800bd7..420d2b7a 100644 --- a/avalanchego/proto/rpcdb/rpcdb.proto +++ b/avalanchego/proto/rpcdb/rpcdb.proto @@ -21,13 +21,20 @@ service Database { rpc IteratorRelease(IteratorReleaseRequest) returns (IteratorReleaseResponse); } +enum Error { + // ERROR_UNSPECIFIED is used to indicate that no error occurred. + ERROR_UNSPECIFIED = 0; + ERROR_CLOSED = 1; + ERROR_NOT_FOUND = 2; +} + message HasRequest { bytes key = 1; } message HasResponse { bool has = 1; - uint32 err = 2; + Error err = 2; } message GetRequest { @@ -36,7 +43,7 @@ message GetRequest { message GetResponse { bytes value = 1; - uint32 err = 2; + Error err = 2; } message PutRequest { @@ -45,7 +52,7 @@ message PutRequest { } message PutResponse { - uint32 err = 1; + Error err = 1; } message DeleteRequest { @@ -53,7 +60,7 @@ message DeleteRequest { } message DeleteResponse { - uint32 err = 1; + Error err = 1; } message CompactRequest { @@ -62,24 +69,22 @@ message CompactRequest { } message CompactResponse { - uint32 err = 1; + Error err = 1; } message CloseRequest {} message CloseResponse { - uint32 err = 1; + Error err = 1; } message WriteBatchRequest { repeated PutRequest puts = 1; repeated DeleteRequest deletes = 2; - int64 id = 3; - bool continues = 4; } message WriteBatchResponse { - uint32 err = 1; + Error err = 1; } message NewIteratorRequest {} @@ -106,7 +111,7 @@ message IteratorErrorRequest { } message IteratorErrorResponse { - uint32 err = 1; + Error err = 1; } message IteratorReleaseRequest { @@ -114,7 +119,7 @@ message IteratorReleaseRequest { } message IteratorReleaseResponse { - uint32 err = 1; + Error err = 1; } message HealthCheckResponse { diff --git a/avalanchego/proto/sharedmemory/sharedmemory.proto b/avalanchego/proto/sharedmemory/sharedmemory.proto index 62e8277c..5b31876b 100644 --- a/avalanchego/proto/sharedmemory/sharedmemory.proto +++ b/avalanchego/proto/sharedmemory/sharedmemory.proto @@ -22,7 +22,6 @@ message BatchDelete { message Batch { repeated BatchPut puts = 1; 
repeated BatchDelete deletes = 2; - int64 id = 3; } message AtomicRequest { @@ -40,13 +39,10 @@ message Element { message GetRequest { bytes peer_chain_id = 1; repeated bytes keys = 2; - int64 id = 3; - bool continues = 4; } message GetResponse { repeated bytes values = 1; - bool continues = 2; } message IndexedRequest { @@ -55,22 +51,17 @@ message IndexedRequest { bytes start_trait = 3; bytes start_key = 4; int32 limit = 5; - int64 id = 6; - bool continues = 7; } message IndexedResponse { repeated bytes values = 1; bytes last_trait = 2; bytes last_key = 3; - bool continues = 4; } message ApplyRequest { repeated AtomicRequest requests = 1; repeated Batch batches = 2; - int64 id = 3; - bool continues = 4; } message ApplyResponse {} diff --git a/avalanchego/proto/subnetlookup/subnetlookup.proto b/avalanchego/proto/subnetlookup/subnetlookup.proto deleted file mode 100644 index c2e7f814..00000000 --- a/avalanchego/proto/subnetlookup/subnetlookup.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package subnetlookup; - -option go_package = "github.com/ava-labs/avalanchego/proto/pb/subnetlookup"; - -service SubnetLookup { - rpc SubnetID(SubnetIDRequest) returns (SubnetIDResponse); -} - -message SubnetIDRequest { - bytes chain_id = 1; -} - -message SubnetIDResponse { - bytes id = 1; -} diff --git a/avalanchego/proto/validatorstate/validator_state.proto b/avalanchego/proto/validatorstate/validator_state.proto new file mode 100644 index 00000000..6d2900f0 --- /dev/null +++ b/avalanchego/proto/validatorstate/validator_state.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; + +package validatorstate; + +import "google/protobuf/empty.proto"; + +option go_package = "github.com/ava-labs/avalanchego/proto/pb/validatorstate"; + +service ValidatorState { + // GetMinimumHeight returns the minimum height of the blocks in the optimal + // proposal window. 
+ rpc GetMinimumHeight(google.protobuf.Empty) returns (GetMinimumHeightResponse); + // GetCurrentHeight returns the current height of the P-chain. + rpc GetCurrentHeight(google.protobuf.Empty) returns (GetCurrentHeightResponse); + // GetSubnetID returns the subnetID of the provided chain. + rpc GetSubnetID(GetSubnetIDRequest) returns (GetSubnetIDResponse); + // GetValidatorSet returns the weights of the nodeIDs for the provided + // subnet at the requested P-chain height. + rpc GetValidatorSet(GetValidatorSetRequest) returns (GetValidatorSetResponse); +} + +message GetMinimumHeightResponse { + uint64 height = 1; +} + +message GetCurrentHeightResponse { + uint64 height = 1; +} + +message GetSubnetIDRequest { + bytes chain_id = 1; +} + +message GetSubnetIDResponse { + bytes subnet_id = 1; +} + +message GetValidatorSetRequest { + uint64 height = 1; + bytes subnet_id = 2; +} + +message Validator { + bytes node_id = 1; + uint64 weight = 2; + bytes public_key = 3; +} + +message GetValidatorSetResponse { + repeated Validator validators = 1; +} diff --git a/avalanchego/proto/vm/runtime/runtime.proto b/avalanchego/proto/vm/runtime/runtime.proto new file mode 100644 index 00000000..400d72e8 --- /dev/null +++ b/avalanchego/proto/vm/runtime/runtime.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package vm.runtime; + +import "google/protobuf/empty.proto"; + +option go_package = "github.com/ava-labs/avalanchego/proto/pb/vm/manager"; + +// Manages the lifecycle of a subnet VM process. +service Runtime { + // Initialize a VM Runtime. + rpc Initialize(InitializeRequest) returns (google.protobuf.Empty); +} + +message InitializeRequest { + // ProtocolVersion is used to identify incompatibilities with AvalancheGo and a VM. + uint32 protocol_version = 1; + // Address of the gRPC server endpoint serving the handshake logic. 
+ // Example: 127.0.0.1:50001 + string addr = 2; +} diff --git a/avalanchego/proto/vm/vm.proto b/avalanchego/proto/vm/vm.proto index 28a18afe..179db3f4 100644 --- a/avalanchego/proto/vm/vm.proto +++ b/avalanchego/proto/vm/vm.proto @@ -8,26 +8,55 @@ import "io/prometheus/client/metrics.proto"; option go_package = "github.com/ava-labs/avalanchego/proto/pb/vm"; +// ref. https://pkg.go.dev/github.com/ava-labs/avalanchego/snow/engine/snowman/block +// ref. https://pkg.go.dev/github.com/ava-labs/avalanchego/snow/consensus/snowman#Block service VM { // ChainVM + // + // Initialize this VM. rpc Initialize(InitializeRequest) returns (InitializeResponse); + // SetState communicates to VM its next state it starts rpc SetState(SetStateRequest) returns (SetStateResponse); + // Shutdown is called when the node is shutting down. rpc Shutdown(google.protobuf.Empty) returns (google.protobuf.Empty); + // Creates the HTTP handlers for custom chain network calls. rpc CreateHandlers(google.protobuf.Empty) returns (CreateHandlersResponse); + // Creates the HTTP handlers for custom VM network calls. + // + // Note: RPC Chain VM Factory will start a new instance of the VM in a + // separate process which will populate the static handlers. After this + // process is created other processes will be created to populate blockchains, + // but they will not have the static handlers be called again. rpc CreateStaticHandlers(google.protobuf.Empty) returns (CreateStaticHandlersResponse); rpc Connected(ConnectedRequest) returns (google.protobuf.Empty); rpc Disconnected(DisconnectedRequest) returns (google.protobuf.Empty); - rpc BuildBlock(google.protobuf.Empty) returns (BuildBlockResponse); + // Attempt to create a new block from data contained in the VM. + rpc BuildBlock(BuildBlockRequest) returns (BuildBlockResponse); + // Attempt to create a block from a stream of bytes. rpc ParseBlock(ParseBlockRequest) returns (ParseBlockResponse); + // Attempt to load a block. 
rpc GetBlock(GetBlockRequest) returns (GetBlockResponse); + // Notify the VM of the currently preferred block. rpc SetPreference(SetPreferenceRequest) returns (google.protobuf.Empty); + // Attempt to verify the health of the VM. rpc Health(google.protobuf.Empty) returns (HealthResponse); + // Version returns the version of the VM. rpc Version(google.protobuf.Empty) returns (VersionResponse); + // Notify this engine of a request for data from [nodeID]. rpc AppRequest(AppRequestMsg) returns (google.protobuf.Empty); + // Notify this engine that an AppRequest message it sent to [nodeID] with + // request ID [requestID] failed. rpc AppRequestFailed(AppRequestFailedMsg) returns (google.protobuf.Empty); + // Notify this engine of a response to the AppRequest message it sent to + // [nodeID] with request ID [requestID]. rpc AppResponse(AppResponseMsg) returns (google.protobuf.Empty); + // Notify this engine of a gossip message from [nodeID]. rpc AppGossip(AppGossipMsg) returns (google.protobuf.Empty); + // Attempts to gather metrics from a VM. rpc Gather(google.protobuf.Empty) returns (GatherResponse); + rpc CrossChainAppRequest(CrossChainAppRequestMsg) returns (google.protobuf.Empty); + rpc CrossChainAppRequestFailed(CrossChainAppRequestFailedMsg) returns (google.protobuf.Empty); + rpc CrossChainAppResponse(CrossChainAppResponseMsg) returns (google.protobuf.Empty); // BatchedChainVM rpc GetAncestors(GetAncestorsRequest) returns (GetAncestorsResponse); @@ -38,10 +67,17 @@ service VM { rpc GetBlockIDAtHeight(GetBlockIDAtHeightRequest) returns (GetBlockIDAtHeightResponse); // StateSyncableVM + // + // StateSyncEnabled indicates whether the state sync is enabled for this VM. rpc StateSyncEnabled(google.protobuf.Empty) returns (StateSyncEnabledResponse); + // GetOngoingSyncStateSummary returns an in-progress state summary if it exists. 
rpc GetOngoingSyncStateSummary(google.protobuf.Empty) returns (GetOngoingSyncStateSummaryResponse); + // GetLastStateSummary returns the latest state summary. rpc GetLastStateSummary(google.protobuf.Empty) returns (GetLastStateSummaryResponse); + // ParseStateSummary parses a state summary out of [summaryBytes]. rpc ParseStateSummary(ParseStateSummaryRequest) returns (ParseStateSummaryResponse); + // GetStateSummary retrieves the state summary that was generated at height + // [summaryHeight]. rpc GetStateSummary(GetStateSummaryRequest) returns (GetStateSummaryResponse); // Block @@ -53,21 +89,50 @@ service VM { rpc StateSummaryAccept(StateSummaryAcceptRequest) returns (StateSummaryAcceptResponse); } +enum State { + STATE_UNSPECIFIED = 0; + STATE_STATE_SYNCING = 1; + STATE_BOOTSTRAPPING = 2; + STATE_NORMAL_OP = 3; +} + +enum Status { + STATUS_UNSPECIFIED = 0; + STATUS_PROCESSING = 1; + STATUS_REJECTED = 2; + STATUS_ACCEPTED = 3; +} + +enum Error { + // ERROR_UNSPECIFIED is used to indicate that no error occurred. 
+ ERROR_UNSPECIFIED = 0; + ERROR_CLOSED = 1; + ERROR_NOT_FOUND = 2; + ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED = 3; + ERROR_HEIGHT_INDEX_INCOMPLETE = 4; + ERROR_STATE_SYNC_NOT_IMPLEMENTED = 5; +} + message InitializeRequest { uint32 network_id = 1; bytes subnet_id = 2; bytes chain_id = 3; bytes node_id = 4; - bytes x_chain_id = 5; - bytes avax_asset_id = 6; - bytes genesis_bytes = 7; - bytes upgrade_bytes = 8; - bytes config_bytes = 9; - repeated VersionedDBServer db_servers = 10; + // public_key is the BLS public key that would correspond with any signatures + // produced by the warp messaging signer + bytes public_key = 5; + bytes x_chain_id = 6; + bytes c_chain_id = 7; + bytes avax_asset_id = 8; + string chain_data_dir = 9; + bytes genesis_bytes = 10; + bytes upgrade_bytes = 11; + bytes config_bytes = 12; + repeated VersionedDBServer db_servers = 13; // server_addr is the address of the gRPC server which serves // the messenger, keystore, shared memory, blockchain alias, // subnet alias, and appSender services - string server_addr = 11; + string server_addr = 14; } message InitializeResponse { @@ -86,7 +151,7 @@ message VersionedDBServer { } message SetStateRequest { - uint32 state = 1; + State state = 1; } message SetStateResponse { @@ -113,13 +178,18 @@ message Handler { string server_addr = 3; } +message BuildBlockRequest { + optional uint64 p_chain_height = 1; +} + +// Note: The status of a freshly built block is assumed to be Processing. 
message BuildBlockResponse { bytes id = 1; bytes parent_id = 2; bytes bytes = 3; uint64 height = 4; google.protobuf.Timestamp timestamp = 5; - // status is always processing + bool verify_with_context = 6; } message ParseBlockRequest { @@ -129,9 +199,10 @@ message ParseBlockRequest { message ParseBlockResponse { bytes id = 1; bytes parent_id = 2; - uint32 status = 3; + Status status = 3; uint64 height = 4; google.protobuf.Timestamp timestamp = 5; + bool verify_with_context = 6; } message GetBlockRequest { @@ -141,11 +212,12 @@ message GetBlockRequest { message GetBlockResponse { bytes parent_id = 1; bytes bytes = 2; - uint32 status = 3; + Status status = 3; uint64 height = 4; google.protobuf.Timestamp timestamp = 5; // used to propagate database.ErrNotFound through RPC - uint32 err = 6; + Error err = 6; + bool verify_with_context = 7; } message SetPreferenceRequest { @@ -154,6 +226,10 @@ message SetPreferenceRequest { message BlockVerifyRequest { bytes bytes = 1; + + // If set, the VM server casts the block to a [block.WithVerifyContext] and + // calls [VerifyWithContext] instead of [Verify]. 
+ optional uint64 p_chain_height = 2; } message BlockVerifyResponse { @@ -210,6 +286,33 @@ message AppGossipMsg { bytes msg = 2; } +message CrossChainAppRequestMsg { + // The chain that sent us this request + bytes chain_id = 1; + // The ID of this request + uint32 request_id = 2; + // deadline for this request + google.protobuf.Timestamp deadline = 3; + // The request body + bytes request = 4; +} + +message CrossChainAppRequestFailedMsg { + // The chain that we failed to get a response from + bytes chain_id = 1; + // The ID of the request we sent and didn't get a response to + uint32 request_id = 2; +} + +message CrossChainAppResponseMsg { + // The chain that we got a response from + bytes chain_id = 1; + // Request ID of request that this is in response to + uint32 request_id = 2; + // The response body + bytes response = 3; +} + message ConnectedRequest { bytes node_id = 1; string version = 2; @@ -239,7 +342,7 @@ message BatchedParseBlockResponse { } message VerifyHeightIndexResponse { - uint32 err = 1; + Error err = 1; } message GetBlockIDAtHeightRequest { @@ -248,7 +351,7 @@ message GetBlockIDAtHeightRequest { message GetBlockIDAtHeightResponse { bytes blk_id = 1; - uint32 err = 2; + Error err = 2; } message GatherResponse { @@ -257,21 +360,21 @@ message GatherResponse { message StateSyncEnabledResponse { bool enabled = 1; - uint32 err = 2; + Error err = 2; } message GetOngoingSyncStateSummaryResponse { bytes id = 1; uint64 height = 2; bytes bytes = 3; - uint32 err = 4; + Error err = 4; } message GetLastStateSummaryResponse { bytes id = 1; uint64 height = 2; bytes bytes = 3; - uint32 err = 4; + Error err = 4; } message ParseStateSummaryRequest { @@ -281,7 +384,7 @@ message ParseStateSummaryRequest { message ParseStateSummaryResponse { bytes id = 1; uint64 height = 2; - uint32 err = 3; + Error err = 3; } message GetStateSummaryRequest { @@ -291,7 +394,7 @@ message GetStateSummaryRequest { message GetStateSummaryResponse { bytes id = 1; bytes bytes = 2; - uint32 
err = 3; + Error err = 3; } message StateSummaryAcceptRequest { @@ -299,6 +402,12 @@ message StateSummaryAcceptRequest { } message StateSummaryAcceptResponse { - bool accepted = 1; - uint32 err = 2; + enum Mode { + MODE_UNSPECIFIED = 0; + MODE_SKIPPED = 1; + MODE_STATIC = 2; + MODE_DYNAMIC = 3; + } + Mode mode = 1; + Error err = 2; } diff --git a/avalanchego/proto/warp/message.proto b/avalanchego/proto/warp/message.proto new file mode 100644 index 00000000..e2f52db6 --- /dev/null +++ b/avalanchego/proto/warp/message.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package warp; + +option go_package = "github.com/ava-labs/avalanchego/proto/pb/warp"; + +service Signer { + rpc Sign(SignRequest) returns (SignResponse); +} + +message SignRequest { + bytes source_chain_id = 1; + bytes destination_chain_id = 2; + bytes payload = 3; +} + +message SignResponse { + bytes signature = 1; +} diff --git a/avalanchego/pubsub/connection.go b/avalanchego/pubsub/connection.go index a3e71859..2dae38ac 100644 --- a/avalanchego/pubsub/connection.go +++ b/avalanchego/pubsub/connection.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub @@ -22,7 +22,7 @@ var ( ErrAddressLimit = errors.New("address limit exceeded") ErrInvalidFilterParam = errors.New("invalid bloom filter params") ErrInvalidCommand = errors.New("invalid command") - _ Filter = &connection{} + _ Filter = (*connection)(nil) ) type Filter interface { diff --git a/avalanchego/pubsub/connections.go b/avalanchego/pubsub/connections.go index 52a0d433..417e1aa8 100644 --- a/avalanchego/pubsub/connections.go +++ b/avalanchego/pubsub/connections.go @@ -1,20 +1,22 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package pubsub -import "sync" +import ( + "sync" + + "github.com/ava-labs/avalanchego/utils/set" +) type connections struct { lock sync.RWMutex - conns map[*connection]struct{} + conns set.Set[*connection] connsList []Filter } func newConnections() *connections { - return &connections{ - conns: make(map[*connection]struct{}), - } + return &connections{} } func (c *connections) Conns() []Filter { @@ -28,7 +30,7 @@ func (c *connections) Remove(conn *connection) { c.lock.Lock() defer c.lock.Unlock() - delete(c.conns, conn) + c.conns.Remove(conn) c.createConnsList() } @@ -36,7 +38,7 @@ func (c *connections) Add(conn *connection) { c.lock.Lock() defer c.lock.Unlock() - c.conns[conn] = struct{}{} + c.conns.Add(conn) c.createConnsList() } diff --git a/avalanchego/pubsub/filter_param.go b/avalanchego/pubsub/filter_param.go index 246015b5..e7e2453c 100644 --- a/avalanchego/pubsub/filter_param.go +++ b/avalanchego/pubsub/filter_param.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package pubsub @@ -7,17 +7,18 @@ import ( "sync" "github.com/ava-labs/avalanchego/utils/bloom" + "github.com/ava-labs/avalanchego/utils/set" ) type FilterParam struct { lock sync.RWMutex - set map[string]struct{} + set set.Set[string] filter bloom.Filter } func NewFilterParam() *FilterParam { return &FilterParam{ - set: make(map[string]struct{}), + set: set.Set[string]{}, } } @@ -25,7 +26,7 @@ func (f *FilterParam) NewSet() { f.lock.Lock() defer f.lock.Unlock() - f.set = make(map[string]struct{}) + f.set = set.Set[string]{} f.filter = nil } @@ -52,8 +53,7 @@ func (f *FilterParam) Check(addr []byte) bool { if f.filter != nil && f.filter.Check(addr) { return true } - _, ok := f.set[string(addr)] - return ok + return f.set.Contains(string(addr)) } func (f *FilterParam) Add(bl ...[]byte) error { @@ -74,7 +74,7 @@ func (f *FilterParam) Add(bl ...[]byte) error { return ErrAddressLimit } for _, b := range bl { - f.set[string(b)] = struct{}{} + f.set.Add(string(b)) } return nil } diff --git a/avalanchego/pubsub/filter_test.go b/avalanchego/pubsub/filter_test.go index 8cd9161e..051ad94c 100644 --- a/avalanchego/pubsub/filter_test.go +++ b/avalanchego/pubsub/filter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub diff --git a/avalanchego/pubsub/filterer.go b/avalanchego/pubsub/filterer.go index c3263728..389448ea 100644 --- a/avalanchego/pubsub/filterer.go +++ b/avalanchego/pubsub/filterer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package pubsub diff --git a/avalanchego/pubsub/messages.go b/avalanchego/pubsub/messages.go index e9625b92..525ae035 100644 --- a/avalanchego/pubsub/messages.go +++ b/avalanchego/pubsub/messages.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package pubsub @@ -10,6 +10,8 @@ import ( ) // NewBloom command for a new bloom filter +// +// Deprecated: The pubsub server is deprecated. type NewBloom struct { // MaxElements size of bloom filter MaxElements json.Uint64 `json:"maxElements"` @@ -18,9 +20,13 @@ type NewBloom struct { } // NewSet command for a new map set +// +// Deprecated: The pubsub server is deprecated. type NewSet struct{} // AddAddresses command to add addresses +// +// Deprecated: The pubsub server is deprecated. type AddAddresses struct { api.JSONAddresses @@ -29,6 +35,8 @@ type AddAddresses struct { } // Command execution command +// +// Deprecated: The pubsub server is deprecated. type Command struct { NewBloom *NewBloom `json:"newBloom,omitempty"` NewSet *NewSet `json:"newSet,omitempty"` diff --git a/avalanchego/pubsub/server.go b/avalanchego/pubsub/server.go index b2d6738d..b7e4eaf7 100644 --- a/avalanchego/pubsub/server.go +++ b/avalanchego/pubsub/server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package pubsub @@ -13,6 +13,7 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" ) @@ -52,7 +53,9 @@ type errorMsg struct { var upgrader = websocket.Upgrader{ ReadBufferSize: readBufferSize, WriteBufferSize: writeBufferSize, - CheckOrigin: func(*http.Request) bool { return true }, + CheckOrigin: func(*http.Request) bool { + return true + }, } // Server maintains the set of active clients and sends messages to the clients. @@ -60,15 +63,15 @@ type Server struct { log logging.Logger lock sync.RWMutex // conns a list of all our connections - conns map[*connection]struct{} + conns set.Set[*connection] // subscribedConnections the connections that have activated subscriptions subscribedConnections *connections } -func New(networkID uint32, log logging.Logger) *Server { +// Deprecated: The pubsub server is deprecated. +func New(log logging.Logger) *Server { return &Server{ log: log, - conns: make(map[*connection]struct{}), subscribedConnections: newConnections(), } } @@ -109,7 +112,7 @@ func (s *Server) addConnection(conn *connection) { s.lock.Lock() defer s.lock.Unlock() - s.conns[conn] = struct{}{} + s.conns.Add(conn) go conn.writePump() go conn.readPump() @@ -121,5 +124,5 @@ func (s *Server) removeConnection(conn *connection) { s.lock.Lock() defer s.lock.Unlock() - delete(s.conns, conn) + s.conns.Remove(conn) } diff --git a/avalanchego/scripts/build.sh b/avalanchego/scripts/build.sh index 5baef0f9..ac035737 100755 --- a/avalanchego/scripts/build.sh +++ b/avalanchego/scripts/build.sh @@ -4,6 +4,26 @@ set -o errexit set -o nounset set -o pipefail +print_usage() { + printf "Usage: build [OPTIONS] + + Build avalanchego + + Options: + + -r Build with race detector +" +} + +race='' +while getopts 'r' flag; do + case "${flag}" in + r) race='-r' ;; + *) print_usage + exit 1 ;; + esac +done + # Avalanchego root folder AVALANCHE_PATH=$( cd "$( dirname 
"${BASH_SOURCE[0]}" )"; cd .. && pwd ) CORETH_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd ../../coreth && pwd ) @@ -16,19 +36,21 @@ source "$AVALANCHE_PATH"/scripts/constants.sh echo "Downloading dependencies..." go mod download -modcacherw +build_args="$race" + echo "Syncing with sources at GOPATH: $GOPATH" rsync -ar --delete $AVALANCHE_PATH/* $GOPATH/pkg/mod/github.com/ava-labs/avalanchego@$avalanche_version rsync -ar --delete $CORETH_PATH/* $GOPATH/pkg/mod/github.com/ava-labs/coreth@$coreth_version # Build avalanchego -"$AVALANCHE_PATH"/scripts/build_avalanche.sh +"$AVALANCHE_PATH"/scripts/build_avalanche.sh $build_args # Build coreth "$AVALANCHE_PATH"/scripts/build_coreth.sh -# Exit build successfully if the binaries are created -if [[ -f "$avalanchego_path" && -f "$evm_path" ]]; then +# Exit build successfully if the AvalancheGo binary is created successfully +if [[ -f "$avalanchego_path" ]]; then echo "Build Successful" exit 0 else diff --git a/avalanchego/scripts/build_avalanche.sh b/avalanchego/scripts/build_avalanche.sh index 00b6bd8c..2756efde 100755 --- a/avalanchego/scripts/build_avalanche.sh +++ b/avalanchego/scripts/build_avalanche.sh @@ -4,13 +4,33 @@ set -o errexit set -o nounset set -o pipefail +print_usage() { + printf "Usage: build_avalanche [OPTIONS] + + Build avalanchego + + Options: + + -r Build with race detector +" +} + +race='' +while getopts 'r' flag; do + case "${flag}" in + r) race='-race' ;; + *) print_usage + exit 1 ;; + esac +done + # Changes to the minimum golang version must also be replicated in # scripts/build_avalanche.sh (here) # scripts/local.Dockerfile # Dockerfile # README.md # go.mod -go_version_minimum="1.18.1" +go_version_minimum="1.21.8" go_version() { go version | sed -nE -e 's/[^0-9.]+([0-9.]+).+/\1/p' @@ -35,10 +55,9 @@ fi # Avalanchego root folder AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) -# Load the versions -source "$AVALANCHE_PATH"/scripts/versions.sh # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh +build_args="$race" echo "Building AvalancheGo..." -go build -modcacherw -ldflags "-X github.com/ava-labs/avalanchego/version.GitCommit=$git_commit $static_ld_flags" -o "$avalanchego_path" "$AVALANCHE_PATH/main/"*.go +go build $build_args -modcacherw -ldflags "-X github.com/ava-labs/avalanchego/version.GitCommit=$git_commit $static_ld_flags" -o "$avalanchego_path" "$AVALANCHE_PATH/main/"*.go diff --git a/avalanchego/scripts/build_coreth.sh b/avalanchego/scripts/build_coreth.sh index c18c7035..3f0a2394 100755 --- a/avalanchego/scripts/build_coreth.sh +++ b/avalanchego/scripts/build_coreth.sh @@ -4,33 +4,64 @@ set -o errexit set -o nounset set -o pipefail +race='' +coreth_path='' +evm_path='' + # Directory above this script AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) -# Load the versions -source "$AVALANCHE_PATH"/scripts/versions.sh - # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh -# check if there's args defining different coreth source and build paths -if [[ $# -eq 2 ]]; then - coreth_path=$1 - evm_path=$2 -elif [[ $# -eq 0 ]]; then - if [[ ! -d "$coreth_path" ]]; then - go get -modcacherw "github.com/ava-labs/coreth@$coreth_version" - fi -else - echo "Invalid arguments to build coreth. Requires either no arguments (default) or two arguments to specify coreth directory and location to add binary." 
- exit 1 +print_usage() { + printf "Usage: build_coreth [OPTIONS] + + Build coreth + + Options: + -r Build with race detector (optional) + -c Coreth path (optional; must be provided with -e) + -e EVM path (optional; must be provided with -c) +" +} + +while getopts 'rc:e:' flag; do + case "${flag}" in + r) race='-race' ;; + c) coreth_path=${OPTARG} ;; + e) evm_path=${OPTARG} ;; + *) print_usage + exit 1 ;; + esac +done + +# Sanity-check the user's overrides for coreth path/version if they supplied a flag +if [[ -z $coreth_path ]] || [[ -z $evm_path ]]; then + echo "Invalid arguments to build coreth. Coreth path (-c) must be provided with EVM path (-e)." + print_usage + exit 1 +fi + +if [[ ! -d "$coreth_path" ]]; then + go get "github.com/ava-labs/coreth@$coreth_version" fi # Build Coreth +build_args="$race" echo "Building Coreth @ ${coreth_version} ..." cd "$coreth_path" -go build -modcacherw -ldflags "-X github.com/ava-labs/coreth/plugin/evm.Version=$coreth_version $static_ld_flags" -o "$evm_path" "plugin/"*.go +go build $build_args -modcacherw -ldflags "-X github.com/ava-labs/coreth/plugin/evm.Version=$coreth_version $static_ld_flags" -o "$evm_path" "plugin/"*.go cd "$AVALANCHE_PATH" # Building coreth + using go get can mess with the go.mod file. -go mod tidy -compat=1.18 +go mod tidy -compat=1.19 + +# Exit build successfully if the Coreth EVM binary is created successfully +if [[ -f "$evm_path" ]]; then + echo "Coreth Build Successful" + exit 0 +else + echo "Coreth Build Failure" >&2 + exit 1 +fi diff --git a/avalanchego/scripts/build_fuzz.sh b/avalanchego/scripts/build_fuzz.sh new file mode 100644 index 00000000..c51f438f --- /dev/null +++ b/avalanchego/scripts/build_fuzz.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +# Mostly taken from https://github.com/golang/go/issues/46312#issuecomment-1153345129 + +fuzzTime=${1:-1} +files=$(grep -r --include='**_test.go' --files-with-matches 'func Fuzz' .) 
+failed=false +for file in ${files} +do + funcs=$(grep -oP 'func \K(Fuzz\w*)' $file) + for func in ${funcs} + do + echo "Fuzzing $func in $file" + parentDir=$(dirname $file) + go test $parentDir -run=$func -fuzz=$func -fuzztime=${fuzzTime}s + # If any of the fuzz tests fail, return exit code 1 + if [ $? -ne 0 ]; then + failed=true + fi + done +done + +if $failed; then + exit 1 +fi diff --git a/avalanchego/scripts/build_image.sh b/avalanchego/scripts/build_image.sh index ce5ac565..2a15c8f6 100755 --- a/avalanchego/scripts/build_image.sh +++ b/avalanchego/scripts/build_image.sh @@ -6,8 +6,6 @@ set -o pipefail # Avalanchego root folder AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) -# Load the versions -source "$AVALANCHE_PATH"/scripts/versions.sh # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh diff --git a/avalanchego/scripts/build_local_dep_image.sh b/avalanchego/scripts/build_local_dep_image.sh index 25775d60..2db930fc 100755 --- a/avalanchego/scripts/build_local_dep_image.sh +++ b/avalanchego/scripts/build_local_dep_image.sh @@ -12,7 +12,6 @@ DOCKERHUB_REPO="avaplatform/avalanchego" DOCKER="${DOCKER:-docker}" SCRIPT_DIRPATH=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) -ROOT_DIRPATH="$(dirname "${SCRIPT_DIRPATH}")" AVA_LABS_RELATIVE_PATH="src/github.com/ava-labs" EXISTING_GOPATH="$GOPATH" diff --git a/avalanchego/scripts/build_local_image.sh b/avalanchego/scripts/build_local_image.sh index 0322c6ff..6a12774a 100755 --- a/avalanchego/scripts/build_local_image.sh +++ b/avalanchego/scripts/build_local_image.sh @@ -7,9 +7,6 @@ set -o pipefail # Directory above this script AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) -# Load the versions -source "$AVALANCHE_PATH"/scripts/versions.sh - # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh diff --git a/avalanchego/scripts/build_test.sh b/avalanchego/scripts/build_test.sh index 2515c4a5..dbf7a51d 100755 --- a/avalanchego/scripts/build_test.sh +++ b/avalanchego/scripts/build_test.sh @@ -6,8 +6,6 @@ set -o pipefail # Directory above this script AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) -# Load the versions -source "$AVALANCHE_PATH"/scripts/versions.sh # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh diff --git a/avalanchego/scripts/constants.sh b/avalanchego/scripts/constants.sh index fdbe15c7..8b4474e5 100644 --- a/avalanchego/scripts/constants.sh +++ b/avalanchego/scripts/constants.sh @@ -2,22 +2,20 @@ # # Use lower_case variables in the scripts and UPPER_CASE variables for override # Use the constants.sh for env overrides -# Use the versions.sh to specify versions -# AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) # Directory above this script +# Where AvalancheGo binary goes +avalanchego_path="$AVALANCHE_PATH/build/avalanchego" +plugin_dir=${PLUGIN_DIR:-$HOME/.avalanchego/plugins} +evm_path=${EVM_PATH:-$plugin_dir/evm} +coreth_version=${CORETH_VERSION:-'v0.12.0-rc.2'} + # Set the PATHS GOPATH="$(go env GOPATH)" -# coreth_path="$GOPATH/pkg/mod/github.com/ava-labs/coreth@$coreth_version" +# coreth_path=${CORETH_PATH:-"$GOPATH/pkg/mod/github.com/ava-labs/coreth@$coreth_version"} coreth_path="$AVALANCHE_PATH/../coreth" -# Where AvalancheGo binary goes -build_dir="$AVALANCHE_PATH/build" -avalanchego_path="$build_dir/avalanchego" -plugin_dir="$build_dir/plugins" -evm_path="$plugin_dir/evm" - # Avalabs docker hub # avaplatform/avalanchego - defaults to local as to avoid unintentional pushes # You should probably set it - export DOCKER_REPO='avaplatform/avalanchego' @@ -43,3 +41,6 @@ fi # We use "export" here instead of just setting a bash variable because we need # to pass this flag to all child processes spawned by the shell. export CGO_CFLAGS="-O -D__BLST_PORTABLE__" +# While CGO_ENABLED doesn't need to be explicitly set, it produces a much more +# clear error due to the default value change in go1.20. 
+export CGO_ENABLED=1 diff --git a/avalanchego/scripts/lint.sh b/avalanchego/scripts/lint.sh index 6a2bfa6d..00e54624 100755 --- a/avalanchego/scripts/lint.sh +++ b/avalanchego/scripts/lint.sh @@ -24,7 +24,7 @@ fi TESTS=${TESTS:-"golangci_lint license_header"} function test_golangci_lint { - go install -modcacherw -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.49.0 + go install -modcacherw -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 golangci-lint run --config .golangci.yml } diff --git a/avalanchego/scripts/local.Dockerfile b/avalanchego/scripts/local.Dockerfile index 6f65bb62..0603b1a4 100644 --- a/avalanchego/scripts/local.Dockerfile +++ b/avalanchego/scripts/local.Dockerfile @@ -9,16 +9,14 @@ # Dockerfile # README.md # go.mod -FROM golang:1.18.5-buster +FROM golang:1.19.6-buster RUN mkdir -p /go/src/github.com/ava-labs WORKDIR $GOPATH/src/github.com/ava-labs COPY avalanchego avalanchego -COPY coreth coreth WORKDIR $GOPATH/src/github.com/ava-labs/avalanchego RUN ./scripts/build_avalanche.sh -RUN ./scripts/build_coreth.sh ../coreth $PWD/build/plugins/evm RUN ln -sv $GOPATH/src/github.com/ava-labs/avalanche-byzantine/ /avalanchego diff --git a/avalanchego/scripts/mock.gen.sh b/avalanchego/scripts/mock.gen.sh new file mode 100644 index 00000000..9a0cd075 --- /dev/null +++ b/avalanchego/scripts/mock.gen.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -e + +if ! [[ "$0" =~ scripts/mock.gen.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +if ! command -v mockgen &> /dev/null +then + echo "mockgen not found, installing..." 
+ # https://github.com/golang/mock + go install -v github.com/golang/mock/mockgen@v1.6.0 +fi + +# tuples of (source interface import path, comma-separated interface names, output file path) +input="scripts/mocks.mockgen.txt" +while IFS= read -r line +do + IFS='=' read src_import_path interface_name output_path <<< "${line}" + package_name=$(basename $(dirname $output_path)) + echo "Generating ${output_path}..." + mockgen -copyright_file=./LICENSE.header -package=${package_name} -destination=${output_path} ${src_import_path} ${interface_name} +done < "$input" + +echo "SUCCESS" diff --git a/avalanchego/scripts/mocks.mockgen.txt b/avalanchego/scripts/mocks.mockgen.txt new file mode 100644 index 00000000..cc95dc0e --- /dev/null +++ b/avalanchego/scripts/mocks.mockgen.txt @@ -0,0 +1,51 @@ +github.com/ava-labs/avalanchego/api/server=Server=api/server/mock_server.go +github.com/ava-labs/avalanchego/chains/atomic=SharedMemory=chains/atomic/mock_shared_memory.go +github.com/ava-labs/avalanchego/codec=Manager=codec/mock_manager.go +github.com/ava-labs/avalanchego/database=Batch=database/mock_batch.go +github.com/ava-labs/avalanchego/message=OutboundMessage=message/mock_message.go +github.com/ava-labs/avalanchego/message=OutboundMsgBuilder=message/mock_outbound_message_builder.go +github.com/ava-labs/avalanchego/network/peer=GossipTracker=network/peer/mock_gossip_tracker.go +github.com/ava-labs/avalanchego/snow/consensus/snowman=Block=snow/consensus/snowman/mock_block.go +github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex=LinearizableVM=snow/engine/avalanche/vertex/mock_vm.go +github.com/ava-labs/avalanchego/snow/engine/snowman/block=BuildBlockWithContextChainVM=snow/engine/snowman/block/mocks/build_block_with_context_vm.go +github.com/ava-labs/avalanchego/snow/engine/snowman/block=ChainVM=snow/engine/snowman/block/mocks/chain_vm.go +github.com/ava-labs/avalanchego/snow/engine/snowman/block=StateSyncableVM=snow/engine/snowman/block/mocks/state_syncable_vm.go 
+github.com/ava-labs/avalanchego/snow/engine/snowman/block=WithVerifyContext=snow/engine/snowman/block/mocks/with_verify_context.go +github.com/ava-labs/avalanchego/snow/networking/handler=Handler=snow/networking/handler/mock_handler.go +github.com/ava-labs/avalanchego/snow/networking/timeout=Manager=snow/networking/timeout/mock_manager.go +github.com/ava-labs/avalanchego/snow/networking/tracker=Targeter=snow/networking/tracker/mock_targeter.go +github.com/ava-labs/avalanchego/snow/networking/tracker=Tracker=snow/networking/tracker/mock_resource_tracker.go +github.com/ava-labs/avalanchego/snow/uptime=Calculator=snow/uptime/mock_calculator.go +github.com/ava-labs/avalanchego/snow/validators=Manager=snow/validators/mock_manager.go +github.com/ava-labs/avalanchego/snow/validators=State=snow/validators/mock_state.go +github.com/ava-labs/avalanchego/snow/validators=SubnetConnector=snow/validators/mock_subnet_connector.go +github.com/ava-labs/avalanchego/utils/crypto/keychain=Ledger=utils/crypto/keychain/mock_ledger.go +github.com/ava-labs/avalanchego/utils/filesystem=Reader=utils/filesystem/mock_io.go +github.com/ava-labs/avalanchego/utils/hashing=Hasher=utils/hashing/mock_hasher.go +github.com/ava-labs/avalanchego/utils/logging=Logger=utils/logging/mock_logger.go +github.com/ava-labs/avalanchego/utils/resource=User=utils/resource/mock_user.go +github.com/ava-labs/avalanchego/vms/avm/blocks=Block=vms/avm/blocks/mock_block.go +github.com/ava-labs/avalanchego/vms/avm/metrics=Metrics=vms/avm/metrics/mock_metrics.go +github.com/ava-labs/avalanchego/vms/avm/states=Chain,State,Diff=vms/avm/states/mock_states.go +github.com/ava-labs/avalanchego/vms/avm/txs/mempool=Mempool=vms/avm/txs/mempool/mock_mempool.go +github.com/ava-labs/avalanchego/vms/components/avax=TransferableIn=vms/components/avax/mock_transferable_in.go +github.com/ava-labs/avalanchego/vms/components/avax=TransferableOut=vms/components/avax/mock_transferable_out.go 
+github.com/ava-labs/avalanchego/vms/components/verify=Verifiable=vms/components/verify/mock_verifiable.go +github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor=Manager=vms/platformvm/blocks/executor/mock_manager.go +github.com/ava-labs/avalanchego/vms/platformvm/blocks=Block=vms/platformvm/blocks/mock_block.go +github.com/ava-labs/avalanchego/vms/platformvm/fx=Fx,Owner=vms/platformvm/fx/mock_fx.go +github.com/ava-labs/avalanchego/vms/platformvm/state=Chain=vms/platformvm/state/mock_chain.go +github.com/ava-labs/avalanchego/vms/platformvm/state=Diff=vms/platformvm/state/mock_diff.go +github.com/ava-labs/avalanchego/vms/platformvm/state=StakerIterator=vms/platformvm/state/mock_staker_iterator.go +github.com/ava-labs/avalanchego/vms/platformvm/state=Versions=vms/platformvm/state/mock_versions.go +github.com/ava-labs/avalanchego/vms/platformvm/txs/builder=Builder=vms/platformvm/txs/builder/mock_builder.go +github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool=Mempool=vms/platformvm/txs/mempool/mock_mempool.go +github.com/ava-labs/avalanchego/vms/platformvm/utxo=Verifier=vms/platformvm/utxo/mock_verifier.go +github.com/ava-labs/avalanchego/vms/proposervm/proposer=Windower=vms/proposervm/proposer/mock_windower.go +github.com/ava-labs/avalanchego/vms/proposervm/state=State=vms/proposervm/state/mock_state.go +github.com/ava-labs/avalanchego/vms/proposervm=PostForkBlock=vms/proposervm/mock_post_fork_block.go +github.com/ava-labs/avalanchego/vms/registry=VMGetter=vms/registry/mock_vm_getter.go +github.com/ava-labs/avalanchego/vms/registry=VMRegisterer=vms/registry/mock_vm_registerer.go +github.com/ava-labs/avalanchego/vms/registry=VMRegistry=vms/registry/mock_vm_registry.go +github.com/ava-labs/avalanchego/vms=Factory,Manager=vms/mock_manager.go +github.com/ava-labs/avalanchego/x/sync=Client=x/sync/mock_client.go \ No newline at end of file diff --git a/avalanchego/scripts/protobuf_codegen.sh b/avalanchego/scripts/protobuf_codegen.sh index cd01448b..707ea5e8 
100755 --- a/avalanchego/scripts/protobuf_codegen.sh +++ b/avalanchego/scripts/protobuf_codegen.sh @@ -9,7 +9,7 @@ fi # any version changes here should also be bumped in Dockerfile.buf # ref. https://docs.buf.build/installation # ref. https://github.com/bufbuild/buf/releases -BUF_VERSION='1.7.0' +BUF_VERSION='1.11.0' if [[ $(buf --version | cut -f2 -d' ') != "${BUF_VERSION}" ]]; then echo "could not find buf ${BUF_VERSION}, is it installed + in PATH?" exit 255 @@ -18,10 +18,10 @@ fi ## install "protoc-gen-go" # any version changes here should also be bumped in Dockerfile.buf # ref. https://github.com/protocolbuffers/protobuf-go/releases -PROTOC_GEN_GO_VERSION='v1.28.0' +PROTOC_GEN_GO_VERSION='v1.28.1' go install -v google.golang.org/protobuf/cmd/protoc-gen-go@${PROTOC_GEN_GO_VERSION} if [[ $(protoc-gen-go --version | cut -f2 -d' ') != "${PROTOC_GEN_GO_VERSION}" ]]; then - # e.g., protoc-gen-go v1.28.0 + # e.g., protoc-gen-go v1.28.1 echo "could not find protoc-gen-go ${PROTOC_GEN_GO_VERSION}, is it installed + in PATH?" exit 255 fi diff --git a/avalanchego/scripts/tests.e2e.sh b/avalanchego/scripts/tests.e2e.sh index f90843a5..be92c580 100755 --- a/avalanchego/scripts/tests.e2e.sh +++ b/avalanchego/scripts/tests.e2e.sh @@ -24,6 +24,9 @@ fi # We use "export" here instead of just setting a bash variable because we need # to pass this flag to all child processes spawned by the shell. export CGO_CFLAGS="-O -D__BLST_PORTABLE__" +# While CGO_ENABLED doesn't need to be explicitly set, it produces a much more +# clear error due to the default value change in go1.20. +export CGO_ENABLED=1 ENABLE_WHITELIST_VTX_TESTS=${ENABLE_WHITELIST_VTX_TESTS:-false} # ref. 
https://onsi.github.io/ginkgo/#spec-labels @@ -40,14 +43,14 @@ echo GINKGO_LABEL_FILTER: ${GINKGO_LABEL_FILTER} # TODO: migrate to upstream avalanche-network-runner GOARCH=$(go env GOARCH) GOOS=$(go env GOOS) -NETWORK_RUNNER_VERSION=1.2.2 +NETWORK_RUNNER_VERSION=1.3.5-rc.0 DOWNLOAD_PATH=/tmp/avalanche-network-runner.tar.gz DOWNLOAD_URL="https://github.com/ava-labs/avalanche-network-runner/releases/download/v${NETWORK_RUNNER_VERSION}/avalanche-network-runner_${NETWORK_RUNNER_VERSION}_${GOOS}_${GOARCH}.tar.gz" rm -f ${DOWNLOAD_PATH} rm -f /tmp/avalanche-network-runner -echo "downloading avalanche-network-runner ${NETWORK_RUNNER_VERSION} at ${DOWNLOAD_URL}" +echo "downloading avalanche-network-runner ${NETWORK_RUNNER_VERSION} at ${DOWNLOAD_URL} to ${DOWNLOAD_PATH}" curl --fail -L ${DOWNLOAD_URL} -o ${DOWNLOAD_PATH} echo "extracting downloaded avalanche-network-runner" @@ -71,7 +74,7 @@ echo "launch avalanche-network-runner in the background" server \ --log-level debug \ --port=":12342" \ ---disable-grpc-gateway 2> /dev/null & +--disable-grpc-gateway & PID=${!} ################################# diff --git a/avalanchego/scripts/tests.upgrade.sh b/avalanchego/scripts/tests.upgrade.sh index 2f2e3364..d7f9e0d8 100755 --- a/avalanchego/scripts/tests.upgrade.sh +++ b/avalanchego/scripts/tests.upgrade.sh @@ -54,11 +54,11 @@ find /tmp/avalanchego-v${VERSION} ################################# # download avalanche-network-runner # https://github.com/ava-labs/avalanche-network-runner -NETWORK_RUNNER_VERSION=1.1.0 +NETWORK_RUNNER_VERSION=1.3.5-rc.0 DOWNLOAD_PATH=/tmp/avalanche-network-runner.tar.gz -DOWNLOAD_URL=https://github.com/ava-labs/avalanche-network-runner/releases/download/v${NETWORK_RUNNER_VERSION}/avalanche-network-runner_${NETWORK_RUNNER_VERSION}_linux_amd64.tar.gz +DOWNLOAD_URL="https://github.com/ava-labs/avalanche-network-runner/releases/download/v${NETWORK_RUNNER_VERSION}/avalanche-network-runner_${NETWORK_RUNNER_VERSION}_${GOOS}_${GOARCH}.tar.gz" if [[ ${GOOS} == 
"darwin" ]]; then - DOWNLOAD_URL=https://github.com/ava-labs/avalanche-network-runner/releases/download/v${NETWORK_RUNNER_VERSION}/avalanche-network-runner_${NETWORK_RUNNER_VERSION}_darwin_amd64.tar.gz + DOWNLOAD_URL="https://github.com/ava-labs/avalanche-network-runner/releases/download/v${NETWORK_RUNNER_VERSION}/avalanche-network-runner_${NETWORK_RUNNER_VERSION}_darwin_amd64.tar.gz" fi rm -f ${DOWNLOAD_PATH} diff --git a/avalanchego/snow/acceptor.go b/avalanchego/snow/acceptor.go index 54dbc20b..f1a92e2f 100644 --- a/avalanchego/snow/acceptor.go +++ b/avalanchego/snow/acceptor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snow @@ -15,10 +15,10 @@ import ( var ( _ Acceptor = noOpAcceptor{} - _ Acceptor = &AcceptorTracker{} + _ Acceptor = (*AcceptorTracker)(nil) _ Acceptor = acceptorWrapper{} - _ AcceptorGroup = &acceptorGroup{} + _ AcceptorGroup = (*acceptorGroup)(nil) ) // Acceptor is implemented when a struct is monitoring if a message is accepted @@ -34,7 +34,9 @@ type Acceptor interface { type noOpAcceptor struct{} -func (noOpAcceptor) Accept(*ConsensusContext, ids.ID, []byte) error { return nil } +func (noOpAcceptor) Accept(*ConsensusContext, ids.ID, []byte) error { + return nil +} // AcceptorTracker tracks the dispatched accept events by its ID and counts. // Useful for testing. 
@@ -49,7 +51,7 @@ func NewAcceptorTracker() *AcceptorTracker { } } -func (a *AcceptorTracker) Accept(ctx *ConsensusContext, containerID ids.ID, container []byte) error { +func (a *AcceptorTracker) Accept(_ *ConsensusContext, containerID ids.ID, _ []byte) error { a.lock.Lock() a.accepted[containerID]++ a.lock.Unlock() diff --git a/avalanchego/snow/choices/decidable.go b/avalanchego/snow/choices/decidable.go index 946d196d..18ef4458 100644 --- a/avalanchego/snow/choices/decidable.go +++ b/avalanchego/snow/choices/decidable.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package choices import ( + "context" + "github.com/ava-labs/avalanchego/ids" ) @@ -22,12 +24,12 @@ type Decidable interface { // Accept this element. // // This element will be accepted by every correct node in the network. - Accept() error + Accept(context.Context) error // Reject this element. // // This element will not be accepted by any correct node in the network. - Reject() error + Reject(context.Context) error // Status returns this element's current status. // diff --git a/avalanchego/snow/choices/status.go b/avalanchego/snow/choices/status.go index 5cc6d1c7..255356b7 100644 --- a/avalanchego/snow/choices/status.go +++ b/avalanchego/snow/choices/status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package choices diff --git a/avalanchego/snow/choices/status_test.go b/avalanchego/snow/choices/status_test.go index 78748843..960af7b4 100644 --- a/avalanchego/snow/choices/status_test.go +++ b/avalanchego/snow/choices/status_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package choices @@ -6,60 +6,50 @@ package choices import ( "math" "testing" + + "github.com/stretchr/testify/require" ) func TestStatusValid(t *testing.T) { - if err := Accepted.Valid(); err != nil { - t.Fatalf("%s failed verification", Accepted) - } else if err := Rejected.Valid(); err != nil { - t.Fatalf("%s failed verification", Rejected) - } else if err := Processing.Valid(); err != nil { - t.Fatalf("%s failed verification", Processing) - } else if err := Unknown.Valid(); err != nil { - t.Fatalf("%s failed verification", Unknown) - } else if badStatus := Status(math.MaxInt32); badStatus.Valid() == nil { - t.Fatalf("%s passed verification", badStatus) - } + require := require.New(t) + + require.NoError(Accepted.Valid()) + require.NoError(Rejected.Valid()) + require.NoError(Processing.Valid()) + require.NoError(Unknown.Valid()) + + require.Error(Status(math.MaxInt32).Valid()) } func TestStatusDecided(t *testing.T) { - if !Accepted.Decided() { - t.Fatalf("%s failed decision", Accepted) - } else if !Rejected.Decided() { - t.Fatalf("%s failed decision", Rejected) - } else if Processing.Decided() { - t.Fatalf("%s failed decision", Processing) - } else if Unknown.Decided() { - t.Fatalf("%s failed decision", Unknown) - } else if badStatus := Status(math.MaxInt32); badStatus.Decided() { - t.Fatalf("%s failed decision", badStatus) - } + require := require.New(t) + + require.True(Accepted.Decided()) + require.True(Rejected.Decided()) + require.False(Processing.Decided()) + require.False(Unknown.Decided()) + + require.False(Status(math.MaxInt32).Decided()) } func TestStatusFetched(t *testing.T) { - if !Accepted.Fetched() { - t.Fatalf("%s failed issue", Accepted) - } else if !Rejected.Fetched() { - t.Fatalf("%s failed issue", Rejected) - } else if !Processing.Fetched() { - t.Fatalf("%s failed issue", Processing) - } else if Unknown.Fetched() { - t.Fatalf("%s failed issue", Unknown) - } else if badStatus := Status(math.MaxInt32); 
badStatus.Fetched() { - t.Fatalf("%s failed issue", badStatus) - } + require := require.New(t) + + require.True(Accepted.Fetched()) + require.True(Rejected.Fetched()) + require.True(Processing.Fetched()) + require.False(Unknown.Fetched()) + + require.False(Status(math.MaxInt32).Fetched()) } func TestStatusString(t *testing.T) { - if Accepted.String() != "Accepted" { - t.Fatalf("%s failed printing", Accepted) - } else if Rejected.String() != "Rejected" { - t.Fatalf("%s failed printing", Rejected) - } else if Processing.String() != "Processing" { - t.Fatalf("%s failed printing", Processing) - } else if Unknown.String() != "Unknown" { - t.Fatalf("%s failed printing", Unknown) - } else if badStatus := Status(math.MaxInt32); badStatus.String() != "Invalid status" { - t.Fatalf("%s failed printing", badStatus) - } + require := require.New(t) + + require.Equal("Accepted", Accepted.String()) + require.Equal("Rejected", Rejected.String()) + require.Equal("Processing", Processing.String()) + require.Equal("Unknown", Unknown.String()) + + require.Equal("Invalid status", Status(math.MaxInt32).String()) } diff --git a/avalanchego/snow/choices/test_decidable.go b/avalanchego/snow/choices/test_decidable.go index 5c2b276b..055a5405 100644 --- a/avalanchego/snow/choices/test_decidable.go +++ b/avalanchego/snow/choices/test_decidable.go @@ -1,15 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package choices import ( + "context" "fmt" "github.com/ava-labs/avalanchego/ids" ) -var _ Decidable = &TestDecidable{} +var _ Decidable = (*TestDecidable)(nil) // TestDecidable is a test Decidable type TestDecidable struct { @@ -18,12 +19,14 @@ type TestDecidable struct { StatusV Status } -func (d *TestDecidable) ID() ids.ID { return d.IDV } +func (d *TestDecidable) ID() ids.ID { + return d.IDV +} -func (d *TestDecidable) Accept() error { +func (d *TestDecidable) Accept(context.Context) error { switch d.StatusV { case Unknown, Rejected: - return fmt.Errorf("invalid state transaition from %s to %s", + return fmt.Errorf("invalid state transition from %s to %s", d.StatusV, Accepted) default: d.StatusV = Accepted @@ -31,10 +34,10 @@ func (d *TestDecidable) Accept() error { } } -func (d *TestDecidable) Reject() error { +func (d *TestDecidable) Reject(context.Context) error { switch d.StatusV { case Unknown, Accepted: - return fmt.Errorf("invalid state transaition from %s to %s", + return fmt.Errorf("invalid state transition from %s to %s", d.StatusV, Rejected) default: d.StatusV = Rejected @@ -42,4 +45,6 @@ func (d *TestDecidable) Reject() error { } } -func (d *TestDecidable) Status() Status { return d.StatusV } +func (d *TestDecidable) Status() Status { + return d.StatusV +} diff --git a/avalanchego/snow/consensus/avalanche/consensus.go b/avalanchego/snow/consensus/avalanche/consensus.go index 1c1e5c6b..f31d78c6 100644 --- a/avalanchego/snow/consensus/avalanche/consensus.go +++ b/avalanchego/snow/consensus/avalanche/consensus.go @@ -1,12 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avalanche import ( + "context" + + "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" ) // TODO: Implement pruning of accepted decisions. @@ -18,15 +23,14 @@ import ( // Consensus represents a general avalanche instance that can be used directly // to process a series of partially ordered elements. type Consensus interface { + health.Checker + // Takes in alpha, beta1, beta2, the accepted frontier, the join statuses, // the mutation statuses, and the consumer statuses. If accept or reject is // called, the status maps should be immediately updated accordingly. // Assumes each element in the accepted frontier will return accepted from // the join status map. - Initialize(*snow.ConsensusContext, Parameters, []Vertex) error - - // Returns the parameters that describe this avalanche instance - Parameters() Parameters + Initialize(context.Context, *snow.ConsensusContext, Parameters, []Vertex) error // Returns the number of vertices processing NumProcessing() int @@ -38,7 +42,7 @@ type Consensus interface { // Adds a new decision. Assumes the dependencies have already been added. // Assumes that mutations don't conflict with themselves. Returns if a // critical error has occurred. - Add(Vertex) error + Add(context.Context, Vertex) error // VertexIssued returns true iff Vertex has been added VertexIssued(Vertex) bool @@ -48,18 +52,18 @@ type Consensus interface { // Returns the set of transaction IDs that are virtuous but not contained in // any preferred vertices. - Orphans() ids.Set + Orphans() set.Set[ids.ID] // Returns a set of vertex IDs that were virtuous at the last update. 
- Virtuous() ids.Set + Virtuous() set.Set[ids.ID] // Returns a set of vertex IDs that are preferred - Preferences() ids.Set + Preferences() set.Set[ids.ID] // RecordPoll collects the results of a network poll. If a result has not // been added, the result is dropped. Returns if a critical error has // occurred. - RecordPoll(ids.UniqueBag) error + RecordPoll(context.Context, bag.UniqueBag[ids.ID]) error // Quiesce is guaranteed to return true if the instance is finalized. It // may, but doesn't need to, return true if all processing vertices are @@ -71,7 +75,4 @@ type Consensus interface { // finalized. Note, it is possible that after returning finalized, a new // decision may be added such that this instance is no longer finalized. Finalized() bool - - // HealthCheck returns information about the consensus health. - HealthCheck() (interface{}, error) } diff --git a/avalanchego/snow/consensus/avalanche/consensus_test.go b/avalanchego/snow/consensus/avalanche/consensus_test.go index 2a3902db..bfd255e5 100644 --- a/avalanchego/snow/consensus/avalanche/consensus_test.go +++ b/avalanchego/snow/consensus/avalanche/consensus_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avalanche import ( + "context" "errors" "math" "path" @@ -14,44 +15,57 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/compare" + "github.com/ava-labs/avalanchego/utils/set" ) type testFunc func(*testing.T, Factory) -var testFuncs = []testFunc{ - MetricsTest, - ParamsTest, - NumProcessingTest, - AddTest, - VertexIssuedTest, - TxIssuedTest, - VirtuousTest, - VirtuousSkippedUpdateTest, - VotingTest, - IgnoreInvalidVotingTest, - IgnoreInvalidTransactionVertexVotingTest, - TransitiveVotingTest, - SplitVotingTest, - TransitiveRejectionTest, - IsVirtuousTest, - QuiesceTest, - QuiesceAfterVotingTest, - TransactionVertexTest, - OrphansTest, - OrphansUpdateTest, - ErrorOnVacuousAcceptTest, - ErrorOnTxAcceptTest, - ErrorOnVtxAcceptTest, - ErrorOnVtxRejectTest, - ErrorOnParentVtxRejectTest, - ErrorOnTransitiveVtxRejectTest, - SilenceTransactionVertexEventsTest, -} +var ( + testFuncs = []testFunc{ + MetricsTest, + NumProcessingTest, + AddTest, + VertexIssuedTest, + TxIssuedTest, + VirtuousTest, + VirtuousSkippedUpdateTest, + VotingTest, + IgnoreInvalidVotingTest, + IgnoreInvalidTransactionVertexVotingTest, + TransitiveVotingTest, + StopVertexVerificationUnequalBetaValuesTest, + StopVertexVerificationEqualBetaValuesTest, + AcceptParentOfPreviouslyRejectedVertexTest, + RejectParentOfPreviouslyRejectedVertexTest, + QuiesceAfterRejectedVertexTest, + SplitVotingTest, + TransitiveRejectionTest, + IsVirtuousTest, + QuiesceTest, + QuiesceAfterVotingTest, + TransactionVertexTest, + OrphansTest, + OrphansUpdateTest, + ErrorOnVacuousAcceptTest, + ErrorOnTxAcceptTest, + ErrorOnVtxAcceptTest, + 
ErrorOnVtxRejectTest, + ErrorOnParentVtxRejectTest, + ErrorOnTransitiveVtxRejectTest, + SilenceTransactionVertexEventsTest, + } + + errTest = errors.New("non-nil error") +) func runConsensusTests(t *testing.T, factory Factory) { for _, test := range testFuncs { @@ -82,13 +96,13 @@ func MetricsTest(t *testing.T, factory Factory) { Parents: 2, BatchSize: 1, } - err := ctx.Registerer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ Name: "vtx_processing", })) if err != nil { t.Fatal(err) } - if err := avl.Initialize(ctx, params, nil); err == nil { + if err := avl.Initialize(context.Background(), ctx, params, nil); err == nil { t.Fatalf("should have failed due to registering a duplicated statistic") } } @@ -106,13 +120,13 @@ func MetricsTest(t *testing.T, factory Factory) { Parents: 2, BatchSize: 1, } - err := ctx.Registerer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ Name: "vtx_accepted", })) if err != nil { t.Fatal(err) } - if err := avl.Initialize(ctx, params, nil); err == nil { + if err := avl.Initialize(context.Background(), ctx, params, nil); err == nil { t.Fatalf("should have failed due to registering a duplicated statistic") } } @@ -130,56 +144,18 @@ func MetricsTest(t *testing.T, factory Factory) { Parents: 2, BatchSize: 1, } - err := ctx.Registerer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewGauge(prometheus.GaugeOpts{ Name: "vtx_rejected", })) if err != nil { t.Fatal(err) } - if err := avl.Initialize(ctx, params, nil); err == nil { + if err := avl.Initialize(context.Background(), ctx, params, nil); err == nil { t.Fatalf("should have failed due to registering a duplicated statistic") } } } -func ParamsTest(t *testing.T, factory Factory) { - avl := factory.New() - - ctx := snow.DefaultConsensusContextTest() - params := 
Parameters{ - Parameters: snowball.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Parents: 2, - BatchSize: 1, - } - - if err := avl.Initialize(ctx, params, nil); err != nil { - t.Fatal(err) - } - - p := avl.Parameters() - switch { - case p.K != params.K: - t.Fatalf("Wrong K parameter") - case p.Alpha != params.Alpha: - t.Fatalf("Wrong Alpha parameter") - case p.BetaVirtuous != params.BetaVirtuous: - t.Fatalf("Wrong Beta1 parameter") - case p.BetaRogue != params.BetaRogue: - t.Fatalf("Wrong Beta2 parameter") - case p.Parents != params.Parents: - t.Fatalf("Wrong Parents parameter") - } -} - func NumProcessingTest(t *testing.T, factory Factory) { avl := factory.New() @@ -209,7 +185,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - if err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts); err != nil { + if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { t.Fatal(err) } @@ -233,7 +209,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx0}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } @@ -257,7 +233,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx1}, } - if err := avl.Add(vtx1); err != nil { + if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } @@ -265,7 +241,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { t.Fatalf("expected %d vertices processing but returned %d", 2, numProcessing) } - if err := avl.Add(vtx1); err != nil { + if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } @@ -273,7 +249,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { t.Fatalf("expected %d vertices processing but returned %d", 2, 
numProcessing) } - if err := avl.Add(vts[0]); err != nil { + if err := avl.Add(context.Background(), vts[0]); err != nil { t.Fatal(err) } @@ -281,9 +257,9 @@ func NumProcessingTest(t *testing.T, factory Factory) { t.Fatalf("expected %d vertices processing but returned %d", 2, numProcessing) } - votes := ids.UniqueBag{} + votes := bag.UniqueBag[ids.ID]{} votes.Add(0, vtx0.ID()) - if err := avl.RecordPoll(votes); err != nil { + if err := avl.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } @@ -327,17 +303,17 @@ func AddTest(t *testing.T, factory Factory) { ctx := snow.DefaultConsensusContextTest() // track consensus events to ensure idempotency in case of redundant vertex adds - consensusEvents := snow.NewAcceptorTracker() - ctx.ConsensusAcceptor = consensusEvents + vertexEvents := snow.NewAcceptorTracker() + ctx.VertexAcceptor = vertexEvents - if err := avl.Initialize(ctx, params, seedVertices); err != nil { + if err := avl.Initialize(context.Background(), ctx, params, seedVertices); err != nil { t.Fatal(err) } if !avl.Finalized() { t.Fatal("An empty avalanche instance is not finalized") } - if !ids.UnsortedEquals([]ids.ID{seedVertices[0].ID(), seedVertices[1].ID()}, avl.Preferences().List()) { + if !compare.UnsortedEquals([]ids.ID{seedVertices[0].ID(), seedVertices[1].ID()}, avl.Preferences().List()) { t.Fatal("Initial frontier failed to be set") } @@ -412,7 +388,7 @@ func AddTest(t *testing.T, factory Factory) { } for i, tv := range tt { for _, j := range []int{1, 2} { // duplicate vertex add should be skipped - err := avl.Add(tv.toAdd) + err := avl.Add(context.Background(), tv.toAdd) if err != tv.err { t.Fatalf("#%d-%d: expected error %v, got %v", i, j, tv.err, err) } @@ -421,10 +397,10 @@ func AddTest(t *testing.T, factory Factory) { t.Fatalf("#%d-%d: expected finalized %v, got %v", i, j, finalized, tv.finalized) } preferenceSet := avl.Preferences().List() - if !ids.UnsortedEquals(tv.preferenceSet, preferenceSet) { + if 
!compare.UnsortedEquals(tv.preferenceSet, preferenceSet) { t.Fatalf("#%d-%d: expected preferenceSet %v, got %v", i, j, preferenceSet, tv.preferenceSet) } - if accepted, _ := consensusEvents.IsAccepted(tv.toAdd.ID()); accepted != tv.accepted { + if accepted, _ := vertexEvents.IsAccepted(tv.toAdd.ID()); accepted != tv.accepted { t.Fatalf("#%d-%d: expected accepted %d, got %d", i, j, tv.accepted, accepted) } } @@ -460,7 +436,7 @@ func VertexIssuedTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - if err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts); err != nil { + if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { t.Fatal(err) } @@ -486,7 +462,7 @@ func VertexIssuedTest(t *testing.T, factory Factory) { if avl.VertexIssued(vtx) { t.Fatalf("Vertex reported as issued") - } else if err := avl.Add(vtx); err != nil { + } else if err := avl.Add(context.Background(), vtx); err != nil { t.Fatal(err) } else if !avl.VertexIssued(vtx) { t.Fatalf("Vertex reported as not issued") @@ -530,7 +506,7 @@ func TxIssuedTest(t *testing.T, factory Factory) { }} tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - if err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts); err != nil { + if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { t.Fatal(err) } @@ -549,7 +525,7 @@ func TxIssuedTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx1}, } - if err := avl.Add(vtx); err != nil { + if err := avl.Add(context.Background(), vtx); err != nil { t.Fatal(err) } else if !avl.TxIssued(tx1) { t.Fatalf("Tx reported as not issued") @@ -585,7 +561,7 @@ func VirtuousTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), 
params, vts) if err != nil { t.Fatal(err) } @@ -648,13 +624,13 @@ func VirtuousTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx2}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { t.Fatalf("Wrong number of virtuous.") } else if !virtuous.Contains(vtx0.IDV) { t.Fatalf("Wrong virtuous") - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { t.Fatalf("Wrong number of virtuous.") @@ -662,11 +638,11 @@ func VirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Wrong virtuous") } - votes := ids.UniqueBag{} + votes := bag.UniqueBag[ids.ID]{} votes.Add(0, vtx1.ID()) votes.Add(1, vtx1.ID()) - if err := avl.RecordPoll(votes); err != nil { + if err := avl.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } @@ -680,7 +656,7 @@ func VirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Wrong virtuous") } - if err := avl.Add(vtx2); err != nil { + if err := avl.Add(context.Background(), vtx2); err != nil { t.Fatal(err) } @@ -694,7 +670,7 @@ func VirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Wrong virtuous") } - if err := avl.RecordPoll(votes); err != nil { + if err := avl.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } @@ -741,7 +717,7 @@ func VirtuousSkippedUpdateTest(t *testing.T, factory Factory) { ids.GenerateTestID(), } - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -788,19 +764,19 @@ func VirtuousSkippedUpdateTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx1}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } else if 
virtuous := avl.Virtuous(); virtuous.Len() != 1 { t.Fatalf("Wrong number of virtuous.") } else if !virtuous.Contains(vtx0.IDV) { t.Fatalf("Wrong virtuous") - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { t.Fatalf("Wrong number of virtuous.") } else if !virtuous.Contains(vtx0.IDV) { t.Fatalf("Wrong virtuous") - } else if err := avl.RecordPoll(ids.UniqueBag{}); err != nil { + } else if err := avl.RecordPoll(context.Background(), bag.UniqueBag[ids.ID]{}); err != nil { t.Fatal(err) } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { t.Fatalf("Wrong number of virtuous.") @@ -840,7 +816,7 @@ func VotingTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -882,26 +858,26 @@ func VotingTest(t *testing.T, factory Factory) { } // issue two vertices with conflicting transaction to the consensus instance - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } - if err := avl.Add(vtx1); err != nil { + if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } // create poll results, all vote for vtx1, not for vtx0 - sm := ids.UniqueBag{} + sm := bag.UniqueBag[ids.ID]{} sm.Add(0, vtx1.IDV) sm.Add(1, vtx1.IDV) // "BetaRogue" is 2, thus consensus should not be finalized yet - err = avl.RecordPoll(sm) + err = avl.RecordPoll(context.Background(), sm) switch { case err != nil: t.Fatal(err) case avl.Finalized(): t.Fatalf("An avalanche instance finalized too early") - case !ids.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): + case !compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): t.Fatalf("Initial 
frontier failed to be set") case tx0.Status() != choices.Processing: t.Fatalf("Tx should have been Processing") @@ -911,13 +887,13 @@ func VotingTest(t *testing.T, factory Factory) { // second poll should reach consensus, // and the other vertex of conflict transaction should be rejected - err = avl.RecordPoll(sm) + err = avl.RecordPoll(context.Background(), sm) switch { case err != nil: t.Fatal(err) case !avl.Finalized(): t.Fatalf("An avalanche instance finalized too late") - case !ids.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): + case !compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): // rejected vertex ID (vtx0) must have been removed from the preferred set t.Fatalf("Initial frontier failed to be set") case tx0.Status() != choices.Rejected: @@ -957,7 +933,7 @@ func IgnoreInvalidVotingTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - if err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts); err != nil { + if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { t.Fatal(err) } @@ -993,13 +969,13 @@ func IgnoreInvalidVotingTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx1}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } - sm := ids.UniqueBag{} + sm := bag.UniqueBag[ids.ID]{} sm.Add(0, vtx0.IDV) sm.Add(1, vtx1.IDV) @@ -1007,7 +983,7 @@ func IgnoreInvalidVotingTest(t *testing.T, factory Factory) { sm.Add(2, vtx0.IDV) sm.Add(2, vtx1.IDV) - if err := avl.RecordPoll(sm); err != nil { + if err := avl.RecordPoll(context.Background(), sm); err != nil { t.Fatal(err) } else if avl.Finalized() { t.Fatalf("An avalanche instance finalized too early") @@ -1043,7 +1019,7 @@ func IgnoreInvalidTransactionVertexVotingTest(t *testing.T, 
factory Factory) { }}, } - if err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts); err != nil { + if err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts); err != nil { t.Fatal(err) } @@ -1074,13 +1050,13 @@ func IgnoreInvalidTransactionVertexVotingTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx1}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } - sm := ids.UniqueBag{} + sm := bag.UniqueBag[ids.ID]{} sm.Add(0, vtx0.IDV) sm.Add(1, vtx1.IDV) @@ -1088,7 +1064,7 @@ func IgnoreInvalidTransactionVertexVotingTest(t *testing.T, factory Factory) { sm.Add(2, vtx0.IDV) sm.Add(2, vtx1.IDV) - if err := avl.RecordPoll(sm); err != nil { + if err := avl.RecordPoll(context.Background(), sm); err != nil { t.Fatal(err) } else if avl.Finalized() { t.Fatalf("An avalanche instance finalized too early") @@ -1124,7 +1100,7 @@ func TransitiveVotingTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -1171,41 +1147,41 @@ func TransitiveVotingTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx1}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx2); err != nil { + } else if err := avl.Add(context.Background(), vtx2); err != nil { t.Fatal(err) } - sm1 := ids.UniqueBag{} + sm1 := bag.UniqueBag[ids.ID]{} sm1.Add(0, vtx0.IDV) sm1.Add(1, vtx2.IDV) - 
err = avl.RecordPoll(sm1) + err = avl.RecordPoll(context.Background(), sm1) switch { case err != nil: t.Fatal(err) case avl.Finalized(): t.Fatalf("An avalanche instance finalized too early") - case !ids.UnsortedEquals([]ids.ID{vtx2.IDV}, avl.Preferences().List()): + case !compare.UnsortedEquals([]ids.ID{vtx2.IDV}, avl.Preferences().List()): t.Fatalf("Initial frontier failed to be set") case tx0.Status() != choices.Accepted: t.Fatalf("Tx should have been accepted") } - sm2 := ids.UniqueBag{} + sm2 := bag.UniqueBag[ids.ID]{} sm2.Add(0, vtx2.IDV) sm2.Add(1, vtx2.IDV) - err = avl.RecordPoll(sm2) + err = avl.RecordPoll(context.Background(), sm2) switch { case err != nil: t.Fatal(err) case !avl.Finalized(): t.Fatalf("An avalanche instance finalized too late") - case !ids.UnsortedEquals([]ids.ID{vtx2.IDV}, avl.Preferences().List()): + case !compare.UnsortedEquals([]ids.ID{vtx2.IDV}, avl.Preferences().List()): t.Fatalf("Initial frontier failed to be set") case tx0.Status() != choices.Accepted: t.Fatalf("Tx should have been accepted") @@ -1214,6 +1190,575 @@ func TransitiveVotingTest(t *testing.T, factory Factory) { } } +func StopVertexVerificationUnequalBetaValuesTest(t *testing.T, factory Factory) { + require := require.New(t) + + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{ + &TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }}, + &TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }}, + } + utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + + require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) + + tx0 := &snowstorm.TestTx{ + TestDecidable: 
choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos, + } + tx1 := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos, + } + + vtx0 := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + TxsV: []snowstorm.Tx{tx0}, + } + vtx1A := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts[:1], + HeightV: 1, + TxsV: []snowstorm.Tx{tx1}, + } + vtx1B := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts[1:], + HeightV: 1, + TxsV: []snowstorm.Tx{tx1}, + } + stopVertex := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: []Vertex{vtx1B}, + HasWhitelistV: true, + WhitelistV: set.Set[ids.ID]{ + vtx1B.IDV: struct{}{}, + tx1.IDV: struct{}{}, + }, + HeightV: 2, + } + + require.NoError(avl.Add(context.Background(), vtx0)) + require.NoError(avl.Add(context.Background(), vtx1A)) + require.NoError(avl.Add(context.Background(), vtx1B)) + + sm1 := bag.UniqueBag[ids.ID]{} + sm1.Add(0, vtx1A.IDV, vtx1B.IDV) + + // Transaction vertex for vtx1A is now accepted + require.NoError(avl.RecordPoll(context.Background(), sm1)) + require.Equal(choices.Processing, tx0.Status()) + require.Equal(choices.Processing, tx1.Status()) + require.Equal(choices.Processing, vtx0.Status()) + require.Equal(choices.Processing, vtx1A.Status()) + require.Equal(choices.Processing, vtx1B.Status()) + + // Because vtx1A isn't accepted, the stopVertex verification passes + require.NoError(avl.Add(context.Background(), stopVertex)) + + // Because vtx1A is now accepted, the stopVertex should be rejected. 
+ // However, because BetaVirtuous < BetaRogue it is possible for the + // stopVertex to be processing. + require.NoError(avl.RecordPoll(context.Background(), sm1)) + require.Equal(choices.Rejected, tx0.Status()) + require.Equal(choices.Accepted, tx1.Status()) + require.Equal(choices.Rejected, vtx0.Status()) + require.Equal(choices.Accepted, vtx1A.Status()) + require.Equal(choices.Accepted, vtx1B.Status()) + require.Equal(choices.Processing, stopVertex.Status()) +} + +func StopVertexVerificationEqualBetaValuesTest(t *testing.T, factory Factory) { + require := require.New(t) + + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 2, + BetaRogue: 2, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{ + &TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }}, + &TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }}, + } + utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + + require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) + + tx0 := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos, + } + tx1 := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos, + } + + vtx0 := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + TxsV: []snowstorm.Tx{tx0}, + } + vtx1A := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts[:1], + HeightV: 1, + TxsV: []snowstorm.Tx{tx1}, + } + 
vtx1B := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts[1:], + HeightV: 1, + TxsV: []snowstorm.Tx{tx1}, + } + stopVertex := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: []Vertex{vtx1B}, + HasWhitelistV: true, + WhitelistV: set.Set[ids.ID]{ + vtx1B.IDV: struct{}{}, + tx1.IDV: struct{}{}, + }, + HeightV: 2, + } + + require.NoError(avl.Add(context.Background(), vtx0)) + require.NoError(avl.Add(context.Background(), vtx1A)) + require.NoError(avl.Add(context.Background(), vtx1B)) + + sm1 := bag.UniqueBag[ids.ID]{} + sm1.Add(0, vtx1A.IDV, vtx1B.IDV) + + // Transaction vertex for vtx1A can not be accepted because BetaVirtuous is + // equal to BetaRogue + require.NoError(avl.RecordPoll(context.Background(), sm1)) + require.Equal(choices.Processing, tx0.Status()) + require.Equal(choices.Processing, tx1.Status()) + require.Equal(choices.Processing, vtx0.Status()) + require.Equal(choices.Processing, vtx1A.Status()) + require.Equal(choices.Processing, vtx1B.Status()) + + // Because vtx1A isn't accepted, the stopVertex verification passes + require.NoError(avl.Add(context.Background(), stopVertex)) + + // Because vtx1A is now accepted, the stopVertex should be rejected + require.NoError(avl.RecordPoll(context.Background(), sm1)) + require.Equal(choices.Rejected, tx0.Status()) + require.Equal(choices.Accepted, tx1.Status()) + require.Equal(choices.Rejected, vtx0.Status()) + require.Equal(choices.Accepted, vtx1A.Status()) + require.Equal(choices.Accepted, vtx1B.Status()) + require.Equal(choices.Rejected, stopVertex.Status()) +} + +func AcceptParentOfPreviouslyRejectedVertexTest(t *testing.T, factory Factory) { + require := require.New(t) + + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + 
OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{ + &TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }}, + } + utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + + require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) + + tx0 := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos[:1], + } + + tx1A := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos[1:], + } + tx1B := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos[1:], + } + + vtx1A := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + TxsV: []snowstorm.Tx{tx1A}, + } + + vtx0 := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + TxsV: []snowstorm.Tx{tx0}, + } + vtx1B := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: []Vertex{vtx0}, + HeightV: 2, + TxsV: []snowstorm.Tx{tx1B}, + } + + require.NoError(avl.Add(context.Background(), vtx0)) + require.NoError(avl.Add(context.Background(), vtx1A)) + require.NoError(avl.Add(context.Background(), vtx1B)) + + sm1 := bag.UniqueBag[ids.ID]{} + sm1.Add(0, vtx1A.IDV) + + require.NoError(avl.RecordPoll(context.Background(), sm1)) + require.Equal(choices.Accepted, tx1A.Status()) + require.Equal(choices.Accepted, vtx1A.Status()) + require.Equal(choices.Rejected, tx1B.Status()) + require.Equal(choices.Rejected, vtx1B.Status()) + 
require.Equal(1, avl.NumProcessing()) + require.Equal(choices.Processing, tx0.Status()) + require.Equal(choices.Processing, vtx0.Status()) + + sm0 := bag.UniqueBag[ids.ID]{} + sm0.Add(0, vtx0.IDV) + + require.NoError(avl.RecordPoll(context.Background(), sm0)) + require.Zero(avl.NumProcessing()) + require.Equal(choices.Accepted, tx0.Status()) + require.Equal(choices.Accepted, vtx0.Status()) + + prefs := avl.Preferences() + require.Len(prefs, 2) + require.Contains(prefs, vtx0.ID()) + require.Contains(prefs, vtx1A.ID()) +} + +func RejectParentOfPreviouslyRejectedVertexTest(t *testing.T, factory Factory) { + require := require.New(t) + + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{ + &TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }}, + } + utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + + require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) + + tx0A := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos[:1], + } + tx0B := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos[:1], + } + + tx1A := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos[1:], + } + tx1B := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos[1:], + } + + vtx0A := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: 
choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + TxsV: []snowstorm.Tx{tx0A}, + } + vtx1A := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + TxsV: []snowstorm.Tx{tx1A}, + } + + vtx0B := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + TxsV: []snowstorm.Tx{tx0B}, + } + vtx1B := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: []Vertex{vtx0B}, + HeightV: 2, + TxsV: []snowstorm.Tx{tx1B}, + } + + require.NoError(avl.Add(context.Background(), vtx0A)) + require.NoError(avl.Add(context.Background(), vtx1A)) + require.NoError(avl.Add(context.Background(), vtx0B)) + require.NoError(avl.Add(context.Background(), vtx1B)) + + sm1 := bag.UniqueBag[ids.ID]{} + sm1.Add(0, vtx1A.IDV) + + require.NoError(avl.RecordPoll(context.Background(), sm1)) + require.Equal(choices.Accepted, tx1A.Status()) + require.Equal(choices.Accepted, vtx1A.Status()) + require.Equal(choices.Rejected, tx1B.Status()) + require.Equal(choices.Rejected, vtx1B.Status()) + require.Equal(2, avl.NumProcessing()) + require.Equal(choices.Processing, tx0A.Status()) + require.Equal(choices.Processing, vtx0A.Status()) + require.Equal(choices.Processing, tx0B.Status()) + require.Equal(choices.Processing, vtx0B.Status()) + + sm0 := bag.UniqueBag[ids.ID]{} + sm0.Add(0, vtx0A.IDV) + + require.NoError(avl.RecordPoll(context.Background(), sm0)) + require.Zero(avl.NumProcessing()) + require.Equal(choices.Accepted, tx0A.Status()) + require.Equal(choices.Accepted, vtx0A.Status()) + require.Equal(choices.Rejected, tx0B.Status()) + require.Equal(choices.Rejected, vtx0B.Status()) + + orphans := avl.Orphans() + require.Empty(orphans) +} + +func QuiesceAfterRejectedVertexTest(t *testing.T, factory Factory) { + require := require.New(t) + + avl 
:= factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{ + &TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }}, + } + utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + + require.NoError(avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts)) + + txA := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos, + } + txB := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + InputIDsV: utxos, + } + + vtxA := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + TxsV: []snowstorm.Tx{txA}, + } + + vtxB0 := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + TxsV: []snowstorm.Tx{txB}, + } + vtxB1 := &TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: []Vertex{vtxB0}, + HeightV: 2, + TxsV: []snowstorm.Tx{txB}, + } + + require.NoError(avl.Add(context.Background(), vtxA)) + require.NoError(avl.Add(context.Background(), vtxB0)) + require.NoError(avl.Add(context.Background(), vtxB1)) + + sm1 := bag.UniqueBag[ids.ID]{} + sm1.Add(0, vtxA.IDV) + + require.NoError(avl.RecordPoll(context.Background(), sm1)) + require.Equal(choices.Accepted, txA.Status()) + require.Equal(choices.Accepted, vtxA.Status()) + require.Equal(choices.Rejected, txB.Status()) + require.Equal(choices.Rejected, vtxB0.Status()) + 
require.Equal(choices.Rejected, vtxB1.Status()) + require.Zero(avl.NumProcessing()) + require.True(avl.Finalized()) + require.True(avl.Quiesce()) +} + func SplitVotingTest(t *testing.T, factory Factory) { avl := factory.New() @@ -1243,7 +1788,7 @@ func SplitVotingTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -1274,40 +1819,40 @@ func SplitVotingTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx0}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } - sm1 := ids.UniqueBag{} + sm1 := bag.UniqueBag[ids.ID]{} sm1.Add(0, vtx0.IDV) // peer 0 votes for the tx though vtx0 sm1.Add(1, vtx1.IDV) // peer 1 votes for the tx though vtx1 - err = avl.RecordPoll(sm1) + err = avl.RecordPoll(context.Background(), sm1) switch { case err != nil: t.Fatal(err) case avl.Finalized(): // avalanche shouldn't be finalized because the vertex transactions are still processing t.Fatalf("An avalanche instance finalized too late") - case !ids.UnsortedEquals([]ids.ID{vtx0.IDV, vtx1.IDV}, avl.Preferences().List()): + case !compare.UnsortedEquals([]ids.ID{vtx0.IDV, vtx1.IDV}, avl.Preferences().List()): t.Fatalf("Initial frontier failed to be set") case tx0.Status() != choices.Accepted: t.Fatalf("Tx should have been accepted") } // Give alpha votes for both tranaction vertices - sm2 := ids.UniqueBag{} + sm2 := bag.UniqueBag[ids.ID]{} sm2.Add(0, vtx0.IDV, vtx1.IDV) // peer 0 votes for vtx0 and vtx1 sm2.Add(1, vtx0.IDV, vtx1.IDV) // peer 1 votes for vtx0 and vtx1 - err = avl.RecordPoll(sm2) + err = avl.RecordPoll(context.Background(), sm2) switch { case err != nil: 
t.Fatal(err) case !avl.Finalized(): t.Fatalf("An avalanche instance finalized too late") - case !ids.UnsortedEquals([]ids.ID{vtx0.IDV, vtx1.IDV}, avl.Preferences().List()): + case !compare.UnsortedEquals([]ids.ID{vtx0.IDV, vtx1.IDV}, avl.Preferences().List()): t.Fatalf("Initial frontier failed to be set") case tx0.Status() != choices.Accepted: t.Fatalf("Tx should have been accepted") @@ -1343,7 +1888,7 @@ func TransitiveRejectionTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -1396,35 +1941,35 @@ func TransitiveRejectionTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx2}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx2); err != nil { + } else if err := avl.Add(context.Background(), vtx2); err != nil { t.Fatal(err) } - sm := ids.UniqueBag{} + sm := bag.UniqueBag[ids.ID]{} sm.Add(0, vtx1.IDV) sm.Add(1, vtx1.IDV) - err = avl.RecordPoll(sm) + err = avl.RecordPoll(context.Background(), sm) switch { case err != nil: t.Fatal(err) case avl.Finalized(): t.Fatalf("An avalanche instance finalized too early") - case !ids.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): + case !compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): t.Fatalf("Initial frontier failed to be set") } - err = avl.RecordPoll(sm) + err = avl.RecordPoll(context.Background(), sm) switch { case err != nil: t.Fatal(err) case avl.Finalized(): t.Fatalf("An avalanche instance finalized too early") - case !ids.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): + case 
!compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): t.Fatalf("Initial frontier failed to be set") case tx0.Status() != choices.Rejected: t.Fatalf("Tx should have been rejected") @@ -1434,13 +1979,13 @@ func TransitiveRejectionTest(t *testing.T, factory Factory) { t.Fatalf("Tx should not have been decided") } - err = avl.RecordPoll(sm) + err = avl.RecordPoll(context.Background(), sm) switch { case err != nil: t.Fatal(err) case avl.Finalized(): t.Fatalf("An avalanche instance finalized too early") - case !ids.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): + case !compare.UnsortedEquals([]ids.ID{vtx1.IDV}, avl.Preferences().List()): t.Fatalf("Initial frontier failed to be set") case tx0.Status() != choices.Rejected: t.Fatalf("Tx should have been rejected") @@ -1480,7 +2025,7 @@ func IsVirtuousTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -1533,7 +2078,7 @@ func IsVirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Should be virtuous.") } - err = avl.Add(vtx0) + err = avl.Add(context.Background(), vtx0) switch { case err != nil: t.Fatal(err) @@ -1543,7 +2088,7 @@ func IsVirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Should not be virtuous.") } - err = avl.Add(vtx1) + err = avl.Add(context.Background(), vtx1) switch { case err != nil: t.Fatal(err) @@ -1583,7 +2128,7 @@ func QuiesceTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -1638,7 +2183,7 @@ func QuiesceTest(t *testing.T, factory Factory) { // Add [vtx0] containing 
[tx0]. Because [tx0] is virtuous, the instance // shouldn't quiesce. - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } if avl.Quiesce() { @@ -1648,15 +2193,15 @@ func QuiesceTest(t *testing.T, factory Factory) { // Add [vtx1] containing [tx1]. Because [tx1] conflicts with [tx0], neither // [tx0] nor [tx1] are now virtuous. This means there are no virtuous // transaction left in the consensus instance and it can quiesce. - if err := avl.Add(vtx1); err != nil { + if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } // The virtuous frontier is only updated sometimes, so force the frontier to // be re-calculated by changing the preference of tx1. - sm1 := ids.UniqueBag{} + sm1 := bag.UniqueBag[ids.ID]{} sm1.Add(0, vtx1.IDV) - if err := avl.RecordPoll(sm1); err != nil { + if err := avl.RecordPoll(context.Background(), sm1); err != nil { t.Fatal(err) } @@ -1666,16 +2211,16 @@ func QuiesceTest(t *testing.T, factory Factory) { // Add [vtx2] containing [tx2]. Because [tx2] is virtuous, the instance // shouldn't quiesce, even though [tx0] and [tx1] conflict. 
- if err := avl.Add(vtx2); err != nil { + if err := avl.Add(context.Background(), vtx2); err != nil { t.Fatal(err) } if avl.Quiesce() { t.Fatalf("Shouldn't quiesce") } - sm2 := ids.UniqueBag{} + sm2 := bag.UniqueBag[ids.ID]{} sm2.Add(0, vtx2.IDV) - if err := avl.RecordPoll(sm2); err != nil { + if err := avl.RecordPoll(context.Background(), sm2); err != nil { t.Fatal(err) } @@ -1718,7 +2263,7 @@ func QuiesceAfterVotingTest(t *testing.T, factory Factory) { ids.GenerateTestID(), } - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -1784,16 +2329,16 @@ func QuiesceAfterVotingTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } - if err := avl.Add(vtx1); err != nil { + if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } - if err := avl.Add(vtx2); err != nil { + if err := avl.Add(context.Background(), vtx2); err != nil { t.Fatal(err) } - if err := avl.Add(vtx3); err != nil { + if err := avl.Add(context.Background(), vtx3); err != nil { t.Fatal(err) } @@ -1802,9 +2347,9 @@ func QuiesceAfterVotingTest(t *testing.T, factory Factory) { t.Fatalf("Shouldn't quiesce") } - sm12 := ids.UniqueBag{} + sm12 := bag.UniqueBag[ids.ID]{} sm12.Add(0, vtx1.IDV, vtx2.IDV) - if err := avl.RecordPoll(sm12); err != nil { + if err := avl.RecordPoll(context.Background(), sm12); err != nil { t.Fatal(err) } @@ -1814,9 +2359,9 @@ func QuiesceAfterVotingTest(t *testing.T, factory Factory) { t.Fatalf("Shouldn't quiesce") } - sm023 := ids.UniqueBag{} + sm023 := bag.UniqueBag[ids.ID]{} sm023.Add(0, vtx0.IDV, vtx2.IDV, vtx3.IDV) - if err := avl.RecordPoll(sm023); err != nil { + if err := avl.RecordPoll(context.Background(), sm023); err != nil { t.Fatal(err) } @@ -1825,9 +2370,9 @@ func 
QuiesceAfterVotingTest(t *testing.T, factory Factory) { t.Fatalf("Shouldn't quiesce") } - sm3 := ids.UniqueBag{} + sm3 := bag.UniqueBag[ids.ID]{} sm3.Add(0, vtx3.IDV) - if err := avl.RecordPoll(sm3); err != nil { + if err := avl.RecordPoll(context.Background(), sm3); err != nil { t.Fatal(err) } @@ -1866,7 +2411,7 @@ func TransactionVertexTest(t *testing.T, factory Factory) { }}, } - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, seedVertices) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, seedVertices) if err != nil { t.Fatal(err) } @@ -1881,7 +2426,7 @@ func TransactionVertexTest(t *testing.T, factory Factory) { ParentsV: seedVertices, HeightV: 1, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } if !avl.VertexIssued(vtx0) { @@ -1896,10 +2441,10 @@ func TransactionVertexTest(t *testing.T, factory Factory) { // After voting for the transaction vertex beta times, the vertex should // also be accepted. 
- bags := ids.UniqueBag{} + bags := bag.UniqueBag[ids.ID]{} bags.Add(0, vtx0.IDV) bags.Add(1, vtx0.IDV) - if err := avl.RecordPoll(bags); err != nil { + if err := avl.RecordPoll(context.Background(), bags); err != nil { t.Fatalf("unexpected RecordPoll error %v", err) } @@ -1908,7 +2453,7 @@ func TransactionVertexTest(t *testing.T, factory Factory) { t.Fatalf("vertex with no transaction should have been accepted after polling, got %v", vtx0.Status()) case !avl.Finalized(): t.Fatal("expected finalized avalanche instance") - case !ids.UnsortedEquals([]ids.ID{vtx0.IDV}, avl.Preferences().List()): + case !compare.UnsortedEquals([]ids.ID{vtx0.IDV}, avl.Preferences().List()): t.Fatalf("unexpected frontier %v", avl.Preferences().List()) } } @@ -1942,7 +2487,7 @@ func OrphansTest(t *testing.T, factory Factory) { } utxos := []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -1997,7 +2542,7 @@ func OrphansTest(t *testing.T, factory Factory) { // [vtx0] contains [tx0], both of which will be preferred, so [tx0] is not // an orphan. - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } if orphans := avl.Orphans(); orphans.Len() != 0 { @@ -2006,7 +2551,7 @@ func OrphansTest(t *testing.T, factory Factory) { // [vtx1] contains [tx1], which conflicts with [tx0]. [tx0] is contained in // a preferred vertex, and neither [tx0] nor [tx1] are virtuous. - if err := avl.Add(vtx1); err != nil { + if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } if orphans := avl.Orphans(); orphans.Len() != 0 { @@ -2015,16 +2560,16 @@ func OrphansTest(t *testing.T, factory Factory) { // [vtx2] contains [tx2], both of which will be preferred, so [tx2] is not // an orphan. 
- if err := avl.Add(vtx2); err != nil { + if err := avl.Add(context.Background(), vtx2); err != nil { t.Fatal(err) } if orphans := avl.Orphans(); orphans.Len() != 0 { t.Fatalf("Wrong number of orphans") } - sm := ids.UniqueBag{} + sm := bag.UniqueBag[ids.ID]{} sm.Add(0, vtx1.IDV) - if err := avl.RecordPoll(sm); err != nil { + if err := avl.RecordPoll(context.Background(), sm); err != nil { t.Fatal(err) } @@ -2068,7 +2613,7 @@ func OrphansUpdateTest(t *testing.T, factory Factory) { StatusV: choices.Accepted, }}, } - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, seedVertices) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, seedVertices) if err != nil { t.Fatal(err) } @@ -2128,52 +2673,52 @@ func OrphansUpdateTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx2}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } - if err := avl.Add(vtx1); err != nil { + if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } - if err := avl.Add(vtx2); err != nil { + if err := avl.Add(context.Background(), vtx2); err != nil { t.Fatal(err) } // vtx0 is virtuous, so it should be preferred. vtx1 and vtx2 conflict, but // vtx1 was issued before vtx2, so vtx1 should be preferred and vtx2 should // not be preferred. - expectedPreferredSet := ids.Set{ + expectedPreferredSet := set.Set[ids.ID]{ vtx0.ID(): struct{}{}, vtx1.ID(): struct{}{}, } preferenceSet := avl.Preferences().List() - if !ids.UnsortedEquals(expectedPreferredSet.List(), preferenceSet) { + if !compare.UnsortedEquals(expectedPreferredSet.List(), preferenceSet) { t.Fatalf("expected preferenceSet %v, got %v", expectedPreferredSet, preferenceSet) } // Record a successful poll to change the preference from vtx1 to vtx2 and // update the orphan set. 
- votes := ids.UniqueBag{} + votes := bag.UniqueBag[ids.ID]{} votes.Add(0, vtx2.IDV) - if err := avl.RecordPoll(votes); err != nil { + if err := avl.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } // Because vtx2 was voted for over vtx1, they should be swapped in the // preferred set. - expectedPreferredSet = ids.Set{ + expectedPreferredSet = set.Set[ids.ID]{ vtx0.ID(): struct{}{}, vtx2.ID(): struct{}{}, } preferenceSet = avl.Preferences().List() - if !ids.UnsortedEquals(expectedPreferredSet.List(), preferenceSet) { + if !compare.UnsortedEquals(expectedPreferredSet.List(), preferenceSet) { t.Fatalf("expected preferenceSet %v, got %v", expectedPreferredSet, preferenceSet) } // Because there are no virtuous transactions that are not in a preferred // vertex, there should be no orphans. - expectedOrphanSet := ids.Set{} + expectedOrphanSet := set.Set[ids.ID]{} orphanSet := avl.Orphans() - if !ids.UnsortedEquals(expectedOrphanSet.List(), orphanSet.List()) { + if !compare.UnsortedEquals(expectedOrphanSet.List(), orphanSet.List()) { t.Fatalf("expected orphanSet %v, got %v", expectedOrphanSet, orphanSet) } } @@ -2200,14 +2745,14 @@ func ErrorOnVacuousAcceptTest(t *testing.T, factory Factory) { StatusV: choices.Accepted, }}} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), - AcceptV: errors.New(""), + AcceptV: errTest, StatusV: choices.Processing, }} @@ -2221,7 +2766,7 @@ func ErrorOnVacuousAcceptTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx0}, } - if err := avl.Add(vtx0); err == nil { + if err := avl.Add(context.Background(), vtx0); err == nil { t.Fatalf("Should have errored on vertex issuance") } } @@ -2249,14 +2794,14 @@ func ErrorOnTxAcceptTest(t *testing.T, factory Factory) { }}} utxos := 
[]ids.ID{ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } tx0 := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), - AcceptV: errors.New(""), + AcceptV: errTest, StatusV: choices.Processing, }} tx0.InputIDsV = append(tx0.InputIDsV, utxos[0]) @@ -2271,13 +2816,13 @@ func ErrorOnTxAcceptTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx0}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } - votes := ids.UniqueBag{} + votes := bag.UniqueBag[ids.ID]{} votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(votes); err == nil { + if err := avl.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on vertex acceptance") } } @@ -2305,7 +2850,7 @@ func ErrorOnVtxAcceptTest(t *testing.T, factory Factory) { }}} utxos := []ids.ID{ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -2319,7 +2864,7 @@ func ErrorOnVtxAcceptTest(t *testing.T, factory Factory) { vtx0 := &TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), - AcceptV: errors.New(""), + AcceptV: errTest, StatusV: choices.Processing, }, ParentsV: vts, @@ -2327,13 +2872,13 @@ func ErrorOnVtxAcceptTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx0}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } - votes := ids.UniqueBag{} + votes := bag.UniqueBag[ids.ID]{} votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(votes); err == nil { + if err := avl.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on vertex 
acceptance") } } @@ -2361,7 +2906,7 @@ func ErrorOnVtxRejectTest(t *testing.T, factory Factory) { }}} utxos := []ids.ID{ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -2391,7 +2936,7 @@ func ErrorOnVtxRejectTest(t *testing.T, factory Factory) { vtx1 := &TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), - RejectV: errors.New(""), + RejectV: errTest, StatusV: choices.Processing, }, ParentsV: vts, @@ -2399,15 +2944,15 @@ func ErrorOnVtxRejectTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx1}, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) } - votes := ids.UniqueBag{} + votes := bag.UniqueBag[ids.ID]{} votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(votes); err == nil { + if err := avl.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on vertex rejection") } } @@ -2435,7 +2980,7 @@ func ErrorOnParentVtxRejectTest(t *testing.T, factory Factory) { }}} utxos := []ids.ID{ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -2465,7 +3010,7 @@ func ErrorOnParentVtxRejectTest(t *testing.T, factory Factory) { vtx1 := &TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), - RejectV: errors.New(""), + RejectV: errTest, StatusV: choices.Processing, }, ParentsV: vts, @@ -2483,17 +3028,17 @@ func ErrorOnParentVtxRejectTest(t *testing.T, factory Factory) { TxsV: []snowstorm.Tx{tx1}, } - if err := avl.Add(vtx0); err != nil { + if err := 
avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx2); err != nil { + } else if err := avl.Add(context.Background(), vtx2); err != nil { t.Fatal(err) } - votes := ids.UniqueBag{} + votes := bag.UniqueBag[ids.ID]{} votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(votes); err == nil { + if err := avl.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on vertex rejection") } } @@ -2521,7 +3066,7 @@ func ErrorOnTransitiveVtxRejectTest(t *testing.T, factory Factory) { }}} utxos := []ids.ID{ids.GenerateTestID()} - err := avl.Initialize(snow.DefaultConsensusContextTest(), params, vts) + err := avl.Initialize(context.Background(), snow.DefaultConsensusContextTest(), params, vts) if err != nil { t.Fatal(err) } @@ -2561,24 +3106,24 @@ func ErrorOnTransitiveVtxRejectTest(t *testing.T, factory Factory) { vtx2 := &TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), - RejectV: errors.New(""), + RejectV: errTest, StatusV: choices.Processing, }, ParentsV: []Vertex{vtx1}, HeightV: 1, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx1); err != nil { + } else if err := avl.Add(context.Background(), vtx1); err != nil { t.Fatal(err) - } else if err := avl.Add(vtx2); err != nil { + } else if err := avl.Add(context.Background(), vtx2); err != nil { t.Fatal(err) } - votes := ids.UniqueBag{} + votes := bag.UniqueBag[ids.ID]{} votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(votes); err == nil { + if err := avl.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on vertex rejection") } } @@ -2607,9 +3152,9 @@ func SilenceTransactionVertexEventsTest(t *testing.T, factory Factory) { ctx := snow.DefaultConsensusContextTest() tracker := 
snow.NewAcceptorTracker() - ctx.DecisionAcceptor = tracker + ctx.TxAcceptor = tracker - err := avl.Initialize(ctx, params, vts) + err := avl.Initialize(context.Background(), ctx, params, vts) if err != nil { t.Fatal(err) } @@ -2623,13 +3168,13 @@ func SilenceTransactionVertexEventsTest(t *testing.T, factory Factory) { HeightV: 1, } - if err := avl.Add(vtx0); err != nil { + if err := avl.Add(context.Background(), vtx0); err != nil { t.Fatal(err) } - votes := ids.UniqueBag{} + votes := bag.UniqueBag[ids.ID]{} votes.Add(0, vtx0.IDV) - if err := avl.RecordPoll(votes); err != nil { + if err := avl.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } diff --git a/avalanchego/snow/consensus/avalanche/factory.go b/avalanchego/snow/consensus/avalanche/factory.go index 05dd1e8f..ed74831c 100644 --- a/avalanchego/snow/consensus/avalanche/factory.go +++ b/avalanchego/snow/consensus/avalanche/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche diff --git a/avalanchego/snow/consensus/avalanche/parameters.go b/avalanchego/snow/consensus/avalanche/parameters.go index df0ce1bd..9027df61 100644 --- a/avalanchego/snow/consensus/avalanche/parameters.go +++ b/avalanchego/snow/consensus/avalanche/parameters.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche diff --git a/avalanchego/snow/consensus/avalanche/parameters_test.go b/avalanchego/snow/consensus/avalanche/parameters_test.go index 2bc3f677..748aedf6 100644 --- a/avalanchego/snow/consensus/avalanche/parameters_test.go +++ b/avalanchego/snow/consensus/avalanche/parameters_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche diff --git a/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal.go b/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal.go index 70962f77..5ae57232 100644 --- a/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal.go +++ b/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -7,11 +7,14 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" + + sets "github.com/ava-labs/avalanchego/utils/set" ) var ( - _ Factory = &earlyTermNoTraversalFactory{} - _ Poll = &earlyTermNoTraversalPoll{} + _ Factory = (*earlyTermNoTraversalFactory)(nil) + _ Poll = (*earlyTermNoTraversalPoll)(nil) ) type earlyTermNoTraversalFactory struct { @@ -24,7 +27,7 @@ func NewEarlyTermNoTraversalFactory(alpha int) Factory { return &earlyTermNoTraversalFactory{alpha: alpha} } -func (f *earlyTermNoTraversalFactory) New(vdrs ids.NodeIDBag) Poll { +func (f *earlyTermNoTraversalFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { return &earlyTermNoTraversalPoll{ polled: vdrs, alpha: f.alpha, @@ -35,8 +38,8 @@ func (f *earlyTermNoTraversalFactory) New(vdrs ids.NodeIDBag) Poll { // the result of the poll. However, does not terminate tightly with this bound. // It terminates as quickly as it can without performing any DAG traversals. type earlyTermNoTraversalPoll struct { - votes ids.UniqueBag - polled ids.NodeIDBag + votes bag.UniqueBag[ids.ID] + polled bag.Bag[ids.NodeID] alpha int } @@ -70,7 +73,7 @@ func (p *earlyTermNoTraversalPoll) Finished() bool { // votes will be applied to a single shared ancestor. 
In this case, the poll // can terminate early, iff there are not enough pending votes for this // ancestor to receive alpha votes. - partialVotes := ids.BitSet64(0) + partialVotes := sets.Bits64(0) for _, vote := range p.votes.List() { if voters := p.votes.GetSet(vote); voters.Len() < p.alpha { partialVotes.Union(voters) @@ -80,7 +83,9 @@ func (p *earlyTermNoTraversalPoll) Finished() bool { } // Result returns the result of this poll -func (p *earlyTermNoTraversalPoll) Result() ids.UniqueBag { return p.votes } +func (p *earlyTermNoTraversalPoll) Result() bag.UniqueBag[ids.ID] { + return p.votes +} func (p *earlyTermNoTraversalPoll) PrefixedString(prefix string) string { return fmt.Sprintf( @@ -91,4 +96,6 @@ func (p *earlyTermNoTraversalPoll) PrefixedString(prefix string) string { ) } -func (p *earlyTermNoTraversalPoll) String() string { return p.PrefixedString("") } +func (p *earlyTermNoTraversalPoll) String() string { + return p.PrefixedString("") +} diff --git a/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal_test.go b/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal_test.go index 77603fb0..94643983 100644 --- a/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal_test.go +++ b/avalanchego/snow/consensus/avalanche/poll/early_term_no_traversal_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package poll @@ -7,6 +7,7 @@ import ( "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) func TestEarlyTermNoTraversalResults(t *testing.T) { @@ -17,7 +18,7 @@ func TestEarlyTermNoTraversalResults(t *testing.T) { vdr1 := ids.NodeID{1} // k = 1 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add(vdr1) factory := NewEarlyTermNoTraversalFactory(alpha) @@ -47,7 +48,7 @@ func TestEarlyTermNoTraversalString(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -59,9 +60,9 @@ func TestEarlyTermNoTraversalString(t *testing.T) { poll.Vote(vdr1, votes) expected := `waiting on Bag: (Size = 1) - ID[NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp]: Count = 1 + NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received UniqueBag: (Size = 1) - ID[SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg]: Members = 0000000000000002` + SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 0000000000000002` if result := poll.String(); expected != result { t.Fatalf("Poll should have returned:\n%s\nbut returned\n%s", expected, result) } @@ -76,7 +77,7 @@ func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -111,7 +112,7 @@ func TestEarlyTermNoTraversalTerminatesEarly(t *testing.T) { vdr4 := ids.NodeID{4} vdr5 := ids.NodeID{5} // k = 5 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -155,7 +156,7 @@ func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { vdr3 := ids.NodeID{3} vdr4 := ids.NodeID{4} - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add(vdr1) vdrs.Add(vdr2) vdrs.Add(vdr3) @@ -189,7 +190,7 @@ func TestEarlyTermNoTraversalWithFastDrops(t *testing.T) { vdr2 := ids.NodeID{2} vdr3 := ids.NodeID{3} // k = 3 - vdrs := ids.NodeIDBag{} + vdrs := 
bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, diff --git a/avalanchego/snow/consensus/avalanche/poll/interfaces.go b/avalanchego/snow/consensus/avalanche/poll/interfaces.go index b3682793..70cbfaba 100644 --- a/avalanchego/snow/consensus/avalanche/poll/interfaces.go +++ b/avalanchego/snow/consensus/avalanche/poll/interfaces.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -7,6 +7,7 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/formatting" ) @@ -14,8 +15,8 @@ import ( type Set interface { fmt.Stringer - Add(requestID uint32, vdrs ids.NodeIDBag) bool - Vote(requestID uint32, vdr ids.NodeID, votes []ids.ID) []ids.UniqueBag + Add(requestID uint32, vdrs bag.Bag[ids.NodeID]) bool + Vote(requestID uint32, vdr ids.NodeID, votes []ids.ID) []bag.UniqueBag[ids.ID] Len() int } @@ -25,10 +26,10 @@ type Poll interface { Vote(vdr ids.NodeID, votes []ids.ID) Finished() bool - Result() ids.UniqueBag + Result() bag.UniqueBag[ids.ID] } // Factory creates a new Poll type Factory interface { - New(vdrs ids.NodeIDBag) Poll + New(vdrs bag.Bag[ids.NodeID]) Poll } diff --git a/avalanchego/snow/consensus/avalanche/poll/no_early_term.go b/avalanchego/snow/consensus/avalanche/poll/no_early_term.go index 1d859866..21ff49de 100644 --- a/avalanchego/snow/consensus/avalanche/poll/no_early_term.go +++ b/avalanchego/snow/consensus/avalanche/poll/no_early_term.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package poll @@ -7,28 +7,31 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) var ( - _ Factory = &noEarlyTermFactory{} - _ Poll = &noEarlyTermPoll{} + _ Factory = (*noEarlyTermFactory)(nil) + _ Poll = (*noEarlyTermPoll)(nil) ) type noEarlyTermFactory struct{} // NewNoEarlyTermFactory returns a factory that returns polls with no early // termination -func NewNoEarlyTermFactory() Factory { return noEarlyTermFactory{} } +func NewNoEarlyTermFactory() Factory { + return noEarlyTermFactory{} +} -func (noEarlyTermFactory) New(vdrs ids.NodeIDBag) Poll { +func (noEarlyTermFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { return &noEarlyTermPoll{polled: vdrs} } // noEarlyTermPoll finishes when all polled validators either respond to the // query or a timeout occurs type noEarlyTermPoll struct { - votes ids.UniqueBag - polled ids.NodeIDBag + votes bag.UniqueBag[ids.ID] + polled bag.Bag[ids.NodeID] } // Vote registers a response for this poll @@ -44,10 +47,14 @@ func (p *noEarlyTermPoll) Vote(vdr ids.NodeID, votes []ids.ID) { } // Finished returns true when all validators have voted -func (p *noEarlyTermPoll) Finished() bool { return p.polled.Len() == 0 } +func (p *noEarlyTermPoll) Finished() bool { + return p.polled.Len() == 0 +} // Result returns the result of this poll -func (p *noEarlyTermPoll) Result() ids.UniqueBag { return p.votes } +func (p *noEarlyTermPoll) Result() bag.UniqueBag[ids.ID] { + return p.votes +} func (p *noEarlyTermPoll) PrefixedString(prefix string) string { return fmt.Sprintf( @@ -58,4 +65,6 @@ func (p *noEarlyTermPoll) PrefixedString(prefix string) string { ) } -func (p *noEarlyTermPoll) String() string { return p.PrefixedString("") } +func (p *noEarlyTermPoll) String() string { + return p.PrefixedString("") +} diff --git a/avalanchego/snow/consensus/avalanche/poll/no_early_term_test.go b/avalanchego/snow/consensus/avalanche/poll/no_early_term_test.go index 1b545232..d4ced58a 100644 --- 
a/avalanchego/snow/consensus/avalanche/poll/no_early_term_test.go +++ b/avalanchego/snow/consensus/avalanche/poll/no_early_term_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -7,6 +7,7 @@ import ( "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) func TestNoEarlyTermResults(t *testing.T) { @@ -15,7 +16,7 @@ func TestNoEarlyTermResults(t *testing.T) { vdr1 := ids.NodeID{1} // k = 1 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add(vdr1) factory := NewNoEarlyTermFactory() @@ -43,7 +44,7 @@ func TestNoEarlyTermString(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -55,9 +56,9 @@ func TestNoEarlyTermString(t *testing.T) { poll.Vote(vdr1, votes) expected := `waiting on Bag: (Size = 1) - ID[NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp]: Count = 1 + NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received UniqueBag: (Size = 1) - ID[SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg]: Members = 0000000000000002` + SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 0000000000000002` if result := poll.String(); expected != result { t.Fatalf("Poll should have returned %s but returned %s", expected, result) } @@ -70,7 +71,7 @@ func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, diff --git a/avalanchego/snow/consensus/avalanche/poll/set.go b/avalanchego/snow/consensus/avalanche/poll/set.go index 9d9d7087..b829c44d 100644 --- a/avalanchego/snow/consensus/avalanche/poll/set.go +++ b/avalanchego/snow/consensus/avalanche/poll/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -13,14 +13,15 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/metric" ) var ( - _ Set = &set{} - _ Poll = &poll{} + _ Set = (*set)(nil) + _ Poll = (*poll)(nil) ) type pollHolder interface { @@ -91,8 +92,8 @@ func NewSet( // Add to the current set of polls // Returns true if the poll was registered correctly and the network sample -// should be made. -func (s *set) Add(requestID uint32, vdrs ids.NodeIDBag) bool { +// should be made. +func (s *set) Add(requestID uint32, vdrs bag.Bag[ids.NodeID]) bool { if _, exists := s.polls.Get(requestID); exists { s.log.Debug("dropping poll", zap.String("reason", "duplicated request"), @@ -116,7 +117,7 @@ func (s *set) Add(requestID uint32, vdrs ids.NodeIDBag) bool { // Vote registers the connections response to a query for [id]. If there was no // query, or the response has already be registered, nothing is performed. 
-func (s *set) Vote(requestID uint32, vdr ids.NodeID, votes []ids.ID) []ids.UniqueBag { +func (s *set) Vote(requestID uint32, vdr ids.NodeID, votes []ids.ID) []bag.UniqueBag[ids.ID] { holder, exists := s.polls.Get(requestID) if !exists { s.log.Verbo("dropping vote", @@ -132,7 +133,7 @@ func (s *set) Vote(requestID uint32, vdr ids.NodeID, votes []ids.ID) []ids.Uniqu s.log.Verbo("processing votes", zap.Stringer("validator", vdr), zap.Uint32("requestID", requestID), - zap.Stringer("votes", ids.SliceStringer(votes)), + zap.Stringers("votes", votes), ) p.Vote(vdr, votes) @@ -140,7 +141,7 @@ func (s *set) Vote(requestID uint32, vdr ids.NodeID, votes []ids.ID) []ids.Uniqu return nil } - var results []ids.UniqueBag + var results []bag.UniqueBag[ids.ID] // iterate from oldest to newest iter := s.polls.NewIterator() @@ -170,7 +171,9 @@ func (s *set) Vote(requestID uint32, vdr ids.NodeID, votes []ids.ID) []ids.Uniqu } // Len returns the number of outstanding polls -func (s *set) Len() int { return s.polls.Len() } +func (s *set) Len() int { + return s.polls.Len() +} func (s *set) String() string { sb := strings.Builder{} diff --git a/avalanchego/snow/consensus/avalanche/poll/set_test.go b/avalanchego/snow/consensus/avalanche/poll/set_test.go index e4a35ecc..6343db8b 100644 --- a/avalanchego/snow/consensus/avalanche/poll/set_test.go +++ b/avalanchego/snow/consensus/avalanche/poll/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package poll @@ -11,6 +11,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -52,7 +53,7 @@ func TestCreateAndFinishPoll(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -70,11 +71,11 @@ func TestCreateAndFinishPoll(t *testing.T) { t.Fatalf("Should only have one active poll") } else if results := s.Vote(1, vdr1, votes); len(results) > 0 { t.Fatalf("Shouldn't have been able to finish a non-existent poll") - } else if results = s.Vote(0, vdr1, votes); len(results) > 0 { + } else if results := s.Vote(0, vdr1, votes); len(results) > 0 { t.Fatalf("Shouldn't have been able to finish an ongoing poll") - } else if results = s.Vote(0, vdr1, votes); len(results) > 0 { + } else if results := s.Vote(0, vdr1, votes); len(results) > 0 { t.Fatalf("Should have dropped a duplicated poll") - } else if results = s.Vote(0, vdr2, votes); len(results) == 0 { + } else if results := s.Vote(0, vdr2, votes); len(results) == 0 { t.Fatalf("Should have finished the poll") } else if len(results) != 1 { t.Fatalf("Wrong number of results returned") @@ -102,12 +103,12 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // create two polls for the two vtxs - vdrBag := ids.NodeIDBag{} + vdrBag := bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added := s.Add(1, vdrBag) require.True(t, added) - vdrBag = ids.NodeIDBag{} + vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) 
added = s.Add(2, vdrBag) require.True(t, added) @@ -118,7 +119,7 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { vtx1 := ids.ID{1} vtx2 := ids.ID{2} - var results []ids.UniqueBag + var results []bag.UniqueBag[ids.ID] // vote out of order results = s.Vote(1, vdr1, []ids.ID{vtx1}) @@ -155,17 +156,17 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // create three polls for the two vtxs - vdrBag := ids.NodeIDBag{} + vdrBag := bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added := s.Add(1, vdrBag) require.True(t, added) - vdrBag = ids.NodeIDBag{} + vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added = s.Add(2, vdrBag) require.True(t, added) - vdrBag = ids.NodeIDBag{} + vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added = s.Add(3, vdrBag) require.True(t, added) @@ -178,7 +179,7 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { vtx2 := ids.ID{2} vtx3 := ids.ID{3} - var results []ids.UniqueBag + var results []bag.UniqueBag[ids.ID] // vote out of order // 2 finishes first to create a gap of finished poll between two unfinished polls 1 and 3 @@ -218,13 +219,13 @@ func TestSetString(t *testing.T) { vdr1 := ids.NodeID{1} // k = 1 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add(vdr1) expected := `current polls: (Size = 1) RequestID 0: waiting on Bag: (Size = 1) - ID[NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt]: Count = 1 + NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt: 1 received UniqueBag: (Size = 0)` if !s.Add(0, vdrs) { t.Fatalf("Should have been able to add a new poll") diff --git a/avalanchego/snow/consensus/avalanche/test_vertex.go b/avalanchego/snow/consensus/avalanche/test_vertex.go index 4390798a..be4037a5 100644 --- a/avalanchego/snow/consensus/avalanche/test_vertex.go +++ b/avalanchego/snow/consensus/avalanche/test_vertex.go @@ -1,15 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/utils/set" ) -var _ Vertex = &TestVertex{} +var _ Vertex = (*TestVertex)(nil) // TestVertex is a useful test vertex type TestVertex struct { @@ -19,7 +22,7 @@ type TestVertex struct { ParentsV []Vertex ParentsErrV error HasWhitelistV bool - WhitelistV ids.Set + WhitelistV set.Set[ids.ID] WhitelistErrV error HeightV uint64 HeightErrV error @@ -28,10 +31,30 @@ type TestVertex struct { BytesV []byte } -func (v *TestVertex) Verify() error { return v.VerifyErrV } -func (v *TestVertex) Parents() ([]Vertex, error) { return v.ParentsV, v.ParentsErrV } -func (v *TestVertex) HasWhitelist() bool { return v.HasWhitelistV } -func (v *TestVertex) Whitelist() (ids.Set, error) { return v.WhitelistV, v.WhitelistErrV } -func (v *TestVertex) Height() (uint64, error) { return v.HeightV, v.HeightErrV } -func (v *TestVertex) Txs() ([]snowstorm.Tx, error) { return v.TxsV, v.TxsErrV } -func (v *TestVertex) Bytes() []byte { return v.BytesV } +func (v *TestVertex) Verify(context.Context) error { + return v.VerifyErrV +} + +func (v *TestVertex) Parents() ([]Vertex, error) { + return v.ParentsV, v.ParentsErrV +} + +func (v *TestVertex) HasWhitelist() bool { + return v.HasWhitelistV +} + +func (v *TestVertex) Whitelist(context.Context) (set.Set[ids.ID], error) { + return v.WhitelistV, v.WhitelistErrV +} + +func (v *TestVertex) Height() (uint64, error) { + return v.HeightV, v.HeightErrV +} + +func (v *TestVertex) Txs(context.Context) ([]snowstorm.Tx, error) { + return v.TxsV, v.TxsErrV +} + +func (v *TestVertex) Bytes() []byte { + return v.BytesV +} diff --git a/avalanchego/snow/consensus/avalanche/topological.go b/avalanchego/snow/consensus/avalanche/topological.go 
index 8962341b..6b17824c 100644 --- a/avalanchego/snow/consensus/avalanche/topological.go +++ b/avalanchego/snow/consensus/avalanche/topological.go @@ -1,20 +1,25 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( + "context" "errors" "fmt" "strings" "go.uber.org/zap" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/metrics" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" ) const minMapSize = 16 @@ -22,14 +27,16 @@ const minMapSize = 16 var ( errNoLeaves = errors.New("couldn't pop a leaf from leaf set") - _ Factory = &TopologicalFactory{} - _ Consensus = &Topological{} + _ Factory = (*TopologicalFactory)(nil) + _ Consensus = (*Topological)(nil) ) // TopologicalFactory implements Factory by returning a topological struct type TopologicalFactory struct{} -func (TopologicalFactory) New() Consensus { return &Topological{} } +func (TopologicalFactory) New() Consensus { + return &Topological{} +} // TODO: Implement pruning of decisions. // To perfectly preserve the protocol, this implementation will need to store @@ -59,20 +66,22 @@ type Topological struct { cg snowstorm.Consensus // preferred is the frontier of vtxIDs that are strongly preferred - preferred ids.Set + preferred set.Set[ids.ID] // virtuous is the frontier of vtxIDs that are strongly virtuous - virtuous ids.Set + virtuous set.Set[ids.ID] // orphans are the txIDs that are virtuous, but not preferred - orphans ids.Set + orphans set.Set[ids.ID] // virtuousVoting are the txIDs that are virtuous and still awaiting // additional votes before acceptance. 
transactionVertices whose vertices // are not considered virtuous are removed from this set. - virtuousVoting ids.Set + virtuousVoting set.Set[ids.ID] // frontier is the set of vts that have no descendents + // + // Invariant: frontier never contains a rejected vertex frontier map[ids.ID]Vertex // preferenceCache is the cache for strongly preferred checks // virtuousCache is the cache for strongly virtuous checks @@ -80,9 +89,9 @@ type Topological struct { // Used in [calculateInDegree] and [markAncestorInDegrees]. // Should only be accessed in those methods. - // We use this one instance of ids.Set instead of creating a - // new ids.Set during each call to [calculateInDegree]. - leaves ids.Set + // We use this one instance of set.Set instead of creating a + // new set.Set during each call to [calculateInDegree]. + leaves set.Set[ids.ID] // Kahn nodes used in [calculateInDegree] and [markAncestorInDegrees]. // Should only be accessed in those methods. @@ -91,18 +100,19 @@ type Topological struct { kahnNodes map[ids.ID]kahnNode // Used in [pushVotes]. Should only be accessed in that method. - // We use this one instance instead of creating a new ids.UniqueBag + // We use this one instance instead of creating a new bag.UniqueBag[ids.ID] // during each call to [pushVotes]. 
- votes ids.UniqueBag + votes bag.UniqueBag[ids.ID] } type kahnNode struct { inDegree int - votes ids.BitSet64 + votes set.Bits64 } func (ta *Topological) Initialize( - ctx *snow.ConsensusContext, + ctx context.Context, + chainCtx *snow.ConsensusContext, params Parameters, frontier []Vertex, ) error { @@ -110,13 +120,13 @@ func (ta *Topological) Initialize( return err } - ta.ctx = ctx + ta.ctx = chainCtx ta.params = params - ta.leaves = ids.Set{} - ta.votes = ids.UniqueBag{} + ta.leaves = set.Set[ids.ID]{} + ta.votes = bag.UniqueBag[ids.ID]{} ta.kahnNodes = make(map[ids.ID]kahnNode) - latencyMetrics, err := metrics.NewLatency("vtx", "vertex/vertices", ctx.Log, "", ctx.Registerer) + latencyMetrics, err := metrics.NewLatency("vtx", "vertex/vertices", chainCtx.Log, "", chainCtx.AvalancheRegisterer) if err != nil { return err } @@ -125,7 +135,7 @@ func (ta *Topological) Initialize( ta.nodes = make(map[ids.ID]*transactionVertex, minMapSize) ta.cg = &snowstorm.Directed{} - if err := ta.cg.Initialize(ctx, params.Parameters); err != nil { + if err := ta.cg.Initialize(chainCtx, params.Parameters); err != nil { return err } @@ -133,16 +143,18 @@ func (ta *Topological) Initialize( for _, vtx := range frontier { ta.frontier[vtx.ID()] = vtx } - return ta.updateFrontiers() + return ta.updateFrontiers(ctx) } -func (ta *Topological) NumProcessing() int { return len(ta.nodes) } - -func (ta *Topological) Parameters() Parameters { return ta.params } +func (ta *Topological) NumProcessing() int { + return len(ta.nodes) +} -func (ta *Topological) IsVirtuous(tx snowstorm.Tx) bool { return ta.cg.IsVirtuous(tx) } +func (ta *Topological) IsVirtuous(tx snowstorm.Tx) bool { + return ta.cg.IsVirtuous(tx) +} -func (ta *Topological) Add(vtx Vertex) error { +func (ta *Topological) Add(ctx context.Context, vtx Vertex) error { if vtx.Status().Decided() { return nil // Already decided this vertex } @@ -152,14 +164,14 @@ func (ta *Topological) Add(vtx Vertex) error { return nil // Already inserted 
this vertex } - txs, err := vtx.Txs() + txs, err := vtx.Txs(ctx) if err != nil { return err } for _, tx := range txs { if !tx.Status().Decided() { // Add the consumers to the conflict graph. - if err := ta.cg.Add(tx); err != nil { + if err := ta.cg.Add(ctx, tx); err != nil { return err } @@ -177,7 +189,7 @@ func (ta *Topological) Add(vtx Vertex) error { ta.nodes[vtxID] = txv // Also add the transaction vertex to the conflict graph to track conflicts. - if err := ta.cg.Add(txv); err != nil { + if err := ta.cg.Add(ctx, txv); err != nil { return err } @@ -193,11 +205,11 @@ func (ta *Topological) Add(vtx Vertex) error { // Because we don't call [updateFrontiers], previous vertices that were // marked as virtuous will not be updated to no longer being virtuous. Even // if this newly added vertex conflicts with them. This is an optimization - // to avoid a retraversal of the DAG in the issuance path. Their virtuous + // to avoid a re-traversal of the DAG in the issuance path. Their virtuous // status will be updated during a future poll. This is safe because the // virtuous frontier is only used optimistically to control when it is valid // to quiesce. 
- return ta.update(vtx) // Update the vertices preference and virtuous status + return ta.update(ctx, vtx) // Update the vertices preference and virtuous status } func (ta *Topological) VertexIssued(vtx Vertex) bool { @@ -208,22 +220,30 @@ func (ta *Topological) VertexIssued(vtx Vertex) bool { return ok } -func (ta *Topological) TxIssued(tx snowstorm.Tx) bool { return ta.cg.Issued(tx) } +func (ta *Topological) TxIssued(tx snowstorm.Tx) bool { + return ta.cg.Issued(tx) +} -func (ta *Topological) Orphans() ids.Set { return ta.orphans } +func (ta *Topological) Orphans() set.Set[ids.ID] { + return ta.orphans +} -func (ta *Topological) Virtuous() ids.Set { return ta.virtuous } +func (ta *Topological) Virtuous() set.Set[ids.ID] { + return ta.virtuous +} -func (ta *Topological) Preferences() ids.Set { return ta.preferred } +func (ta *Topological) Preferences() set.Set[ids.ID] { + return ta.preferred +} -func (ta *Topological) RecordPoll(responses ids.UniqueBag) error { +func (ta *Topological) RecordPoll(ctx context.Context, responses bag.UniqueBag[ids.ID]) error { // Register a new poll call ta.pollNumber++ // If it isn't possible to have alpha votes for any transaction, then we can // just reset the confidence values in the conflict graph and not perform // any traversals. - partialVotes := ids.BitSet64(0) + partialVotes := set.Bits64(0) for vote := range responses { votes := responses.GetSet(vote) partialVotes.Union(votes) @@ -234,7 +254,7 @@ func (ta *Topological) RecordPoll(responses ids.UniqueBag) error { if partialVotes.Len() < ta.params.Alpha { // Because there were less than alpha total returned votes, we can skip // the traversals and fail the poll. 
- _, err := ta.cg.RecordPoll(ids.Bag{}) + _, err := ta.cg.RecordPoll(ctx, bag.Bag[ids.ID]{}) return err } @@ -244,28 +264,32 @@ func (ta *Topological) RecordPoll(responses ids.UniqueBag) error { } // Collect the votes for each transaction: O(|Live Set|) - votes, err := ta.pushVotes() + votes, err := ta.pushVotes(ctx) if err != nil { return err } // Update the conflict graph: O(|Transactions|) - if updated, err := ta.cg.RecordPoll(votes); !updated || err != nil { + if updated, err := ta.cg.RecordPoll(ctx, votes); !updated || err != nil { // If the transaction statuses weren't changed, there is no need to // perform a traversal. return err } // Update the dag: O(|Live Set|) - return ta.updateFrontiers() + return ta.updateFrontiers(ctx) } -func (ta *Topological) Quiesce() bool { return ta.virtuousVoting.Len() == 0 } +func (ta *Topological) Quiesce() bool { + return ta.virtuousVoting.Len() == 0 +} -func (ta *Topological) Finalized() bool { return ta.cg.Finalized() } +func (ta *Topological) Finalized() bool { + return ta.cg.Finalized() +} // HealthCheck returns information about the consensus health. 
-func (ta *Topological) HealthCheck() (interface{}, error) { +func (ta *Topological) HealthCheck(ctx context.Context) (interface{}, error) { numOutstandingVtx := ta.Latency.NumProcessing() isOutstandingVtx := numOutstandingVtx <= ta.params.MaxOutstandingItems healthy := isOutstandingVtx @@ -273,7 +297,13 @@ func (ta *Topological) HealthCheck() (interface{}, error) { "outstandingVertices": numOutstandingVtx, } - snowstormReport, err := ta.cg.HealthCheck() + // check for long running vertices + oldestProcessingDuration := ta.Latency.MeasureAndGetOldestDuration() + processingTimeOK := oldestProcessingDuration <= ta.params.MaxItemProcessingTime + healthy = healthy && processingTimeOK + details["longestRunningVertex"] = oldestProcessingDuration.String() + + snowstormReport, err := ta.cg.HealthCheck(ctx) healthy = healthy && err == nil details["snowstorm"] = snowstormReport @@ -282,6 +312,9 @@ func (ta *Topological) HealthCheck() (interface{}, error) { if isOutstandingVtx { errorReasons = append(errorReasons, fmt.Sprintf("number outstanding vertexes %d > %d", numOutstandingVtx, ta.params.MaxOutstandingItems)) } + if !processingTimeOK { + errorReasons = append(errorReasons, fmt.Sprintf("vertex processing time %s > %s", oldestProcessingDuration, ta.params.MaxItemProcessingTime)) + } if err != nil { errorReasons = append(errorReasons, err.Error()) } @@ -293,11 +326,9 @@ func (ta *Topological) HealthCheck() (interface{}, error) { // Takes in a list of votes and sets up the topological ordering. Returns the // reachable section of the graph annotated with the number of inbound edges and // the non-transitively applied votes. Also returns the list of leaf nodes. 
-func (ta *Topological) calculateInDegree(responses ids.UniqueBag) error { +func (ta *Topological) calculateInDegree(responses bag.UniqueBag[ids.ID]) error { // Clear the kahn node set - for k := range ta.kahnNodes { - delete(ta.kahnNodes, k) - } + maps.Clear(ta.kahnNodes) // Clear the leaf set ta.leaves.Clear() @@ -380,9 +411,9 @@ func (ta *Topological) markAncestorInDegrees( // Count the number of votes for each operation by pushing votes upwards through // vertex ancestors. -func (ta *Topological) pushVotes() (ids.Bag, error) { +func (ta *Topological) pushVotes(ctx context.Context) (bag.Bag[ids.ID], error) { ta.votes.Clear() - txConflicts := make(map[ids.ID]ids.Set, minMapSize) + txConflicts := make(map[ids.ID]set.Set[ids.ID], minMapSize) // A leaf is a node with no inbound edges. This removes each leaf and pushes // the votes upwards, potentially creating new leaves, until there are no @@ -393,16 +424,16 @@ func (ta *Topological) pushVotes() (ids.Bag, error) { if !ok { // Should never happen because we just checked that [ta.leaves] is // not empty. 
- return ids.Bag{}, errNoLeaves + return bag.Bag[ids.ID]{}, errNoLeaves } kahn := ta.kahnNodes[leaf] if tv := ta.nodes[leaf]; tv != nil { vtx := tv.vtx - txs, err := vtx.Txs() + txs, err := vtx.Txs(ctx) if err != nil { - return ids.Bag{}, err + return bag.Bag[ids.ID]{}, err } for _, tx := range txs { // Give the votes to the consumer @@ -428,7 +459,7 @@ func (ta *Topological) pushVotes() (ids.Bag, error) { parents, err := vtx.Parents() if err != nil { - return ids.Bag{}, err + return bag.Bag[ids.ID]{}, err } for _, dep := range parents { depID := dep.ID() @@ -448,7 +479,7 @@ func (ta *Topological) pushVotes() (ids.Bag, error) { } // Create bag of votes for conflicting transactions - conflictingVotes := make(ids.UniqueBag) + conflictingVotes := make(bag.UniqueBag[ids.ID]) for txID, conflicts := range txConflicts { for conflictTxID := range conflicts { conflictingVotes.UnionSet(txID, ta.votes.GetSet(conflictTxID)) @@ -466,9 +497,9 @@ func (ta *Topological) pushVotes() (ids.Bag, error) { // I now update all my ancestors // If any of my parents are rejected, reject myself // If I'm preferred, remove all my ancestors from the preferred frontier, add -// myself to the preferred frontier +// myself to the preferred frontier // If all my parents are accepted and I'm acceptable, accept myself -func (ta *Topological) update(vtx Vertex) error { +func (ta *Topological) update(ctx context.Context, vtx Vertex) error { vtxID := vtx.ID() if _, cached := ta.preferenceCache[vtxID]; cached { return nil // This vertex has already been updated @@ -478,8 +509,14 @@ func (ta *Topological) update(vtx Vertex) error { // reissued. ta.orphans.Remove(vtxID) - switch vtx.Status() { - case choices.Accepted: + // Note: it is not possible for the status to be rejected here. Update is + // only called when adding a new processing vertex and when updating the + // frontiers. 
If update is called with a rejected vertex when updating the + // frontiers, it is guaranteed that the vertex was rejected during the same + // frontier update. This means that the rejected vertex must have already + // been visited, which means update will have exited from the above + // preferenceCache check. + if vtx.Status() == choices.Accepted { ta.preferred.Add(vtxID) // I'm preferred ta.virtuous.Add(vtxID) // Accepted is defined as virtuous @@ -488,14 +525,9 @@ func (ta *Topological) update(vtx Vertex) error { ta.preferenceCache[vtxID] = true ta.virtuousCache[vtxID] = true return nil - case choices.Rejected: - // I'm rejected - ta.preferenceCache[vtxID] = false - ta.virtuousCache[vtxID] = false - return nil } - txs, err := vtx.Txs() + txs, err := vtx.Txs(ctx) if err != nil { return err } @@ -545,7 +577,7 @@ func (ta *Topological) update(vtx Vertex) error { } // Update all of my dependencies for _, dep := range deps { - if err := ta.update(dep); err != nil { + if err := ta.update(ctx, dep); err != nil { return err } @@ -564,17 +596,16 @@ func (ta *Topological) update(vtx Vertex) error { zap.Stringer("vtxID", vtxID), zap.Stringer("parentID", dep.ID()), ) - if !txv.Status().Decided() { - if err := ta.cg.Remove(vtxID); err != nil { - return fmt.Errorf("failed to remove transaction vertex %s from snowstorm before rejecting vertex itself", vtxID) - } - ta.virtuousVoting.Remove(vtxID) - } - if err := vtx.Reject(); err != nil { + // Note: because the parent was rejected, the transaction vertex + // will have already been marked as rejected by the conflict graph. + // However, we still need to remove it from the set of virtuous + // transactions. 
+ ta.virtuousVoting.Remove(vtxID) + if err := vtx.Reject(ctx); err != nil { return err } delete(ta.nodes, vtxID) - ta.Latency.Rejected(vtxID, ta.pollNumber) + ta.Latency.Rejected(vtxID, ta.pollNumber, len(vtx.Bytes())) ta.preferenceCache[vtxID] = false ta.virtuousCache[vtxID] = false @@ -594,11 +625,16 @@ func (ta *Topological) update(vtx Vertex) error { // Also, this will only happen from a byzantine node issuing the vertex. // Therefore, this is very unlikely to actually be triggered in practice. - // Remove all my parents from the frontier - for _, dep := range deps { - delete(ta.frontier, dep.ID()) + // If the vertex is going to be rejected, it and all of its children are + // going to be removed from the graph. This means that the parents may still + // exist in the frontier. If the vertex is not rejectable, then it will + // still be in the graph and the parents can not be part of the frontier. + if !rejectable { + for _, dep := range deps { + delete(ta.frontier, dep.ID()) + } + ta.frontier[vtxID] = vtx // I have no descendents yet } - ta.frontier[vtxID] = vtx // I have no descendents yet ta.preferenceCache[vtxID] = preferred ta.virtuousCache[vtxID] = virtuous @@ -639,17 +675,18 @@ func (ta *Topological) update(vtx Vertex) error { switch { case acceptable: // I'm acceptable, why not accept? - // Note that ConsensusAcceptor.Accept must be called before vtx.Accept - // to honor Acceptor.Accept's invariant. - if err := ta.ctx.ConsensusAcceptor.Accept(ta.ctx, vtxID, vtx.Bytes()); err != nil { + // Note that VertexAcceptor.Accept must be called before vtx.Accept to + // honor Acceptor.Accept's invariant. 
+ vtxBytes := vtx.Bytes() + if err := ta.ctx.VertexAcceptor.Accept(ta.ctx, vtxID, vtxBytes); err != nil { return err } - if err := vtx.Accept(); err != nil { + if err := vtx.Accept(ctx); err != nil { return err } delete(ta.nodes, vtxID) - ta.Latency.Accepted(vtxID, ta.pollNumber) + ta.Latency.Accepted(vtxID, ta.pollNumber, len(vtxBytes)) case rejectable: // I'm rejectable, why not reject? ta.ctx.Log.Trace("rejecting vertex", @@ -657,22 +694,22 @@ func (ta *Topological) update(vtx Vertex) error { zap.Stringer("vtxID", vtxID), ) if !txv.Status().Decided() { - if err := ta.cg.Remove(vtxID); err != nil { + if err := ta.cg.Remove(ctx, vtxID); err != nil { return fmt.Errorf("failed to remove transaction vertex %s from snowstorm before rejecting vertex itself", vtxID) } ta.virtuousVoting.Remove(vtxID) } - if err := vtx.Reject(); err != nil { + if err := vtx.Reject(ctx); err != nil { return err } delete(ta.nodes, vtxID) - ta.Latency.Rejected(vtxID, ta.pollNumber) + ta.Latency.Rejected(vtxID, ta.pollNumber, len(vtx.Bytes())) } return nil } // Update the frontier sets -func (ta *Topological) updateFrontiers() error { +func (ta *Topological) updateFrontiers(ctx context.Context) error { vts := ta.frontier ta.preferred.Clear() @@ -689,7 +726,7 @@ func (ta *Topological) updateFrontiers() error { for _, vtx := range vts { // Update all the vertices that were in my previous frontier - if err := ta.update(vtx); err != nil { + if err := ta.update(ctx, vtx); err != nil { return err } } diff --git a/avalanchego/snow/consensus/avalanche/topological_test.go b/avalanchego/snow/consensus/avalanche/topological_test.go index e1e27898..6c948ce2 100644 --- a/avalanchego/snow/consensus/avalanche/topological_test.go +++ b/avalanchego/snow/consensus/avalanche/topological_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avalanche @@ -7,4 +7,6 @@ import ( "testing" ) -func TestTopological(t *testing.T) { runConsensusTests(t, TopologicalFactory{}) } +func TestTopological(t *testing.T) { + runConsensusTests(t, TopologicalFactory{}) +} diff --git a/avalanchego/snow/consensus/avalanche/traced_consensus.go b/avalanchego/snow/consensus/avalanche/traced_consensus.go new file mode 100644 index 00000000..dfabbad5 --- /dev/null +++ b/avalanchego/snow/consensus/avalanche/traced_consensus.go @@ -0,0 +1,55 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Consensus = (*tracedConsensus)(nil) + +type tracedConsensus struct { + Consensus + tracer trace.Tracer +} + +func Trace(consensus Consensus, tracer trace.Tracer) Consensus { + return &tracedConsensus{ + Consensus: consensus, + tracer: tracer, + } +} + +func (c *tracedConsensus) Add(ctx context.Context, vtx Vertex) error { + ctx, span := c.tracer.Start(ctx, "tracedConsensus.Add", oteltrace.WithAttributes( + attribute.Stringer("vtxID", vtx.ID()), + )) + defer span.End() + + return c.Consensus.Add(ctx, vtx) +} + +func (c *tracedConsensus) RecordPoll(ctx context.Context, votes bag.UniqueBag[ids.ID]) error { + var allVotes set.Bits64 + for _, vote := range votes { + allVotes.Union(vote) + } + + ctx, span := c.tracer.Start(ctx, "tracedConsensus.RecordPoll", oteltrace.WithAttributes( + attribute.Int("numVotes", allVotes.Len()), + attribute.Int("numVtxIDs", len(votes)), + )) + defer span.End() + + return c.Consensus.RecordPoll(ctx, votes) +} diff --git a/avalanchego/snow/consensus/avalanche/transaction_vertex.go 
b/avalanchego/snow/consensus/avalanche/transaction_vertex.go index 0f35e972..51325026 100644 --- a/avalanchego/snow/consensus/avalanche/transaction_vertex.go +++ b/avalanchego/snow/consensus/avalanche/transaction_vertex.go @@ -1,15 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/utils/set" ) -var _ snowstorm.Tx = &transactionVertex{} +var _ snowstorm.Tx = (*transactionVertex)(nil) // newTransactionVertex returns a new transactionVertex initialized with a // processing status. @@ -34,7 +37,7 @@ type transactionVertex struct { status choices.Status } -func (tv *transactionVertex) Bytes() []byte { +func (*transactionVertex) Bytes() []byte { // Snowstorm uses the bytes of the transaction to broadcast through the // decision dispatcher. Because this is an internal transaction type, we // don't want to have this transaction broadcast. So, we return nil here. @@ -45,21 +48,25 @@ func (tv *transactionVertex) ID() ids.ID { return tv.vtx.ID() } -func (tv *transactionVertex) Accept() error { +func (tv *transactionVertex) Accept(context.Context) error { tv.status = choices.Accepted return nil } -func (tv *transactionVertex) Reject() error { +func (tv *transactionVertex) Reject(context.Context) error { tv.status = choices.Rejected return nil } -func (tv *transactionVertex) Status() choices.Status { return tv.status } +func (tv *transactionVertex) Status() choices.Status { + return tv.status +} // Verify isn't called in the consensus code. So this implementation doesn't // really matter. However it's used to implement the tx interface. 
-func (tv *transactionVertex) Verify() error { return nil } +func (*transactionVertex) Verify(context.Context) error { + return nil +} // Dependencies returns the currently processing transaction vertices of this // vertex's parents. @@ -78,10 +85,16 @@ func (tv *transactionVertex) Dependencies() ([]snowstorm.Tx, error) { } // InputIDs must return a non-empty slice to avoid having the snowstorm engine -// vaciously accept it. A slice is returned containing just the vertexID in +// vacuously accept it. A slice is returned containing just the vertexID in // order to produce no conflicts based on the consumed input. -func (tv *transactionVertex) InputIDs() []ids.ID { return []ids.ID{tv.vtx.ID()} } +func (tv *transactionVertex) InputIDs() []ids.ID { + return []ids.ID{tv.vtx.ID()} +} -func (tv *transactionVertex) HasWhitelist() bool { return tv.vtx.HasWhitelist() } +func (tv *transactionVertex) HasWhitelist() bool { + return tv.vtx.HasWhitelist() +} -func (tv *transactionVertex) Whitelist() (ids.Set, error) { return tv.vtx.Whitelist() } +func (tv *transactionVertex) Whitelist(ctx context.Context) (set.Set[ids.ID], error) { + return tv.vtx.Whitelist(ctx) +} diff --git a/avalanchego/snow/consensus/avalanche/vertex.go b/avalanchego/snow/consensus/avalanche/vertex.go index ebbcc47f..16a87e77 100644 --- a/avalanchego/snow/consensus/avalanche/vertex.go +++ b/avalanchego/snow/consensus/avalanche/vertex.go @@ -1,21 +1,23 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( + "context" + "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/vms/components/verify" ) // Vertex is a collection of multiple transactions tied to other vertices type Vertex interface { choices.Decidable - // Vertex verification should be performed before issuance. 
- verify.Verifiable snowstorm.Whitelister + // Vertex verification should be performed before issuance. + Verify(context.Context) error + // Returns the vertices this vertex depends on Parents() ([]Vertex, error) @@ -24,7 +26,7 @@ type Vertex interface { Height() (uint64, error) // Returns a series of state transitions to be performed on acceptance - Txs() ([]snowstorm.Tx, error) + Txs(context.Context) ([]snowstorm.Tx, error) // Returns the binary representation of this vertex Bytes() []byte diff --git a/avalanchego/snow/consensus/metrics/height.go b/avalanchego/snow/consensus/metrics/height.go index 93a98b8d..7a485f72 100644 --- a/avalanchego/snow/consensus/metrics/height.go +++ b/avalanchego/snow/consensus/metrics/height.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -7,7 +7,7 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -var _ Height = &height{} +var _ Height = (*height)(nil) // Height reports the last accepted height type Height interface { diff --git a/avalanchego/snow/consensus/metrics/latency.go b/avalanchego/snow/consensus/metrics/latency.go index bd377728..4f5d413a 100644 --- a/avalanchego/snow/consensus/metrics/latency.go +++ b/avalanchego/snow/consensus/metrics/latency.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -19,17 +19,19 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -var _ Latency = &latency{} +var _ Latency = (*latency)(nil) type Latency interface { // Issued marks the item as having been issued. Issued(id ids.ID, pollNumber uint64) // Accepted marks the item as having been accepted. - Accepted(id ids.ID, pollNumber uint64) + // Pass the container size in bytes for metrics tracking. 
+ Accepted(id ids.ID, pollNumber uint64, containerSize int) // Rejected marks the item as having been rejected. - Rejected(id ids.ID, pollNumber uint64) + // Pass the container size in bytes for metrics tracking. + Rejected(id ids.ID, pollNumber uint64, containerSize int) // MeasureAndGetOldestDuration returns the amount of time the oldest item // has been processing. @@ -67,11 +69,13 @@ type latency struct { // latAccepted tracks the number of nanoseconds that an item was processing // before being accepted - latAccepted metric.Averager + latAccepted metric.Averager + containerSizeAcceptedSum prometheus.Gauge // rejected tracks the number of nanoseconds that an item was processing // before being rejected - latRejected metric.Averager + latRejected metric.Averager + containerSizeRejectedSum prometheus.Gauge } // Initialize the metrics with the provided names. @@ -80,11 +84,15 @@ func NewLatency(metricName, descriptionName string, log logging.Logger, namespac l := &latency{ processingEntries: linkedhashmap.New[ids.ID, opStart](), log: log, + + // e.g., + // "avalanche_7y7zwo7XatqnX4dtTakLo32o7jkMX4XuDa26WaxbCXoCT1qKK_blks_processing" to count how blocks are currently processing numProcessing: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: fmt.Sprintf("%s_processing", metricName), Help: fmt.Sprintf("Number of currently processing %s", metricName), }), + pollsAccepted: metric.NewAveragerWithErrs( namespace, fmt.Sprintf("%s_polls_accepted", metricName), @@ -99,6 +107,13 @@ func NewLatency(metricName, descriptionName string, log logging.Logger, namespac reg, &errs, ), + + // e.g., + // "avalanche_C_blks_accepted_count" to count how many "Observe" gets called -- count all "Accept" + // "avalanche_C_blks_accepted_sum" to count how many ns have elapsed since its issuance on acceptance + // "avalanche_C_blks_accepted_sum / avalanche_C_blks_accepted_count" is the average block acceptance latency in ns + // 
"avalanche_C_blks_accepted_container_size_sum" to track cumulative sum of all accepted blocks' sizes + // "avalanche_C_blks_accepted_container_size_sum / avalanche_C_blks_accepted_count" is the average block size latAccepted: metric.NewAveragerWithErrs( namespace, fmt.Sprintf("%s_accepted", metricName), @@ -106,6 +121,18 @@ func NewLatency(metricName, descriptionName string, log logging.Logger, namespac reg, &errs, ), + containerSizeAcceptedSum: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: fmt.Sprintf("%s_accepted_container_size_sum", metricName), + Help: fmt.Sprintf("Cumulative sum of container size of all accepted %s", metricName), + }), + + // e.g., + // "avalanche_P_blks_rejected_count" to count how many "Observe" gets called -- count all "Reject" + // "avalanche_P_blks_rejected_sum" to count how many ns have elapsed since its issuance on rejection + // "avalanche_P_blks_accepted_sum / avalanche_P_blks_accepted_count" is the average block acceptance latency in ns + // "avalanche_P_blks_accepted_container_size_sum" to track cumulative sum of all accepted blocks' sizes + // "avalanche_P_blks_accepted_container_size_sum / avalanche_P_blks_accepted_count" is the average block size latRejected: metric.NewAveragerWithErrs( namespace, fmt.Sprintf("%s_rejected", metricName), @@ -113,8 +140,17 @@ func NewLatency(metricName, descriptionName string, log logging.Logger, namespac reg, &errs, ), + containerSizeRejectedSum: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: fmt.Sprintf("%s_rejected_container_size_sum", metricName), + Help: fmt.Sprintf("Cumulative sum of container size of all rejected %s", metricName), + }), } - errs.Add(reg.Register(l.numProcessing)) + errs.Add( + reg.Register(l.numProcessing), + reg.Register(l.containerSizeAcceptedSum), + reg.Register(l.containerSizeRejectedSum), + ) return l, errs.Err } @@ -126,7 +162,7 @@ func (l *latency) Issued(id ids.ID, pollNumber uint64) { l.numProcessing.Inc() } 
-func (l *latency) Accepted(id ids.ID, pollNumber uint64) { +func (l *latency) Accepted(id ids.ID, pollNumber uint64, containerSize int) { start, ok := l.processingEntries.Get(id) if !ok { l.log.Debug("unable to measure tx latency", @@ -142,9 +178,11 @@ func (l *latency) Accepted(id ids.ID, pollNumber uint64) { duration := time.Since(start.time) l.latAccepted.Observe(float64(duration)) l.numProcessing.Dec() + + l.containerSizeAcceptedSum.Add(float64(containerSize)) } -func (l *latency) Rejected(id ids.ID, pollNumber uint64) { +func (l *latency) Rejected(id ids.ID, pollNumber uint64, containerSize int) { start, ok := l.processingEntries.Get(id) if !ok { l.log.Debug("unable to measure tx latency", @@ -160,6 +198,8 @@ func (l *latency) Rejected(id ids.ID, pollNumber uint64) { duration := time.Since(start.time) l.latRejected.Observe(float64(duration)) l.numProcessing.Dec() + + l.containerSizeRejectedSum.Add(float64(containerSize)) } func (l *latency) MeasureAndGetOldestDuration() time.Duration { diff --git a/avalanchego/snow/consensus/metrics/polls.go b/avalanchego/snow/consensus/metrics/polls.go index aff848e9..188bb217 100644 --- a/avalanchego/snow/consensus/metrics/polls.go +++ b/avalanchego/snow/consensus/metrics/polls.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -var _ Polls = &polls{} +var _ Polls = (*polls)(nil) // Polls reports commonly used consensus poll metrics. type Polls interface { diff --git a/avalanchego/snow/consensus/metrics/timestamp.go b/avalanchego/snow/consensus/metrics/timestamp.go new file mode 100644 index 00000000..0e784fa5 --- /dev/null +++ b/avalanchego/snow/consensus/metrics/timestamp.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package metrics + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +var _ Timestamp = (*timestamp)(nil) + +// Timestamp reports the last accepted block time, +// to track it in unix seconds. +type Timestamp interface { + Accepted(ts time.Time) +} + +type timestamp struct { + // lastAcceptedTimestamp keeps track of the last accepted timestamp + lastAcceptedTimestamp prometheus.Gauge +} + +func NewTimestamp(namespace string, reg prometheus.Registerer) (Timestamp, error) { + t := ×tamp{ + lastAcceptedTimestamp: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "last_accepted_timestamp", + Help: "Last accepted block timestamp in unix seconds", + }), + } + return t, reg.Register(t.lastAcceptedTimestamp) +} + +func (t *timestamp) Accepted(ts time.Time) { + t.lastAcceptedTimestamp.Set(float64(ts.Unix())) +} diff --git a/avalanchego/snow/consensus/snowball/binary_slush.go b/avalanchego/snow/consensus/snowball/binary_slush.go index 8428c929..3fed5c98 100644 --- a/avalanchego/snow/consensus/snowball/binary_slush.go +++ b/avalanchego/snow/consensus/snowball/binary_slush.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball @@ -7,7 +7,7 @@ import ( "fmt" ) -var _ BinarySlush = &binarySlush{} +var _ BinarySlush = (*binarySlush)(nil) // binarySlush is the implementation of a binary slush instance type binarySlush struct { @@ -17,10 +17,18 @@ type binarySlush struct { preference int } -func (sl *binarySlush) Initialize(choice int) { sl.preference = choice } +func (sl *binarySlush) Initialize(choice int) { + sl.preference = choice +} -func (sl *binarySlush) Preference() int { return sl.preference } +func (sl *binarySlush) Preference() int { + return sl.preference +} -func (sl *binarySlush) RecordSuccessfulPoll(choice int) { sl.preference = choice } +func (sl *binarySlush) RecordSuccessfulPoll(choice int) { + sl.preference = choice +} -func (sl *binarySlush) String() string { return fmt.Sprintf("SL(Preference = %d)", sl.preference) } +func (sl *binarySlush) String() string { + return fmt.Sprintf("SL(Preference = %d)", sl.preference) +} diff --git a/avalanchego/snow/consensus/snowball/binary_snowball.go b/avalanchego/snow/consensus/snowball/binary_snowball.go index 349398b0..8f197f03 100644 --- a/avalanchego/snow/consensus/snowball/binary_snowball.go +++ b/avalanchego/snow/consensus/snowball/binary_snowball.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball @@ -7,7 +7,7 @@ import ( "fmt" ) -var _ BinarySnowball = &binarySnowball{} +var _ BinarySnowball = (*binarySnowball)(nil) // binarySnowball is the implementation of a binary snowball instance type binarySnowball struct { diff --git a/avalanchego/snow/consensus/snowball/binary_snowball_test.go b/avalanchego/snow/consensus/snowball/binary_snowball_test.go index 1c796775..c2832a7d 100644 --- a/avalanchego/snow/consensus/snowball/binary_snowball_test.go +++ b/avalanchego/snow/consensus/snowball/binary_snowball_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -8,90 +8,90 @@ import ( ) func TestBinarySnowball(t *testing.T) { - Red := 0 - Blue := 1 + red := 0 + blue := 1 beta := 2 sb := binarySnowball{} - sb.Initialize(beta, Red) + sb.Initialize(beta, red) - if pref := sb.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sb.Preference(); pref != red { + t.Fatalf("Wrong preference. Expected %d got %d", red, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sb.Preference(); pref != blue { + t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Red) + sb.RecordSuccessfulPoll(red) - if pref := sb.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sb.Preference(); pref != blue { + t.Fatalf("Wrong preference. 
Expected %d got %d", blue, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sb.Preference(); pref != blue { + t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sb.Preference(); pref != blue { + t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) } else if !sb.Finalized() { t.Fatalf("Didn't finalized correctly") } } func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { - Red := 0 - Blue := 1 + red := 0 + blue := 1 beta := 2 sb := binarySnowball{} - sb.Initialize(beta, Red) + sb.Initialize(beta, red) - if pref := sb.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sb.Preference(); pref != red { + t.Fatalf("Wrong preference. Expected %d got %d", red, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sb.Preference(); pref != blue { + t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } sb.RecordUnsuccessfulPoll() - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sb.Preference(); pref != blue { + t.Fatalf("Wrong preference. 
Expected %d got %d", blue, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sb.Preference(); pref != blue { + t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) } else if !sb.Finalized() { t.Fatalf("Finalized too late") } @@ -103,50 +103,50 @@ func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { } func TestBinarySnowballAcceptWeirdColor(t *testing.T) { - Blue := 0 - Red := 1 + blue := 0 + red := 1 beta := 2 sb := binarySnowball{} - sb.Initialize(beta, Red) + sb.Initialize(beta, red) - if pref := sb.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sb.Preference(); pref != red { + t.Fatalf("Wrong preference. Expected %d got %d", red, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Red) + sb.RecordSuccessfulPoll(red) sb.RecordUnsuccessfulPoll() - if pref := sb.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sb.Preference(); pref != red { + t.Fatalf("Wrong preference. Expected %d got %d", red, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Red) + sb.RecordSuccessfulPoll(red) sb.RecordUnsuccessfulPoll() - if pref := sb.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sb.Preference(); pref != red { + t.Fatalf("Wrong preference. Expected %d got %d", red, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sb.Preference(); pref != red { + t.Fatalf("Wrong preference. 
Expected %d got %d", red, pref) } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sb.Preference(); pref != blue { + t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) } else if !sb.Finalized() { t.Fatalf("Finalized too late") } @@ -158,34 +158,34 @@ func TestBinarySnowballAcceptWeirdColor(t *testing.T) { } func TestBinarySnowballLockColor(t *testing.T) { - Red := 0 - Blue := 1 + red := 0 + blue := 1 beta := 1 sb := binarySnowball{} - sb.Initialize(beta, Red) + sb.Initialize(beta, red) - sb.RecordSuccessfulPoll(Red) + sb.RecordSuccessfulPoll(red) - if pref := sb.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sb.Preference(); pref != red { + t.Fatalf("Wrong preference. Expected %d got %d", red, pref) } else if !sb.Finalized() { t.Fatalf("Finalized too late") } - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sb.Preference(); pref != red { + t.Fatalf("Wrong preference. Expected %d got %d", red, pref) } else if !sb.Finalized() { t.Fatalf("Finalized too late") } - sb.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sb.Preference(); pref != red { + t.Fatalf("Wrong preference. 
Expected %d got %d", red, pref) } else if !sb.Finalized() { t.Fatalf("Finalized too late") } diff --git a/avalanchego/snow/consensus/snowball/binary_snowflake.go b/avalanchego/snow/consensus/snowball/binary_snowflake.go index f11b5584..50507dc0 100644 --- a/avalanchego/snow/consensus/snowball/binary_snowflake.go +++ b/avalanchego/snow/consensus/snowball/binary_snowflake.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -7,7 +7,7 @@ import ( "fmt" ) -var _ BinarySnowflake = &binarySnowflake{} +var _ BinarySnowflake = (*binarySnowflake)(nil) // binarySnowflake is the implementation of a binary snowflake instance type binarySnowflake struct { @@ -49,9 +49,13 @@ func (sf *binarySnowflake) RecordSuccessfulPoll(choice int) { sf.binarySlush.RecordSuccessfulPoll(choice) } -func (sf *binarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } +func (sf *binarySnowflake) RecordUnsuccessfulPoll() { + sf.confidence = 0 +} -func (sf *binarySnowflake) Finalized() bool { return sf.finalized } +func (sf *binarySnowflake) Finalized() bool { + return sf.finalized +} func (sf *binarySnowflake) String() string { return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", diff --git a/avalanchego/snow/consensus/snowball/binary_snowflake_test.go b/avalanchego/snow/consensus/snowball/binary_snowflake_test.go index 6d9f67b7..eb16d1e3 100644 --- a/avalanchego/snow/consensus/snowball/binary_snowflake_test.go +++ b/avalanchego/snow/consensus/snowball/binary_snowflake_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball @@ -8,48 +8,48 @@ import ( ) func TestBinarySnowflake(t *testing.T) { - Blue := 0 - Red := 1 + blue := 0 + red := 1 beta := 2 sf := binarySnowflake{} - sf.Initialize(beta, Red) + sf.Initialize(beta, red) - if pref := sf.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sf.Preference(); pref != red { + t.Fatalf("Wrong preference. Expected %d got %d", red, pref) } else if sf.Finalized() { t.Fatalf("Finalized too early") } - sf.RecordSuccessfulPoll(Blue) + sf.RecordSuccessfulPoll(blue) - if pref := sf.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sf.Preference(); pref != blue { + t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) } else if sf.Finalized() { t.Fatalf("Finalized too early") } - sf.RecordSuccessfulPoll(Red) + sf.RecordSuccessfulPoll(red) - if pref := sf.Preference(); pref != Red { - t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + if pref := sf.Preference(); pref != red { + t.Fatalf("Wrong preference. Expected %d got %d", red, pref) } else if sf.Finalized() { t.Fatalf("Finalized too early") } - sf.RecordSuccessfulPoll(Blue) + sf.RecordSuccessfulPoll(blue) - if pref := sf.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sf.Preference(); pref != blue { + t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) } else if sf.Finalized() { t.Fatalf("Finalized too early") } - sf.RecordSuccessfulPoll(Blue) + sf.RecordSuccessfulPoll(blue) - if pref := sf.Preference(); pref != Blue { - t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + if pref := sf.Preference(); pref != blue { + t.Fatalf("Wrong preference. 
Expected %d got %d", blue, pref) } else if !sf.Finalized() { t.Fatalf("Didn't finalized correctly") } diff --git a/avalanchego/snow/consensus/snowball/consensus.go b/avalanchego/snow/consensus/snowball/consensus.go index 87512b98..1eb54694 100644 --- a/avalanchego/snow/consensus/snowball/consensus.go +++ b/avalanchego/snow/consensus/snowball/consensus.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -7,6 +7,7 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) // Consensus represents a general snow instance that can be used directly to @@ -17,9 +18,6 @@ type Consensus interface { // Takes in alpha, beta1, beta2, and the initial choice Initialize(params Parameters, initialPreference ids.ID) - // Returns the parameters that describe this snowball instance - Parameters() Parameters - // Adds a new choice to vote on Add(newChoice ids.ID) @@ -35,7 +33,7 @@ type Consensus interface { // // If the consensus instance was previously finalized, the function may // return true or false. - RecordPoll(votes ids.Bag) bool + RecordPoll(votes bag.Bag[ids.ID]) bool // RecordUnsuccessfulPoll resets the snowflake counters of this consensus // instance diff --git a/avalanchego/snow/consensus/snowball/consensus_performance_test.go b/avalanchego/snow/consensus/snowball/consensus_performance_test.go index 283b6677..16d1a924 100644 --- a/avalanchego/snow/consensus/snowball/consensus_performance_test.go +++ b/avalanchego/snow/consensus/snowball/consensus_performance_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball diff --git a/avalanchego/snow/consensus/snowball/consensus_reversibility_test.go b/avalanchego/snow/consensus/snowball/consensus_reversibility_test.go index 5d8be66d..7ce053c2 100644 --- a/avalanchego/snow/consensus/snowball/consensus_reversibility_test.go +++ b/avalanchego/snow/consensus/snowball/consensus_reversibility_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/avalanchego/snow/consensus/snowball/consensus_test.go b/avalanchego/snow/consensus/snowball/consensus_test.go index 83ae2abd..0944c68d 100644 --- a/avalanchego/snow/consensus/snowball/consensus_test.go +++ b/avalanchego/snow/consensus/snowball/consensus_test.go @@ -1,63 +1,47 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball import ( - "testing" - "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) -var _ Consensus = &Byzantine{} +var ( + Red = ids.Empty.Prefix(0) + Blue = ids.Empty.Prefix(1) + Green = ids.Empty.Prefix(2) + + _ Consensus = (*Byzantine)(nil) +) // Byzantine is a naive implementation of a multi-choice snowball instance type Byzantine struct { - // params contains all the configurations of a snowball instance - params Parameters - // Hardcode the preference preference ids.ID } -func (b *Byzantine) Initialize(params Parameters, choice ids.ID) { - b.params = params +func (b *Byzantine) Initialize(_ Parameters, choice ids.ID) { b.preference = choice } -func (b *Byzantine) Parameters() Parameters { return b.params } -func (b *Byzantine) Add(choice ids.ID) {} -func (b *Byzantine) Preference() ids.ID { return b.preference } -func (b *Byzantine) RecordPoll(votes ids.Bag) bool { return false } -func (b *Byzantine) RecordUnsuccessfulPoll() {} -func (b *Byzantine) Finalized() bool { return true } -func (b *Byzantine) String() string { return b.preference.String() } +func (*Byzantine) Add(ids.ID) {} -var ( - Red = ids.Empty.Prefix(0) - Blue = ids.Empty.Prefix(1) - Green = ids.Empty.Prefix(2) -) +func (b *Byzantine) Preference() ids.ID { + return b.preference +} + +func (*Byzantine) RecordPoll(bag.Bag[ids.ID]) bool { + return false +} + +func (*Byzantine) RecordUnsuccessfulPoll() {} + +func (*Byzantine) Finalized() bool { + return true +} -func ParamsTest(t *testing.T, factory Factory) { - sb := factory.New() - - params := Parameters{ - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, ConcurrentRepolls: 1, - } - sb.Initialize(params, Red) - - p := sb.Parameters() - switch { - case p.K != params.K: - t.Fatalf("Wrong K parameter") - case p.Alpha != params.Alpha: - t.Fatalf("Wrong Alpha parameter") - case p.BetaVirtuous != params.BetaVirtuous: - t.Fatalf("Wrong Beta1 parameter") - case p.BetaRogue != params.BetaRogue: - t.Fatalf("Wrong 
Beta2 parameter") - case p.ConcurrentRepolls != params.ConcurrentRepolls: - t.Fatalf("Wrong Repoll parameter") - } +func (b *Byzantine) String() string { + return b.preference.String() } diff --git a/avalanchego/snow/consensus/snowball/factory.go b/avalanchego/snow/consensus/snowball/factory.go index b5e4db87..716f8f6a 100644 --- a/avalanchego/snow/consensus/snowball/factory.go +++ b/avalanchego/snow/consensus/snowball/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/avalanchego/snow/consensus/snowball/flat.go b/avalanchego/snow/consensus/snowball/flat.go index 42ce8685..cf08c9bd 100644 --- a/avalanchego/snow/consensus/snowball/flat.go +++ b/avalanchego/snow/consensus/snowball/flat.go @@ -1,21 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball import ( "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) var ( - _ Factory = &FlatFactory{} - _ Consensus = &Flat{} + _ Factory = (*FlatFactory)(nil) + _ Consensus = (*Flat)(nil) ) // FlatFactory implements Factory by returning a flat struct type FlatFactory struct{} -func (FlatFactory) New() Consensus { return &Flat{} } +func (FlatFactory) New() Consensus { + return &Flat{} +} // Flat is a naive implementation of a multi-choice snowball instance type Flat struct { @@ -31,9 +34,7 @@ func (f *Flat) Initialize(params Parameters, choice ids.ID) { f.params = params } -func (f *Flat) Parameters() Parameters { return f.params } - -func (f *Flat) RecordPoll(votes ids.Bag) bool { +func (f *Flat) RecordPoll(votes bag.Bag[ids.ID]) bool { if pollMode, numVotes := votes.Mode(); numVotes >= f.params.Alpha { f.RecordSuccessfulPoll(pollMode) return true diff --git a/avalanchego/snow/consensus/snowball/flat_test.go b/avalanchego/snow/consensus/snowball/flat_test.go index 2560daaa..c5d292d1 100644 --- a/avalanchego/snow/consensus/snowball/flat_test.go +++ b/avalanchego/snow/consensus/snowball/flat_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball @@ -9,10 +9,9 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) -func TestFlatParams(t *testing.T) { ParamsTest(t, FlatFactory{}) } - func TestFlat(t *testing.T) { require := require.New(t) @@ -27,13 +26,13 @@ func TestFlat(t *testing.T) { require.Equal(Red, f.Preference()) require.False(f.Finalized()) - twoBlue := ids.Bag{} + twoBlue := bag.Bag[ids.ID]{} twoBlue.Add(Blue, Blue) require.True(f.RecordPoll(twoBlue)) require.Equal(Blue, f.Preference()) require.False(f.Finalized()) - oneRedOneBlue := ids.Bag{} + oneRedOneBlue := bag.Bag[ids.ID]{} oneRedOneBlue.Add(Red, Blue) require.False(f.RecordPoll(oneRedOneBlue)) require.Equal(Blue, f.Preference()) diff --git a/avalanchego/snow/consensus/snowball/network_test.go b/avalanchego/snow/consensus/snowball/network_test.go index 2ce30e31..67000438 100644 --- a/avalanchego/snow/consensus/snowball/network_test.go +++ b/avalanchego/snow/consensus/snowball/network_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -7,6 +7,7 @@ import ( "math/rand" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/sampler" ) @@ -75,7 +76,7 @@ func (n *Network) Round() { count = n.params.K } indices, _ := s.Sample(count) - sampledColors := ids.Bag{} + sampledColors := bag.Bag[ids.ID]{} for _, index := range indices { peer := n.nodes[int(index)] sampledColors.Add(peer.Preference()) diff --git a/avalanchego/snow/consensus/snowball/nnary_slush.go b/avalanchego/snow/consensus/snowball/nnary_slush.go index 0b67afba..b6fe581c 100644 --- a/avalanchego/snow/consensus/snowball/nnary_slush.go +++ b/avalanchego/snow/consensus/snowball/nnary_slush.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ NnarySlush = &nnarySlush{} +var _ NnarySlush = (*nnarySlush)(nil) // nnarySlush is the implementation of a slush instance with an unbounded number // of choices @@ -20,10 +20,18 @@ type nnarySlush struct { preference ids.ID } -func (sl *nnarySlush) Initialize(choice ids.ID) { sl.preference = choice } +func (sl *nnarySlush) Initialize(choice ids.ID) { + sl.preference = choice +} -func (sl *nnarySlush) Preference() ids.ID { return sl.preference } +func (sl *nnarySlush) Preference() ids.ID { + return sl.preference +} -func (sl *nnarySlush) RecordSuccessfulPoll(choice ids.ID) { sl.preference = choice } +func (sl *nnarySlush) RecordSuccessfulPoll(choice ids.ID) { + sl.preference = choice +} -func (sl *nnarySlush) String() string { return fmt.Sprintf("SL(Preference = %s)", sl.preference) } +func (sl *nnarySlush) String() string { + return fmt.Sprintf("SL(Preference = %s)", sl.preference) +} diff --git a/avalanchego/snow/consensus/snowball/nnary_snowball.go b/avalanchego/snow/consensus/snowball/nnary_snowball.go index fdbcc32a..b31a5f3d 100644 --- a/avalanchego/snow/consensus/snowball/nnary_snowball.go +++ b/avalanchego/snow/consensus/snowball/nnary_snowball.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ NnarySnowball = &nnarySnowball{} +var _ NnarySnowball = (*nnarySnowball)(nil) // nnarySnowball is a naive implementation of a multi-color snowball instance type nnarySnowball struct { diff --git a/avalanchego/snow/consensus/snowball/nnary_snowball_test.go b/avalanchego/snow/consensus/snowball/nnary_snowball_test.go index c48c6e0a..3798f4fd 100644 --- a/avalanchego/snow/consensus/snowball/nnary_snowball_test.go +++ b/avalanchego/snow/consensus/snowball/nnary_snowball_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/avalanchego/snow/consensus/snowball/nnary_snowflake.go b/avalanchego/snow/consensus/snowball/nnary_snowflake.go index 44e712e3..57b32707 100644 --- a/avalanchego/snow/consensus/snowball/nnary_snowflake.go +++ b/avalanchego/snow/consensus/snowball/nnary_snowflake.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ NnarySnowflake = &nnarySnowflake{} +var _ NnarySnowflake = (*nnarySnowflake)(nil) // nnarySnowflake is the implementation of a snowflake instance with an // unbounded number of choices @@ -43,11 +43,13 @@ func (sf *nnarySnowflake) Initialize(betaVirtuous, betaRogue int, choice ids.ID) sf.betaRogue = betaRogue } -func (sf *nnarySnowflake) Add(choice ids.ID) { sf.rogue = sf.rogue || choice != sf.preference } +func (sf *nnarySnowflake) Add(choice ids.ID) { + sf.rogue = sf.rogue || choice != sf.preference +} func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) { if sf.finalized { - return // This instace is already decided. 
+ return // This instance is already decided. } if preference := sf.Preference(); preference == choice { @@ -63,9 +65,13 @@ func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) { sf.nnarySlush.RecordSuccessfulPoll(choice) } -func (sf *nnarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } +func (sf *nnarySnowflake) RecordUnsuccessfulPoll() { + sf.confidence = 0 +} -func (sf *nnarySnowflake) Finalized() bool { return sf.finalized } +func (sf *nnarySnowflake) Finalized() bool { + return sf.finalized +} func (sf *nnarySnowflake) String() string { return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", diff --git a/avalanchego/snow/consensus/snowball/nnary_snowflake_test.go b/avalanchego/snow/consensus/snowball/nnary_snowflake_test.go index aab32831..36febe49 100644 --- a/avalanchego/snow/consensus/snowball/nnary_snowflake_test.go +++ b/avalanchego/snow/consensus/snowball/nnary_snowflake_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/avalanchego/snow/consensus/snowball/parameters.go b/avalanchego/snow/consensus/snowball/parameters.go index 893ceb3b..460f9940 100644 --- a/avalanchego/snow/consensus/snowball/parameters.go +++ b/avalanchego/snow/consensus/snowball/parameters.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/avalanchego/snow/consensus/snowball/parameters_test.go b/avalanchego/snow/consensus/snowball/parameters_test.go index 3f37f4f1..3fc2632c 100644 --- a/avalanchego/snow/consensus/snowball/parameters_test.go +++ b/avalanchego/snow/consensus/snowball/parameters_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/avalanchego/snow/consensus/snowball/tree.go b/avalanchego/snow/consensus/snowball/tree.go index 92c08e40..1e9aa701 100644 --- a/avalanchego/snow/consensus/snowball/tree.go +++ b/avalanchego/snow/consensus/snowball/tree.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -8,19 +8,22 @@ import ( "strings" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) var ( - _ Factory = &TreeFactory{} - _ Consensus = &Tree{} - _ node = &unaryNode{} - _ node = &binaryNode{} + _ Factory = (*TreeFactory)(nil) + _ Consensus = (*Tree)(nil) + _ node = (*unaryNode)(nil) + _ node = (*binaryNode)(nil) ) // TreeFactory implements Factory by returning a tree struct type TreeFactory struct{} -func (TreeFactory) New() Consensus { return &Tree{} } +func (TreeFactory) New() Consensus { + return &Tree{} +} // Tree implements the snowball interface by using a modified patricia tree. type Tree struct { @@ -57,8 +60,6 @@ func (t *Tree) Initialize(params Parameters, choice ids.ID) { } } -func (t *Tree) Parameters() Parameters { return t.params } - func (t *Tree) Add(choice ids.ID) { prefix := t.node.DecidedPrefix() // Make sure that we haven't already decided against this new id @@ -67,13 +68,16 @@ func (t *Tree) Add(choice ids.ID) { } } -func (t *Tree) RecordPoll(votes ids.Bag) bool { +func (t *Tree) RecordPoll(votes bag.Bag[ids.ID]) bool { // Get the assumed decided prefix of the root node. decidedPrefix := t.node.DecidedPrefix() // If any of the bits differ from the preference in this prefix, the vote is // for a rejected operation. So, we filter out these invalid votes. 
- filteredVotes := votes.Filter(0, decidedPrefix, t.Preference()) + preference := t.Preference() + filteredVotes := votes.Filter(func(id ids.ID) bool { + return ids.EqualSubset(0, decidedPrefix, preference, id) + }) // Now that the votes have been restricted to valid votes, pass them into // the first snowball instance @@ -86,10 +90,12 @@ func (t *Tree) RecordPoll(votes ids.Bag) bool { return successful } -func (t *Tree) RecordUnsuccessfulPoll() { t.shouldReset = true } +func (t *Tree) RecordUnsuccessfulPoll() { + t.shouldReset = true +} func (t *Tree) String() string { - builder := strings.Builder{} + sb := strings.Builder{} prefixes := []string{""} nodes := []node{t.node} @@ -105,9 +111,9 @@ func (t *Tree) String() string { s, newNodes := node.Printable() - builder.WriteString(prefix) - builder.WriteString(s) - builder.WriteString("\n") + sb.WriteString(prefix) + sb.WriteString(s) + sb.WriteString("\n") newPrefix := prefix + " " for range newNodes { @@ -116,7 +122,7 @@ func (t *Tree) String() string { nodes = append(nodes, newNodes...) 
} - return strings.TrimSuffix(builder.String(), "\n") + return strings.TrimSuffix(sb.String(), "\n") } type node interface { @@ -129,7 +135,7 @@ type node interface { Add(newChoice ids.ID) node // Apply the votes, reset the model if needed // Returns the new node and whether the vote was successful - RecordPoll(votes ids.Bag, shouldReset bool) (newChild node, successful bool) + RecordPoll(votes bag.Bag[ids.ID], shouldReset bool) (newChild node, successful bool) // Returns true if consensus has been reached on this node Finalized() bool @@ -165,18 +171,26 @@ type unaryNode struct { child node } -func (u *unaryNode) Preference() ids.ID { return u.preference } -func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } +func (u *unaryNode) Preference() ids.ID { + return u.preference +} +func (u *unaryNode) DecidedPrefix() int { + return u.decidedPrefix +} + +//nolint:gofmt,gofumpt,goimports // this comment is formatted as intended +// // This is by far the most complicated function in this algorithm. // The intuition is that this instance represents a series of consecutive unary // snowball instances, and this function's purpose is convert one of these unary // snowball instances into a binary snowball instance. // There are 5 possible cases. -// 1. None of these instances should be split, we should attempt to split a -// child // -// For example, attempting to insert the value "00001" in this node: +// 1. None of these instances should be split, we should attempt to split a +// child +// +// For example, attempting to insert the value "00001" in this node: // // +-------------------+ <-- This node will not be split // | | @@ -186,7 +200,7 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } // ^ // | // -// Results in: +// Results in: // // +-------------------+ // | | @@ -196,13 +210,14 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } // ^ // | // -// 2. 
This instance represents a series of only one unary instance and it must -// be split -// This will return a binary choice, with one child the same as my child, -// and another (possibly nil child) representing a new chain to the end of -// the hash +// 2. This instance represents a series of only one unary instance and it must +// be split. +// +// This will return a binary choice, with one child the same as my child, +// and another (possibly nil child) representing a new chain to the end of +// the hash // -// For example, attempting to insert the value "1" in this tree: +// For example, attempting to insert the value "1" in this tree: // // +-------------------+ // | | @@ -210,7 +225,7 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } // | | // +-------------------+ // -// Results in: +// Results in: // // +-------------------+ // | | | @@ -218,12 +233,13 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } // | | | // +-------------------+ // -// 3. This instance must be split on the first bit -// This will return a binary choice, with one child equal to this instance -// with decidedPrefix increased by one, and another representing a new -// chain to the end of the hash +// 3. This instance must be split on the first bit // -// For example, attempting to insert the value "10" in this tree: +// This will return a binary choice, with one child equal to this instance +// with decidedPrefix increased by one, and another representing a new +// chain to the end of the hash +// +// For example, attempting to insert the value "10" in this tree: // // +-------------------+ // | | @@ -231,7 +247,7 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } // | | // +-------------------+ // -// Results in: +// Results in: // // +-------------------+ // | | | @@ -246,13 +262,14 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } // | | | | // +-------------------+ +-------------------+ // -// 4. 
This instance must be split on the last bit -// This will modify this unary choice. The commonPrefix is decreased by -// one. The child is set to a binary instance that has a child equal to -// the current child and another child equal to a new unary instance to -// the end of the hash +// 4. This instance must be split on the last bit +// +// This will modify this unary choice. The commonPrefix is decreased by +// one. The child is set to a binary instance that has a child equal to +// the current child and another child equal to a new unary instance to +// the end of the hash // -// For example, attempting to insert the value "01" in this tree: +// For example, attempting to insert the value "01" in this tree: // // +-------------------+ // | | @@ -260,7 +277,7 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } // | | // +-------------------+ // -// Results in: +// Results in: // // +-------------------+ // | | @@ -275,14 +292,15 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } // | | | // +-------------------+ // -// 5. This instance must be split on an interior bit -// This will modify this unary choice. The commonPrefix is set to the -// interior bit. The child is set to a binary instance that has a child -// equal to this unary choice with the decidedPrefix equal to the interior -// bit and another child equal to a new unary instance to the end of the -// hash +// 5. This instance must be split on an interior bit // -// For example, attempting to insert the value "010" in this tree: +// This will modify this unary choice. The commonPrefix is set to the +// interior bit. 
The child is set to a binary instance that has a child +// equal to this unary choice with the decidedPrefix equal to the interior +// bit and another child equal to a new unary instance to the end of the +// hash +// +// For example, attempting to insert the value "010" in this tree: // // +-------------------+ // | | @@ -290,7 +308,7 @@ func (u *unaryNode) DecidedPrefix() int { return u.decidedPrefix } // | | // +-------------------+ // -// Results in: +// Results in: // // +-------------------+ // | | @@ -393,7 +411,7 @@ func (u *unaryNode) Add(newChoice ids.ID) node { return u // Do nothing, the choice was already rejected } -func (u *unaryNode) RecordPoll(votes ids.Bag, reset bool) (node, bool) { +func (u *unaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) { // We are guaranteed that the votes are of IDs that have previously been // added. This ensures that the provided votes all have the same bits in the // range [u.decidedPrefix, u.commonPrefix) as in u.preference. 
@@ -439,7 +457,9 @@ func (u *unaryNode) RecordPoll(votes ids.Bag, reset bool) (node, bool) { return u, true } -func (u *unaryNode) Finalized() bool { return u.snowball.Finalized() } +func (u *unaryNode) Finalized() bool { + return u.snowball.Finalized() +} func (u *unaryNode) Printable() (string, []node) { s := fmt.Sprintf("%s Bits = [%d, %d)", @@ -475,8 +495,13 @@ type binaryNode struct { children [2]node } -func (b *binaryNode) Preference() ids.ID { return b.preferences[b.snowball.Preference()] } -func (b *binaryNode) DecidedPrefix() int { return b.bit } +func (b *binaryNode) Preference() ids.ID { + return b.preferences[b.snowball.Preference()] +} + +func (b *binaryNode) DecidedPrefix() int { + return b.bit +} func (b *binaryNode) Add(id ids.ID) node { bit := id.Bit(uint(b.bit)) @@ -496,10 +521,12 @@ func (b *binaryNode) Add(id ids.ID) node { return b } -func (b *binaryNode) RecordPoll(votes ids.Bag, reset bool) (node, bool) { +func (b *binaryNode) RecordPoll(votes bag.Bag[ids.ID], reset bool) (node, bool) { // The list of votes we are passed is split into votes for bit 0 and votes // for bit 1 - splitVotes := votes.Split(uint(b.bit)) + splitVotes := votes.Split(func(id ids.ID) bool { + return id.Bit(uint(b.bit)) == 1 + }) bit := 0 // We only care about which bit is set if a successful poll can happen @@ -528,8 +555,10 @@ func (b *binaryNode) RecordPoll(votes ids.Bag, reset bool) (node, bool) { if child := b.children[bit]; child != nil { // The votes are filtered to ensure that they are votes that should // count for the child - filteredVotes := prunedVotes.Filter( - b.bit+1, child.DecidedPrefix(), b.preferences[bit]) + decidedPrefix := child.DecidedPrefix() + filteredVotes := prunedVotes.Filter(func(id ids.ID) bool { + return ids.EqualSubset(b.bit+1, decidedPrefix, b.preferences[bit], id) + }) newChild, _ := child.RecordPoll(filteredVotes, b.shouldReset[bit]) if b.snowball.Finalized() { @@ -544,7 +573,9 @@ func (b *binaryNode) RecordPoll(votes ids.Bag, reset 
bool) (node, bool) { return b, true } -func (b *binaryNode) Finalized() bool { return b.snowball.Finalized() } +func (b *binaryNode) Finalized() bool { + return b.snowball.Finalized() +} func (b *binaryNode) Printable() (string, []node) { s := fmt.Sprintf("%s Bit = %d", b.snowball, b.bit) diff --git a/avalanchego/snow/consensus/snowball/tree_test.go b/avalanchego/snow/consensus/snowball/tree_test.go index 154b45ea..b11b3286 100644 --- a/avalanchego/snow/consensus/snowball/tree_test.go +++ b/avalanchego/snow/consensus/snowball/tree_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/sampler" ) @@ -17,8 +18,6 @@ const ( initialUnaryDescription = "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" ) -func TestTreeParams(t *testing.T) { ParamsTest(t, TreeFactory{}) } - func TestSnowballSingleton(t *testing.T) { require := require.New(t) @@ -30,12 +29,12 @@ func TestSnowballSingleton(t *testing.T) { require.False(tree.Finalized()) - oneRed := ids.Bag{} + oneRed := bag.Bag[ids.ID]{} oneRed.Add(Red) require.True(tree.RecordPoll(oneRed)) require.False(tree.Finalized()) - empty := ids.Bag{} + empty := bag.Bag[ids.ID]{} require.False(tree.RecordPoll(empty)) require.False(tree.Finalized()) @@ -52,7 +51,7 @@ func TestSnowballSingleton(t *testing.T) { // Because the tree is already finalized, RecordPoll can return either true // or false. 
- oneBlue := ids.Bag{} + oneBlue := bag.Bag[ids.ID]{} oneBlue.Add(Blue) tree.RecordPoll(oneBlue) require.Equal(Red, tree.Preference()) @@ -70,7 +69,7 @@ func TestSnowballRecordUnsuccessfulPoll(t *testing.T) { require.False(tree.Finalized()) - oneRed := ids.Bag{} + oneRed := bag.Bag[ids.ID]{} oneRed.Add(Red) require.True(tree.RecordPoll(oneRed)) @@ -100,13 +99,13 @@ func TestSnowballBinary(t *testing.T) { require.Equal(Red, tree.Preference()) require.False(tree.Finalized()) - oneBlue := ids.Bag{} + oneBlue := bag.Bag[ids.ID]{} oneBlue.Add(Blue) require.True(tree.RecordPoll(oneBlue)) require.Equal(Blue, tree.Preference()) require.False(tree.Finalized()) - oneRed := ids.Bag{} + oneRed := bag.Bag[ids.ID]{} oneRed.Add(Red) require.True(tree.RecordPoll(oneRed)) require.Equal(Blue, tree.Preference()) @@ -148,7 +147,7 @@ func TestSnowballLastBinary(t *testing.T) { require.Equal(zero, tree.Preference()) require.False(tree.Finalized()) - oneBag := ids.Bag{} + oneBag := bag.Bag[ids.ID]{} oneBag.Add(one) require.True(tree.RecordPoll(oneBag)) require.Equal(one, tree.Preference()) @@ -194,7 +193,7 @@ func TestSnowballAddPreviouslyRejected(t *testing.T) { require.False(tree.Finalized()) } - zeroBag := ids.Bag{} + zeroBag := bag.Bag[ids.ID]{} zeroBag.Add(zero) require.True(tree.RecordPoll(zeroBag)) @@ -245,7 +244,7 @@ func TestSnowballNewUnary(t *testing.T) { require.False(tree.Finalized()) } - oneBag := ids.Bag{} + oneBag := bag.Bag[ids.ID]{} oneBag.Add(one) require.True(tree.RecordPoll(oneBag)) @@ -298,7 +297,7 @@ func TestSnowballTransitiveReset(t *testing.T) { require.False(tree.Finalized()) } - zeroBag := ids.Bag{} + zeroBag := bag.Bag[ids.ID]{} zeroBag.Add(zero) require.True(tree.RecordPoll(zeroBag)) @@ -315,7 +314,7 @@ func TestSnowballTransitiveReset(t *testing.T) { require.False(tree.Finalized()) } - emptyBag := ids.Bag{} + emptyBag := bag.Bag[ids.ID]{} require.False(tree.RecordPoll(emptyBag)) { @@ -376,13 +375,13 @@ func TestSnowballTrinary(t *testing.T) { 
require.Equal(Green, tree.Preference()) require.False(tree.Finalized()) - redBag := ids.Bag{} + redBag := bag.Bag[ids.ID]{} redBag.Add(Red) require.True(tree.RecordPoll(redBag)) require.Equal(Red, tree.Preference()) require.False(tree.Finalized()) - blueBag := ids.Bag{} + blueBag := bag.Bag[ids.ID]{} blueBag.Add(Blue) require.True(tree.RecordPoll(blueBag)) require.Equal(Red, tree.Preference()) @@ -390,7 +389,7 @@ func TestSnowballTrinary(t *testing.T) { // Here is a case where voting for a color makes a different color become // the preferred color. This is intended behavior. - greenBag := ids.Bag{} + greenBag := bag.Bag[ids.ID]{} greenBag.Add(Green) require.True(tree.RecordPoll(greenBag)) require.Equal(Blue, tree.Preference()) @@ -430,20 +429,20 @@ func TestSnowballCloseTrinary(t *testing.T) { require.Equal(yellow, tree.Preference()) require.False(tree.Finalized()) - yellowBag := ids.Bag{} + yellowBag := bag.Bag[ids.ID]{} yellowBag.Add(yellow) require.True(tree.RecordPoll(yellowBag)) require.Equal(yellow, tree.Preference()) require.False(tree.Finalized()) - magentaBag := ids.Bag{} + magentaBag := bag.Bag[ids.ID]{} magentaBag.Add(magenta) require.True(tree.RecordPoll(magentaBag)) require.Equal(yellow, tree.Preference()) require.False(tree.Finalized()) // Cyan has already been rejected here, so these are not successful polls. 
- cyanBag := ids.Bag{} + cyanBag := bag.Bag[ids.ID]{} cyanBag.Add(cyan) require.False(tree.RecordPoll(cyanBag)) require.Equal(yellow, tree.Preference()) @@ -473,7 +472,7 @@ func TestSnowballAddRejected(t *testing.T) { require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) - c0010Bag := ids.Bag{} + c0010Bag := bag.Bag[ids.ID]{} c0010Bag.Add(c0010) require.True(tree.RecordPoll(c0010Bag)) @@ -520,7 +519,7 @@ func TestSnowballResetChild(t *testing.T) { require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) - c0000Bag := ids.Bag{} + c0000Bag := bag.Bag[ids.ID]{} c0000Bag.Add(c0000) require.True(tree.RecordPoll(c0000Bag)) @@ -535,7 +534,7 @@ func TestSnowballResetChild(t *testing.T) { require.False(tree.Finalized()) } - emptyBag := ids.Bag{} + emptyBag := bag.Bag[ids.ID]{} require.False(tree.RecordPoll(emptyBag)) { @@ -581,7 +580,7 @@ func TestSnowballResetSibling(t *testing.T) { require.Equal(c0000, tree.Preference()) require.False(tree.Finalized()) - c0100Bag := ids.Bag{} + c0100Bag := bag.Bag[ids.ID]{} c0100Bag.Add(c0100) require.True(tree.RecordPoll(c0100Bag)) @@ -596,7 +595,7 @@ func TestSnowballResetSibling(t *testing.T) { require.False(tree.Finalized()) } - c1000Bag := ids.Bag{} + c1000Bag := bag.Bag[ids.ID]{} c1000Bag.Add(c1000) require.True(tree.RecordPoll(c1000Bag)) @@ -717,7 +716,7 @@ func TestSnowballFineGrained(t *testing.T) { require.False(tree.Finalized()) } - c0000Bag := ids.Bag{} + c0000Bag := bag.Bag[ids.ID]{} c0000Bag.Add(c0000) require.True(tree.RecordPoll(c0000Bag)) @@ -734,7 +733,7 @@ func TestSnowballFineGrained(t *testing.T) { require.False(tree.Finalized()) } - c0010Bag := ids.Bag{} + c0010Bag := bag.Bag[ids.ID]{} c0010Bag.Add(c0010) require.True(tree.RecordPoll(c0010Bag)) @@ -841,7 +840,7 @@ func TestSnowballFilterBinaryChildren(t *testing.T) { require.False(tree.Finalized()) } - c0000Bag := ids.Bag{} + c0000Bag := bag.Bag[ids.ID]{} c0000Bag.Add(c0000) require.True(tree.RecordPoll(c0000Bag)) @@ -869,7 +868,7 @@ 
func TestSnowballFilterBinaryChildren(t *testing.T) { require.False(tree.Finalized()) } - c0100Bag := ids.Bag{} + c0100Bag := bag.Bag[ids.ID]{} c0100Bag.Add(c0100) require.True(tree.RecordPoll(c0100Bag)) diff --git a/avalanchego/snow/consensus/snowball/unary_snowball.go b/avalanchego/snow/consensus/snowball/unary_snowball.go index e4cfdb68..44bf1c42 100644 --- a/avalanchego/snow/consensus/snowball/unary_snowball.go +++ b/avalanchego/snow/consensus/snowball/unary_snowball.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball @@ -7,7 +7,7 @@ import ( "fmt" ) -var _ UnarySnowball = &unarySnowball{} +var _ UnarySnowball = (*unarySnowball)(nil) // unarySnowball is the implementation of a unary snowball instance type unarySnowball struct { diff --git a/avalanchego/snow/consensus/snowball/unary_snowball_test.go b/avalanchego/snow/consensus/snowball/unary_snowball_test.go index 814f80ee..012144bb 100644 --- a/avalanchego/snow/consensus/snowball/unary_snowball_test.go +++ b/avalanchego/snow/consensus/snowball/unary_snowball_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/avalanchego/snow/consensus/snowball/unary_snowflake.go b/avalanchego/snow/consensus/snowball/unary_snowflake.go index 519a6d96..84ef6e42 100644 --- a/avalanchego/snow/consensus/snowball/unary_snowflake.go +++ b/avalanchego/snow/consensus/snowball/unary_snowflake.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowball @@ -7,7 +7,7 @@ import ( "fmt" ) -var _ UnarySnowflake = &unarySnowflake{} +var _ UnarySnowflake = (*unarySnowflake)(nil) // unarySnowflake is the implementation of a unary snowflake instance type unarySnowflake struct { @@ -24,16 +24,22 @@ type unarySnowflake struct { finalized bool } -func (sf *unarySnowflake) Initialize(beta int) { sf.beta = beta } +func (sf *unarySnowflake) Initialize(beta int) { + sf.beta = beta +} func (sf *unarySnowflake) RecordSuccessfulPoll() { sf.confidence++ sf.finalized = sf.finalized || sf.confidence >= sf.beta } -func (sf *unarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } +func (sf *unarySnowflake) RecordUnsuccessfulPoll() { + sf.confidence = 0 +} -func (sf *unarySnowflake) Finalized() bool { return sf.finalized } +func (sf *unarySnowflake) Finalized() bool { + return sf.finalized +} func (sf *unarySnowflake) Extend(beta int, choice int) BinarySnowflake { return &binarySnowflake{ diff --git a/avalanchego/snow/consensus/snowball/unary_snowflake_test.go b/avalanchego/snow/consensus/snowball/unary_snowflake_test.go index ae23df60..ab76c94a 100644 --- a/avalanchego/snow/consensus/snowball/unary_snowflake_test.go +++ b/avalanchego/snow/consensus/snowball/unary_snowflake_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowball diff --git a/avalanchego/snow/consensus/snowman/block.go b/avalanchego/snow/consensus/snowman/block.go index 0321d64d..b5d79983 100644 --- a/avalanchego/snow/consensus/snowman/block.go +++ b/avalanchego/snow/consensus/snowman/block.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( + "context" "time" "github.com/ava-labs/avalanchego/ids" @@ -34,7 +35,7 @@ type Block interface { // // If nil is returned, it is guaranteed that either Accept or Reject will be // called on this block, unless the VM is shut down. - Verify() error + Verify(context.Context) error // Bytes returns the binary representation of this block. // diff --git a/avalanchego/snow/consensus/snowman/consensus.go b/avalanchego/snow/consensus/snowman/consensus.go index 3f091d12..d88c385b 100644 --- a/avalanchego/snow/consensus/snowman/consensus.go +++ b/avalanchego/snow/consensus/snowman/consensus.go @@ -1,34 +1,39 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman import ( + "context" + "time" + + "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/utils/bag" ) // Consensus represents a general snowman instance that can be used directly to // process a series of dependent operations. type Consensus interface { + health.Checker + // Takes in the context, snowball parameters, and the last accepted block. Initialize( ctx *snow.ConsensusContext, params snowball.Parameters, lastAcceptedID ids.ID, lastAcceptedHeight uint64, + lastAcceptedTime time.Time, ) error - // Returns the parameters that describe this snowman instance - Parameters() snowball.Parameters - // Returns the number of blocks processing NumProcessing() int // Adds a new decision. Assumes the dependency has already been added. // Returns if a critical error has occurred. - Add(Block) error + Add(context.Context, Block) error // Decided returns true if the block has been decided. Decided(Block) bool @@ -40,19 +45,19 @@ type Consensus interface { // chain. 
IsPreferred(Block) bool + // Returns the ID of the last accepted decision. + LastAccepted() ids.ID + // Returns the ID of the tail of the strongly preferred sequence of // decisions. Preference() ids.ID // RecordPoll collects the results of a network poll. Assumes all decisions // have been previously added. Returns if a critical error has occurred. - RecordPoll(ids.Bag) error + RecordPoll(context.Context, bag.Bag[ids.ID]) error // Finalized returns true if all decisions that have been added have been // finalized. Note, it is possible that after returning finalized, a new // decision may be added such that this instance is no longer finalized. Finalized() bool - - // HealthCheck returns information about the consensus health. - HealthCheck() (interface{}, error) } diff --git a/avalanchego/snow/consensus/snowman/consensus_test.go b/avalanchego/snow/consensus/snowman/consensus_test.go index 66fdf9bf..ee731a28 100644 --- a/avalanchego/snow/consensus/snowman/consensus_test.go +++ b/avalanchego/snow/consensus/snowman/consensus_test.go @@ -1,15 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( + "context" "errors" "path" "reflect" "runtime" "strings" "testing" + "time" "github.com/prometheus/client_golang/prometheus" @@ -19,15 +21,17 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/sampler" ) type testFunc func(*testing.T, Factory) var ( - GenesisID = ids.Empty.Prefix(0) - GenesisHeight = uint64(0) - Genesis = &TestBlock{TestDecidable: choices.TestDecidable{ + GenesisID = ids.Empty.Prefix(0) + GenesisHeight = uint64(0) + GenesisTimestamp = time.Unix(1, 0) + Genesis = &TestBlock{TestDecidable: choices.TestDecidable{ IDV: GenesisID, StatusV: choices.Accepted, }} @@ -64,6 +68,8 @@ var ( ErrorOnAddDecidedBlock, ErrorOnAddDuplicateBlockID, } + + errTest = errors.New("non-nil error") ) // Execute all tests against a consensus implementation @@ -95,13 +101,11 @@ func InitializeTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } - if p := sm.Parameters(); p != params { - t.Fatalf("Wrong returned parameters") - } else if pref := sm.Preference(); pref != GenesisID { + if pref := sm.Preference(); pref != GenesisID { t.Fatalf("Wrong preference returned") } else if !sm.Finalized() { t.Fatalf("Wrong should have marked the instance as being finalized") @@ -123,7 +127,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -141,7 +145,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { } // Adding to 
the previous preference will update the preference - if err := sm.Add(block); err != nil { + if err := sm.Add(context.Background(), block); err != nil { t.Fatal(err) } @@ -149,9 +153,9 @@ func NumProcessingTest(t *testing.T, factory Factory) { t.Fatalf("expected %d blocks to be processing but returned %d", 1, numProcessing) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(block.ID()) - if err := sm.RecordPoll(votes); err != nil { + if err := sm.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } @@ -175,7 +179,7 @@ func AddToTailTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -189,7 +193,7 @@ func AddToTailTest(t *testing.T, factory Factory) { } // Adding to the previous preference will update the preference - if err := sm.Add(block); err != nil { + if err := sm.Add(context.Background(), block); err != nil { t.Fatal(err) } else if pref := sm.Preference(); pref != block.ID() { t.Fatalf("Wrong preference. Expected %s, got %s", block.ID(), pref) @@ -213,7 +217,7 @@ func AddToNonTailTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -235,7 +239,7 @@ func AddToNonTailTest(t *testing.T, factory Factory) { } // Adding to the previous preference will update the preference - if err := sm.Add(firstBlock); err != nil { + if err := sm.Add(context.Background(), firstBlock); err != nil { t.Fatal(err) } else if pref := sm.Preference(); pref != firstBlock.IDV { t.Fatalf("Wrong preference. 
Expected %s, got %s", firstBlock.IDV, pref) @@ -243,7 +247,7 @@ func AddToNonTailTest(t *testing.T, factory Factory) { // Adding to something other than the previous preference won't update the // preference - if err := sm.Add(secondBlock); err != nil { + if err := sm.Add(context.Background(), secondBlock); err != nil { t.Fatal(err) } else if pref := sm.Preference(); pref != firstBlock.IDV { t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.IDV, pref) @@ -266,7 +270,7 @@ func AddToUnknownTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -286,7 +290,7 @@ func AddToUnknownTest(t *testing.T, factory Factory) { // Adding a block with an unknown parent means the parent must have already // been rejected. Therefore the block should be immediately rejected - if err := sm.Add(block); err != nil { + if err := sm.Add(context.Background(), block); err != nil { t.Fatal(err) } else if pref := sm.Preference(); pref != GenesisID { t.Fatalf("Wrong preference. 
Expected %s, got %s", GenesisID, pref) @@ -309,7 +313,7 @@ func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -341,7 +345,7 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -382,7 +386,7 @@ func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -423,7 +427,7 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -436,7 +440,7 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(block); err != nil { + if err := sm.Add(context.Background(), block); err != nil { t.Fatal(err) } if block.Status() == choices.Accepted { @@ -467,7 +471,7 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ 
-480,13 +484,13 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(block); err != nil { + if err := sm.Add(context.Background(), block); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(block.ID()) - if err := sm.RecordPoll(votes); err != nil { + if err := sm.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } else if pref := sm.Preference(); pref != block.ID() { t.Fatalf("Preference returned the wrong block") @@ -494,7 +498,7 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { t.Fatalf("Snowman instance finalized too soon") } else if status := block.Status(); status != choices.Processing { t.Fatalf("Block's status changed unexpectedly") - } else if err := sm.RecordPoll(votes); err != nil { + } else if err := sm.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } else if pref := sm.Preference(); pref != block.ID() { t.Fatalf("Preference returned the wrong block") @@ -519,7 +523,7 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -540,16 +544,16 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(firstBlock); err != nil { + if err := sm.Add(context.Background(), firstBlock); err != nil { t.Fatal(err) - } else if err := sm.Add(secondBlock); err != nil { + } else if err := sm.Add(context.Background(), secondBlock); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(firstBlock.ID()) - if err := sm.RecordPoll(votes); err != nil { + if err := sm.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } else if pref := 
sm.Preference(); pref != firstBlock.ID() { t.Fatalf("Preference returned the wrong block") @@ -559,7 +563,7 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { t.Fatalf("Block's status changed unexpectedly") } else if status := secondBlock.Status(); status != choices.Processing { t.Fatalf("Block's status changed unexpectedly") - } else if err := sm.RecordPoll(votes); err != nil { + } else if err := sm.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } else if pref := sm.Preference(); pref != firstBlock.ID() { t.Fatalf("Preference returned the wrong block") @@ -590,7 +594,7 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight)) + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) firstBlock := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -609,15 +613,15 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - require.NoError(sm.Add(firstBlock)) - require.NoError(sm.Add(secondBlock)) + require.NoError(sm.Add(context.Background(), firstBlock)) + require.NoError(sm.Add(context.Background(), secondBlock)) - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(firstBlock.ID()) votes.Add(secondBlock.ID()) // The first poll will accept shared bits - require.NoError(sm.RecordPoll(votes)) + require.NoError(sm.RecordPoll(context.Background(), votes)) require.Equal(firstBlock.ID(), sm.Preference()) require.False(sm.Finalized()) @@ -626,7 +630,7 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { require.EqualValues(1, metrics["polls_successful"]) // The second poll will do nothing - require.NoError(sm.RecordPoll(votes)) + require.NoError(sm.RecordPoll(context.Background(), votes)) require.Equal(firstBlock.ID(), sm.Preference()) require.False(sm.Finalized()) @@ -649,13 +653,13 @@ 
func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(GenesisID) - if err := sm.RecordPoll(votes); err != nil { + if err := sm.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } else if !sm.Finalized() { t.Fatalf("Consensus should still be finalized") @@ -678,7 +682,7 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -707,11 +711,11 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { HeightV: block1.HeightV + 1, } - if err := sm.Add(block0); err != nil { + if err := sm.Add(context.Background(), block0); err != nil { t.Fatal(err) - } else if err := sm.Add(block1); err != nil { + } else if err := sm.Add(context.Background(), block1); err != nil { t.Fatal(err) - } else if err := sm.Add(block2); err != nil { + } else if err := sm.Add(context.Background(), block2); err != nil { t.Fatal(err) } @@ -723,9 +727,9 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { // 2 // Tail = 0 - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(block0.ID()) - if err := sm.RecordPoll(votes); err != nil { + if err := sm.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } @@ -760,7 +764,7 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, 
params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -797,13 +801,13 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { HeightV: block1.HeightV + 1, } - if err := sm.Add(block0); err != nil { + if err := sm.Add(context.Background(), block0); err != nil { t.Fatal(err) - } else if err := sm.Add(block1); err != nil { + } else if err := sm.Add(context.Background(), block1); err != nil { t.Fatal(err) - } else if err := sm.Add(block2); err != nil { + } else if err := sm.Add(context.Background(), block2); err != nil { t.Fatal(err) - } else if err := sm.Add(block3); err != nil { + } else if err := sm.Add(context.Background(), block3); err != nil { t.Fatal(err) } @@ -814,9 +818,9 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { // / \ // 2 3 - votesFor2 := ids.Bag{} + votesFor2 := bag.Bag[ids.ID]{} votesFor2.Add(block2.ID()) - if err := sm.RecordPoll(votesFor2); err != nil { + if err := sm.RecordPoll(context.Background(), votesFor2); err != nil { t.Fatal(err) } else if sm.Finalized() { t.Fatalf("Finalized too early") @@ -824,14 +828,14 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { t.Fatalf("Wrong preference listed") } - emptyVotes := ids.Bag{} - if err := sm.RecordPoll(emptyVotes); err != nil { + emptyVotes := bag.Bag[ids.ID]{} + if err := sm.RecordPoll(context.Background(), emptyVotes); err != nil { t.Fatal(err) } else if sm.Finalized() { t.Fatalf("Finalized too early") } else if pref := sm.Preference(); block2.ID() != pref { t.Fatalf("Wrong preference listed") - } else if err := sm.RecordPoll(votesFor2); err != nil { + } else if err := sm.RecordPoll(context.Background(), votesFor2); err != nil { t.Fatal(err) } else if sm.Finalized() { t.Fatalf("Finalized too early") @@ -839,15 +843,15 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { t.Fatalf("Wrong preference listed") } - votesFor3 := ids.Bag{} + votesFor3 
:= bag.Bag[ids.ID]{} votesFor3.Add(block3.ID()) - if err := sm.RecordPoll(votesFor3); err != nil { + if err := sm.RecordPoll(context.Background(), votesFor3); err != nil { t.Fatal(err) } else if sm.Finalized() { t.Fatalf("Finalized too early") } else if pref := sm.Preference(); block2.ID() != pref { t.Fatalf("Wrong preference listed") - } else if err := sm.RecordPoll(votesFor3); err != nil { + } else if err := sm.RecordPoll(context.Background(), votesFor3); err != nil { t.Fatal(err) } else if !sm.Finalized() { t.Fatalf("Finalized too late") @@ -878,7 +882,7 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -892,21 +896,21 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { } unknownBlockID := ids.Empty.Prefix(2) - if err := sm.Add(block); err != nil { + if err := sm.Add(context.Background(), block); err != nil { t.Fatal(err) } - validVotes := ids.Bag{} + validVotes := bag.Bag[ids.ID]{} validVotes.Add(block.ID()) - if err := sm.RecordPoll(validVotes); err != nil { + if err := sm.RecordPoll(context.Background(), validVotes); err != nil { t.Fatal(err) } - invalidVotes := ids.Bag{} + invalidVotes := bag.Bag[ids.ID]{} invalidVotes.Add(unknownBlockID) - if err := sm.RecordPoll(invalidVotes); err != nil { + if err := sm.RecordPoll(context.Background(), invalidVotes); err != nil { t.Fatal(err) - } else if err := sm.RecordPoll(validVotes); err != nil { + } else if err := sm.RecordPoll(context.Background(), validVotes); err != nil { t.Fatal(err) } else if sm.Finalized() { t.Fatalf("Finalized too early") @@ -929,7 +933,7 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, 
GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -974,15 +978,15 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { HeightV: block3.HeightV + 1, } - if err := sm.Add(block0); err != nil { + if err := sm.Add(context.Background(), block0); err != nil { t.Fatal(err) - } else if err := sm.Add(block1); err != nil { + } else if err := sm.Add(context.Background(), block1); err != nil { t.Fatal(err) - } else if err := sm.Add(block2); err != nil { + } else if err := sm.Add(context.Background(), block2); err != nil { t.Fatal(err) - } else if err := sm.Add(block3); err != nil { + } else if err := sm.Add(context.Background(), block3); err != nil { t.Fatal(err) - } else if err := sm.Add(block4); err != nil { + } else if err := sm.Add(context.Background(), block4); err != nil { t.Fatal(err) } @@ -996,13 +1000,13 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { // 2 4 // Tail = 2 - votes0_2_4 := ids.Bag{} + votes0_2_4 := bag.Bag[ids.ID]{} votes0_2_4.Add( block0.ID(), block2.ID(), block4.ID(), ) - if err := sm.RecordPoll(votes0_2_4); err != nil { + if err := sm.RecordPoll(context.Background(), votes0_2_4); err != nil { t.Fatal(err) } @@ -1032,9 +1036,9 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { t.Fatalf("Should have rejected") } - dep2_2_2 := ids.Bag{} + dep2_2_2 := bag.Bag[ids.ID]{} dep2_2_2.AddCount(block2.ID(), 3) - if err := sm.RecordPoll(dep2_2_2); err != nil { + if err := sm.RecordPoll(context.Background(), dep2_2_2); err != nil { t.Fatal(err) } @@ -1076,7 +1080,7 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - err := sm.Initialize(ctx, params, GenesisID, GenesisHeight) + err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) require.NoError(err) block0 := &TestBlock{ @@ -1112,30 +1116,30 @@ func 
RecordPollDivergedVotingTest(t *testing.T, factory Factory) { HeightV: block2.HeightV + 1, } - err = sm.Add(block0) + err = sm.Add(context.Background(), block0) require.NoError(err) - err = sm.Add(block1) + err = sm.Add(context.Background(), block1) require.NoError(err) // The first bit is contested as either 0 or 1. When voting for [block0] and // when the first bit is 1, the following bits have been decided to follow // the 255 remaining bits of [block0]. - votes0 := ids.Bag{} + votes0 := bag.Bag[ids.ID]{} votes0.Add(block0.ID()) - err = sm.RecordPoll(votes0) + err = sm.RecordPoll(context.Background(), votes0) require.NoError(err) // Although we are adding in [block2] here - the underlying snowball // instance has already decided it is rejected. Snowman doesn't actually // know that though, because that is an implementation detail of the // Snowball trie that is used. - err = sm.Add(block2) + err = sm.Add(context.Background(), block2) require.NoError(err) // Because [block2] is effectively rejected, [block3] is also effectively // rejected. - err = sm.Add(block3) + err = sm.Add(context.Background(), block3) require.NoError(err) require.Equal(block0.ID(), sm.Preference()) @@ -1160,9 +1164,9 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { // [block0]. When [block0] is accepted, [block1] and [block2] are rejected // as conflicting. [block2]'s child, [block3], is then rejected // transitively. 
- votes3 := ids.Bag{} + votes3 := bag.Bag[ids.ID]{} votes3.Add(block3.ID()) - err = sm.RecordPoll(votes3) + err = sm.RecordPoll(context.Background(), votes3) require.NoError(err) require.True(sm.Finalized(), "finalized too late") @@ -1187,7 +1191,7 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight)) + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1222,26 +1226,26 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact HeightV: block2.HeightV + 1, } - require.NoError(sm.Add(block0)) - require.NoError(sm.Add(block1)) + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) // When voting for [block0], we end up finalizing the first bit as 0. The // second bit is contested as either 0 or 1. For when the second bit is 1, // the following bits have been decided to follow the 254 remaining bits of // [block0]. - votes0 := ids.Bag{} + votes0 := bag.Bag[ids.ID]{} votes0.Add(block0.ID()) - require.NoError(sm.RecordPoll(votes0)) + require.NoError(sm.RecordPoll(context.Background(), votes0)) // Although we are adding in [block2] here - the underlying snowball // instance has already decided it is rejected. Snowman doesn't actually // know that though, because that is an implementation detail of the // Snowball trie that is used. - require.NoError(sm.Add(block2)) + require.NoError(sm.Add(context.Background(), block2)) // Because [block2] is effectively rejected, [block3] is also effectively // rejected. 
- require.NoError(sm.Add(block3)) + require.NoError(sm.Add(context.Background(), block3)) require.Equal(block0.ID(), sm.Preference()) require.Equal(choices.Processing, block0.Status(), "should not be decided yet") @@ -1265,9 +1269,9 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact // dropped. Although the votes for [block3] are still applied, [block3] will // only be marked as accepted after [block2] is marked as accepted; which // will never happen. - votes3 := ids.Bag{} + votes3 := bag.Bag[ids.ID]{} votes3.Add(block3.ID()) - require.NoError(sm.RecordPoll(votes3)) + require.NoError(sm.RecordPoll(context.Background(), votes3)) require.False(sm.Finalized(), "finalized too early") require.Equal(choices.Processing, block0.Status()) @@ -1290,7 +1294,7 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -1327,16 +1331,16 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { HeightV: b1Block.HeightV + 1, } - if err := sm.Add(a1Block); err != nil { + if err := sm.Add(context.Background(), a1Block); err != nil { t.Fatal(err) } - if err := sm.Add(a2Block); err != nil { + if err := sm.Add(context.Background(), a2Block); err != nil { t.Fatal(err) } - if err := sm.Add(b1Block); err != nil { + if err := sm.Add(context.Background(), b1Block); err != nil { t.Fatal(err) } - if err := sm.Add(b2Block); err != nil { + if err := sm.Add(context.Background(), b2Block); err != nil { t.Fatal(err) } @@ -1357,10 +1361,10 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { t.Fatalf("Shouldn't have reported b2 as being preferred") } - b2Votes := ids.Bag{} + b2Votes := bag.Bag[ids.ID]{} b2Votes.Add(b2Block.ID()) - if err := sm.RecordPoll(b2Votes); 
err != nil { + if err := sm.RecordPoll(context.Background(), b2Votes); err != nil { t.Fatal(err) } @@ -1381,13 +1385,13 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { t.Fatalf("Should have reported b2 as being preferred") } - a1Votes := ids.Bag{} + a1Votes := bag.Bag[ids.ID]{} a1Votes.Add(a1Block.ID()) - if err := sm.RecordPoll(a1Votes); err != nil { + if err := sm.RecordPoll(context.Background(), a1Votes); err != nil { t.Fatal(err) } - if err := sm.RecordPoll(a1Votes); err != nil { + if err := sm.RecordPoll(context.Background(), a1Votes); err != nil { t.Fatal(err) } @@ -1433,7 +1437,7 @@ func MetricsProcessingErrorTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err == nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err == nil { t.Fatalf("should have errored during initialization due to a duplicate metric") } } @@ -1462,7 +1466,7 @@ func MetricsAcceptedErrorTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err == nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err == nil { t.Fatalf("should have errored during initialization due to a duplicate metric") } } @@ -1491,7 +1495,7 @@ func MetricsRejectedErrorTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err == nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err == nil { t.Fatalf("should have errored during initialization due to a duplicate metric") } } @@ -1511,7 +1515,7 @@ func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ 
-1523,14 +1527,14 @@ func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { block := &TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(2), - RejectV: errors.New(""), + RejectV: errTest, StatusV: choices.Processing, }, ParentV: rejectedBlock.IDV, HeightV: rejectedBlock.HeightV + 1, } - if err := sm.Add(block); err == nil { + if err := sm.Add(context.Background(), block); err == nil { t.Fatalf("Should have errored on rejecting the rejectable block") } } @@ -1550,27 +1554,27 @@ func ErrorOnAcceptTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } block := &TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1), - AcceptV: errors.New(""), + AcceptV: errTest, StatusV: choices.Processing, }, ParentV: Genesis.IDV, HeightV: Genesis.HeightV + 1, } - if err := sm.Add(block); err != nil { + if err := sm.Add(context.Background(), block); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(block.ID()) - if err := sm.RecordPoll(votes); err == nil { + if err := sm.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on accepted the block") } } @@ -1590,7 +1594,7 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -1605,22 +1609,22 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { block1 := &TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(2), - RejectV: errors.New(""), + RejectV: errTest, StatusV: choices.Processing, }, ParentV: Genesis.IDV, HeightV: Genesis.HeightV + 1, } - if err := 
sm.Add(block0); err != nil { + if err := sm.Add(context.Background(), block0); err != nil { t.Fatal(err) - } else if err := sm.Add(block1); err != nil { + } else if err := sm.Add(context.Background(), block1); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(block0.ID()) - if err := sm.RecordPoll(votes); err == nil { + if err := sm.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on rejecting the block's sibling") } } @@ -1640,7 +1644,7 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight); err != nil { + if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { t.Fatal(err) } @@ -1663,24 +1667,24 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { block2 := &TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(3), - RejectV: errors.New(""), + RejectV: errTest, StatusV: choices.Processing, }, ParentV: block1.IDV, HeightV: block1.HeightV + 1, } - if err := sm.Add(block0); err != nil { + if err := sm.Add(context.Background(), block0); err != nil { t.Fatal(err) - } else if err := sm.Add(block1); err != nil { + } else if err := sm.Add(context.Background(), block1); err != nil { t.Fatal(err) - } else if err := sm.Add(block2); err != nil { + } else if err := sm.Add(context.Background(), block2); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(block0.ID()) - if err := sm.RecordPoll(votes); err == nil { + if err := sm.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on transitively rejecting the block") } } @@ -1737,7 +1741,7 @@ func ErrorOnAddDecidedBlock(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight)) + 
require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1747,7 +1751,7 @@ func ErrorOnAddDecidedBlock(t *testing.T, factory Factory) { ParentV: Genesis.IDV, HeightV: Genesis.HeightV + 1, } - require.ErrorIs(sm.Add(block0), errDuplicateAdd) + require.ErrorIs(sm.Add(context.Background(), block0), errDuplicateAdd) } func ErrorOnAddDuplicateBlockID(t *testing.T, factory Factory) { @@ -1765,7 +1769,7 @@ func ErrorOnAddDuplicateBlockID(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight)) + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1784,8 +1788,8 @@ func ErrorOnAddDuplicateBlockID(t *testing.T, factory Factory) { HeightV: block0.HeightV + 1, } - require.NoError(sm.Add(block0)) - require.ErrorIs(sm.Add(block1), errDuplicateAdd) + require.NoError(sm.Add(context.Background(), block0)) + require.ErrorIs(sm.Add(context.Background(), block1), errDuplicateAdd) } func gatherCounterGauge(t *testing.T, reg *prometheus.Registry) map[string]float64 { diff --git a/avalanchego/snow/consensus/snowman/factory.go b/avalanchego/snow/consensus/snowman/factory.go index 044fc33d..06341981 100644 --- a/avalanchego/snow/consensus/snowman/factory.go +++ b/avalanchego/snow/consensus/snowman/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman diff --git a/avalanchego/snow/consensus/snowman/mock_block.go b/avalanchego/snow/consensus/snowman/mock_block.go index 1fabe798..2d7eb12d 100644 --- a/avalanchego/snow/consensus/snowman/mock_block.go +++ b/avalanchego/snow/consensus/snowman/mock_block.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/snow/consensus/snowman (interfaces: Block) @@ -5,6 +8,7 @@ package snowman import ( + context "context" reflect "reflect" time "time" @@ -37,17 +41,17 @@ func (m *MockBlock) EXPECT() *MockBlockMockRecorder { } // Accept mocks base method. -func (m *MockBlock) Accept() error { +func (m *MockBlock) Accept(arg0 context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Accept") + ret := m.ctrl.Call(m, "Accept", arg0) ret0, _ := ret[0].(error) return ret0 } // Accept indicates an expected call of Accept. -func (mr *MockBlockMockRecorder) Accept() *gomock.Call { +func (mr *MockBlockMockRecorder) Accept(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockBlock)(nil).Accept)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockBlock)(nil).Accept), arg0) } // Bytes mocks base method. @@ -107,17 +111,17 @@ func (mr *MockBlockMockRecorder) Parent() *gomock.Call { } // Reject mocks base method. -func (m *MockBlock) Reject() error { +func (m *MockBlock) Reject(arg0 context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Reject") + ret := m.ctrl.Call(m, "Reject", arg0) ret0, _ := ret[0].(error) return ret0 } // Reject indicates an expected call of Reject. 
-func (mr *MockBlockMockRecorder) Reject() *gomock.Call { +func (mr *MockBlockMockRecorder) Reject(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reject", reflect.TypeOf((*MockBlock)(nil).Reject)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reject", reflect.TypeOf((*MockBlock)(nil).Reject), arg0) } // Status mocks base method. @@ -149,15 +153,15 @@ func (mr *MockBlockMockRecorder) Timestamp() *gomock.Call { } // Verify mocks base method. -func (m *MockBlock) Verify() error { +func (m *MockBlock) Verify(arg0 context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Verify") + ret := m.ctrl.Call(m, "Verify", arg0) ret0, _ := ret[0].(error) return ret0 } // Verify indicates an expected call of Verify. -func (mr *MockBlockMockRecorder) Verify() *gomock.Call { +func (mr *MockBlockMockRecorder) Verify(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockBlock)(nil).Verify)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockBlock)(nil).Verify), arg0) } diff --git a/avalanchego/snow/consensus/snowman/network_test.go b/avalanchego/snow/consensus/snowman/network_test.go index 66e8f7e4..8eadc4b5 100644 --- a/avalanchego/snow/consensus/snowman/network_test.go +++ b/avalanchego/snow/consensus/snowman/network_test.go @@ -1,15 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( + "context" "math/rand" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/sampler" ) @@ -28,7 +31,7 @@ func (n *Network) shuffleColors() { colors = append(colors, n.colors[int(index)]) } n.colors = colors - SortTestBlocks(n.colors) + utils.Sort(n.colors) } func (n *Network) Initialize(params snowball.Parameters, numColors int) { @@ -58,7 +61,7 @@ func (n *Network) Initialize(params snowball.Parameters, numColors int) { } func (n *Network) AddNode(sm Consensus) error { - if err := sm.Initialize(snow.DefaultConsensusContextTest(), n.params, Genesis.ID(), Genesis.Height()); err != nil { + if err := sm.Initialize(snow.DefaultConsensusContextTest(), n.params, Genesis.ID(), Genesis.Height(), Genesis.Timestamp()); err != nil { return err } @@ -76,10 +79,10 @@ func (n *Network) AddNode(sm Consensus) error { }, ParentV: myDep, HeightV: blk.Height(), - VerifyV: blk.Verify(), + VerifyV: blk.Verify(context.Background()), BytesV: blk.Bytes(), } - if err := sm.Add(myVtx); err != nil { + if err := sm.Add(context.Background(), myVtx); err != nil { return err } deps[myVtx.ID()] = myDep @@ -89,7 +92,9 @@ func (n *Network) AddNode(sm Consensus) error { return nil } -func (n *Network) Finalized() bool { return len(n.running) == 0 } +func (n *Network) Finalized() bool { + return len(n.running) == 0 +} func (n *Network) Round() error { if len(n.running) == 0 { @@ -102,13 +107,13 @@ func (n *Network) Round() error { s := sampler.NewUniform() _ = s.Initialize(uint64(len(n.nodes))) indices, _ := s.Sample(n.params.K) - sampledColors := ids.Bag{} + sampledColors := bag.Bag[ids.ID]{} for _, index := range indices { peer := n.nodes[int(index)] sampledColors.Add(peer.Preference()) } - if err := 
running.RecordPoll(sampledColors); err != nil { + if err := running.RecordPoll(context.Background(), sampledColors); err != nil { return err } diff --git a/avalanchego/snow/consensus/snowman/oracle_block.go b/avalanchego/snow/consensus/snowman/oracle_block.go index 80961a1c..0d8bd2be 100644 --- a/avalanchego/snow/consensus/snowman/oracle_block.go +++ b/avalanchego/snow/consensus/snowman/oracle_block.go @@ -1,9 +1,12 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman -import "errors" +import ( + "context" + "errors" +) var ErrNotOracle = errors.New("block isn't an oracle") @@ -18,5 +21,5 @@ type OracleBlock interface { // Options returns the possible children of this block in the order this // validator prefers the blocks. // Options is guaranteed to only be called on a verified block. - Options() ([2]Block, error) + Options(context.Context) ([2]Block, error) } diff --git a/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal.go b/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal.go index 062fe6fe..701eeed6 100644 --- a/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal.go +++ b/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package poll @@ -7,6 +7,7 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) type earlyTermNoTraversalFactory struct { @@ -19,7 +20,7 @@ func NewEarlyTermNoTraversalFactory(alpha int) Factory { return &earlyTermNoTraversalFactory{alpha: alpha} } -func (f *earlyTermNoTraversalFactory) New(vdrs ids.NodeIDBag) Poll { +func (f *earlyTermNoTraversalFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { return &earlyTermNoTraversalPoll{ polled: vdrs, alpha: f.alpha, @@ -30,8 +31,8 @@ func (f *earlyTermNoTraversalFactory) New(vdrs ids.NodeIDBag) Poll { // the result of the poll. However, does not terminate tightly with this bound. // It terminates as quickly as it can without performing any DAG traversals. type earlyTermNoTraversalPoll struct { - votes ids.Bag - polled ids.NodeIDBag + votes bag.Bag[ids.ID] + polled bag.Bag[ids.NodeID] alpha int } @@ -61,7 +62,9 @@ func (p *earlyTermNoTraversalPoll) Finished() bool { } // Result returns the result of this poll -func (p *earlyTermNoTraversalPoll) Result() ids.Bag { return p.votes } +func (p *earlyTermNoTraversalPoll) Result() bag.Bag[ids.ID] { + return p.votes +} func (p *earlyTermNoTraversalPoll) PrefixedString(prefix string) string { return fmt.Sprintf( @@ -72,4 +75,6 @@ func (p *earlyTermNoTraversalPoll) PrefixedString(prefix string) string { ) } -func (p *earlyTermNoTraversalPoll) String() string { return p.PrefixedString("") } +func (p *earlyTermNoTraversalPoll) String() string { + return p.PrefixedString("") +} diff --git a/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal_test.go b/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal_test.go index bc911430..63cca569 100644 --- a/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal_test.go +++ b/avalanchego/snow/consensus/snowman/poll/early_term_no_traversal_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -7,6 +7,7 @@ import ( "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) func TestEarlyTermNoTraversalResults(t *testing.T) { @@ -16,7 +17,7 @@ func TestEarlyTermNoTraversalResults(t *testing.T) { vdr1 := ids.NodeID{1} // k = 1 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add(vdr1) factory := NewEarlyTermNoTraversalFactory(alpha) @@ -45,7 +46,7 @@ func TestEarlyTermNoTraversalString(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -57,9 +58,9 @@ func TestEarlyTermNoTraversalString(t *testing.T) { poll.Vote(vdr1, vtxID) expected := `waiting on Bag: (Size = 1) - ID[NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp]: Count = 1 + NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received Bag: (Size = 1) - ID[SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg]: Count = 1` + SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` if result := poll.String(); expected != result { t.Fatalf("Poll should have returned %s but returned %s", expected, result) } @@ -73,7 +74,7 @@ func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -107,7 +108,7 @@ func TestEarlyTermNoTraversalTerminatesEarly(t *testing.T) { vdr4 := ids.NodeID{4} vdr5 := ids.NodeID{5} // k = 5 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -151,7 +152,7 @@ func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { vdr3 := ids.NodeID{3} vdr4 := ids.NodeID{4} - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -187,7 +188,7 @@ func TestEarlyTermNoTraversalWithFastDrops(t *testing.T) { vdr2 := ids.NodeID{2} vdr3 := ids.NodeID{3} // k = 3 - vdrs := ids.NodeIDBag{} + vdrs := 
bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -215,7 +216,7 @@ func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { vdr1 := ids.NodeID{2} vdr2 := ids.NodeID{3} - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -246,7 +247,7 @@ func TestEarlyTermNoTraversalDropWithWeightedResponses(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, diff --git a/avalanchego/snow/consensus/snowman/poll/interfaces.go b/avalanchego/snow/consensus/snowman/poll/interfaces.go index de30cae1..cab31cfc 100644 --- a/avalanchego/snow/consensus/snowman/poll/interfaces.go +++ b/avalanchego/snow/consensus/snowman/poll/interfaces.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -7,6 +7,7 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/formatting" ) @@ -14,9 +15,9 @@ import ( type Set interface { fmt.Stringer - Add(requestID uint32, vdrs ids.NodeIDBag) bool - Vote(requestID uint32, vdr ids.NodeID, vote ids.ID) []ids.Bag - Drop(requestID uint32, vdr ids.NodeID) []ids.Bag + Add(requestID uint32, vdrs bag.Bag[ids.NodeID]) bool + Vote(requestID uint32, vdr ids.NodeID, vote ids.ID) []bag.Bag[ids.ID] + Drop(requestID uint32, vdr ids.NodeID) []bag.Bag[ids.ID] Len() int } @@ -27,10 +28,10 @@ type Poll interface { Vote(vdr ids.NodeID, vote ids.ID) Drop(vdr ids.NodeID) Finished() bool - Result() ids.Bag + Result() bag.Bag[ids.ID] } // Factory creates a new Poll type Factory interface { - New(vdrs ids.NodeIDBag) Poll + New(vdrs bag.Bag[ids.NodeID]) Poll } diff --git a/avalanchego/snow/consensus/snowman/poll/no_early_term.go b/avalanchego/snow/consensus/snowman/poll/no_early_term.go index 21293ef6..ed5744d4 100644 --- 
a/avalanchego/snow/consensus/snowman/poll/no_early_term.go +++ b/avalanchego/snow/consensus/snowman/poll/no_early_term.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -7,23 +7,26 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) type noEarlyTermFactory struct{} // NewNoEarlyTermFactory returns a factory that returns polls with no early // termination -func NewNoEarlyTermFactory() Factory { return noEarlyTermFactory{} } +func NewNoEarlyTermFactory() Factory { + return noEarlyTermFactory{} +} -func (noEarlyTermFactory) New(vdrs ids.NodeIDBag) Poll { +func (noEarlyTermFactory) New(vdrs bag.Bag[ids.NodeID]) Poll { return &noEarlyTermPoll{polled: vdrs} } // noEarlyTermPoll finishes when all polled validators either respond to the // query or a timeout occurs type noEarlyTermPoll struct { - votes ids.Bag - polled ids.NodeIDBag + votes bag.Bag[ids.ID] + polled bag.Bag[ids.NodeID] } // Vote registers a response for this poll @@ -37,7 +40,9 @@ func (p *noEarlyTermPoll) Vote(vdr ids.NodeID, vote ids.ID) { } // Drop any future response for this poll -func (p *noEarlyTermPoll) Drop(vdr ids.NodeID) { p.polled.Remove(vdr) } +func (p *noEarlyTermPoll) Drop(vdr ids.NodeID) { + p.polled.Remove(vdr) +} // Finished returns true when all validators have voted func (p *noEarlyTermPoll) Finished() bool { @@ -45,7 +50,9 @@ func (p *noEarlyTermPoll) Finished() bool { } // Result returns the result of this poll -func (p *noEarlyTermPoll) Result() ids.Bag { return p.votes } +func (p *noEarlyTermPoll) Result() bag.Bag[ids.ID] { + return p.votes +} func (p *noEarlyTermPoll) PrefixedString(prefix string) string { return fmt.Sprintf( @@ -56,4 +63,6 @@ func (p *noEarlyTermPoll) PrefixedString(prefix string) string { ) } -func (p *noEarlyTermPoll) String() string { return 
p.PrefixedString("") } +func (p *noEarlyTermPoll) String() string { + return p.PrefixedString("") +} diff --git a/avalanchego/snow/consensus/snowman/poll/no_early_term_test.go b/avalanchego/snow/consensus/snowman/poll/no_early_term_test.go index b0258ecc..fdc42a57 100644 --- a/avalanchego/snow/consensus/snowman/poll/no_early_term_test.go +++ b/avalanchego/snow/consensus/snowman/poll/no_early_term_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -7,6 +7,7 @@ import ( "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" ) func TestNoEarlyTermResults(t *testing.T) { @@ -14,7 +15,7 @@ func TestNoEarlyTermResults(t *testing.T) { vdr1 := ids.NodeID{1} // k = 1 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add(vdr1) factory := NewNoEarlyTermFactory() @@ -41,7 +42,7 @@ func TestNoEarlyTermString(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -53,9 +54,9 @@ func TestNoEarlyTermString(t *testing.T) { poll.Vote(vdr1, vtxID) expected := `waiting on Bag: (Size = 1) - ID[NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp]: Count = 1 + NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received Bag: (Size = 1) - ID[SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg]: Count = 1` + SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` if result := poll.String(); expected != result { t.Fatalf("Poll should have returned %s but returned %s", expected, result) } @@ -67,7 +68,7 @@ func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, diff --git a/avalanchego/snow/consensus/snowman/poll/set.go b/avalanchego/snow/consensus/snowman/poll/set.go index 
62f464e9..e3182147 100644 --- a/avalanchego/snow/consensus/snowman/poll/set.go +++ b/avalanchego/snow/consensus/snowman/poll/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -13,6 +13,7 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/metric" @@ -86,8 +87,8 @@ func NewSet( // Add to the current set of polls // Returns true if the poll was registered correctly and the network sample -// should be made. -func (s *set) Add(requestID uint32, vdrs ids.NodeIDBag) bool { +// should be made. +func (s *set) Add(requestID uint32, vdrs bag.Bag[ids.NodeID]) bool { if _, exists := s.polls.Get(requestID); exists { s.log.Debug("dropping poll", zap.String("reason", "duplicated request"), @@ -111,7 +112,7 @@ func (s *set) Add(requestID uint32, vdrs ids.NodeIDBag) bool { // Vote registers the connections response to a query for [id]. If there was no // query, or the response has already be registered, nothing is performed. 
-func (s *set) Vote(requestID uint32, vdr ids.NodeID, vote ids.ID) []ids.Bag { +func (s *set) Vote(requestID uint32, vdr ids.NodeID, vote ids.ID) []bag.Bag[ids.ID] { holder, exists := s.polls.Get(requestID) if !exists { s.log.Verbo("dropping vote", @@ -139,8 +140,8 @@ func (s *set) Vote(requestID uint32, vdr ids.NodeID, vote ids.ID) []ids.Bag { } // processFinishedPolls checks for other dependent finished polls and returns them all if finished -func (s *set) processFinishedPolls() []ids.Bag { - var results []ids.Bag +func (s *set) processFinishedPolls() []bag.Bag[ids.ID] { + var results []bag.Bag[ids.ID] // iterate from oldest to newest iter := s.polls.NewIterator() @@ -154,7 +155,7 @@ func (s *set) processFinishedPolls() []ids.Bag { } s.log.Verbo("poll finished", - zap.Any("requestID", iter.Key()), + zap.Uint32("requestID", iter.Key()), zap.Stringer("poll", holder.GetPoll()), ) s.durPolls.Observe(float64(time.Since(holder.StartTime()))) @@ -171,7 +172,7 @@ func (s *set) processFinishedPolls() []ids.Bag { // Drop registers the connections response to a query for [id]. If there was no // query, or the response has already be registered, nothing is performed. 
-func (s *set) Drop(requestID uint32, vdr ids.NodeID) []ids.Bag { +func (s *set) Drop(requestID uint32, vdr ids.NodeID) []bag.Bag[ids.ID] { holder, exists := s.polls.Get(requestID) if !exists { s.log.Verbo("dropping vote", @@ -198,7 +199,9 @@ func (s *set) Drop(requestID uint32, vdr ids.NodeID) []ids.Bag { } // Len returns the number of outstanding polls -func (s *set) Len() int { return s.polls.Len() } +func (s *set) Len() int { + return s.polls.Len() +} func (s *set) String() string { sb := strings.Builder{} diff --git a/avalanchego/snow/consensus/snowman/poll/set_test.go b/avalanchego/snow/consensus/snowman/poll/set_test.go index 0dfbc325..277d6d3d 100644 --- a/avalanchego/snow/consensus/snowman/poll/set_test.go +++ b/avalanchego/snow/consensus/snowman/poll/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package poll @@ -11,6 +11,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -55,12 +56,12 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // create two polls for the two vtxs - vdrBag := ids.NodeIDBag{} + vdrBag := bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added := s.Add(1, vdrBag) require.True(t, added) - vdrBag = ids.NodeIDBag{} + vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) 
added = s.Add(2, vdrBag) require.True(t, added) @@ -71,7 +72,7 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { vtx1 := ids.ID{1} vtx2 := ids.ID{2} - var results []ids.Bag + var results []bag.Bag[ids.ID] // vote out of order results = s.Vote(1, vdr1, vtx1) @@ -108,12 +109,12 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // create two polls for the two vtxs - vdrBag := ids.NodeIDBag{} + vdrBag := bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added := s.Add(1, vdrBag) require.True(t, added) - vdrBag = ids.NodeIDBag{} + vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added = s.Add(2, vdrBag) require.True(t, added) @@ -124,7 +125,7 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { vtx1 := ids.ID{1} vtx2 := ids.ID{2} - var results []ids.Bag + var results []bag.Bag[ids.ID] // vote out of order results = s.Vote(1, vdr1, vtx1) @@ -161,17 +162,17 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { vdrs := []ids.NodeID{vdr1, vdr2, vdr3} // create three polls for the two vtxs - vdrBag := ids.NodeIDBag{} + vdrBag := bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added := s.Add(1, vdrBag) require.True(t, added) - vdrBag = ids.NodeIDBag{} + vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added = s.Add(2, vdrBag) require.True(t, added) - vdrBag = ids.NodeIDBag{} + vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) 
added = s.Add(3, vdrBag) require.True(t, added) @@ -184,7 +185,7 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { vtx2 := ids.ID{2} vtx3 := ids.ID{3} - var results []ids.Bag + var results []bag.Bag[ids.ID] // vote out of order // 2 finishes first to create a gap of finished poll between two unfinished polls 1 and 3 @@ -227,7 +228,7 @@ func TestCreateAndFinishSuccessfulPoll(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -245,11 +246,11 @@ func TestCreateAndFinishSuccessfulPoll(t *testing.T) { t.Fatalf("Should only have one active poll") } else if results := s.Vote(1, vdr1, vtxID); len(results) > 0 { t.Fatalf("Shouldn't have been able to finish a non-existent poll") - } else if results = s.Vote(0, vdr1, vtxID); len(results) > 0 { + } else if results := s.Vote(0, vdr1, vtxID); len(results) > 0 { t.Fatalf("Shouldn't have been able to finish an ongoing poll") - } else if results = s.Vote(0, vdr1, vtxID); len(results) > 0 { + } else if results := s.Vote(0, vdr1, vtxID); len(results) > 0 { t.Fatalf("Should have dropped a duplicated poll") - } else if results = s.Vote(0, vdr2, vtxID); len(results) == 0 { + } else if results := s.Vote(0, vdr2, vtxID); len(results) == 0 { t.Fatalf("Should have finished the") } else if len(results) != 1 { t.Fatalf("Wrong number of results returned") @@ -272,7 +273,7 @@ func TestCreateAndFinishFailedPoll(t *testing.T) { vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} // k = 2 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -290,11 +291,11 @@ func TestCreateAndFinishFailedPoll(t *testing.T) { t.Fatalf("Should only have one active poll") } else if results := s.Drop(1, vdr1); len(results) > 0 { t.Fatalf("Shouldn't have been able to finish a non-existent poll") - } else if results = s.Drop(0, vdr1); len(results) > 0 { + } else if results := s.Drop(0, vdr1); len(results) > 0 { 
t.Fatalf("Shouldn't have been able to finish an ongoing poll") - } else if results = s.Drop(0, vdr1); len(results) > 0 { + } else if results := s.Drop(0, vdr1); len(results) > 0 { t.Fatalf("Should have dropped a duplicated poll") - } else if results = s.Drop(0, vdr2); len(results) == 0 { + } else if results := s.Drop(0, vdr2); len(results) == 0 { t.Fatalf("Should have finished the") } else if list := results[0].List(); len(list) != 0 { t.Fatalf("Wrong number of vertices returned") @@ -310,13 +311,13 @@ func TestSetString(t *testing.T) { vdr1 := ids.NodeID{1} // k = 1 - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add(vdr1) expected := `current polls: (Size = 1) RequestID 0: waiting on Bag: (Size = 1) - ID[NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt]: Count = 1 + NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt: 1 received Bag: (Size = 0)` if !s.Add(0, vdrs) { t.Fatalf("Should have been able to add a new poll") diff --git a/avalanchego/snow/consensus/snowman/snowman_block.go b/avalanchego/snow/consensus/snowman/snowman_block.go index d04d0349..ddb3ae30 100644 --- a/avalanchego/snow/consensus/snowman/snowman_block.go +++ b/avalanchego/snow/consensus/snowman/snowman_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -11,8 +11,8 @@ import ( // Tracks the state of a snowman block type snowmanBlock struct { - // pointer to the snowman instance this node is managed by - sm Consensus + // parameters to initialize the snowball instance with + params snowball.Parameters // block that this node contains. For the genesis, this value will be nil blk Block @@ -39,7 +39,7 @@ func (n *snowmanBlock) AddChild(child Block) { // should be initialized. 
if n.sb == nil { n.sb = &snowball.Tree{} - n.sb.Initialize(n.sm.Parameters(), childID) + n.sb.Initialize(n.params, childID) n.children = make(map[ids.ID]Block) } else { n.sb.Add(childID) diff --git a/avalanchego/snow/consensus/snowman/test_block.go b/avalanchego/snow/consensus/snowman/test_block.go index 241688b5..a02bf317 100644 --- a/avalanchego/snow/consensus/snowman/test_block.go +++ b/avalanchego/snow/consensus/snowman/test_block.go @@ -1,17 +1,21 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman import ( - "sort" + "context" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils" ) -var _ Block = &TestBlock{} +var ( + _ Block = (*TestBlock)(nil) + _ utils.Sortable[*TestBlock] = (*TestBlock)(nil) +) // TestBlock is a useful test block type TestBlock struct { @@ -24,17 +28,26 @@ type TestBlock struct { BytesV []byte } -func (b *TestBlock) Parent() ids.ID { return b.ParentV } -func (b *TestBlock) Height() uint64 { return b.HeightV } -func (b *TestBlock) Timestamp() time.Time { return b.TimestampV } -func (b *TestBlock) Verify() error { return b.VerifyV } -func (b *TestBlock) Bytes() []byte { return b.BytesV } +func (b *TestBlock) Parent() ids.ID { + return b.ParentV +} -type sortBlocks []*TestBlock +func (b *TestBlock) Height() uint64 { + return b.HeightV +} -func (sb sortBlocks) Less(i, j int) bool { return sb[i].HeightV < sb[j].HeightV } -func (sb sortBlocks) Len() int { return len(sb) } -func (sb sortBlocks) Swap(i, j int) { sb[j], sb[i] = sb[i], sb[j] } +func (b *TestBlock) Timestamp() time.Time { + return b.TimestampV +} -// SortTestBlocks sorts the array of blocks by height -func SortTestBlocks(blocks []*TestBlock) { sort.Sort(sortBlocks(blocks)) } +func (b *TestBlock) Verify(context.Context) error { + return b.VerifyV +} + +func (b 
*TestBlock) Bytes() []byte { + return b.BytesV +} + +func (b *TestBlock) Less(other *TestBlock) bool { + return b.HeightV < other.HeightV +} diff --git a/avalanchego/snow/consensus/snowman/topological.go b/avalanchego/snow/consensus/snowman/topological.go index 68c9c760..0d8fd26e 100644 --- a/avalanchego/snow/consensus/snowman/topological.go +++ b/avalanchego/snow/consensus/snowman/topological.go @@ -1,33 +1,41 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman import ( + "context" "errors" "fmt" "strings" + "time" "go.uber.org/zap" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/metrics" "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" ) var ( errDuplicateAdd = errors.New("duplicate block add") - _ Factory = &TopologicalFactory{} - _ Consensus = &Topological{} + _ Factory = (*TopologicalFactory)(nil) + _ Consensus = (*Topological)(nil) ) // TopologicalFactory implements Factory by returning a topological struct type TopologicalFactory struct{} -func (TopologicalFactory) New() Consensus { return &Topological{} } +func (TopologicalFactory) New() Consensus { + return &Topological{} +} // Topological implements the Snowman interface by using a tree tracking the // strongly preferred branch. 
This tree structure amortizes network polls to @@ -36,6 +44,7 @@ type Topological struct { metrics.Latency metrics.Polls metrics.Height + metrics.Timestamp // pollNumber is the number of times RecordPolls has been called pollNumber uint64 @@ -57,16 +66,16 @@ type Topological struct { blocks map[ids.ID]*snowmanBlock // blockID -> snowmanBlock // preferredIDs stores the set of IDs that are currently preferred. - preferredIDs ids.Set + preferredIDs set.Set[ids.ID] // tail is the preferred block with no children tail ids.ID // Used in [calculateInDegree] and. // Should only be accessed in that method. - // We use this one instance of ids.Set instead of creating a - // new ids.Set during each call to [calculateInDegree]. - leaves ids.Set + // We use this one instance of set.Set instead of creating a + // new set.Set during each call to [calculateInDegree]. + leaves set.Set[ids.ID] // Kahn nodes used in [calculateInDegree] and [markAncestorInDegrees]. // Should only be accessed in those methods. 
@@ -81,7 +90,7 @@ type kahnNode struct { // inDegree is 0, then this node is a leaf inDegree int // votes for all the children of this node, so far - votes ids.Bag + votes bag.Bag[ids.ID] } // Used to track which children should receive votes @@ -89,10 +98,10 @@ type votes struct { // parentID is the parent of all the votes provided in the votes bag parentID ids.ID // votes for all the children of the parent - votes ids.Bag + votes bag.Bag[ids.ID] } -func (ts *Topological) Initialize(ctx *snow.ConsensusContext, params snowball.Parameters, rootID ids.ID, rootHeight uint64) error { +func (ts *Topological) Initialize(ctx *snow.ConsensusContext, params snowball.Parameters, rootID ids.ID, rootHeight uint64, rootTime time.Time) error { if err := params.Verify(); err != nil { return err } @@ -115,27 +124,35 @@ func (ts *Topological) Initialize(ctx *snow.ConsensusContext, params snowball.Pa } ts.Height = heightMetrics - ts.leaves = ids.Set{} + timestampMetrics, err := metrics.NewTimestamp("", ctx.Registerer) + if err != nil { + return err + } + ts.Timestamp = timestampMetrics + + ts.leaves = set.Set[ids.ID]{} ts.kahnNodes = make(map[ids.ID]kahnNode) ts.ctx = ctx ts.params = params ts.head = rootID ts.height = rootHeight ts.blocks = map[ids.ID]*snowmanBlock{ - rootID: {sm: ts}, + rootID: {params: ts.params}, } ts.tail = rootID - // Initially set the height to the last accepted block. + // Initially set the metrics for the last accepted block. ts.Height.Accepted(ts.height) + ts.Timestamp.Accepted(rootTime) + return nil } -func (ts *Topological) Parameters() snowball.Parameters { return ts.params } - -func (ts *Topological) NumProcessing() int { return len(ts.blocks) - 1 } +func (ts *Topological) NumProcessing() int { + return len(ts.blocks) - 1 +} -func (ts *Topological) Add(blk Block) error { +func (ts *Topological) Add(ctx context.Context, blk Block) error { blkID := blk.ID() // Make sure a block is not inserted twice. 
This enforces the invariant that @@ -155,18 +172,18 @@ func (ts *Topological) Add(blk Block) error { // If the ancestor is missing, this means the ancestor must have already // been pruned. Therefore, the dependent should be transitively // rejected. - if err := blk.Reject(); err != nil { + if err := blk.Reject(ctx); err != nil { return err } - ts.Latency.Rejected(blkID, ts.pollNumber) + ts.Latency.Rejected(blkID, ts.pollNumber, len(blk.Bytes())) return nil } // add the block as a child of its parent, and add the block to the tree parentNode.AddChild(blk) ts.blocks[blkID] = &snowmanBlock{ - sm: ts, - blk: blk, + params: ts.params, + blk: blk, } // If we are extending the tail, this is the new tail @@ -207,7 +224,13 @@ func (ts *Topological) IsPreferred(blk Block) bool { return ts.preferredIDs.Contains(blk.ID()) } -func (ts *Topological) Preference() ids.ID { return ts.tail } +func (ts *Topological) LastAccepted() ids.ID { + return ts.head +} + +func (ts *Topological) Preference() ids.ID { + return ts.tail +} // The votes bag contains at most K votes for blocks in the tree. If there is a // vote for a block that isn't in the tree, the vote is dropped. 
@@ -230,7 +253,7 @@ func (ts *Topological) Preference() ids.ID { return ts.tail } // The complexity of this function is: // - Runtime = 3 * |live set| + |votes| // - Space = 2 * |live set| + |votes| -func (ts *Topological) RecordPoll(voteBag ids.Bag) error { +func (ts *Topological) RecordPoll(ctx context.Context, voteBag bag.Bag[ids.ID]) error { // Register a new poll call ts.pollNumber++ @@ -250,7 +273,7 @@ func (ts *Topological) RecordPoll(voteBag ids.Bag) error { } // Runtime = |live set| ; Space = Constant - preferred, err := ts.vote(voteStack) + preferred, err := ts.vote(ctx, voteStack) if err != nil { return err } @@ -286,10 +309,12 @@ func (ts *Topological) RecordPoll(voteBag ids.Bag) error { return nil } -func (ts *Topological) Finalized() bool { return len(ts.blocks) == 1 } +func (ts *Topological) Finalized() bool { + return len(ts.blocks) == 1 +} // HealthCheck returns information about the consensus health. -func (ts *Topological) HealthCheck() (interface{}, error) { +func (ts *Topological) HealthCheck(context.Context) (interface{}, error) { numOutstandingBlks := ts.Latency.NumProcessing() isOutstandingBlks := numOutstandingBlks <= ts.params.MaxOutstandingItems healthy := isOutstandingBlks @@ -319,11 +344,9 @@ func (ts *Topological) HealthCheck() (interface{}, error) { // takes in a list of votes and sets up the topological ordering. Returns the // reachable section of the graph annotated with the number of inbound edges and // the non-transitively applied votes. Also returns the list of leaf blocks. 
-func (ts *Topological) calculateInDegree(votes ids.Bag) { +func (ts *Topological) calculateInDegree(votes bag.Bag[ids.ID]) { // Clear the Kahn node set - for k := range ts.kahnNodes { - delete(ts.kahnNodes, k) - } + maps.Clear(ts.kahnNodes) // Clear the leaf set ts.leaves.Clear() @@ -431,7 +454,7 @@ func (ts *Topological) pushVotes() []votes { // apply votes to the branch that received an Alpha threshold and returns the // next preferred block after the last preferred block that received an Alpha // threshold. -func (ts *Topological) vote(voteStack []votes) (ids.ID, error) { +func (ts *Topological) vote(ctx context.Context, voteStack []votes) (ids.ID, error) { // If the voteStack is empty, then the full tree should falter. This won't // change the preferred branch. if len(voteStack) == 0 { @@ -486,7 +509,7 @@ func (ts *Topological) vote(voteStack []votes) (ids.ID, error) { // Only accept when you are finalized and the head. if parentBlock.sb.Finalized() && ts.head == vote.parentID { - if err := ts.acceptPreferredChild(parentBlock); err != nil { + if err := ts.acceptPreferredChild(ctx, parentBlock); err != nil { return ids.ID{}, err } @@ -560,7 +583,7 @@ func (ts *Topological) vote(voteStack []votes) (ids.ID, error) { // // We accept a block once its parent's snowball instance has finalized // with it as the preference. -func (ts *Topological) acceptPreferredChild(n *snowmanBlock) error { +func (ts *Topological) acceptPreferredChild(ctx context.Context, n *snowmanBlock) error { // We are finalizing the block's child, so we need to get the preference pref := n.sb.Preference() @@ -568,19 +591,16 @@ func (ts *Topological) acceptPreferredChild(n *snowmanBlock) error { child := n.children[pref] // Notify anyone listening that this block was accepted. bytes := child.Bytes() - // Note that DecisionAcceptor.Accept / ConsensusAcceptor.Accept must be - // called before child.Accept to honor Acceptor.Accept's invariant. 
- if err := ts.ctx.DecisionAcceptor.Accept(ts.ctx, pref, bytes); err != nil { - return err - } - if err := ts.ctx.ConsensusAcceptor.Accept(ts.ctx, pref, bytes); err != nil { + // Note that BlockAcceptor.Accept must be called before child.Accept to + // honor Acceptor.Accept's invariant. + if err := ts.ctx.BlockAcceptor.Accept(ts.ctx, pref, bytes); err != nil { return err } ts.ctx.Log.Trace("accepting block", zap.Stringer("blkID", pref), ) - if err := child.Accept(); err != nil { + if err := child.Accept(ctx); err != nil { return err } @@ -591,8 +611,9 @@ func (ts *Topological) acceptPreferredChild(n *snowmanBlock) error { // now implies its preferredness. ts.preferredIDs.Remove(pref) - ts.Latency.Accepted(pref, ts.pollNumber) + ts.Latency.Accepted(pref, ts.pollNumber, len(bytes)) ts.Height.Accepted(ts.height) + ts.Timestamp.Accepted(child.Timestamp()) // Because ts.blocks contains the last accepted block, we don't delete the // block from the blocks map here. @@ -609,21 +630,21 @@ func (ts *Topological) acceptPreferredChild(n *snowmanBlock) error { zap.Stringer("rejectedID", childID), zap.Stringer("conflictedID", pref), ) - if err := child.Reject(); err != nil { + if err := child.Reject(ctx); err != nil { return err } - ts.Latency.Rejected(childID, ts.pollNumber) + ts.Latency.Rejected(childID, ts.pollNumber, len(child.Bytes())) // Track which blocks have been directly rejected rejects = append(rejects, childID) } // reject all the descendants of the blocks we just rejected - return ts.rejectTransitively(rejects) + return ts.rejectTransitively(ctx, rejects) } // Takes in a list of rejected ids and rejects all descendants of these IDs -func (ts *Topological) rejectTransitively(rejected []ids.ID) error { +func (ts *Topological) rejectTransitively(ctx context.Context, rejected []ids.ID) error { // the rejected array is treated as a stack, with the next element at index // 0 and the last element at the end of the slice. 
for len(rejected) > 0 { @@ -637,10 +658,10 @@ func (ts *Topological) rejectTransitively(rejected []ids.ID) error { delete(ts.blocks, rejectedID) for childID, child := range rejectedNode.children { - if err := child.Reject(); err != nil { + if err := child.Reject(ctx); err != nil { return err } - ts.Latency.Rejected(childID, ts.pollNumber) + ts.Latency.Rejected(childID, ts.pollNumber, len(child.Bytes())) // add the newly rejected block to the end of the stack rejected = append(rejected, childID) diff --git a/avalanchego/snow/consensus/snowman/topological_test.go b/avalanchego/snow/consensus/snowman/topological_test.go index ba8c16d1..f3e6ed6e 100644 --- a/avalanchego/snow/consensus/snowman/topological_test.go +++ b/avalanchego/snow/consensus/snowman/topological_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman @@ -7,4 +7,6 @@ import ( "testing" ) -func TestTopological(t *testing.T) { runConsensusTests(t, TopologicalFactory{}) } +func TestTopological(t *testing.T) { + runConsensusTests(t, TopologicalFactory{}) +} diff --git a/avalanchego/snow/consensus/snowman/traced_consensus.go b/avalanchego/snow/consensus/snowman/traced_consensus.go new file mode 100644 index 00000000..67a8797b --- /dev/null +++ b/avalanchego/snow/consensus/snowman/traced_consensus.go @@ -0,0 +1,50 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/bag" +) + +var _ Consensus = (*tracedConsensus)(nil) + +type tracedConsensus struct { + Consensus + tracer trace.Tracer +} + +func Trace(consensus Consensus, tracer trace.Tracer) Consensus { + return &tracedConsensus{ + Consensus: consensus, + tracer: tracer, + } +} + +func (c *tracedConsensus) Add(ctx context.Context, blk Block) error { + ctx, span := c.tracer.Start(ctx, "tracedConsensus.Add", oteltrace.WithAttributes( + attribute.Stringer("blkID", blk.ID()), + attribute.Int64("height", int64(blk.Height())), + )) + defer span.End() + + return c.Consensus.Add(ctx, blk) +} + +func (c *tracedConsensus) RecordPoll(ctx context.Context, votes bag.Bag[ids.ID]) error { + ctx, span := c.tracer.Start(ctx, "tracedConsensus.RecordPoll", oteltrace.WithAttributes( + attribute.Int("numVotes", votes.Len()), + attribute.Int("numBlkIDs", len(votes.List())), + )) + defer span.End() + + return c.Consensus.RecordPoll(ctx, votes) +} diff --git a/avalanchego/snow/consensus/snowstorm/acceptor.go b/avalanchego/snow/consensus/snowstorm/acceptor.go index 6221588b..798dbac7 100644 --- a/avalanchego/snow/consensus/snowstorm/acceptor.go +++ b/avalanchego/snow/consensus/snowstorm/acceptor.go @@ -1,38 +1,45 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/events" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" ) -var _ events.Blockable = &acceptor{} +var _ events.Blockable = (*acceptor)(nil) type acceptor struct { g *Directed errs *wrappers.Errs - deps ids.Set + deps set.Set[ids.ID] rejected bool txID ids.ID } -func (a *acceptor) Dependencies() ids.Set { return a.deps } +func (a *acceptor) Dependencies() set.Set[ids.ID] { + return a.deps +} -func (a *acceptor) Fulfill(id ids.ID) { +func (a *acceptor) Fulfill(ctx context.Context, id ids.ID) { a.deps.Remove(id) - a.Update() + a.Update(ctx) } -func (a *acceptor) Abandon(id ids.ID) { a.rejected = true } +func (a *acceptor) Abandon(context.Context, ids.ID) { + a.rejected = true +} -func (a *acceptor) Update() { +func (a *acceptor) Update(ctx context.Context) { // If I was rejected or I am still waiting on dependencies to finish or an // error has occurred, I shouldn't do anything. if a.rejected || a.deps.Len() != 0 || a.errs.Errored() { return } - a.errs.Add(a.g.accept(a.txID)) + a.errs.Add(a.g.accept(ctx, a.txID)) } diff --git a/avalanchego/snow/consensus/snowstorm/benchmark_test.go b/avalanchego/snow/consensus/snowstorm/benchmark_test.go index 45d11675..91feb10d 100644 --- a/avalanchego/snow/consensus/snowstorm/benchmark_test.go +++ b/avalanchego/snow/consensus/snowstorm/benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowstorm diff --git a/avalanchego/snow/consensus/snowstorm/consensus.go b/avalanchego/snow/consensus/snowstorm/consensus.go index 458fea95..62a2977e 100644 --- a/avalanchego/snow/consensus/snowstorm/consensus.go +++ b/avalanchego/snow/consensus/snowstorm/consensus.go @@ -1,13 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowstorm import ( + "context" "fmt" + "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" sbcon "github.com/ava-labs/avalanchego/snow/consensus/snowball" ) @@ -17,45 +21,43 @@ import ( // should call collect with the responses. type Consensus interface { fmt.Stringer + health.Checker // Takes in the context, alpha, betaVirtuous, and betaRogue Initialize(*snow.ConsensusContext, sbcon.Parameters) error - // Returns the parameters that describe this snowstorm instance - Parameters() sbcon.Parameters - // Returns true if transaction is virtuous. // That is, no transaction has been added that conflicts with IsVirtuous(Tx) bool // Adds a new transaction to vote on. Returns if a critical error has // occurred. - Add(Tx) error + Add(context.Context, Tx) error // Remove a transaction from the set of currently processing txs. It is // assumed that the provided transaction ID is currently processing. - Remove(ids.ID) error + Remove(context.Context, ids.ID) error // Returns true iff transaction has been added Issued(Tx) bool // Returns the set of virtuous transactions // that have not yet been accepted or rejected - Virtuous() ids.Set + Virtuous() set.Set[ids.ID] // Returns the currently preferred transactions to be finalized - Preferences() ids.Set + Preferences() set.Set[ids.ID] // Return the current virtuous transactions that are being voted on. - VirtuousVoting() ids.Set + VirtuousVoting() set.Set[ids.ID] // Returns the set of transactions conflicting with - Conflicts(Tx) ids.Set + Conflicts(Tx) set.Set[ids.ID] // Collects the results of a network poll. Assumes all transactions // have been previously added. Returns true if any statuses or preferences // changed. 
Returns if a critical error has occurred. - RecordPoll(ids.Bag) (bool, error) + RecordPoll(context.Context, bag.Bag[ids.ID]) (bool, error) // Returns true iff all remaining transactions are rogue. Note, it is // possible that after returning quiesce, a new decision may be added such @@ -66,7 +68,4 @@ type Consensus interface { // possible that after returning finalized, a new decision may be added such // that this instance is no longer finalized. Finalized() bool - - // HealthCheck returns information about the consensus health. - HealthCheck() (interface{}, error) } diff --git a/avalanchego/snow/consensus/snowstorm/consensus_test.go b/avalanchego/snow/consensus/snowstorm/consensus_test.go index d65c8847..6aac3623 100644 --- a/avalanchego/snow/consensus/snowstorm/consensus_test.go +++ b/avalanchego/snow/consensus/snowstorm/consensus_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm import ( + "context" "errors" "path" "reflect" @@ -19,6 +20,8 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" sbcon "github.com/ava-labs/avalanchego/snow/consensus/snowball" @@ -29,7 +32,6 @@ type testFunc func(*testing.T, Factory) var ( testFuncs = []testFunc{ MetricsTest, - ParamsTest, IssuedTest, LeftoverInputTest, LowerConfidenceTest, @@ -44,6 +46,7 @@ var ( AcceptingDependencyTest, AcceptingSlowDependencyTest, RejectingDependencyTest, + RejectMultipleTimesTest, VacuouslyAcceptedTest, ConflictsTest, VirtuousDependsOnRogueTest, @@ -56,9 +59,11 @@ var ( } Red, Green, Blue, Alpha *TestTx + + errTest = errors.New("non-nil error") ) -// R - G - B - A +// R - G - B - A func Setup() { Red = &TestTx{} Green = &TestTx{} @@ -77,25 +82,25 @@ func Setup() { color.BytesV = []byte{byte(i)} } - X := ids.Empty.Prefix(4) - Y := ids.Empty.Prefix(5) - Z := ids.Empty.Prefix(6) + x := ids.Empty.Prefix(4) + y := ids.Empty.Prefix(5) + z := ids.Empty.Prefix(6) - Red.InputIDsV = append(Red.InputIDsV, X) - Green.InputIDsV = append(Green.InputIDsV, X) - Green.InputIDsV = append(Green.InputIDsV, Y) + Red.InputIDsV = append(Red.InputIDsV, x) + Green.InputIDsV = append(Green.InputIDsV, x) + Green.InputIDsV = append(Green.InputIDsV, y) - Blue.InputIDsV = append(Blue.InputIDsV, Y) - Blue.InputIDsV = append(Blue.InputIDsV, Z) + Blue.InputIDsV = append(Blue.InputIDsV, y) + Blue.InputIDsV = append(Blue.InputIDsV, z) - Alpha.InputIDsV = append(Alpha.InputIDsV, Z) + Alpha.InputIDsV = append(Alpha.InputIDsV, z) errs := wrappers.Errs{} errs.Add( - Red.Verify(), - Green.Verify(), - Blue.Verify(), - Alpha.Verify(), + Red.Verify(context.Background()), + Green.Verify(context.Background()), + Blue.Verify(context.Background()), + 
Alpha.Verify(context.Background()), ) if errs.Errored() { panic(errs.Err) @@ -128,7 +133,7 @@ func MetricsTest(t *testing.T, factory Factory) { BetaRogue: 2, ConcurrentRepolls: 1, } - err := ctx.Registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_processing", })) if err != nil { @@ -148,7 +153,7 @@ func MetricsTest(t *testing.T, factory Factory) { BetaRogue: 2, ConcurrentRepolls: 1, } - err := ctx.Registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_accepted", })) if err != nil { @@ -168,7 +173,7 @@ func MetricsTest(t *testing.T, factory Factory) { BetaRogue: 2, ConcurrentRepolls: 1, } - err := ctx.Registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + err := ctx.AvalancheRegisterer.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "tx_rejected", })) if err != nil { @@ -181,35 +186,6 @@ func MetricsTest(t *testing.T, factory Factory) { } } -func ParamsTest(t *testing.T, factory Factory) { - graph := factory.New() - - params := sbcon.Parameters{ - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - } - err := graph.Initialize(snow.DefaultConsensusContextTest(), params) - if err != nil { - t.Fatal(err) - } - - if p := graph.Parameters(); p.K != params.K { - t.Fatalf("Wrong K parameter") - } else if p := graph.Parameters(); p.Alpha != params.Alpha { - t.Fatalf("Wrong Alpha parameter") - } else if p := graph.Parameters(); p.BetaVirtuous != params.BetaVirtuous { - t.Fatalf("Wrong Beta1 parameter") - } else if p := graph.Parameters(); p.BetaRogue != params.BetaRogue { - t.Fatalf("Wrong Beta2 parameter") - } -} - func IssuedTest(t *testing.T, factory Factory) { graph := factory.New() @@ -230,13 +206,13 @@ func IssuedTest(t 
*testing.T, factory Factory) { if issued := graph.Issued(Red); issued { t.Fatalf("Haven't issued anything yet.") - } else if err := graph.Add(Red); err != nil { + } else if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } else if issued := graph.Issued(Red); !issued { t.Fatalf("Have already issued.") } - _ = Blue.Accept() + _ = Blue.Accept(context.Background()) if issued := graph.Issued(Blue); !issued { t.Fatalf("Have already accepted.") @@ -261,9 +237,9 @@ func LeftoverInputTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(Red); err != nil { + if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) - } else if err := graph.Add(Green); err != nil { + } else if err := graph.Add(context.Background(), Green); err != nil { t.Fatal(err) } @@ -277,10 +253,10 @@ func LeftoverInputTest(t *testing.T, factory Factory) { t.Fatalf("Finalized too early") } - r := ids.Bag{} + r := bag.Bag[ids.ID]{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - if updated, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(context.Background(), r); err != nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -317,13 +293,13 @@ func LowerConfidenceTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(Red); err != nil { + if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } - if err := graph.Add(Green); err != nil { + if err := graph.Add(context.Background(), Green); err != nil { t.Fatal(err) } - if err := graph.Add(Blue); err != nil { + if err := graph.Add(context.Background(), Blue); err != nil { t.Fatal(err) } @@ -337,10 +313,10 @@ func LowerConfidenceTest(t *testing.T, factory Factory) { t.Fatalf("Finalized too early") } - r := ids.Bag{} + r := bag.Bag[ids.ID]{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - if updated, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(context.Background(), r); err 
!= nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -375,16 +351,16 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(Red); err != nil { + if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } - if err := graph.Add(Green); err != nil { + if err := graph.Add(context.Background(), Green); err != nil { t.Fatal(err) } - if err := graph.Add(Alpha); err != nil { + if err := graph.Add(context.Background(), Alpha); err != nil { t.Fatal(err) } - if err := graph.Add(Blue); err != nil { + if err := graph.Add(context.Background(), Blue); err != nil { t.Fatal(err) } @@ -400,10 +376,10 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) { t.Fatalf("Finalized too early") } - r := ids.Bag{} + r := bag.Bag[ids.ID]{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - if updated, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(context.Background(), r); err != nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -438,10 +414,10 @@ func IndependentTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(Red); err != nil { + if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } - if err := graph.Add(Alpha); err != nil { + if err := graph.Add(context.Background(), Alpha); err != nil { t.Fatal(err) } @@ -457,11 +433,11 @@ func IndependentTest(t *testing.T, factory Factory) { t.Fatalf("Finalized too early") } - ra := ids.Bag{} + ra := bag.Bag[ids.ID]{} ra.SetThreshold(2) ra.AddCount(Red.ID(), 2) ra.AddCount(Alpha.ID(), 2) - if updated, err := graph.RecordPoll(ra); err != nil { + if updated, err := graph.RecordPoll(context.Background(), ra); err != nil { t.Fatal(err) } else if updated { t.Fatalf("Shouldn't have updated the frontiers") @@ -473,7 +449,7 @@ func IndependentTest(t *testing.T, factory Factory) { t.Fatalf("Wrong preference. 
Expected %s", Alpha.ID()) } else if graph.Finalized() { t.Fatalf("Finalized too early") - } else if updated, err := graph.RecordPoll(ra); err != nil { + } else if updated, err := graph.RecordPoll(context.Background(), ra); err != nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -502,13 +478,13 @@ func VirtuousTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(Red); err != nil { + if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } else if virtuous := graph.Virtuous(); virtuous.Len() != 1 { t.Fatalf("Wrong number of virtuous.") } else if !virtuous.Contains(Red.ID()) { t.Fatalf("Wrong virtuous. Expected %s", Red.ID()) - } else if err := graph.Add(Alpha); err != nil { + } else if err := graph.Add(context.Background(), Alpha); err != nil { t.Fatal(err) } else if virtuous := graph.Virtuous(); virtuous.Len() != 2 { t.Fatalf("Wrong number of virtuous.") @@ -516,13 +492,13 @@ func VirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Wrong virtuous. Expected %s", Red.ID()) } else if !virtuous.Contains(Alpha.ID()) { t.Fatalf("Wrong virtuous. Expected %s", Alpha.ID()) - } else if err := graph.Add(Green); err != nil { + } else if err := graph.Add(context.Background(), Green); err != nil { t.Fatal(err) } else if virtuous := graph.Virtuous(); virtuous.Len() != 1 { t.Fatalf("Wrong number of virtuous.") } else if !virtuous.Contains(Alpha.ID()) { t.Fatalf("Wrong virtuous. 
Expected %s", Alpha.ID()) - } else if err := graph.Add(Blue); err != nil { + } else if err := graph.Add(context.Background(), Blue); err != nil { t.Fatal(err) } else if virtuous := graph.Virtuous(); virtuous.Len() != 0 { t.Fatalf("Wrong number of virtuous.") @@ -557,7 +533,7 @@ func IsVirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Should be virtuous") } - err := graph.Add(Red) + err := graph.Add(context.Background(), Red) switch { case err != nil: t.Fatal(err) @@ -571,7 +547,7 @@ func IsVirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Should be virtuous") } - err = graph.Add(Green) + err = graph.Add(context.Background(), Green) switch { case err != nil: t.Fatal(err) @@ -604,11 +580,11 @@ func QuiesceTest(t *testing.T, factory Factory) { if !graph.Quiesce() { t.Fatalf("Should quiesce") - } else if err := graph.Add(Red); err != nil { + } else if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } else if graph.Quiesce() { t.Fatalf("Shouldn't quiesce") - } else if err := graph.Add(Green); err != nil { + } else if err := graph.Add(context.Background(), Green); err != nil { t.Fatal(err) } else if !graph.Quiesce() { t.Fatalf("Should quiesce") @@ -630,7 +606,7 @@ func AddNonEmptyWhitelistTest(t *testing.T, factory Factory) { } ctx := snow.DefaultConsensusContextTest() reg := prometheus.NewRegistry() - ctx.Registerer = reg + ctx.AvalancheRegisterer = reg err := graph.Initialize(ctx, params) if err != nil { t.Fatal(err) @@ -694,7 +670,7 @@ func AddNonEmptyWhitelistTest(t *testing.T, factory Factory) { InputIDsV: []ids.ID{ids.GenerateTestID()}, DependenciesV: []Tx{tx1, tx2, tx3, tx4}, HasWhitelistV: true, - WhitelistV: ids.Set{ + WhitelistV: set.Set[ids.ID]{ tx1.IDV: struct{}{}, tx2.IDV: struct{}{}, tx3.IDV: struct{}{}, @@ -719,7 +695,7 @@ func AddNonEmptyWhitelistTest(t *testing.T, factory Factory) { InputIDsV: []ids.ID{ids.GenerateTestID()}, DependenciesV: []Tx{tx1, tx2, tx6}, HasWhitelistV: true, - WhitelistV: ids.Set{ + WhitelistV: 
set.Set[ids.ID]{ tx1.IDV: struct{}{}, tx2.IDV: struct{}{}, tx6.IDV: struct{}{}, @@ -729,7 +705,7 @@ func AddNonEmptyWhitelistTest(t *testing.T, factory Factory) { txs := []*TestTx{tx1, tx2, tx3, tx4, stx5, tx6, stx7} for _, tx := range txs { - if err := graph.Add(tx); err != nil { + if err := graph.Add(context.Background(), tx); err != nil { t.Fatal(err) } } @@ -742,14 +718,14 @@ func AddNonEmptyWhitelistTest(t *testing.T, factory Factory) { require.Equal(t, 2., mss["whitelist_tx_processing"]) vset1 := graph.Virtuous() - if !vset1.Equals(ids.Set{ + if !vset1.Equals(set.Set[ids.ID]{ tx1.IDV: struct{}{}, tx2.IDV: struct{}{}, }) { t.Fatalf("unexpected virtuous %v", vset1) } pset1 := graph.Preferences() - if !pset1.Equals(ids.Set{ + if !pset1.Equals(set.Set[ids.ID]{ tx1.IDV: struct{}{}, tx2.IDV: struct{}{}, tx3.IDV: struct{}{}, @@ -762,11 +738,11 @@ func AddNonEmptyWhitelistTest(t *testing.T, factory Factory) { t.Fatal("unexpected Finalized") } - r := ids.Bag{} + r := bag.Bag[ids.ID]{} r.SetThreshold(2) r.AddCount(tx1.ID(), 2) - updated, err := graph.RecordPoll(r) + updated, err := graph.RecordPoll(context.Background(), r) if err != nil { t.Fatal(err) } @@ -775,13 +751,13 @@ func AddNonEmptyWhitelistTest(t *testing.T, factory Factory) { } vset2 := graph.Virtuous() - if !vset2.Equals(ids.Set{ + if !vset2.Equals(set.Set[ids.ID]{ tx2.IDV: struct{}{}, }) { t.Fatalf("unexpected virtuous %v", vset2) } pset2 := graph.Preferences() - if !pset2.Equals(ids.Set{ + if !pset2.Equals(set.Set[ids.ID]{ tx2.IDV: struct{}{}, tx3.IDV: struct{}{}, tx4.IDV: struct{}{}, @@ -859,7 +835,7 @@ func AddWhitelistedVirtuousTest(t *testing.T, factory Factory) { txs := []*TestTx{tx0, tx1} for _, tx := range txs { - if err := graph.Add(tx); err != nil { + if err := graph.Add(context.Background(), tx); err != nil { t.Fatal(err) } } @@ -895,7 +871,7 @@ func WhitelistConflictsTest(t *testing.T, factory Factory) { for i := range txIDs { txIDs[i] = ids.GenerateTestID() } - allTxIDs := ids.NewSet(n) + 
allTxIDs := set.NewSet[ids.ID](n) allTxIDs.Add(txIDs...) // each spending each other @@ -912,12 +888,12 @@ func WhitelistConflictsTest(t *testing.T, factory Factory) { WhitelistV: nil, } allTxs[i] = tx - if err := graph.Add(tx); err != nil { + if err := graph.Add(context.Background(), tx); err != nil { t.Fatal(err) } } - whitelist := ids.NewSet(1) + whitelist := set.NewSet[ids.ID](1) whitelist.Add(ids.GenerateTestID()) // make whitelist transaction that conflicts with tx outside of its @@ -934,7 +910,7 @@ func WhitelistConflictsTest(t *testing.T, factory Factory) { WhitelistV: whitelist, WhitelistErrV: nil, } - if err := graph.Add(wlTx); err != nil { + if err := graph.Add(context.Background(), wlTx); err != nil { t.Fatal(err) } @@ -981,13 +957,13 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(Red); err != nil { + if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } - if err := graph.Add(Green); err != nil { + if err := graph.Add(context.Background(), Green); err != nil { t.Fatal(err) } - if err := graph.Add(purple); err != nil { + if err := graph.Add(context.Background(), purple); err != nil { t.Fatal(err) } @@ -1007,9 +983,9 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) } - g := ids.Bag{} + g := bag.Bag[ids.ID]{} g.Add(Green.ID()) - if updated, err := graph.RecordPoll(g); err != nil { + if updated, err := graph.RecordPoll(context.Background(), g); err != nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -1031,9 +1007,9 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) } - rp := ids.Bag{} + rp := bag.Bag[ids.ID]{} rp.Add(Red.ID(), purple.ID()) - if updated, err := graph.RecordPoll(rp); err != nil { + if updated, err := graph.RecordPoll(context.Background(), rp); err != nil { t.Fatal(err) } else if updated { t.Fatalf("Shouldn't have updated the frontiers") @@ -1055,9 +1031,9 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) } - r := ids.Bag{} + r := bag.Bag[ids.ID]{} r.Add(Red.ID()) - if updated, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(context.Background(), r); err != nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -1083,12 +1059,12 @@ type singleAcceptTx struct { accepted bool } -func (tx *singleAcceptTx) Accept() error { +func (tx *singleAcceptTx) Accept(ctx context.Context) error { if tx.accepted { tx.t.Fatalf("accept called multiple times") } tx.accepted = true - return tx.Tx.Accept() + return tx.Tx.Accept(ctx) } func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { @@ -1123,13 +1099,13 @@ func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(Red); err != nil { + if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } - if err := graph.Add(Green); err != nil { + if err := graph.Add(context.Background(), Green); err != nil { t.Fatal(err) } - if err := graph.Add(purple); err != nil { + if err := graph.Add(context.Background(), purple); err != nil { t.Fatal(err) } @@ -1149,9 +1125,9 @@ func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) } - g := ids.Bag{} + g := bag.Bag[ids.ID]{} g.Add(Green.ID()) - if updated, err := graph.RecordPoll(g); err != nil { + if updated, err := graph.RecordPoll(context.Background(), g); err != nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -1173,9 +1149,9 @@ func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) } - p := ids.Bag{} + p := bag.Bag[ids.ID]{} p.Add(purple.ID()) - if updated, err := graph.RecordPoll(p); err != nil { + if updated, err := graph.RecordPoll(context.Background(), p); err != nil { t.Fatal(err) } else if updated { t.Fatalf("Shouldn't have updated the frontiers") @@ -1197,9 +1173,9 @@ func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) } - rp := ids.Bag{} + rp := bag.Bag[ids.ID]{} rp.Add(Red.ID(), purple.ID()) - if updated, err := graph.RecordPoll(rp); err != nil { + if updated, err := graph.RecordPoll(context.Background(), rp); err != nil { t.Fatal(err) } else if updated { t.Fatalf("Shouldn't have updated the frontiers") @@ -1221,9 +1197,9 @@ func AcceptingSlowDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) } - r := ids.Bag{} + r := bag.Bag[ids.ID]{} r.Add(Red.ID()) - if updated, err := graph.RecordPoll(r); err != nil { + if updated, err := graph.RecordPoll(context.Background(), r); err != nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -1269,16 +1245,16 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(Red); err != nil { + if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } - if err := graph.Add(Green); err != nil { + if err := graph.Add(context.Background(), Green); err != nil { t.Fatal(err) } - if err := graph.Add(Blue); err != nil { + if err := graph.Add(context.Background(), Blue); err != nil { t.Fatal(err) } - if err := graph.Add(purple); err != nil { + if err := graph.Add(context.Background(), purple); err != nil { t.Fatal(err) } @@ -1300,9 +1276,9 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) } - gp := ids.Bag{} + gp := bag.Bag[ids.ID]{} gp.Add(Green.ID(), purple.ID()) - if updated, err := graph.RecordPoll(gp); err != nil { + if updated, err := graph.RecordPoll(context.Background(), gp); err != nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -1326,7 +1302,7 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Processing) } - if updated, err := graph.RecordPoll(gp); err != nil { + if updated, err := graph.RecordPoll(context.Background(), gp); err != nil { t.Fatal(err) } else if !updated { t.Fatalf("Should have updated the frontiers") @@ -1347,6 +1323,74 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { } } +func RejectMultipleTimesTest(t *testing.T, factory Factory) { + require := require.New(t) + + purple := &TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.Empty.Prefix(7), + StatusV: choices.Processing, + }, + DependenciesV: []Tx{Green}, + InputIDsV: []ids.ID{ids.Empty.Prefix(8)}, + } + yellow := &TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.Empty.Prefix(9), + StatusV: choices.Processing, + }, + InputIDsV: []ids.ID{ids.Empty.Prefix(8)}, + } + + graph := factory.New() + + params := sbcon.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + } + require.NoError(graph.Initialize(snow.DefaultConsensusContextTest(), params)) + require.NoError(graph.Add(context.Background(), Red)) + require.NoError(graph.Add(context.Background(), yellow)) + require.NoError(graph.Add(context.Background(), Green)) + require.NoError(graph.Add(context.Background(), purple)) + + prefs := graph.Preferences() + require.Len(prefs, 2) + require.Contains(prefs, Red.ID()) + require.Contains(prefs, yellow.ID()) + + y := bag.Bag[ids.ID]{} + y.Add(yellow.ID()) + + updated, err := graph.RecordPoll(context.Background(), y) + require.NoError(err) + require.True(updated) + require.Equal(choices.Processing, Red.Status()) + require.Equal(choices.Accepted, yellow.Status()) + require.Equal(choices.Processing, Green.Status()) + require.Equal(choices.Rejected, purple.Status()) + + r := bag.Bag[ids.ID]{} + r.Add(Red.ID()) + + // Accepting Red rejects Green which was a dependency of purple. 
This + // results in purple being rejected for a second time. + updated, err = graph.RecordPoll(context.Background(), r) + require.NoError(err) + require.True(updated) + require.True(graph.Finalized()) + require.Equal(choices.Accepted, Red.Status()) + require.Equal(choices.Accepted, yellow.Status()) + require.Equal(choices.Rejected, Green.Status()) + require.Equal(choices.Rejected, purple.Status()) +} + func VacuouslyAcceptedTest(t *testing.T, factory Factory) { graph := factory.New() @@ -1370,7 +1414,7 @@ func VacuouslyAcceptedTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(purple); err != nil { + if err := graph.Add(context.Background(), purple); err != nil { t.Fatal(err) } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") @@ -1415,13 +1459,13 @@ func ConflictsTest(t *testing.T, factory Factory) { InputIDsV: []ids.ID{conflictInputID}, } - if err := graph.Add(purple); err != nil { + if err := graph.Add(context.Background(), purple); err != nil { t.Fatal(err) } else if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { t.Fatalf("Wrong number of conflicts") } else if !orangeConflicts.Contains(purple.IDV) { t.Fatalf("Conflicts does not contain the right transaction") - } else if err := graph.Add(orange); err != nil { + } else if err := graph.Add(context.Background(), orange); err != nil { t.Fatal(err) } else if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { t.Fatalf("Wrong number of conflicts") @@ -1472,18 +1516,18 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { virtuous.InputIDsV = append(virtuous.InputIDsV, input2) - if err := graph.Add(rogue1); err != nil { + if err := graph.Add(context.Background(), rogue1); err != nil { t.Fatal(err) - } else if err := graph.Add(rogue2); err != nil { + } else if err := graph.Add(context.Background(), rogue2); err != nil { t.Fatal(err) - } else if err := graph.Add(virtuous); err != nil { + 
} else if err := graph.Add(context.Background(), virtuous); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(rogue1.ID()) votes.Add(virtuous.ID()) - if updated, err := graph.RecordPoll(votes); err != nil { + if updated, err := graph.RecordPoll(context.Background(), votes); err != nil { t.Fatal(err) } else if updated { t.Fatalf("Shouldn't have updated the frontiers") @@ -1503,7 +1547,7 @@ func ErrorOnVacuouslyAcceptedTest(t *testing.T, factory Factory) { purple := &TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(7), - AcceptV: errors.New(""), + AcceptV: errTest, StatusV: choices.Processing, }} @@ -1522,7 +1566,7 @@ func ErrorOnVacuouslyAcceptedTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(purple); err == nil { + if err := graph.Add(context.Background(), purple); err == nil { t.Fatalf("Should have errored on acceptance") } } @@ -1532,7 +1576,7 @@ func ErrorOnAcceptedTest(t *testing.T, factory Factory) { purple := &TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(7), - AcceptV: errors.New(""), + AcceptV: errTest, StatusV: choices.Processing, }} purple.InputIDsV = append(purple.InputIDsV, ids.Empty.Prefix(4)) @@ -1552,13 +1596,13 @@ func ErrorOnAcceptedTest(t *testing.T, factory Factory) { t.Fatal(err) } - if err := graph.Add(purple); err != nil { + if err := graph.Add(context.Background(), purple); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(purple.ID()) - if _, err := graph.RecordPoll(votes); err == nil { + if _, err := graph.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on accepting an invalid tx") } } @@ -1566,20 +1610,20 @@ func ErrorOnAcceptedTest(t *testing.T, factory Factory) { func ErrorOnRejectingLowerConfidenceConflictTest(t *testing.T, factory Factory) { graph := factory.New() - X := ids.Empty.Prefix(4) + x := ids.Empty.Prefix(4) purple := &TestTx{TestDecidable: 
choices.TestDecidable{ IDV: ids.Empty.Prefix(7), StatusV: choices.Processing, }} - purple.InputIDsV = append(purple.InputIDsV, X) + purple.InputIDsV = append(purple.InputIDsV, x) pink := &TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(8), - RejectV: errors.New(""), + RejectV: errTest, StatusV: choices.Processing, }} - pink.InputIDsV = append(pink.InputIDsV, X) + pink.InputIDsV = append(pink.InputIDsV, x) params := sbcon.Parameters{ K: 1, @@ -1596,15 +1640,15 @@ func ErrorOnRejectingLowerConfidenceConflictTest(t *testing.T, factory Factory) t.Fatal(err) } - if err := graph.Add(purple); err != nil { + if err := graph.Add(context.Background(), purple); err != nil { t.Fatal(err) - } else if err := graph.Add(pink); err != nil { + } else if err := graph.Add(context.Background(), pink); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(purple.ID()) - if _, err := graph.RecordPoll(votes); err == nil { + if _, err := graph.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on rejecting an invalid tx") } } @@ -1612,20 +1656,20 @@ func ErrorOnRejectingLowerConfidenceConflictTest(t *testing.T, factory Factory) func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory) { graph := factory.New() - X := ids.Empty.Prefix(4) + x := ids.Empty.Prefix(4) purple := &TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(7), StatusV: choices.Processing, }} - purple.InputIDsV = append(purple.InputIDsV, X) + purple.InputIDsV = append(purple.InputIDsV, x) pink := &TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(8), - RejectV: errors.New(""), + RejectV: errTest, StatusV: choices.Processing, }} - pink.InputIDsV = append(pink.InputIDsV, X) + pink.InputIDsV = append(pink.InputIDsV, x) params := sbcon.Parameters{ K: 1, @@ -1642,15 +1686,15 @@ func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory) t.Fatal(err) } - if err := 
graph.Add(pink); err != nil { + if err := graph.Add(context.Background(), pink); err != nil { t.Fatal(err) - } else if err := graph.Add(purple); err != nil { + } else if err := graph.Add(context.Background(), purple); err != nil { t.Fatal(err) } - votes := ids.Bag{} + votes := bag.Bag[ids.ID]{} votes.Add(purple.ID()) - if _, err := graph.RecordPoll(votes); err == nil { + if _, err := graph.RecordPoll(context.Background(), votes); err == nil { t.Fatalf("Should have errored on rejecting an invalid tx") } } @@ -1671,31 +1715,31 @@ func UTXOCleanupTest(t *testing.T, factory Factory) { err := graph.Initialize(snow.DefaultConsensusContextTest(), params) require.NoError(t, err) - err = graph.Add(Red) + err = graph.Add(context.Background(), Red) require.NoError(t, err) - err = graph.Add(Green) + err = graph.Add(context.Background(), Green) require.NoError(t, err) - redVotes := ids.Bag{} + redVotes := bag.Bag[ids.ID]{} redVotes.Add(Red.ID()) - changed, err := graph.RecordPoll(redVotes) + changed, err := graph.RecordPoll(context.Background(), redVotes) require.NoError(t, err) require.False(t, changed, "shouldn't have accepted the red tx") - changed, err = graph.RecordPoll(redVotes) + changed, err = graph.RecordPoll(context.Background(), redVotes) require.NoError(t, err) require.True(t, changed, "should have accepted the red tx") require.Equal(t, choices.Accepted, Red.Status()) require.Equal(t, choices.Rejected, Green.Status()) - err = graph.Add(Blue) + err = graph.Add(context.Background(), Blue) require.NoError(t, err) - blueVotes := ids.Bag{} + blueVotes := bag.Bag[ids.ID]{} blueVotes.Add(Blue.ID()) - changed, err = graph.RecordPoll(blueVotes) + changed, err = graph.RecordPoll(context.Background(), blueVotes) require.NoError(t, err) require.True(t, changed, "should have accepted the blue tx") @@ -1718,13 +1762,13 @@ func RemoveVirtuousTest(t *testing.T, factory Factory) { err := graph.Initialize(snow.DefaultConsensusContextTest(), params) require.NoError(t, err) - err = 
graph.Add(Red) + err = graph.Add(context.Background(), Red) require.NoError(t, err) virtuous := graph.Virtuous() require.NotEmpty(t, virtuous, "a virtuous transaction was added but not tracked") - err = graph.Remove(Red.ID()) + err = graph.Remove(context.Background(), Red.ID()) require.NoError(t, err) virtuous = graph.Virtuous() @@ -1749,16 +1793,16 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatal(err) } - if err := graph.Add(Red); err != nil { + if err := graph.Add(context.Background(), Red); err != nil { t.Fatal(err) } - if err := graph.Add(Green); err != nil { + if err := graph.Add(context.Background(), Green); err != nil { t.Fatal(err) } - if err := graph.Add(Blue); err != nil { + if err := graph.Add(context.Background(), Blue); err != nil { t.Fatal(err) } - if err := graph.Add(Alpha); err != nil { + if err := graph.Add(context.Background(), Alpha); err != nil { t.Fatal(err) } @@ -1772,15 +1816,15 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Finalized too early") } - rb := ids.Bag{} + rb := bag.Bag[ids.ID]{} rb.SetThreshold(2) rb.AddCount(Red.ID(), 2) rb.AddCount(Blue.ID(), 2) - if changed, err := graph.RecordPoll(rb); err != nil { + if changed, err := graph.RecordPoll(context.Background(), rb); err != nil { t.Fatal(err) } else if !changed { t.Fatalf("Should have caused the frontiers to recalculate") - } else if err := graph.Add(Blue); err != nil { + } else if err := graph.Add(context.Background(), Blue); err != nil { t.Fatal(err) } @@ -1808,11 +1852,11 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Finalized too early") } - ga := ids.Bag{} + ga := bag.Bag[ids.ID]{} ga.SetThreshold(2) ga.AddCount(Green.ID(), 2) ga.AddCount(Alpha.ID(), 2) - if changed, err := graph.RecordPoll(ga); err != nil { + if changed, err := graph.RecordPoll(context.Background(), ga); err != nil { t.Fatal(err) } else if changed { t.Fatalf("Shouldn't have caused the frontiers to recalculate") @@ -1842,8 
+1886,8 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Finalized too early") } - empty := ids.Bag{} - if changed, err := graph.RecordPoll(empty); err != nil { + empty := bag.Bag[ids.ID]{} + if changed, err := graph.RecordPoll(context.Background(), empty); err != nil { t.Fatal(err) } else if changed { t.Fatalf("Shouldn't have caused the frontiers to recalculate") @@ -1873,7 +1917,7 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Finalized too early") } - if changed, err := graph.RecordPoll(ga); err != nil { + if changed, err := graph.RecordPoll(context.Background(), ga); err != nil { t.Fatal(err) } else if !changed { t.Fatalf("Should have caused the frontiers to recalculate") @@ -1903,7 +1947,7 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Finalized too early") } - if changed, err := graph.RecordPoll(ga); err != nil { + if changed, err := graph.RecordPoll(context.Background(), ga); err != nil { t.Fatal(err) } else if !changed { t.Fatalf("Should have caused the frontiers to recalculate") @@ -1932,7 +1976,7 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("%s should have been rejected", Blue.ID()) } - if changed, err := graph.RecordPoll(rb); err != nil { + if changed, err := graph.RecordPoll(context.Background(), rb); err != nil { t.Fatal(err) } else if changed { t.Fatalf("Shouldn't have caused the frontiers to recalculate") diff --git a/avalanchego/snow/consensus/snowstorm/directed.go b/avalanchego/snow/consensus/snowstorm/directed.go index 658f22d4..b88aea68 100644 --- a/avalanchego/snow/consensus/snowstorm/directed.go +++ b/avalanchego/snow/consensus/snowstorm/directed.go @@ -1,10 +1,12 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm import ( + "context" "fmt" + "strings" "github.com/prometheus/client_golang/prometheus" @@ -15,20 +17,24 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/metrics" "github.com/ava-labs/avalanchego/snow/events" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" sbcon "github.com/ava-labs/avalanchego/snow/consensus/snowball" ) var ( - _ Factory = &DirectedFactory{} - _ Consensus = &Directed{} + _ Factory = (*DirectedFactory)(nil) + _ Consensus = (*Directed)(nil) ) // DirectedFactory implements Factory by returning a directed struct type DirectedFactory struct{} -func (DirectedFactory) New() Consensus { return &Directed{} } +func (DirectedFactory) New() Consensus { + return &Directed{} +} // Directed is an implementation of a multi-color, non-transitive, snowball // instance @@ -46,13 +52,13 @@ type Directed struct { params sbcon.Parameters // each element of preferences is the ID of a transaction that is preferred - preferences ids.Set + preferences set.Set[ids.ID] // each element of virtuous is the ID of a transaction that is virtuous - virtuous ids.Set + virtuous set.Set[ids.ID] // each element is in the virtuous set and is still being voted on - virtuousVoting ids.Set + virtuousVoting set.Set[ids.ID] // number of times RecordPoll has been called pollNumber uint64 @@ -72,10 +78,10 @@ type Directed struct { // Key: UTXO ID // Value: IDs of transactions that consume the UTXO specified in the key - utxos map[ids.ID]ids.Set + utxos map[ids.ID]set.Set[ids.ID] // map transaction ID to the set of whitelisted transaction IDs. 
- whitelists map[ids.ID]ids.Set + whitelists map[ids.ID]set.Set[ids.ID] } type directedTx struct { @@ -87,11 +93,11 @@ type directedTx struct { // ins is the set of txIDs that this tx conflicts with that are less // preferred than this tx - ins ids.Set + ins set.Set[ids.ID] // outs is the set of txIDs that this tx conflicts with that are more // preferred than this tx - outs ids.Set + outs set.Set[ids.ID] // tx is the actual transaction this node represents tx Tx @@ -105,17 +111,17 @@ func (dg *Directed) Initialize( dg.params = params var err error - dg.Polls, err = metrics.NewPolls("", ctx.Registerer) + dg.Polls, err = metrics.NewPolls("", ctx.AvalancheRegisterer) if err != nil { return fmt.Errorf("failed to create poll metrics: %w", err) } - dg.Latency, err = metrics.NewLatency("txs", "transaction(s)", ctx.Log, "", ctx.Registerer) + dg.Latency, err = metrics.NewLatency("txs", "transaction(s)", ctx.Log, "", ctx.AvalancheRegisterer) if err != nil { return fmt.Errorf("failed to create latency metrics: %w", err) } - dg.whitelistTxLatency, err = metrics.NewLatency("whitelist_tx", "whitelist transaction(s)", ctx.Log, "", ctx.Registerer) + dg.whitelistTxLatency, err = metrics.NewLatency("whitelist_tx", "whitelist transaction(s)", ctx.Log, "", ctx.AvalancheRegisterer) if err != nil { return fmt.Errorf("failed to create whitelist tx metrics: %w", err) } @@ -124,7 +130,7 @@ func (dg *Directed) Initialize( Name: "virtuous_tx_processing", Help: "Number of currently processing virtuous transaction(s)", }) - err = ctx.Registerer.Register(dg.numVirtuousTxs) + err = ctx.AvalancheRegisterer.Register(dg.numVirtuousTxs) if err != nil { return fmt.Errorf("failed to create virtuous tx metrics: %w", err) } @@ -133,25 +139,29 @@ func (dg *Directed) Initialize( Name: "rogue_tx_processing", Help: "Number of currently processing rogue transaction(s)", }) - err = ctx.Registerer.Register(dg.numRogueTxs) + err = ctx.AvalancheRegisterer.Register(dg.numRogueTxs) if err != nil { return 
fmt.Errorf("failed to create rogue tx metrics: %w", err) } dg.txs = make(map[ids.ID]*directedTx) - dg.utxos = make(map[ids.ID]ids.Set) - dg.whitelists = make(map[ids.ID]ids.Set) + dg.utxos = make(map[ids.ID]set.Set[ids.ID]) + dg.whitelists = make(map[ids.ID]set.Set[ids.ID]) return params.Verify() } -func (dg *Directed) Parameters() sbcon.Parameters { return dg.params } - -func (dg *Directed) Virtuous() ids.Set { return dg.virtuous } +func (dg *Directed) Virtuous() set.Set[ids.ID] { + return dg.virtuous +} -func (dg *Directed) Preferences() ids.Set { return dg.preferences } +func (dg *Directed) Preferences() set.Set[ids.ID] { + return dg.preferences +} -func (dg *Directed) VirtuousVoting() ids.Set { return dg.virtuousVoting } +func (dg *Directed) VirtuousVoting() set.Set[ids.ID] { + return dg.virtuousVoting +} func (dg *Directed) Quiesce() bool { numVirtuous := dg.virtuousVoting.Len() @@ -170,15 +180,29 @@ func (dg *Directed) Finalized() bool { } // HealthCheck returns information about the consensus health. 
-func (dg *Directed) HealthCheck() (interface{}, error) { +func (dg *Directed) HealthCheck(context.Context) (interface{}, error) { numOutstandingTxs := dg.Latency.NumProcessing() isOutstandingTxs := numOutstandingTxs <= dg.params.MaxOutstandingItems + healthy := isOutstandingTxs details := map[string]interface{}{ "outstandingTransactions": numOutstandingTxs, } - if !isOutstandingTxs { - errorReason := fmt.Sprintf("number of outstanding txs %d > %d", numOutstandingTxs, dg.params.MaxOutstandingItems) - return details, fmt.Errorf("snowstorm consensus is not healthy reason: %s", errorReason) + + // check for long running transactions + oldestProcessingDuration := dg.Latency.MeasureAndGetOldestDuration() + processingTimeOK := oldestProcessingDuration <= dg.params.MaxItemProcessingTime + healthy = healthy && processingTimeOK + details["longestRunningTransaction"] = oldestProcessingDuration.String() + + if !healthy { + var errorReasons []string + if !isOutstandingTxs { + errorReasons = append(errorReasons, fmt.Sprintf("number outstanding transactions %d > %d", numOutstandingTxs, dg.params.MaxOutstandingItems)) + } + if !processingTimeOK { + errorReasons = append(errorReasons, fmt.Sprintf("transaction processing time %s > %s", oldestProcessingDuration, dg.params.MaxItemProcessingTime)) + } + return details, fmt.Errorf("snowstorm consensus is not healthy reason: %s", strings.Join(errorReasons, ", ")) } return details, nil } @@ -186,7 +210,7 @@ func (dg *Directed) HealthCheck() (interface{}, error) { // shouldVote returns if the provided tx should be voted on to determine if it // can be accepted. If the tx can be vacuously accepted, the tx will be accepted // and will therefore not be valid to be voted on. -func (dg *Directed) shouldVote(tx Tx) (bool, error) { +func (dg *Directed) shouldVote(ctx context.Context, tx Tx) (bool, error) { if dg.Issued(tx) { // If the tx was previously inserted, it shouldn't be re-inserted. 
return false, nil @@ -215,20 +239,22 @@ func (dg *Directed) shouldVote(tx Tx) (bool, error) { // Notify those listening for accepted txs if the transaction has a binary // format. - if bytes := tx.Bytes(); len(bytes) > 0 { - // Note that DecisionAcceptor.Accept must be called before tx.Accept to - // honor Acceptor.Accept's invariant. - if err := dg.ctx.DecisionAcceptor.Accept(dg.ctx, txID, bytes); err != nil { + txBytes := tx.Bytes() + txBytesLen := len(txBytes) + if txBytesLen > 0 { + // Note that TxAcceptor.Accept must be called before tx.Accept to honor + // Acceptor.Accept's invariant. + if err := dg.ctx.TxAcceptor.Accept(dg.ctx, txID, txBytes); err != nil { return false, err } } - if err := tx.Accept(); err != nil { + if err := tx.Accept(ctx); err != nil { return false, err } // Notify the metrics that this transaction was accepted. - dg.Latency.Accepted(txID, dg.pollNumber) + dg.Latency.Accepted(txID, dg.pollNumber, txBytesLen) return false, nil } @@ -254,8 +280,8 @@ func (dg *Directed) IsVirtuous(tx Tx) bool { return true } -func (dg *Directed) Conflicts(tx Tx) ids.Set { - var conflicts ids.Set +func (dg *Directed) Conflicts(tx Tx) set.Set[ids.ID] { + var conflicts set.Set[ids.ID] if node, exists := dg.txs[tx.ID()]; exists { // If the tx is currently processing, the conflicting txs are just the // union of the inbound conflicts and the outbound conflicts. 
@@ -278,8 +304,8 @@ func (dg *Directed) Conflicts(tx Tx) ids.Set { return conflicts } -func (dg *Directed) Add(tx Tx) error { - if shouldVote, err := dg.shouldVote(tx); !shouldVote || err != nil { +func (dg *Directed) Add(ctx context.Context, tx Tx) error { + if shouldVote, err := dg.shouldVote(ctx, tx); !shouldVote || err != nil { return err } @@ -290,7 +316,11 @@ func (dg *Directed) Add(tx Tx) error { for otherID, otherWhitelist := range dg.whitelists { // [txID] is not whitelisted by [otherWhitelist] if !otherWhitelist.Contains(txID) { - otherNode := dg.txs[otherID] + otherNode, exists := dg.txs[otherID] + if !exists { + // This is not expected to happen. + return fmt.Errorf("whitelist tx %s is not in the graph", otherID) + } // The [otherNode] should be preferred over [txNode] because a newly // issued transaction's confidence is always 0 and ties are broken @@ -299,7 +329,7 @@ func (dg *Directed) Add(tx Tx) error { } } if tx.HasWhitelist() { - whitelist, err := tx.Whitelist() + whitelist, err := tx.Whitelist(ctx) if err != nil { return err } @@ -335,7 +365,11 @@ func (dg *Directed) Add(tx Tx) error { // Update txs conflicting with tx to account for its issuance for conflictIDKey := range spenders { // Get the node that contains this conflicting tx - conflict := dg.txs[conflictIDKey] + conflict, exists := dg.txs[conflictIDKey] + if !exists { + // This is not expected to happen. + return fmt.Errorf("spender tx %s is not in the graph", conflictIDKey) + } // Add all the txs that spend this UTXO to this txs conflicts. These // conflicting txs must be preferred over this tx. We know this @@ -367,7 +401,7 @@ func (dg *Directed) Add(tx Tx) error { // If a tx that this tx depends on is rejected, this tx should also be // rejected. 
- err := dg.registerRejector(tx) + err := dg.registerRejector(ctx, tx) numVirtuous := dg.virtuous.Len() dg.numVirtuousTxs.Set(float64(numVirtuous)) @@ -402,8 +436,8 @@ func (dg *Directed) addEdge(src, dst *directedTx) { dst.ins.Add(srcID) } -func (dg *Directed) Remove(txID ids.ID) error { - err := dg.reject(ids.Set{ +func (dg *Directed) Remove(ctx context.Context, txID ids.ID) error { + err := dg.reject(ctx, set.Set[ids.ID]{ txID: struct{}{}, }) @@ -425,7 +459,7 @@ func (dg *Directed) Issued(tx Tx) bool { return ok } -func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { +func (dg *Directed) RecordPoll(ctx context.Context, votes bag.Bag[ids.ID]) (bool, error) { // Increase the vote ID. This is only updated here and is used to reset the // confidence values of transactions lazily. // This is also used to track the number of polls required to accept/reject @@ -461,7 +495,7 @@ func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { // registered once. txNode.pendingAccept = true - if err := dg.registerAcceptor(txNode.tx); err != nil { + if err := dg.registerAcceptor(ctx, txNode.tx); err != nil { return false, err } if dg.errs.Errored() { @@ -472,7 +506,11 @@ func (dg *Directed) RecordPoll(votes ids.Bag) (bool, error) { if txNode.tx.Status() != choices.Accepted { // If this tx wasn't accepted, then this instance is only changed if // preferences changed. - changed = dg.redirectEdges(txNode) || changed + edgeChanged, err := dg.redirectEdges(txNode) + if err != nil { + return false, err + } + changed = edgeChanged || changed } else { // By accepting a tx, the state of this instance has changed. changed = true @@ -506,8 +544,12 @@ func (dg *Directed) String() string { } // accept the named txID and remove it from the graph -func (dg *Directed) accept(txID ids.ID) error { - txNode := dg.txs[txID] +func (dg *Directed) accept(ctx context.Context, txID ids.ID) error { + txNode, exists := dg.txs[txID] + if !exists { + // This is not expected to happen. 
+ return fmt.Errorf("accepted tx %s is not in the graph", txID) + } // We are accepting the tx, so we should remove the node from the graph. delete(dg.txs, txID) delete(dg.whitelists, txID) @@ -524,21 +566,29 @@ func (dg *Directed) accept(txID ids.ID) error { dg.preferences.Remove(txID) // Reject all the txs that conflicted with this tx. - if err := dg.reject(txNode.ins); err != nil { + if err := dg.reject(ctx, txNode.ins); err != nil { return err } // While it is typically true that a tx that is being accepted is preferred, // it is possible for this to not be the case. - if err := dg.reject(txNode.outs); err != nil { + if err := dg.reject(ctx, txNode.outs); err != nil { return err } - return dg.acceptTx(txNode.tx) + return dg.acceptTx(ctx, txNode.tx) } // reject all the named txIDs and remove them from the graph -func (dg *Directed) reject(conflictIDs ids.Set) error { +func (dg *Directed) reject(ctx context.Context, conflictIDs set.Set[ids.ID]) error { for conflictKey := range conflictIDs { - conflict := dg.txs[conflictKey] + conflict, exists := dg.txs[conflictKey] + if !exists { + // Transaction dependencies are cleaned up when the dependency is + // either accepted or rejected. However, a transaction may have + // already been rejected due to a conflict of its own. In this case, + // the transaction has already been cleaned up from memory and there + // is nothing more to be done. + continue + } // This tx is no longer an option for consuming the UTXOs from its // inputs, so we should remove their reference to this tx. 
for _, inputID := range conflict.tx.InputIDs() { @@ -576,7 +626,7 @@ func (dg *Directed) reject(conflictIDs ids.Set) error { dg.removeConflict(conflictKey, conflict.ins) dg.removeConflict(conflictKey, conflict.outs) - if err := dg.rejectTx(conflict.tx); err != nil { + if err := dg.rejectTx(ctx, conflict.tx); err != nil { return err } } @@ -585,12 +635,16 @@ func (dg *Directed) reject(conflictIDs ids.Set) error { // redirectEdges attempts to turn outbound edges into inbound edges if the // preferences have changed -func (dg *Directed) redirectEdges(tx *directedTx) bool { +func (dg *Directed) redirectEdges(tx *directedTx) (bool, error) { changed := false for conflictID := range tx.outs { - changed = dg.redirectEdge(tx, conflictID) || changed + edgeChanged, err := dg.redirectEdge(tx, conflictID) + if err != nil { + return false, err + } + changed = edgeChanged || changed } - return changed + return changed, nil } // Fixes the direction of the edge between [txNode] and [conflictID] if needed. @@ -601,10 +655,15 @@ func (dg *Directed) redirectEdges(tx *directedTx) bool { // edge will be set to [conflictID] -> [txNode]. // // Returns true if the direction was switched. -func (dg *Directed) redirectEdge(txNode *directedTx, conflictID ids.ID) bool { - conflict := dg.txs[conflictID] +func (dg *Directed) redirectEdge(txNode *directedTx, conflictID ids.ID) (bool, error) { + conflict, exists := dg.txs[conflictID] + if !exists { + // This is not expected to happen. 
+ return false, fmt.Errorf("redirected tx %s is not in the graph", conflictID) + } + if txNode.numSuccessfulPolls <= conflict.numSuccessfulPolls { - return false + return false, nil } // Because this tx has a higher preference than the conflicting tx, we must @@ -623,10 +682,10 @@ func (dg *Directed) redirectEdge(txNode *directedTx, conflictID ids.ID) bool { // If this tx doesn't have any outbound edges, it's preferred dg.preferences.Add(nodeID) } - return true + return true, nil } -func (dg *Directed) removeConflict(txIDKey ids.ID, neighborIDs ids.Set) { +func (dg *Directed) removeConflict(txIDKey ids.ID, neighborIDs set.Set[ids.ID]) { for neighborID := range neighborIDs { neighbor, exists := dg.txs[neighborID] if !exists { @@ -648,7 +707,7 @@ func (dg *Directed) removeConflict(txIDKey ids.ID, neighborIDs ids.Set) { } // accept the provided tx. -func (dg *Directed) acceptTx(tx Tx) error { +func (dg *Directed) acceptTx(ctx context.Context, tx Tx) error { txID := tx.ID() dg.ctx.Log.Trace("accepting transaction", zap.Stringer("txID", txID), @@ -656,15 +715,17 @@ func (dg *Directed) acceptTx(tx Tx) error { // Notify those listening that this tx has been accepted if the transaction // has a binary format. - if bytes := tx.Bytes(); len(bytes) > 0 { - // Note that DecisionAcceptor.Accept must be called before tx.Accept to - // honor Acceptor.Accept's invariant. - if err := dg.ctx.DecisionAcceptor.Accept(dg.ctx, txID, bytes); err != nil { + txBytes := tx.Bytes() + txBytesLen := len(txBytes) + if txBytesLen > 0 { + // Note that TxAcceptor.Accept must be called before tx.Accept to honor + // Acceptor.Accept's invariant. 
+ if err := dg.ctx.TxAcceptor.Accept(dg.ctx, txID, txBytes); err != nil { return err } } - if err := tx.Accept(); err != nil { + if err := tx.Accept(ctx); err != nil { return err } @@ -673,24 +734,24 @@ func (dg *Directed) acceptTx(tx Tx) error { dg.ctx.Log.Info("whitelist tx accepted", zap.Stringer("txID", txID), ) - dg.whitelistTxLatency.Accepted(txID, dg.pollNumber) + dg.whitelistTxLatency.Accepted(txID, dg.pollNumber, txBytesLen) } else { // just regular tx - dg.Latency.Accepted(txID, dg.pollNumber) + dg.Latency.Accepted(txID, dg.pollNumber, txBytesLen) } // If there is a tx that was accepted pending on this tx, the ancestor // should be notified that it doesn't need to block on this tx anymore. - dg.pendingAccept.Fulfill(txID) + dg.pendingAccept.Fulfill(ctx, txID) // If there is a tx that was issued pending on this tx, the ancestor tx // doesn't need to be rejected because of this tx. - dg.pendingReject.Abandon(txID) + dg.pendingReject.Abandon(ctx, txID) return nil } // reject the provided tx. -func (dg *Directed) rejectTx(tx Tx) error { +func (dg *Directed) rejectTx(ctx context.Context, tx Tx) error { txID := tx.ID() dg.ctx.Log.Trace("rejecting transaction", zap.String("reason", "conflicting acceptance"), @@ -699,7 +760,7 @@ func (dg *Directed) rejectTx(tx Tx) error { // Reject is called before notifying the IPC so that rejections that // cause fatal errors aren't sent to an IPC peer. - if err := tx.Reject(); err != nil { + if err := tx.Reject(ctx); err != nil { return err } @@ -708,24 +769,24 @@ func (dg *Directed) rejectTx(tx Tx) error { dg.ctx.Log.Info("whitelist tx rejected", zap.Stringer("txID", txID), ) - dg.whitelistTxLatency.Rejected(txID, dg.pollNumber) + dg.whitelistTxLatency.Rejected(txID, dg.pollNumber, len(tx.Bytes())) } else { - dg.Latency.Rejected(txID, dg.pollNumber) + dg.Latency.Rejected(txID, dg.pollNumber, len(tx.Bytes())) } // If there is a tx that was accepted pending on this tx, the ancestor tx // can't be accepted. 
- dg.pendingAccept.Abandon(txID) + dg.pendingAccept.Abandon(ctx, txID) // If there is a tx that was issued pending on this tx, the ancestor tx must // be rejected. - dg.pendingReject.Fulfill(txID) + dg.pendingReject.Fulfill(ctx, txID) return nil } // registerAcceptor attempts to accept this tx once all its dependencies are // accepted. If all the dependencies are already accepted, this function will // immediately accept the tx. -func (dg *Directed) registerAcceptor(tx Tx) error { +func (dg *Directed) registerAcceptor(ctx context.Context, tx Tx) error { txID := tx.ID() toAccept := &acceptor{ @@ -752,12 +813,12 @@ func (dg *Directed) registerAcceptor(tx Tx) error { // This ensures that virtuous txs built on top of rogue txs don't force the // node to treat the rogue tx as virtuous. dg.virtuousVoting.Remove(txID) - dg.pendingAccept.Register(toAccept) + dg.pendingAccept.Register(ctx, toAccept) return nil } // registerRejector rejects this tx if any of its dependencies are rejected. -func (dg *Directed) registerRejector(tx Tx) error { +func (dg *Directed) registerRejector(ctx context.Context, tx Tx) error { // If a tx that this tx depends on is rejected, this tx should also be // rejected. toReject := &rejector{ @@ -782,6 +843,6 @@ func (dg *Directed) registerRejector(tx Tx) error { } // Register these dependencies - dg.pendingReject.Register(toReject) + dg.pendingReject.Register(ctx, toReject) return nil } diff --git a/avalanchego/snow/consensus/snowstorm/directed_test.go b/avalanchego/snow/consensus/snowstorm/directed_test.go index 53acc121..b94f4f69 100644 --- a/avalanchego/snow/consensus/snowstorm/directed_test.go +++ b/avalanchego/snow/consensus/snowstorm/directed_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm @@ -7,4 +7,6 @@ import ( "testing" ) -func TestDirectedConsensus(t *testing.T) { runConsensusTests(t, DirectedFactory{}, "DG") } +func TestDirectedConsensus(t *testing.T) { + runConsensusTests(t, DirectedFactory{}, "DG") +} diff --git a/avalanchego/snow/consensus/snowstorm/factory.go b/avalanchego/snow/consensus/snowstorm/factory.go index 6d70e181..5dfe91b0 100644 --- a/avalanchego/snow/consensus/snowstorm/factory.go +++ b/avalanchego/snow/consensus/snowstorm/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowstorm diff --git a/avalanchego/snow/consensus/snowstorm/network_test.go b/avalanchego/snow/consensus/snowstorm/network_test.go index 248721ae..5ac31a00 100644 --- a/avalanchego/snow/consensus/snowstorm/network_test.go +++ b/avalanchego/snow/consensus/snowstorm/network_test.go @@ -1,12 +1,15 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/sampler" sbcon "github.com/ava-labs/avalanchego/snow/consensus/snowball" @@ -109,7 +112,7 @@ func (n *Network) AddNode(cg Consensus) error { } txs[newTx.ID()] = newTx - if err := cg.Add(newTx); err != nil { + if err := cg.Add(context.Background(), newTx); err != nil { return err } } @@ -138,7 +141,7 @@ func (n *Network) Round() error { _ = s.Initialize(uint64(len(n.nodes))) indices, _ := s.Sample(n.params.K) - sampledColors := ids.Bag{} + sampledColors := bag.Bag[ids.ID]{} sampledColors.SetThreshold(n.params.Alpha) for _, index := range indices { peer := n.nodes[int(index)] @@ -155,7 +158,7 @@ func (n *Network) Round() error { } } - if _, err := running.RecordPoll(sampledColors); err != nil { + if _, err := running.RecordPoll(context.Background(), sampledColors); err != nil { return err } diff --git a/avalanchego/snow/consensus/snowstorm/rejector.go b/avalanchego/snow/consensus/snowstorm/rejector.go index 99dc0379..a7e03ceb 100644 --- a/avalanchego/snow/consensus/snowstorm/rejector.go +++ b/avalanchego/snow/consensus/snowstorm/rejector.go @@ -1,35 +1,41 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/events" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" ) -var _ events.Blockable = &rejector{} +var _ events.Blockable = (*rejector)(nil) type rejector struct { g *Directed errs *wrappers.Errs - deps ids.Set + deps set.Set[ids.ID] rejected bool // true if the tx has been rejected txID ids.ID } -func (r *rejector) Dependencies() ids.Set { return r.deps } +func (r *rejector) Dependencies() set.Set[ids.ID] { + return r.deps +} -func (r *rejector) Fulfill(ids.ID) { +func (r *rejector) Fulfill(ctx context.Context, _ ids.ID) { if r.rejected || r.errs.Errored() { return } r.rejected = true - asSet := ids.NewSet(1) + asSet := set.NewSet[ids.ID](1) asSet.Add(r.txID) - r.errs.Add(r.g.reject(asSet)) + r.errs.Add(r.g.reject(ctx, asSet)) } -func (*rejector) Abandon(ids.ID) {} -func (*rejector) Update() {} +func (*rejector) Abandon(context.Context, ids.ID) {} + +func (*rejector) Update(context.Context) {} diff --git a/avalanchego/snow/consensus/snowstorm/snowball.go b/avalanchego/snow/consensus/snowstorm/snowball.go index 76c720be..e7fa9d09 100644 --- a/avalanchego/snow/consensus/snowstorm/snowball.go +++ b/avalanchego/snow/consensus/snowstorm/snowball.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowstorm diff --git a/avalanchego/snow/consensus/snowstorm/stringer.go b/avalanchego/snow/consensus/snowstorm/stringer.go index 273d1880..b60162e4 100644 --- a/avalanchego/snow/consensus/snowstorm/stringer.go +++ b/avalanchego/snow/consensus/snowstorm/stringer.go @@ -1,18 +1,19 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm import ( - "bytes" "fmt" - "sort" "strings" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/formatting" ) +var _ utils.Sortable[*snowballNode] = (*snowballNode)(nil) + type snowballNode struct { txID ids.ID numSuccessfulPolls int @@ -26,23 +27,15 @@ func (sb *snowballNode) String() string { sb.confidence) } -type sortSnowballNodeData []*snowballNode - -func (sb sortSnowballNodeData) Less(i, j int) bool { - return bytes.Compare(sb[i].txID[:], sb[j].txID[:]) == -1 -} -func (sb sortSnowballNodeData) Len() int { return len(sb) } -func (sb sortSnowballNodeData) Swap(i, j int) { sb[j], sb[i] = sb[i], sb[j] } - -func sortSnowballNodes(nodes []*snowballNode) { - sort.Sort(sortSnowballNodeData(nodes)) +func (sb *snowballNode) Less(other *snowballNode) bool { + return sb.txID.Less(other.txID) } // consensusString converts a list of snowball nodes into a human-readable // string. func consensusString(nodes []*snowballNode) string { // Sort the nodes so that the string representation is canonical - sortSnowballNodes(nodes) + utils.Sort(nodes) sb := strings.Builder{} sb.WriteString("DG(") diff --git a/avalanchego/snow/consensus/snowstorm/stringer_test.go b/avalanchego/snow/consensus/snowstorm/stringer_test.go new file mode 100644 index 00000000..44c7d2aa --- /dev/null +++ b/avalanchego/snow/consensus/snowstorm/stringer_test.go @@ -0,0 +1,52 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowstorm + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestSnowballNodeLess(t *testing.T) { + require := require.New(t) + + node1 := &snowballNode{ + txID: ids.ID{}, + } + node2 := &snowballNode{ + txID: ids.ID{}, + } + require.False(node1.Less(node2)) + require.False(node2.Less(node1)) + + node1 = &snowballNode{ + txID: ids.ID{1}, + } + node2 = &snowballNode{ + txID: ids.ID{}, + } + require.False(node1.Less(node2)) + require.True(node2.Less(node1)) + + node1 = &snowballNode{ + txID: ids.ID{1}, + } + node2 = &snowballNode{ + txID: ids.ID{1}, + } + require.False(node1.Less(node2)) + require.False(node2.Less(node1)) + + node1 = &snowballNode{ + txID: ids.ID{1}, + } + node2 = &snowballNode{ + txID: ids.ID{1, 2}, + } + require.True(node1.Less(node2)) + require.False(node2.Less(node1)) +} diff --git a/avalanchego/snow/consensus/snowstorm/test_tx.go b/avalanchego/snow/consensus/snowstorm/test_tx.go index a62d99e5..477f7438 100644 --- a/avalanchego/snow/consensus/snowstorm/test_tx.go +++ b/avalanchego/snow/consensus/snowstorm/test_tx.go @@ -1,14 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils/set" ) -var _ Tx = &TestTx{} +var _ Tx = (*TestTx)(nil) // TestTx is a useful test tx type TestTx struct { @@ -18,15 +21,32 @@ type TestTx struct { DependenciesErrV error InputIDsV []ids.ID HasWhitelistV bool - WhitelistV ids.Set + WhitelistV set.Set[ids.ID] WhitelistErrV error VerifyV error BytesV []byte } -func (t *TestTx) Dependencies() ([]Tx, error) { return t.DependenciesV, t.DependenciesErrV } -func (t *TestTx) InputIDs() []ids.ID { return t.InputIDsV } -func (t *TestTx) HasWhitelist() bool { return t.HasWhitelistV } -func (t *TestTx) Whitelist() (ids.Set, error) { return t.WhitelistV, t.WhitelistErrV } -func (t *TestTx) Verify() error { return t.VerifyV } -func (t *TestTx) Bytes() []byte { return t.BytesV } +func (t *TestTx) Dependencies() ([]Tx, error) { + return t.DependenciesV, t.DependenciesErrV +} + +func (t *TestTx) InputIDs() []ids.ID { + return t.InputIDsV +} + +func (t *TestTx) HasWhitelist() bool { + return t.HasWhitelistV +} + +func (t *TestTx) Whitelist(context.Context) (set.Set[ids.ID], error) { + return t.WhitelistV, t.WhitelistErrV +} + +func (t *TestTx) Verify(context.Context) error { + return t.VerifyV +} + +func (t *TestTx) Bytes() []byte { + return t.BytesV +} diff --git a/avalanchego/snow/consensus/snowstorm/tx.go b/avalanchego/snow/consensus/snowstorm/tx.go index 150b7911..5a31181c 100644 --- a/avalanchego/snow/consensus/snowstorm/tx.go +++ b/avalanchego/snow/consensus/snowstorm/tx.go @@ -1,11 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowstorm import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils/set" ) // Whitelister defines the interface for specifying whitelisted operations. @@ -17,7 +20,7 @@ type Whitelister interface { // Whitelist returns the set of transaction IDs that are explicitly // whitelisted. Transactions that are not explicitly whitelisted are // considered conflicting. - Whitelist() (ids.Set, error) + Whitelist(context.Context) (set.Set[ids.ID], error) } // Tx consumes state. @@ -46,7 +49,7 @@ type Tx interface { // // It is guaranteed that when Verify is called, all the dependencies of // this transaction have already been successfully verified. - Verify() error + Verify(context.Context) error // Bytes returns the binary representation of this transaction. // diff --git a/avalanchego/snow/context.go b/avalanchego/snow/context.go index 311f5af6..c89c2dd0 100644 --- a/avalanchego/snow/context.go +++ b/avalanchego/snow/context.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snow import ( - "crypto" - "crypto/x509" "sync" "github.com/prometheus/client_golang/prometheus" @@ -16,13 +14,11 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" ) -type SubnetLookup interface { - SubnetID(chainID ids.ID) (ids.ID, error) -} - // ContextInitializable represents an object that can be initialized // given a *Context object type ContextInitializable interface { @@ -39,8 +35,10 @@ type Context struct { SubnetID ids.ID ChainID ids.ID NodeID ids.NodeID + PublicKey *bls.PublicKey XChainID ids.ID + CChainID ids.ID AVAXAssetID ids.ID Log logging.Logger @@ -48,13 +46,14 @@ type Context struct { Keystore keystore.BlockchainKeystore SharedMemory atomic.SharedMemory BCLookup ids.AliaserReader - SNLookup SubnetLookup Metrics metrics.OptionalGatherer + WarpSigner warp.Signer + // snowman++ attributes - ValidatorState validators.State // interface for P-Chain validators - StakingLeafSigner crypto.Signer // block signer - StakingCertLeaf *x509.Certificate // block certificate + ValidatorState validators.State // interface for P-Chain validators + // Chain-specific directory where arbitrary data can be written + ChainDataDir string } // Expose gatherer interface for unit testing. @@ -66,75 +65,64 @@ type Registerer interface { type ConsensusContext struct { *Context + // Registers all common and snowman consensus metrics. Unlike the avalanche + // consensus engine metrics, we do not prefix the name with the engine name, + // as snowman is used for all chains by default. Registerer Registerer + // Only used to register Avalanche consensus metrics. Previously, all + // metrics were prefixed with "avalanche_{chainID}_". 
Now we add avalanche + // to the prefix, "avalanche_{chainID}_avalanche_", to differentiate + // consensus operations after the DAG linearization. + AvalancheRegisterer Registerer - // DecisionAcceptor is the callback that will be fired whenever a VM is - // notified that their object, either a block in snowman or a transaction - // in avalanche, was accepted. - DecisionAcceptor Acceptor - - // ConsensusAcceptor is the callback that will be fired whenever a - // container, either a block in snowman or a vertex in avalanche, was - // accepted. - ConsensusAcceptor Acceptor + // BlockAcceptor is the callback that will be fired whenever a VM is + // notified that their block was accepted. + BlockAcceptor Acceptor - // Non-zero iff this chain bootstrapped. - state utils.AtomicInterface + // TxAcceptor is the callback that will be fired whenever a VM is notified + // that their transaction was accepted. + TxAcceptor Acceptor - // Non-zero iff this chain is executing transactions. - executing utils.AtomicBool - - // Indicates this chain is available to only validators. - validatorOnly utils.AtomicBool -} - -func (ctx *ConsensusContext) SetState(newState State) { - ctx.state.SetValue(newState) -} - -func (ctx *ConsensusContext) GetState() State { - stateInf := ctx.state.GetValue() - return stateInf.(State) -} - -// IsExecuting returns true iff this chain is still executing transactions. -func (ctx *ConsensusContext) IsExecuting() bool { - return ctx.executing.GetValue() -} + // VertexAcceptor is the callback that will be fired whenever a vertex was + // accepted. + VertexAcceptor Acceptor -// Executing marks this chain as executing or not. -// Set to "true" if there's an ongoing transaction. -func (ctx *ConsensusContext) Executing(b bool) { - ctx.executing.SetValue(b) -} + // State indicates the current state of this consensus instance. 
+ State utils.Atomic[EngineState] -// IsValidatorOnly returns true iff this chain is available only to validators -func (ctx *ConsensusContext) IsValidatorOnly() bool { - return ctx.validatorOnly.GetValue() -} + // True iff this chain is executing transactions as part of bootstrapping. + Executing utils.Atomic[bool] -// SetValidatorOnly marks this chain as available only to validators -func (ctx *ConsensusContext) SetValidatorOnly() { - ctx.validatorOnly.SetValue(true) + // True iff this chain is currently state-syncing + StateSyncing utils.Atomic[bool] } func DefaultContextTest() *Context { + sk, err := bls.NewSecretKey() + if err != nil { + panic(err) + } + pk := bls.PublicFromSecretKey(sk) return &Context{ - NetworkID: 0, - SubnetID: ids.Empty, - ChainID: ids.Empty, - NodeID: ids.EmptyNodeID, - Log: logging.NoLog{}, - BCLookup: ids.NewAliaser(), - Metrics: metrics.NewOptionalGatherer(), + NetworkID: 0, + SubnetID: ids.Empty, + ChainID: ids.Empty, + NodeID: ids.EmptyNodeID, + PublicKey: pk, + Log: logging.NoLog{}, + BCLookup: ids.NewAliaser(), + Metrics: metrics.NewOptionalGatherer(), + ChainDataDir: "", } } func DefaultConsensusContextTest() *ConsensusContext { return &ConsensusContext{ - Context: DefaultContextTest(), - Registerer: prometheus.NewRegistry(), - DecisionAcceptor: noOpAcceptor{}, - ConsensusAcceptor: noOpAcceptor{}, + Context: DefaultContextTest(), + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), + BlockAcceptor: noOpAcceptor{}, + TxAcceptor: noOpAcceptor{}, + VertexAcceptor: noOpAcceptor{}, } } diff --git a/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper.go b/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper.go index 65c2bb8a..1e222f8e 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap import ( + "context" "errors" "fmt" "math" @@ -13,11 +14,13 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -34,12 +37,17 @@ const ( ) var ( - _ common.BootstrapableEngine = &bootstrapper{} + _ common.BootstrapableEngine = (*bootstrapper)(nil) errUnexpectedTimeout = errors.New("unexpected timeout fired") ) -func New(config Config, onFinished func(lastReqID uint32) error) (common.BootstrapableEngine, error) { +func New( + ctx context.Context, + config Config, + startAvalancheConsensus func(ctx context.Context, lastReqID uint32) error, + startSnowmanBootstrapping func(ctx context.Context, lastReqID uint32) error, +) (common.BootstrapableEngine, error) { b := &bootstrapper{ Config: config, @@ -48,18 +56,37 @@ func New(config Config, onFinished func(lastReqID uint32) error) (common.Bootstr PutHandler: common.NewNoOpPutHandler(config.Ctx.Log), QueryHandler: common.NewNoOpQueryHandler(config.Ctx.Log), ChitsHandler: common.NewNoOpChitsHandler(config.Ctx.Log), - AppHandler: common.NewNoOpAppHandler(config.Ctx.Log), + AppHandler: config.VM, - processedCache: &cache.LRU{Size: cacheSize}, - Fetcher: common.Fetcher{OnFinished: onFinished}, + processedCache: &cache.LRU[ids.ID, struct{}]{Size: cacheSize}, + Fetcher: common.Fetcher{ + OnFinished: func(ctx context.Context, lastReqID uint32) error { + linearized, err := config.Manager.StopVertexAccepted(ctx) + if err != nil { + return err + } + if !linearized { + return 
startAvalancheConsensus(ctx, lastReqID) + } + + // Invariant: edge will only be the stop vertex after its + // acceptance. + edge := config.Manager.Edge(ctx) + stopVertexID := edge[0] + if err := config.VM.Linearize(ctx, stopVertexID); err != nil { + return err + } + return startSnowmanBootstrapping(ctx, lastReqID) + }, + }, executedStateTransitions: math.MaxInt32, } - if err := b.metrics.Initialize("bs", config.Ctx.Registerer); err != nil { + if err := b.metrics.Initialize("bs", config.Ctx.AvalancheRegisterer); err != nil { return nil, err } - if err := b.VtxBlocked.SetParser(&vtxParser{ + if err := b.VtxBlocked.SetParser(ctx, &vtxParser{ log: config.Ctx.Log, numAccepted: b.numAcceptedVts, numDropped: b.numDroppedVts, @@ -101,10 +128,10 @@ type bootstrapper struct { // IDs of vertices that we will send a GetAncestors request for once we are // not at the max number of outstanding requests - needToFetch ids.Set + needToFetch set.Set[ids.ID] // Contains IDs of vertices that have recently been processed - processedCache *cache.LRU + processedCache *cache.LRU[ids.ID, struct{}] // number of state transitions executed executedStateTransitions int @@ -127,14 +154,14 @@ func (b *bootstrapper) Clear() error { // Ancestors handles the receipt of multiple containers. Should be received in // response to a GetAncestors message to [nodeID] with request ID [requestID]. // Expects vtxs[0] to be the vertex requested in the corresponding GetAncestors. 
-func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, vtxs [][]byte) error { +func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxs [][]byte) error { lenVtxs := len(vtxs) if lenVtxs == 0 { b.Ctx.Log.Debug("Ancestors contains no vertices", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) - return b.GetAncestorsFailed(nodeID, requestID) + return b.GetAncestorsFailed(ctx, nodeID, requestID) } if lenVtxs > b.Config.AncestorsMaxContainersReceived { b.Ctx.Log.Debug("ignoring containers in Ancestors", @@ -147,7 +174,7 @@ func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, vtxs [][]b } requestedVtxID, requested := b.OutstandingRequests.Remove(nodeID, requestID) - vtx, err := b.Manager.ParseVtx(vtxs[0]) // first vertex should be the one we requested in GetAncestors request + vtx, err := b.Manager.ParseVtx(ctx, vtxs[0]) // first vertex should be the one we requested in GetAncestors request if err != nil { if !requested { b.Ctx.Log.Debug("failed to parse unrequested vertex", @@ -170,7 +197,7 @@ func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, vtxs [][]b zap.Binary("vtxBytes", vtxs[0]), zap.Error(err), ) - return b.fetch(requestedVtxID) + return b.fetch(ctx, requestedVtxID) } vtxID := vtx.ID() @@ -181,7 +208,7 @@ func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, vtxs [][]b zap.Uint32("requestID", requestID), zap.Stringer("vtxID", vtxID), ) - return b.fetch(requestedVtxID) + return b.fetch(ctx, requestedVtxID) } if !requested && !b.OutstandingRequests.Contains(vtxID) && !b.needToFetch.Contains(vtxID) { b.Ctx.Log.Debug("received un-needed vertex", @@ -204,13 +231,13 @@ func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, vtxs [][]b if err != nil { return err } - eligibleVertices := ids.NewSet(len(parents)) + eligibleVertices := set.NewSet[ids.ID](len(parents)) for _, parent := range parents { eligibleVertices.Add(parent.ID()) 
} for _, vtxBytes := range vtxs[1:] { // Parse/persist all the vertices - vtx, err := b.Manager.ParseVtx(vtxBytes) // Persists the vtx + vtx, err := b.Manager.ParseVtx(ctx, vtxBytes) // Persists the vtx if err != nil { b.Ctx.Log.Debug("failed to parse vertex", zap.Stringer("nodeID", nodeID), @@ -246,10 +273,10 @@ func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, vtxs [][]b b.needToFetch.Remove(vtxID) // No need to fetch this vertex since we have it now } - return b.process(processVertices...) + return b.process(ctx, processVertices...) } -func (b *bootstrapper) GetAncestorsFailed(nodeID ids.NodeID, requestID uint32) error { +func (b *bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { vtxID, ok := b.OutstandingRequests.Remove(nodeID, requestID) if !ok { b.Ctx.Log.Debug("skipping GetAncestorsFailed call", @@ -260,15 +287,19 @@ func (b *bootstrapper) GetAncestorsFailed(nodeID ids.NodeID, requestID uint32) e return nil } // Send another request for the vertex - return b.fetch(vtxID) + return b.fetch(ctx, vtxID) } -func (b *bootstrapper) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { - if err := b.VM.Connected(nodeID, nodeVersion); err != nil { +func (b *bootstrapper) Connected( + ctx context.Context, + nodeID ids.NodeID, + nodeVersion *version.Application, +) error { + if err := b.VM.Connected(ctx, nodeID, nodeVersion); err != nil { return err } - if err := b.StartupTracker.Connected(nodeID, nodeVersion); err != nil { + if err := b.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { return err } @@ -277,43 +308,50 @@ func (b *bootstrapper) Connected(nodeID ids.NodeID, nodeVersion *version.Applica } b.started = true - return b.Startup() + return b.Startup(ctx) } -func (b *bootstrapper) Disconnected(nodeID ids.NodeID) error { - if err := b.VM.Disconnected(nodeID); err != nil { +func (b *bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + if 
err := b.VM.Disconnected(ctx, nodeID); err != nil { return err } - return b.StartupTracker.Disconnected(nodeID) + return b.StartupTracker.Disconnected(ctx, nodeID) } -func (b *bootstrapper) Timeout() error { +func (b *bootstrapper) Timeout(ctx context.Context) error { if !b.awaitingTimeout { return errUnexpectedTimeout } b.awaitingTimeout = false - if !b.Config.Subnet.IsBootstrapped() { - return b.Restart(true) + if !b.Config.BootstrapTracker.IsBootstrapped() { + return b.Restart(ctx, true) } - return b.OnFinished(b.Config.SharedCfg.RequestID) + return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) } -func (b *bootstrapper) Gossip() error { return nil } +func (*bootstrapper) Gossip(context.Context) error { + return nil +} -func (b *bootstrapper) Shutdown() error { +func (b *bootstrapper) Shutdown(ctx context.Context) error { b.Ctx.Log.Info("shutting down bootstrapper") - return b.VM.Shutdown() + return b.VM.Shutdown(ctx) } -func (b *bootstrapper) Notify(common.Message) error { return nil } +func (*bootstrapper) Notify(context.Context, common.Message) error { + return nil +} -func (b *bootstrapper) Start(startReqID uint32) error { +func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { b.Ctx.Log.Info("starting bootstrap") - b.Ctx.SetState(snow.Bootstrapping) - if err := b.VM.SetState(snow.Bootstrapping); err != nil { + b.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.Bootstrapping, + }) + if err := b.VM.SetState(ctx, snow.Bootstrapping); err != nil { return fmt.Errorf("failed to notify VM that bootstrapping has started: %w", err) } @@ -325,11 +363,11 @@ func (b *bootstrapper) Start(startReqID uint32) error { } b.started = true - return b.Startup() + return b.Startup(ctx) } -func (b *bootstrapper) HealthCheck() (interface{}, error) { - vmIntf, vmErr := b.VM.HealthCheck() +func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { + vmIntf, vmErr := b.VM.HealthCheck(ctx) intf := 
map[string]interface{}{ "consensus": struct{}{}, "vm": vmIntf, @@ -337,12 +375,14 @@ func (b *bootstrapper) HealthCheck() (interface{}, error) { return intf, vmErr } -func (b *bootstrapper) GetVM() common.VM { return b.VM } +func (b *bootstrapper) GetVM() common.VM { + return b.VM +} // Add the vertices in [vtxIDs] to the set of vertices that we need to fetch, // and then fetch vertices (and their ancestors) until either there are no more // to fetch or we are at the maximum number of outstanding requests. -func (b *bootstrapper) fetch(vtxIDs ...ids.ID) error { +func (b *bootstrapper) fetch(ctx context.Context, vtxIDs ...ids.ID) error { b.needToFetch.Add(vtxIDs...) for b.needToFetch.Len() > 0 && b.OutstandingRequests.Len() < common.MaxOutstandingGetAncestorsRequests { vtxID := b.needToFetch.CappedList(1)[0] @@ -354,25 +394,25 @@ func (b *bootstrapper) fetch(vtxIDs ...ids.ID) error { } // Make sure we don't already have this vertex - if _, err := b.Manager.GetVtx(vtxID); err == nil { + if _, err := b.Manager.GetVtx(ctx, vtxID); err == nil { continue } - validators, err := b.Config.Beacons.Sample(1) // validator to send request to + validatorIDs, err := b.Config.Beacons.Sample(1) // validator to send request to if err != nil { return fmt.Errorf("dropping request for %s as there are no validators", vtxID) } - validatorID := validators[0].ID() + validatorID := validatorIDs[0] b.Config.SharedCfg.RequestID++ b.OutstandingRequests.Add(validatorID, b.Config.SharedCfg.RequestID, vtxID) - b.Config.Sender.SendGetAncestors(validatorID, b.Config.SharedCfg.RequestID, vtxID) // request vertex and ancestors + b.Config.Sender.SendGetAncestors(ctx, validatorID, b.Config.SharedCfg.RequestID, vtxID) // request vertex and ancestors } - return b.checkFinish() + return b.checkFinish(ctx) } // Process the vertices in [vtxs]. 
-func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { +func (b *bootstrapper) process(ctx context.Context, vtxs ...avalanche.Vertex) error { // Vertices that we need to process. Store them in a heap for deduplication // and so we always process vertices further down in the DAG first. This helps // to reduce the number of repeated DAG traversals. @@ -386,7 +426,7 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { } } - vtxHeightSet := ids.Set{} + vtxHeightSet := set.Set[ids.ID]{} prevHeight := uint64(0) for toProcess.Len() > 0 { // While there are unprocessed vertices @@ -408,33 +448,38 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { b.VtxBlocked.RemoveMissingID(vtxID) // Add to queue of vertices to execute when bootstrapping finishes. - if pushed, err := b.VtxBlocked.Push(&vertexJob{ + pushed, err := b.VtxBlocked.Push(ctx, &vertexJob{ log: b.Ctx.Log, numAccepted: b.numAcceptedVts, numDropped: b.numDroppedVts, vtx: vtx, - }); err != nil { + }) + if err != nil { return err - } else if !pushed { + } + if !pushed { // If the vertex is already on the queue, then we have already // pushed [vtx]'s transactions and traversed into its parents. continue } - txs, err := vtx.Txs() + txs, err := vtx.Txs(ctx) if err != nil { return err } + for _, tx := range txs { // Add to queue of txs to execute when bootstrapping finishes. 
- if pushed, err := b.TxBlocked.Push(&txJob{ + pushed, err := b.TxBlocked.Push(ctx, &txJob{ log: b.Ctx.Log, numAccepted: b.numAcceptedTxs, numDropped: b.numDroppedTxs, tx: tx, - }); err != nil { + }) + if err != nil { return err - } else if pushed { + } + if pushed { b.numFetchedTxs.Inc() } } @@ -471,7 +516,7 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { return err } if height%stripeDistance < stripeWidth { // See comment for stripeDistance - b.processedCache.Put(vtxID, nil) + b.processedCache.Put(vtxID, struct{}{}) } if height == prevHeight { vtxHeightSet.Add(vtxID) @@ -491,11 +536,11 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { return err } - return b.fetch() + return b.fetch(ctx) } // ForceAccepted starts bootstrapping. Process the vertices in [accepterContainerIDs]. -func (b *bootstrapper) ForceAccepted(acceptedContainerIDs []ids.ID) error { +func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error { pendingContainerIDs := b.VtxBlocked.MissingIDs() // Append the list of accepted container IDs to pendingContainerIDs to ensure // we iterate over every container that must be traversed. @@ -506,7 +551,7 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs []ids.ID) error { ) toProcess := make([]avalanche.Vertex, 0, len(pendingContainerIDs)) for _, vtxID := range pendingContainerIDs { - if vtx, err := b.Manager.GetVtx(vtxID); err == nil { + if vtx, err := b.Manager.GetVtx(ctx, vtxID); err == nil { if vtx.Status() == choices.Accepted { b.VtxBlocked.RemoveMissingID(vtxID) } else { @@ -517,12 +562,12 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs []ids.ID) error { b.needToFetch.Add(vtxID) // We don't have this vertex. Mark that we have to fetch it. } } - return b.process(toProcess...) + return b.process(ctx, toProcess...) 
} // checkFinish repeatedly executes pending transactions and requests new frontier blocks until there aren't any new ones // after which it finishes the bootstrap process -func (b *bootstrapper) checkFinish() error { +func (b *bootstrapper) checkFinish(ctx context.Context) error { // If there are outstanding requests for vertices or we still need to fetch vertices, we can't finish pendingJobs := b.VtxBlocked.MissingIDs() if b.IsBootstrapped() || len(pendingJobs) > 0 || b.awaitingTimeout { @@ -535,7 +580,13 @@ func (b *bootstrapper) checkFinish() error { b.Ctx.Log.Debug("executing transactions") } - _, err := b.TxBlocked.ExecuteAll(b.Config.Ctx, b, b.Config.SharedCfg.Restarted, b.Ctx.DecisionAcceptor) + _, err := b.TxBlocked.ExecuteAll( + ctx, + b.Config.Ctx, + b, + b.Config.SharedCfg.Restarted, + b.Ctx.TxAcceptor, + ) if err != nil || b.Halted() { return err } @@ -546,11 +597,28 @@ func (b *bootstrapper) checkFinish() error { b.Ctx.Log.Debug("executing vertices") } - executedVts, err := b.VtxBlocked.ExecuteAll(b.Config.Ctx, b, b.Config.SharedCfg.Restarted, b.Ctx.ConsensusAcceptor) + executedVts, err := b.VtxBlocked.ExecuteAll( + ctx, + b.Config.Ctx, + b, + b.Config.SharedCfg.Restarted, + b.Ctx.VertexAcceptor, + ) if err != nil || b.Halted() { return err } + // If the chain is linearized, we should immediately move on to start + // bootstrapping snowman. + linearized, err := b.Manager.StopVertexAccepted(ctx) + if err != nil { + return err + } + if linearized { + b.processedCache.Flush() + return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) + } + previouslyExecuted := b.executedStateTransitions b.executedStateTransitions = executedVts @@ -559,16 +627,16 @@ func (b *bootstrapper) checkFinish() error { // issued. 
if executedVts > 0 && executedVts < previouslyExecuted/2 && b.Config.RetryBootstrap { b.Ctx.Log.Debug("checking for more vertices before finishing bootstrapping") - return b.Restart(true) + return b.Restart(ctx, true) } // Notify the subnet that this chain is synced - b.Config.Subnet.Bootstrapped(b.Ctx.ChainID) + b.Config.BootstrapTracker.Bootstrapped(b.Ctx.ChainID) b.processedCache.Flush() // If the subnet hasn't finished bootstrapping, this chain should remain // syncing. - if !b.Config.Subnet.IsBootstrapped() { + if !b.Config.BootstrapTracker.IsBootstrapped() { if !b.Config.SharedCfg.Restarted { b.Ctx.Log.Info("waiting for the remaining chains in this subnet to finish syncing") } else { @@ -580,5 +648,5 @@ func (b *bootstrapper) checkFinish() error { b.awaitingTimeout = true return nil } - return b.OnFinished(b.Config.SharedCfg.RequestID) + return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) } diff --git a/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper_test.go index a95b68ee..bc1d58cf 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -1,34 +1,40 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap import ( "bytes" + "context" "errors" "testing" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/getter" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/validators" - - avagetter "github.com/ava-labs/avalanchego/snow/engine/avalanche/getter" ) var ( errUnknownVertex = errors.New("unknown vertex") errParsedUnknownVertex = errors.New("parsed unknown vertex") + errUnknownTx = errors.New("unknown tx") ) +func noopStarter(context.Context, uint32) error { + return nil +} + func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.TestManager, *vertex.TestVM) { ctx := snow.DefaultConsensusContextTest() @@ -40,10 +46,14 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te vm.T = t isBootstrapped := false - subnet := &common.SubnetTest{ - T: t, - IsBootstrappedF: func() bool { return isBootstrapped }, - BootstrappedF: func(ids.ID) { isBootstrapped = true }, + bootstrapTracker := &common.BootstrapTrackerTest{ + T: t, + IsBootstrappedF: func() bool { + return isBootstrapped + }, + BootstrappedF: func(ids.ID) { + isBootstrapped = true + }, } sender.Default(true) @@ -53,15 +63,15 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te sender.CantSendGetAcceptedFrontier = false peer := ids.GenerateTestNodeID() - if err := peers.AddWeight(peer, 1); err != nil { + if err 
:= peers.Add(peer, nil, ids.Empty, 1); err != nil { t.Fatal(err) } - vtxBlocker, err := queue.NewWithMissing(prefixdb.New([]byte("vtx"), db), "vtx", ctx.Registerer) + vtxBlocker, err := queue.NewWithMissing(prefixdb.New([]byte("vtx"), db), "vtx", ctx.AvalancheRegisterer) if err != nil { t.Fatal(err) } - txBlocker, err := queue.New(prefixdb.New([]byte("tx"), db), "tx", ctx.Registerer) + txBlocker, err := queue.New(prefixdb.New([]byte("tx"), db), "tx", ctx.AvalancheRegisterer) if err != nil { t.Fatal(err) } @@ -72,20 +82,19 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *vertex.Te commonConfig := common.Config{ Ctx: ctx, - Validators: peers, Beacons: peers, SampleK: peers.Len(), Alpha: peers.Weight()/2 + 1, StartupTracker: startupTracker, Sender: sender, - Subnet: subnet, + BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, SharedCfg: &common.SharedConfig{}, } - avaGetHandler, err := avagetter.New(manager, commonConfig) + avaGetHandler, err := getter.New(manager, commonConfig) if err != nil { t.Fatal(err) } @@ -138,21 +147,29 @@ func TestBootstrapperSingleFrontier(t *testing.T) { } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.NormalOp, + }) + return nil + }, + noopStarter, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } acceptedIDs := []ids.ID{vtxID0, vtxID1, vtxID2} - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: return vtx0, nil @@ -166,7 +183,7 @@ func 
TestBootstrapperSingleFrontier(t *testing.T) { } } - manager.ParseVtxF = func(vtxBytes []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes0): return vtx0, nil @@ -179,12 +196,12 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return nil, errParsedUnknownVertex } - if err := bs.ForceAccepted(acceptedIDs); err != nil { + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { t.Fatal(err) } switch { - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case vtx0.Status() != choices.Accepted: t.Fatalf("Vertex should be accepted") @@ -237,21 +254,29 @@ func TestBootstrapperByzantineResponses(t *testing.T) { } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.NormalOp, + }) + return nil + }, + noopStarter, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } acceptedIDs := []ids.ID{vtxID1} - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID1: return vtx1, nil @@ -265,7 +290,7 @@ func TestBootstrapperByzantineResponses(t *testing.T) { requestID := new(uint32) reqVtxID := ids.Empty - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { switch { case vdr != peerID: t.Fatalf("Should have requested vertex from %s, requested from %s", @@ -277,7 +302,7 @@ func 
TestBootstrapperByzantineResponses(t *testing.T) { reqVtxID = vtxID } - manager.ParseVtxF = func(vtxBytes []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes0): vtx0.StatusV = choices.Processing @@ -293,14 +318,14 @@ func TestBootstrapperByzantineResponses(t *testing.T) { return nil, errParsedUnknownVertex } - if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request vtx0 + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx0 t.Fatal(err) } else if reqVtxID != vtxID0 { t.Fatalf("should have requested vtxID0 but requested %s", reqVtxID) } oldReqID := *requestID - err = bs.Ancestors(peerID, *requestID, [][]byte{vtxBytes2}) + err = bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{vtxBytes2}) switch { case err != nil: // send unexpected vertex t.Fatal(err) @@ -311,7 +336,7 @@ func TestBootstrapperByzantineResponses(t *testing.T) { } oldReqID = *requestID - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID1: return vtx1, nil @@ -323,14 +348,14 @@ func TestBootstrapperByzantineResponses(t *testing.T) { } } - if err := bs.Ancestors(peerID, *requestID, [][]byte{vtxBytes0, vtxBytes2}); err != nil { // send expected vertex and vertex that should not be accepted + if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{vtxBytes0, vtxBytes2}); err != nil { // send expected vertex and vertex that should not be accepted t.Fatal(err) } switch { case *requestID != oldReqID: t.Fatal("should not have issued new request") - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case vtx0.Status() != choices.Accepted: t.Fatalf("Vertex should be accepted") @@ 
-379,14 +404,14 @@ func TestBootstrapperTxDependencies(t *testing.T) { vtxBytes0 := []byte{2} vtxBytes1 := []byte{3} - vm.ParseTxF = func(b []byte) (snowstorm.Tx, error) { + vm.ParseTxF = func(_ context.Context, b []byte) (snowstorm.Tx, error) { switch { case bytes.Equal(b, txBytes0): return tx0, nil case bytes.Equal(b, txBytes1): return tx1, nil default: - return nil, errors.New("wrong tx") + return nil, errUnknownTx } } @@ -411,21 +436,29 @@ func TestBootstrapperTxDependencies(t *testing.T) { } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.NormalOp, + }) + return nil + }, + noopStarter, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } acceptedIDs := []ids.ID{vtxID1} - manager.ParseVtxF = func(vtxBytes []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes1): return vtx1, nil @@ -435,7 +468,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { t.Fatal(errParsedUnknownVertex) return nil, errParsedUnknownVertex } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID1: return vtx1, nil @@ -448,7 +481,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { } reqIDPtr := new(uint32) - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) } @@ -461,11 +494,11 @@ func 
TestBootstrapperTxDependencies(t *testing.T) { *reqIDPtr = reqID } - if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request vtx0 + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx0 t.Fatal(err) } - manager.ParseVtxF = func(vtxBytes []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes1): return vtx1, nil @@ -477,11 +510,11 @@ func TestBootstrapperTxDependencies(t *testing.T) { return nil, errParsedUnknownVertex } - if err := bs.Ancestors(peerID, *reqIDPtr, [][]byte{vtxBytes0}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0}); err != nil { t.Fatal(err) } - if config.Ctx.GetState() != snow.NormalOp { + if config.Ctx.State.Get().State != snow.NormalOp { t.Fatalf("Should have finished bootstrapping") } if tx0.Status() != choices.Accepted { @@ -551,21 +584,29 @@ func TestBootstrapperMissingTxDependency(t *testing.T) { } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.NormalOp, + }) + return nil + }, + noopStarter, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } acceptedIDs := []ids.ID{vtxID1} - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID1: return vtx1, nil @@ -576,7 +617,7 @@ func TestBootstrapperMissingTxDependency(t *testing.T) { panic(errUnknownVertex) } } - manager.ParseVtxF = func(vtxBytes []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ 
context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes1): return vtx1, nil @@ -589,7 +630,7 @@ func TestBootstrapperMissingTxDependency(t *testing.T) { } reqIDPtr := new(uint32) - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) } @@ -602,15 +643,15 @@ func TestBootstrapperMissingTxDependency(t *testing.T) { *reqIDPtr = reqID } - if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request vtx1 + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx1 t.Fatal(err) } - if err := bs.Ancestors(peerID, *reqIDPtr, [][]byte{vtxBytes0}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0}); err != nil { t.Fatal(err) } - if config.Ctx.GetState() != snow.NormalOp { + if config.Ctx.State.Get().State != snow.NormalOp { t.Fatalf("Bootstrapping should have finished") } if tx0.Status() != choices.Unknown { // never saw this tx @@ -668,21 +709,29 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.NormalOp, + }) + return nil + }, + noopStarter, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } acceptedIDs := []ids.ID{vtxID2} - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch { case vtxID == vtxID0: return 
nil, errUnknownVertex @@ -695,7 +744,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { panic(errUnknownVertex) } } - manager.ParseVtxF = func(vtxBytes []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes0): vtx0.StatusV = choices.Processing @@ -712,7 +761,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { } reqIDPtr := new(uint32) requested := ids.Empty - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) } @@ -725,27 +774,27 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { requested = vtxID } - if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request vtx1 + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx1 t.Fatal(err) } else if requested != vtxID1 { t.Fatal("requested wrong vtx") } - err = bs.Ancestors(peerID, *reqIDPtr, [][]byte{vtxBytes1}) + err = bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes1}) switch { case err != nil: // Provide vtx1; should request vtx0 t.Fatal(err) - case bs.Context().GetState() == snow.NormalOp: + case bs.Context().State.Get().State == snow.NormalOp: t.Fatalf("should not have finished") case requested != vtxID0: t.Fatal("should hae requested vtx0") } - err = bs.Ancestors(peerID, *reqIDPtr, [][]byte{vtxBytes0}) + err = bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0}) switch { case err != nil: // Provide vtx0; can finish now t.Fatal(err) - case bs.Context().GetState() != snow.NormalOp: + case bs.Context().State.Get().State != snow.NormalOp: t.Fatal("should have finished") case vtx0.Status() != choices.Accepted: t.Fatal("should be accepted") @@ -784,15 
+833,23 @@ func TestBootstrapperFinalized(t *testing.T) { } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.NormalOp, + }) + return nil + }, + noopStarter, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -800,7 +857,7 @@ func TestBootstrapperFinalized(t *testing.T) { parsedVtx0 := false parsedVtx1 := false - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: if parsedVtx0 { @@ -817,7 +874,7 @@ func TestBootstrapperFinalized(t *testing.T) { panic(errUnknownVertex) } } - manager.ParseVtxF = func(vtxBytes []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes0): vtx0.StatusV = choices.Processing @@ -833,14 +890,14 @@ func TestBootstrapperFinalized(t *testing.T) { } requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) } requestIDs[vtxID] = reqID } - if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request vtx0 and vtx1 + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx0 and vtx1 t.Fatal(err) } @@ -849,7 +906,7 @@ func TestBootstrapperFinalized(t *testing.T) { t.Fatalf("should have requested vtx1") } - if err := bs.Ancestors(peerID, reqID, [][]byte{vtxBytes1, 
vtxBytes0}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes1, vtxBytes0}); err != nil { t.Fatal(err) } @@ -858,11 +915,11 @@ func TestBootstrapperFinalized(t *testing.T) { t.Fatalf("should have requested vtx0") } - err = bs.GetAncestorsFailed(peerID, reqID) + err = bs.GetAncestorsFailed(context.Background(), peerID, reqID) switch { case err != nil: t.Fatal(err) - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case vtx0.Status() != choices.Accepted: t.Fatalf("Vertex should be accepted") @@ -911,15 +968,23 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.NormalOp, + }) + return nil + }, + noopStarter, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -928,7 +993,7 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { parsedVtx0 := false parsedVtx1 := false parsedVtx2 := false - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: if parsedVtx0 { @@ -950,7 +1015,7 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { } return nil, errUnknownVertex } - manager.ParseVtxF = func(vtxBytes []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes0): vtx0.StatusV = choices.Processing @@ -970,14 +1035,14 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { } 
requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) } requestIDs[vtxID] = reqID } - if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request vtx2 + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request vtx2 t.Fatal(err) } @@ -986,12 +1051,12 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { t.Fatalf("should have requested vtx2") } - if err := bs.Ancestors(peerID, reqID, [][]byte{vtxBytes2, vtxBytes1, vtxBytes0}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, reqID, [][]byte{vtxBytes2, vtxBytes1, vtxBytes0}); err != nil { t.Fatal(err) } switch { - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case vtx0.Status() != choices.Accepted: t.Fatalf("Vertex should be accepted") @@ -1074,8 +1139,16 @@ func TestRestartBootstrapping(t *testing.T) { } bsIntf, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.NormalOp, + }) + return nil + }, + noopStarter, ) if err != nil { t.Fatal(err) @@ -1086,7 +1159,7 @@ func TestRestartBootstrapping(t *testing.T) { } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -1096,7 +1169,7 @@ func TestRestartBootstrapping(t *testing.T) { parsedVtx3 := false parsedVtx4 := false parsedVtx5 := false - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, 
vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: if parsedVtx0 { @@ -1130,7 +1203,7 @@ func TestRestartBootstrapping(t *testing.T) { } return nil, errUnknownVertex } - manager.ParseVtxF = func(vtxBytes []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(vtxBytes, vtxBytes0): vtx0.StatusV = choices.Processing @@ -1162,14 +1235,14 @@ func TestRestartBootstrapping(t *testing.T) { } requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) } requestIDs[vtxID] = reqID } - if err := bs.ForceAccepted([]ids.ID{vtxID3, vtxID4}); err != nil { // should request vtx3 and vtx4 + if err := bs.ForceAccepted(context.Background(), []ids.ID{vtxID3, vtxID4}); err != nil { // should request vtx3 and vtx4 t.Fatal(err) } @@ -1182,7 +1255,7 @@ func TestRestartBootstrapping(t *testing.T) { t.Fatal("should have requested vtx4") } - if err := bs.Ancestors(peerID, vtx3ReqID, [][]byte{vtxBytes3, vtxBytes2}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, vtx3ReqID, [][]byte{vtxBytes3, vtxBytes2}); err != nil { t.Fatal(err) } @@ -1201,7 +1274,7 @@ func TestRestartBootstrapping(t *testing.T) { bs.needToFetch.Clear() requestIDs = map[ids.ID]uint32{} - if err := bs.ForceAccepted([]ids.ID{vtxID5, vtxID3}); err != nil { + if err := bs.ForceAccepted(context.Background(), []ids.ID{vtxID5, vtxID3}); err != nil { t.Fatal(err) } @@ -1221,7 +1294,7 @@ func TestRestartBootstrapping(t *testing.T) { t.Fatal("should not have re-requested vtx3 since it has been processed") } - if err := bs.Ancestors(peerID, vtx5ReqID, [][]byte{vtxBytes5, vtxBytes4, vtxBytes2, vtxBytes1}); err != nil { + if err := 
bs.Ancestors(context.Background(), peerID, vtx5ReqID, [][]byte{vtxBytes5, vtxBytes4, vtxBytes2, vtxBytes1}); err != nil { t.Fatal(err) } @@ -1230,12 +1303,12 @@ func TestRestartBootstrapping(t *testing.T) { t.Fatal("should have requested vtx0 after ancestors ended prior to it") } - if err := bs.Ancestors(peerID, vtx1ReqID, [][]byte{vtxBytes1, vtxBytes0}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, vtx1ReqID, [][]byte{vtxBytes1, vtxBytes0}); err != nil { t.Fatal(err) } switch { - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case vtx0.Status() != choices.Accepted: t.Fatalf("Vertex should be accepted") diff --git a/avalanchego/snow/engine/avalanche/bootstrap/config.go b/avalanchego/snow/engine/avalanche/bootstrap/config.go index ef65a1d2..7cdbc82f 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/config.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap @@ -19,5 +19,5 @@ type Config struct { TxBlocked *queue.Jobs Manager vertex.Manager - VM vertex.DAGVM + VM vertex.LinearizableVM } diff --git a/avalanchego/snow/engine/avalanche/bootstrap/metrics.go b/avalanchego/snow/engine/avalanche/bootstrap/metrics.go index 8c992b28..2033a776 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/metrics.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap diff --git a/avalanchego/snow/engine/avalanche/bootstrap/tx_job.go b/avalanchego/snow/engine/avalanche/bootstrap/tx_job.go index d5ba7867..a9ead4a6 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/tx_job.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/tx_job.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap import ( + "context" "errors" "fmt" @@ -17,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" ) var errMissingTxDependenciesOnAccept = errors.New("attempting to accept a transaction with missing dependencies") @@ -24,11 +26,11 @@ var errMissingTxDependenciesOnAccept = errors.New("attempting to accept a transa type txParser struct { log logging.Logger numAccepted, numDropped prometheus.Counter - vm vertex.DAGVM + vm vertex.LinearizableVM } -func (p *txParser) Parse(txBytes []byte) (queue.Job, error) { - tx, err := p.vm.ParseTx(txBytes) +func (p *txParser) Parse(ctx context.Context, txBytes []byte) (queue.Job, error) { + tx, err := p.vm.ParseTx(ctx, txBytes) if err != nil { return nil, err } @@ -46,9 +48,12 @@ type txJob struct { tx snowstorm.Tx } -func (t *txJob) ID() ids.ID { return t.tx.ID() } -func (t *txJob) MissingDependencies() (ids.Set, error) { - missing := ids.Set{} +func (t *txJob) ID() ids.ID { + return t.tx.ID() +} + +func (t *txJob) MissingDependencies(context.Context) (set.Set[ids.ID], error) { + missing := set.Set[ids.ID]{} deps, err := t.tx.Dependencies() if err != nil { return missing, err @@ -62,7 +67,7 @@ func (t *txJob) MissingDependencies() (ids.Set, error) { } // Returns true if this tx job has at least 1 missing dependency -func (t *txJob) HasMissingDependencies() 
(bool, error) { +func (t *txJob) HasMissingDependencies(context.Context) (bool, error) { deps, err := t.tx.Dependencies() if err != nil { return false, err @@ -75,8 +80,8 @@ func (t *txJob) HasMissingDependencies() (bool, error) { return false, nil } -func (t *txJob) Execute() error { - hasMissingDeps, err := t.HasMissingDependencies() +func (t *txJob) Execute(ctx context.Context) error { + hasMissingDeps, err := t.HasMissingDependencies(ctx) if err != nil { return err } @@ -92,7 +97,7 @@ func (t *txJob) Execute() error { return fmt.Errorf("attempting to execute transaction with status %s", status) case choices.Processing: txID := t.tx.ID() - if err := t.tx.Verify(); err != nil { + if err := t.tx.Verify(ctx); err != nil { t.log.Error("transaction failed verification during bootstrapping", zap.Stringer("txID", txID), zap.Error(err), @@ -104,7 +109,7 @@ func (t *txJob) Execute() error { t.log.Trace("accepting transaction in bootstrapping", zap.Stringer("txID", txID), ) - if err := t.tx.Accept(); err != nil { + if err := t.tx.Accept(ctx); err != nil { t.log.Error("transaction failed to accept during bootstrapping", zap.Stringer("txID", txID), zap.Error(err), @@ -114,4 +119,7 @@ func (t *txJob) Execute() error { } return nil } -func (t *txJob) Bytes() []byte { return t.tx.Bytes() } + +func (t *txJob) Bytes() []byte { + return t.tx.Bytes() +} diff --git a/avalanchego/snow/engine/avalanche/bootstrap/vertex_job.go b/avalanchego/snow/engine/avalanche/bootstrap/vertex_job.go index 5790478a..3001ce89 100644 --- a/avalanchego/snow/engine/avalanche/bootstrap/vertex_job.go +++ b/avalanchego/snow/engine/avalanche/bootstrap/vertex_job.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap import ( + "context" "errors" "fmt" @@ -17,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" ) var errMissingVtxDependenciesOnAccept = errors.New("attempting to execute blocked vertex") @@ -27,8 +29,8 @@ type vtxParser struct { manager vertex.Manager } -func (p *vtxParser) Parse(vtxBytes []byte) (queue.Job, error) { - vtx, err := p.manager.ParseVtx(vtxBytes) +func (p *vtxParser) Parse(ctx context.Context, vtxBytes []byte) (queue.Job, error) { + vtx, err := p.manager.ParseVtx(ctx, vtxBytes) if err != nil { return nil, err } @@ -46,10 +48,12 @@ type vertexJob struct { vtx avalanche.Vertex } -func (v *vertexJob) ID() ids.ID { return v.vtx.ID() } +func (v *vertexJob) ID() ids.ID { + return v.vtx.ID() +} -func (v *vertexJob) MissingDependencies() (ids.Set, error) { - missing := ids.Set{} +func (v *vertexJob) MissingDependencies(context.Context) (set.Set[ids.ID], error) { + missing := set.Set[ids.ID]{} parents, err := v.vtx.Parents() if err != nil { return missing, err @@ -63,7 +67,7 @@ func (v *vertexJob) MissingDependencies() (ids.Set, error) { } // Returns true if this vertex job has at least 1 missing dependency -func (v *vertexJob) HasMissingDependencies() (bool, error) { +func (v *vertexJob) HasMissingDependencies(context.Context) (bool, error) { parents, err := v.vtx.Parents() if err != nil { return false, err @@ -76,8 +80,8 @@ func (v *vertexJob) HasMissingDependencies() (bool, error) { return false, nil } -func (v *vertexJob) Execute() error { - hasMissingDependencies, err := v.HasMissingDependencies() +func (v *vertexJob) Execute(ctx context.Context) error { + hasMissingDependencies, err := v.HasMissingDependencies(ctx) if err != nil { return err } @@ -85,7 +89,7 @@ func (v *vertexJob) Execute() error { v.numDropped.Inc() return 
errMissingVtxDependenciesOnAccept } - txs, err := v.vtx.Txs() + txs, err := v.vtx.Txs(ctx) if err != nil { return err } @@ -106,11 +110,13 @@ func (v *vertexJob) Execute() error { v.log.Trace("accepting vertex in bootstrapping", zap.Stringer("vtxID", v.vtx.ID()), ) - if err := v.vtx.Accept(); err != nil { + if err := v.vtx.Accept(ctx); err != nil { return fmt.Errorf("failed to accept vertex in bootstrapping: %w", err) } } return nil } -func (v *vertexJob) Bytes() []byte { return v.vtx.Bytes() } +func (v *vertexJob) Bytes() []byte { + return v.vtx.Bytes() +} diff --git a/avalanchego/snow/engine/avalanche/config.go b/avalanchego/snow/engine/avalanche/config.go index 8c716b63..1bac47f5 100644 --- a/avalanchego/snow/engine/avalanche/config.go +++ b/avalanchego/snow/engine/avalanche/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche @@ -15,7 +15,7 @@ import ( type Config struct { Ctx *snow.ConsensusContext common.AllGetsServer - VM vertex.DAGVM + VM vertex.LinearizableVM Manager vertex.Manager Sender common.Sender Validators validators.Set diff --git a/avalanchego/snow/engine/avalanche/config_test.go b/avalanchego/snow/engine/avalanche/config_test.go index c20ea448..c2bd205b 100644 --- a/avalanchego/snow/engine/avalanche/config_test.go +++ b/avalanchego/snow/engine/avalanche/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avalanche @@ -13,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/queue" + "github.com/ava-labs/avalanchego/snow/validators" ) func DefaultConfig() (common.Config, bootstrap.Config, Config) { @@ -34,7 +35,7 @@ func DefaultConfig() (common.Config, bootstrap.Config, Config) { VM: bootstrapConfig.VM, Manager: bootstrapConfig.Manager, Sender: bootstrapConfig.Sender, - Validators: bootstrapConfig.Validators, + Validators: validators.NewSet(), Params: avalanche.Parameters{ Parameters: snowball.Parameters{ K: 1, diff --git a/avalanchego/snow/engine/avalanche/engine.go b/avalanchego/snow/engine/avalanche/engine.go index 72c3a6bd..02f035be 100644 --- a/avalanchego/snow/engine/avalanche/engine.go +++ b/avalanchego/snow/engine/avalanche/engine.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -15,5 +17,5 @@ type Engine interface { // GetVtx returns a vertex by its ID. // Returns an error if unknown. - GetVtx(vtxID ids.ID) (avalanche.Vertex, error) + GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) } diff --git a/avalanchego/snow/engine/avalanche/getter/getter.go b/avalanchego/snow/engine/avalanche/getter/getter.go index 8b04fb3c..cc777bf4 100644 --- a/avalanchego/snow/engine/avalanche/getter/getter.go +++ b/avalanchego/snow/engine/avalanche/getter/getter.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package getter import ( + "context" "time" "go.uber.org/zap" @@ -17,11 +18,12 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" ) // Get requests are always served, regardless node state (bootstrapping or normal operations). -var _ common.AllGetsServer = &getter{} +var _ common.AllGetsServer = (*getter)(nil) func New(storage vertex.Storage, commonCfg common.Config) (common.AllGetsServer, error) { gh := &getter{ @@ -36,7 +38,7 @@ func New(storage vertex.Storage, commonCfg common.Config) (common.AllGetsServer, "bs", "get_ancestors_vtxs", "vertices fetched in a call to GetAncestors", - commonCfg.Ctx.Registerer, + commonCfg.Ctx.AvalancheRegisterer, ) return gh, err } @@ -50,51 +52,51 @@ type getter struct { getAncestorsVtxs metric.Averager } -func (gh *getter) GetStateSummaryFrontier(nodeID ids.NodeID, requestID uint32) error { +func (gh *getter) GetStateSummaryFrontier(_ context.Context, nodeID ids.NodeID, requestID uint32) error { gh.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.GetStateSummaryFrontier), + zap.Stringer("messageOp", message.GetStateSummaryFrontierOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (gh *getter) GetAcceptedStateSummary(nodeID ids.NodeID, requestID uint32, _ []uint64) error { +func (gh *getter) GetAcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []uint64) error { gh.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.GetAcceptedStateSummary), + zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (gh *getter) GetAcceptedFrontier(validatorID 
ids.NodeID, requestID uint32) error { - acceptedFrontier := gh.storage.Edge() - gh.sender.SendAcceptedFrontier(validatorID, requestID, acceptedFrontier) +func (gh *getter) GetAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error { + acceptedFrontier := gh.storage.Edge(ctx) + gh.sender.SendAcceptedFrontier(ctx, validatorID, requestID, acceptedFrontier) return nil } -func (gh *getter) GetAccepted(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { acceptedVtxIDs := make([]ids.ID, 0, len(containerIDs)) for _, vtxID := range containerIDs { - if vtx, err := gh.storage.GetVtx(vtxID); err == nil && vtx.Status() == choices.Accepted { + if vtx, err := gh.storage.GetVtx(ctx, vtxID); err == nil && vtx.Status() == choices.Accepted { acceptedVtxIDs = append(acceptedVtxIDs, vtxID) } } - gh.sender.SendAccepted(nodeID, requestID, acceptedVtxIDs) + gh.sender.SendAccepted(ctx, nodeID, requestID, acceptedVtxIDs) return nil } -func (gh *getter) GetAncestors(nodeID ids.NodeID, requestID uint32, vtxID ids.ID) error { +func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxID ids.ID) error { startTime := time.Now() gh.log.Verbo("called GetAncestors", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Stringer("vtxID", vtxID), ) - vertex, err := gh.storage.GetVtx(vtxID) + vertex, err := gh.storage.GetVtx(ctx, vtxID) if err != nil || vertex.Status() == choices.Unknown { gh.log.Verbo("dropping getAncestors") return nil // Don't have the requested vertex. Drop message. 
@@ -104,7 +106,7 @@ func (gh *getter) GetAncestors(nodeID ids.NodeID, requestID uint32, vtxID ids.ID queue[0] = vertex ancestorsBytesLen := 0 // length, in bytes, of vertex and its ancestors ancestorsBytes := make([][]byte, 0, gh.cfg.AncestorsMaxContainersSent) // vertex and its ancestors in BFS order - visited := ids.Set{} // IDs of vertices that have been in queue before + visited := set.Set[ids.ID]{} // IDs of vertices that have been in queue before visited.Add(vertex.ID()) for len(ancestorsBytes) < gh.cfg.AncestorsMaxContainersSent && len(queue) > 0 && time.Since(startTime) < gh.cfg.MaxTimeGetAncestors { @@ -113,12 +115,13 @@ func (gh *getter) GetAncestors(nodeID ids.NodeID, requestID uint32, vtxID ids.ID vtxBytes := vtx.Bytes() // Ensure response size isn't too large. Include wrappers.IntLen because the size of the message // is included with each container, and the size is repr. by an int. - if newLen := wrappers.IntLen + ancestorsBytesLen + len(vtxBytes); newLen < constants.MaxContainersLen { - ancestorsBytes = append(ancestorsBytes, vtxBytes) - ancestorsBytesLen = newLen - } else { // reached maximum response size + newLen := wrappers.IntLen + ancestorsBytesLen + len(vtxBytes) + if newLen > constants.MaxContainersLen { + // reached maximum response size break } + ancestorsBytes = append(ancestorsBytes, vtxBytes) + ancestorsBytesLen = newLen parents, err := vtx.Parents() if err != nil { return err @@ -135,14 +138,14 @@ func (gh *getter) GetAncestors(nodeID ids.NodeID, requestID uint32, vtxID ids.ID } gh.getAncestorsVtxs.Observe(float64(len(ancestorsBytes))) - gh.sender.SendAncestors(nodeID, requestID, ancestorsBytes) + gh.sender.SendAncestors(ctx, nodeID, requestID, ancestorsBytes) return nil } -func (gh *getter) Get(nodeID ids.NodeID, requestID uint32, vtxID ids.ID) error { +func (gh *getter) Get(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxID ids.ID) error { // If this engine has access to the requested vertex, provide it - if vtx, err := 
gh.storage.GetVtx(vtxID); err == nil { - gh.sender.SendPut(nodeID, requestID, vtx.Bytes()) + if vtx, err := gh.storage.GetVtx(ctx, vtxID); err == nil { + gh.sender.SendPut(ctx, nodeID, requestID, vtx.Bytes()) } return nil } diff --git a/avalanchego/snow/engine/avalanche/getter/getter_test.go b/avalanchego/snow/engine/avalanche/getter/getter_test.go index eb798399..613bb7b0 100644 --- a/avalanchego/snow/engine/avalanche/getter/getter_test.go +++ b/avalanchego/snow/engine/avalanche/getter/getter_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package getter import ( + "context" "errors" "testing" @@ -14,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/set" ) var errUnknownVertex = errors.New("unknown vertex") @@ -21,7 +23,7 @@ var errUnknownVertex = errors.New("unknown vertex") func testSetup(t *testing.T) (*vertex.TestManager, *common.SenderTest, common.Config) { peers := validators.NewSet() peer := ids.GenerateTestNodeID() - if err := peers.AddWeight(peer, 1); err != nil { + if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -30,20 +32,23 @@ func testSetup(t *testing.T) (*vertex.TestManager, *common.SenderTest, common.Co sender.CantSendGetAcceptedFrontier = false isBootstrapped := false - subnet := &common.SubnetTest{ - T: t, - IsBootstrappedF: func() bool { return isBootstrapped }, - BootstrappedF: func(ids.ID) { isBootstrapped = true }, + bootstrapTracker := &common.BootstrapTrackerTest{ + T: t, + IsBootstrappedF: func() bool { + return isBootstrapped + }, + BootstrappedF: func(ids.ID) { + isBootstrapped = true + }, } commonConfig := common.Config{ Ctx: snow.DefaultConsensusContextTest(), - Validators: peers, 
Beacons: peers, SampleK: peers.Len(), Alpha: peers.Weight()/2 + 1, Sender: sender, - Subnet: subnet, + BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, @@ -72,7 +77,7 @@ func TestAcceptedFrontier(t *testing.T) { t.Fatal("Unexpected get handler") } - manager.EdgeF = func() []ids.ID { + manager.EdgeF = func(context.Context) []ids.ID { return []ids.ID{ vtxID0, vtxID1, @@ -80,15 +85,15 @@ func TestAcceptedFrontier(t *testing.T) { } var accepted []ids.ID - sender.SendAcceptedFrontierF = func(_ ids.NodeID, _ uint32, frontier []ids.ID) { + sender.SendAcceptedFrontierF = func(_ context.Context, _ ids.NodeID, _ uint32, frontier []ids.ID) { accepted = frontier } - if err := bs.GetAcceptedFrontier(ids.EmptyNodeID, 0); err != nil { + if err := bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0); err != nil { t.Fatal(err) } - acceptedSet := ids.Set{} + acceptedSet := set.Set[ids.ID]{} acceptedSet.Add(accepted...) manager.EdgeF = nil @@ -131,7 +136,7 @@ func TestFilterAccepted(t *testing.T) { vtxIDs := []ids.ID{vtxID0, vtxID1, vtxID2} - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtxID0: return vtx0, nil @@ -145,15 +150,15 @@ func TestFilterAccepted(t *testing.T) { } var accepted []ids.ID - sender.SendAcceptedF = func(_ ids.NodeID, _ uint32, frontier []ids.ID) { + sender.SendAcceptedF = func(_ context.Context, _ ids.NodeID, _ uint32, frontier []ids.ID) { accepted = frontier } - if err := bs.GetAccepted(ids.EmptyNodeID, 0, vtxIDs); err != nil { + if err := bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, vtxIDs); err != nil { t.Fatal(err) } - acceptedSet := ids.Set{} + acceptedSet := set.Set[ids.ID]{} acceptedSet.Add(accepted...) 
manager.GetVtxF = nil diff --git a/avalanchego/snow/engine/avalanche/issuer.go b/avalanchego/snow/engine/avalanche/issuer.go index 460a0170..b777e4df 100644 --- a/avalanchego/snow/engine/avalanche/issuer.go +++ b/avalanchego/snow/engine/avalanche/issuer.go @@ -1,15 +1,19 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( + "context" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" ) // issuer issues [vtx] into consensus after its dependencies are met. @@ -17,34 +21,34 @@ type issuer struct { t *Transitive vtx avalanche.Vertex issued, abandoned bool - vtxDeps, txDeps ids.Set + vtxDeps, txDeps set.Set[ids.ID] } // Register that a vertex we were waiting on has been issued to consensus. -func (i *issuer) FulfillVtx(id ids.ID) { +func (i *issuer) FulfillVtx(ctx context.Context, id ids.ID) { i.vtxDeps.Remove(id) - i.Update() + i.Update(ctx) } // Register that a transaction we were waiting on has been issued to consensus. 
-func (i *issuer) FulfillTx(id ids.ID) { +func (i *issuer) FulfillTx(ctx context.Context, id ids.ID) { i.txDeps.Remove(id) - i.Update() + i.Update(ctx) } // Abandon this attempt to issue -func (i *issuer) Abandon() { +func (i *issuer) Abandon(ctx context.Context) { if !i.abandoned { vtxID := i.vtx.ID() i.t.pending.Remove(vtxID) i.abandoned = true - i.t.vtxBlocked.Abandon(vtxID) // Inform vertices waiting on this vtx that it won't be issued + i.t.vtxBlocked.Abandon(ctx, vtxID) // Inform vertices waiting on this vtx that it won't be issued i.t.metrics.blockerVtxs.Set(float64(i.t.vtxBlocked.Len())) } } // Issue the poll when all dependencies are met -func (i *issuer) Update() { +func (i *issuer) Update(ctx context.Context) { if i.abandoned || i.issued || i.vtxDeps.Len() != 0 || i.txDeps.Len() != 0 || i.t.Consensus.VertexIssued(i.vtx) || i.t.errs.Errored() { return } @@ -55,7 +59,7 @@ func (i *issuer) Update() { i.issued = true // check stop vertex validity - err := i.vtx.Verify() + err := i.vtx.Verify(ctx) if err != nil { if i.vtx.HasWhitelist() { // do not update "i.t.errs" since it's only used for critical errors @@ -73,27 +77,27 @@ func (i *issuer) Update() { ) } - i.t.vtxBlocked.Abandon(vtxID) + i.t.vtxBlocked.Abandon(ctx, vtxID) return } i.t.pending.Remove(vtxID) // Remove from set of vertices waiting to be issued. 
// Make sure the transactions in this vertex are valid - txs, err := i.vtx.Txs() + txs, err := i.vtx.Txs(ctx) if err != nil { i.t.errs.Add(err) return } validTxs := make([]snowstorm.Tx, 0, len(txs)) for _, tx := range txs { - if err := tx.Verify(); err != nil { + if err := tx.Verify(ctx); err != nil { txID := tx.ID() i.t.Ctx.Log.Debug("transaction verification failed", zap.Stringer("txID", txID), zap.Error(err), ) - i.t.txBlocked.Abandon(txID) + i.t.txBlocked.Abandon(ctx, txID) } else { validTxs = append(validTxs, tx) } @@ -106,10 +110,10 @@ func (i *issuer) Update() { zap.String("reason", "transaction verification failed"), zap.Stringer("vtxID", vtxID), ) - if _, err := i.t.batch(validTxs, batchOption{}); err != nil { + if _, err := i.t.batch(ctx, validTxs, batchOption{}); err != nil { i.t.errs.Add(err) } - i.t.vtxBlocked.Abandon(vtxID) + i.t.vtxBlocked.Abandon(ctx, vtxID) i.t.metrics.blockerVtxs.Set(float64(i.t.vtxBlocked.Len())) return } @@ -119,14 +123,13 @@ func (i *issuer) Update() { ) // Add this vertex to consensus. - if err := i.t.Consensus.Add(i.vtx); err != nil { + if err := i.t.Consensus.Add(ctx, i.vtx); err != nil { i.t.errs.Add(err) return } // Issue a poll for this vertex. - p := i.t.Consensus.Parameters() - vdrs, err := i.t.Validators.Sample(p.K) // Validators to sample + vdrIDs, err := i.t.Validators.Sample(i.t.Params.K) // Validators to sample if err != nil { i.t.Ctx.Log.Error("dropped query", zap.String("reason", "insufficient number of validators"), @@ -134,10 +137,8 @@ func (i *issuer) Update() { ) } - vdrBag := ids.NodeIDBag{} // Validators to sample repr. as a set - for _, vdr := range vdrs { - vdrBag.Add(vdr.ID()) - } + vdrBag := bag.Bag[ids.NodeID]{} // Validators to sample repr. as a set + vdrBag.Add(vdrIDs...) 
i.t.RequestID++ if err == nil && i.t.polls.Add(i.t.RequestID, vdrBag) { @@ -146,6 +147,7 @@ func (i *issuer) Update() { numPushTo = i.t.Params.MixedQueryNumPushNonVdr } common.SendMixedQuery( + ctx, i.t.Sender, vdrBag.List(), // Note that this doesn't contain duplicates; length may be < k numPushTo, @@ -156,9 +158,9 @@ func (i *issuer) Update() { } // Notify vertices waiting on this one that it (and its transactions) have been issued. - i.t.vtxBlocked.Fulfill(vtxID) + i.t.vtxBlocked.Fulfill(ctx, vtxID) for _, tx := range txs { - i.t.txBlocked.Fulfill(tx.ID()) + i.t.txBlocked.Fulfill(ctx, tx.ID()) } i.t.metrics.blockerTxs.Set(float64(i.t.txBlocked.Len())) i.t.metrics.blockerVtxs.Set(float64(i.t.vtxBlocked.Len())) @@ -171,19 +173,41 @@ func (i *issuer) Update() { } // Issue a repoll - i.t.repoll() + i.t.repoll(ctx) } type vtxIssuer struct{ i *issuer } -func (vi *vtxIssuer) Dependencies() ids.Set { return vi.i.vtxDeps } -func (vi *vtxIssuer) Fulfill(id ids.ID) { vi.i.FulfillVtx(id) } -func (vi *vtxIssuer) Abandon(ids.ID) { vi.i.Abandon() } -func (vi *vtxIssuer) Update() { vi.i.Update() } +func (vi *vtxIssuer) Dependencies() set.Set[ids.ID] { + return vi.i.vtxDeps +} + +func (vi *vtxIssuer) Fulfill(ctx context.Context, id ids.ID) { + vi.i.FulfillVtx(ctx, id) +} + +func (vi *vtxIssuer) Abandon(ctx context.Context, _ ids.ID) { + vi.i.Abandon(ctx) +} + +func (vi *vtxIssuer) Update(ctx context.Context) { + vi.i.Update(ctx) +} type txIssuer struct{ i *issuer } -func (ti *txIssuer) Dependencies() ids.Set { return ti.i.txDeps } -func (ti *txIssuer) Fulfill(id ids.ID) { ti.i.FulfillTx(id) } -func (ti *txIssuer) Abandon(ids.ID) { ti.i.Abandon() } -func (ti *txIssuer) Update() { ti.i.Update() } +func (ti *txIssuer) Dependencies() set.Set[ids.ID] { + return ti.i.txDeps +} + +func (ti *txIssuer) Fulfill(ctx context.Context, id ids.ID) { + ti.i.FulfillTx(ctx, id) +} + +func (ti *txIssuer) Abandon(ctx context.Context, _ ids.ID) { + ti.i.Abandon(ctx) +} + +func (ti *txIssuer) 
Update(ctx context.Context) { + ti.i.Update(ctx) +} diff --git a/avalanchego/snow/engine/avalanche/metrics.go b/avalanchego/snow/engine/avalanche/metrics.go index 939af937..cae97abc 100644 --- a/avalanchego/snow/engine/avalanche/metrics.go +++ b/avalanchego/snow/engine/avalanche/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche diff --git a/avalanchego/snow/engine/avalanche/mocks/engine.go b/avalanchego/snow/engine/avalanche/mocks/engine.go deleted file mode 100644 index 291f35ca..00000000 --- a/avalanchego/snow/engine/avalanche/mocks/engine.go +++ /dev/null @@ -1,580 +0,0 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. - -package mocks - -import ( - consensusavalanche "github.com/ava-labs/avalanchego/snow/consensus/avalanche" - common "github.com/ava-labs/avalanchego/snow/engine/common" - - ids "github.com/ava-labs/avalanchego/ids" - - mock "github.com/stretchr/testify/mock" - - snow "github.com/ava-labs/avalanchego/snow" - - testing "testing" - - time "time" - - version "github.com/ava-labs/avalanchego/version" -) - -// Engine is an autogenerated mock type for the Engine type -type Engine struct { - mock.Mock -} - -// Accepted provides a mock function with given fields: validatorID, requestID, containerIDs -func (_m *Engine) Accepted(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - ret := _m.Called(validatorID, requestID, containerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AcceptedFrontier provides a mock function with given fields: validatorID, requestID, containerIDs -func (_m *Engine) AcceptedFrontier(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - ret := _m.Called(validatorID, 
requestID, containerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AcceptedStateSummary provides a mock function with given fields: validatorID, requestID, summaryIDs -func (_m *Engine) AcceptedStateSummary(validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { - ret := _m.Called(validatorID, requestID, summaryIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, summaryIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Ancestors provides a mock function with given fields: validatorID, requestID, containers -func (_m *Engine) Ancestors(validatorID ids.NodeID, requestID uint32, containers [][]byte) error { - ret := _m.Called(validatorID, requestID, containers) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, [][]byte) error); ok { - r0 = rf(validatorID, requestID, containers) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppGossip provides a mock function with given fields: nodeID, msg -func (_m *Engine) AppGossip(nodeID ids.NodeID, msg []byte) error { - ret := _m.Called(nodeID, msg) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, []byte) error); ok { - r0 = rf(nodeID, msg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppRequest provides a mock function with given fields: nodeID, requestID, deadline, request -func (_m *Engine) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - ret := _m.Called(nodeID, requestID, deadline, request) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, time.Time, []byte) error); ok { - r0 = rf(nodeID, requestID, deadline, request) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppRequestFailed provides a mock function with given fields: nodeID, requestID 
-func (_m *Engine) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { - ret := _m.Called(nodeID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(nodeID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppResponse provides a mock function with given fields: nodeID, requestID, response -func (_m *Engine) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { - ret := _m.Called(nodeID, requestID, response) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []byte) error); ok { - r0 = rf(nodeID, requestID, response) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Chits provides a mock function with given fields: validatorID, requestID, containerIDs -func (_m *Engine) Chits(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - ret := _m.Called(validatorID, requestID, containerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Connected provides a mock function with given fields: id, nodeVersion -func (_m *Engine) Connected(id ids.NodeID, nodeVersion *version.Application) error { - ret := _m.Called(id, nodeVersion) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, *version.Application) error); ok { - r0 = rf(id, nodeVersion) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Context provides a mock function with given fields: -func (_m *Engine) Context() *snow.ConsensusContext { - ret := _m.Called() - - var r0 *snow.ConsensusContext - if rf, ok := ret.Get(0).(func() *snow.ConsensusContext); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*snow.ConsensusContext) - } - } - - return r0 -} - -// Disconnected provides a mock function with given fields: id -func (_m *Engine) Disconnected(id ids.NodeID) error { - ret := _m.Called(id) - 
- var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID) error); ok { - r0 = rf(id) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Get provides a mock function with given fields: validatorID, requestID, containerID -func (_m *Engine) Get(validatorID ids.NodeID, requestID uint32, containerID ids.ID) error { - ret := _m.Called(validatorID, requestID, containerID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAccepted provides a mock function with given fields: validatorID, requestID, containerIDs -func (_m *Engine) GetAccepted(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - ret := _m.Called(validatorID, requestID, containerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetAcceptedFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedFrontier provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetAcceptedFrontier(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedFrontierFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetAcceptedFrontierFailed(validatorID ids.NodeID, requestID uint32) error { - ret 
:= _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedStateSummary provides a mock function with given fields: validatorID, requestID, keys -func (_m *Engine) GetAcceptedStateSummary(validatorID ids.NodeID, requestID uint32, keys []uint64) error { - ret := _m.Called(validatorID, requestID, keys) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []uint64) error); ok { - r0 = rf(validatorID, requestID, keys) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedStateSummaryFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetAcceptedStateSummaryFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAncestors provides a mock function with given fields: validatorID, requestID, containerID -func (_m *Engine) GetAncestors(validatorID ids.NodeID, requestID uint32, containerID ids.ID) error { - ret := _m.Called(validatorID, requestID, containerID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAncestorsFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetAncestorsFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetFailed provides a mock function with given fields: validatorID, requestID -func (_m 
*Engine) GetFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetStateSummaryFrontier provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetStateSummaryFrontier(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetStateSummaryFrontierFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetStateSummaryFrontierFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetVM provides a mock function with given fields: -func (_m *Engine) GetVM() common.VM { - ret := _m.Called() - - var r0 common.VM - if rf, ok := ret.Get(0).(func() common.VM); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.VM) - } - } - - return r0 -} - -// GetVtx provides a mock function with given fields: vtxID -func (_m *Engine) GetVtx(vtxID ids.ID) (consensusavalanche.Vertex, error) { - ret := _m.Called(vtxID) - - var r0 consensusavalanche.Vertex - if rf, ok := ret.Get(0).(func(ids.ID) consensusavalanche.Vertex); ok { - r0 = rf(vtxID) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(consensusavalanche.Vertex) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(ids.ID) error); ok { - r1 = rf(vtxID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Gossip provides a mock function with given fields: -func (_m *Engine) Gossip() 
error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Halt provides a mock function with given fields: -func (_m *Engine) Halt() { - _m.Called() -} - -// HealthCheck provides a mock function with given fields: -func (_m *Engine) HealthCheck() (interface{}, error) { - ret := _m.Called() - - var r0 interface{} - if rf, ok := ret.Get(0).(func() interface{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Notify provides a mock function with given fields: _a0 -func (_m *Engine) Notify(_a0 common.Message) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Message) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// PullQuery provides a mock function with given fields: validatorID, requestID, containerID -func (_m *Engine) PullQuery(validatorID ids.NodeID, requestID uint32, containerID ids.ID) error { - ret := _m.Called(validatorID, requestID, containerID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// PushQuery provides a mock function with given fields: validatorID, requestID, container -func (_m *Engine) PushQuery(validatorID ids.NodeID, requestID uint32, container []byte) error { - ret := _m.Called(validatorID, requestID, container) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []byte) error); ok { - r0 = rf(validatorID, requestID, container) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Put provides a mock function with given fields: validatorID, requestID, container -func (_m *Engine) Put(validatorID ids.NodeID, requestID 
uint32, container []byte) error { - ret := _m.Called(validatorID, requestID, container) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []byte) error); ok { - r0 = rf(validatorID, requestID, container) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// QueryFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) QueryFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Shutdown provides a mock function with given fields: -func (_m *Engine) Shutdown() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Start provides a mock function with given fields: startReqID -func (_m *Engine) Start(startReqID uint32) error { - ret := _m.Called(startReqID) - - var r0 error - if rf, ok := ret.Get(0).(func(uint32) error); ok { - r0 = rf(startReqID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StateSummaryFrontier provides a mock function with given fields: validatorID, requestID, summary -func (_m *Engine) StateSummaryFrontier(validatorID ids.NodeID, requestID uint32, summary []byte) error { - ret := _m.Called(validatorID, requestID, summary) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []byte) error); ok { - r0 = rf(validatorID, requestID, summary) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Timeout provides a mock function with given fields: -func (_m *Engine) Timeout() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewEngine creates a new instance of Engine. 
It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewEngine(t testing.TB) *Engine { - mock := &Engine{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/avalanchego/snow/engine/avalanche/state/prefixed_state.go b/avalanchego/snow/engine/avalanche/state/prefixed_state.go index 13a328ea..5fac890b 100644 --- a/avalanchego/snow/engine/avalanche/state/prefixed_state.go +++ b/avalanchego/snow/engine/avalanche/state/prefixed_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -21,28 +21,29 @@ var uniqueEdgeID = ids.Empty.Prefix(edgeID) type prefixedState struct { state *state - vtx, status cache.Cacher - uniqueVtx cache.Deduplicator + vtx, status cache.Cacher[ids.ID, ids.ID] + uniqueVtx cache.Deduplicator[ids.ID, *uniqueVertex] } func newPrefixedState(state *state, idCacheSizes int) *prefixedState { return &prefixedState{ state: state, - vtx: &cache.LRU{Size: idCacheSizes}, - status: &cache.LRU{Size: idCacheSizes}, - uniqueVtx: &cache.EvictableLRU{Size: idCacheSizes}, + vtx: &cache.LRU[ids.ID, ids.ID]{Size: idCacheSizes}, + status: &cache.LRU[ids.ID, ids.ID]{Size: idCacheSizes}, + uniqueVtx: &cache.EvictableLRU[ids.ID, *uniqueVertex]{Size: idCacheSizes}, } } func (s *prefixedState) UniqueVertex(vtx *uniqueVertex) *uniqueVertex { - return s.uniqueVtx.Deduplicate(vtx).(*uniqueVertex) + return s.uniqueVtx.Deduplicate(vtx) } func (s *prefixedState) Vertex(id ids.ID) vertex.StatelessVertex { - var vID ids.ID - if cachedVtxIDIntf, found := s.vtx.Get(id); found { - vID = cachedVtxIDIntf.(ids.ID) - } else { + var ( + vID ids.ID + ok bool + ) + if vID, ok = s.vtx.Get(id); !ok { vID = id.Prefix(vtxID) s.vtx.Put(id, vID) } @@ -51,11 +52,12 @@ func (s *prefixedState) Vertex(id ids.ID) 
vertex.StatelessVertex { } func (s *prefixedState) SetVertex(vtx vertex.StatelessVertex) error { - rawVertexID := vtx.ID() - var vID ids.ID - if cachedVtxIDIntf, found := s.vtx.Get(rawVertexID); found { - vID = cachedVtxIDIntf.(ids.ID) - } else { + var ( + rawVertexID = vtx.ID() + vID ids.ID + ok bool + ) + if vID, ok = s.vtx.Get(rawVertexID); !ok { vID = rawVertexID.Prefix(vtxID) s.vtx.Put(rawVertexID, vID) } @@ -64,10 +66,11 @@ func (s *prefixedState) SetVertex(vtx vertex.StatelessVertex) error { } func (s *prefixedState) Status(id ids.ID) choices.Status { - var sID ids.ID - if cachedStatusIDIntf, found := s.status.Get(id); found { - sID = cachedStatusIDIntf.(ids.ID) - } else { + var ( + sID ids.ID + ok bool + ) + if sID, ok = s.status.Get(id); !ok { sID = id.Prefix(vtxStatusID) s.status.Put(id, sID) } @@ -76,10 +79,11 @@ func (s *prefixedState) Status(id ids.ID) choices.Status { } func (s *prefixedState) SetStatus(id ids.ID, status choices.Status) error { - var sID ids.ID - if cachedStatusIDIntf, found := s.status.Get(id); found { - sID = cachedStatusIDIntf.(ids.ID) - } else { + var ( + sID ids.ID + ok bool + ) + if sID, ok = s.status.Get(id); !ok { sID = id.Prefix(vtxStatusID) s.status.Put(id, sID) } @@ -87,7 +91,9 @@ func (s *prefixedState) SetStatus(id ids.ID, status choices.Status) error { return s.state.SetStatus(sID, status) } -func (s *prefixedState) Edge() []ids.ID { return s.state.Edge(uniqueEdgeID) } +func (s *prefixedState) Edge() []ids.ID { + return s.state.Edge(uniqueEdgeID) +} func (s *prefixedState) SetEdge(frontier []ids.ID) error { return s.state.SetEdge(uniqueEdgeID, frontier) diff --git a/avalanchego/snow/engine/avalanche/state/serializer.go b/avalanchego/snow/engine/avalanche/state/serializer.go index 6468b073..274f3ea0 100644 --- a/avalanchego/snow/engine/avalanche/state/serializer.go +++ b/avalanchego/snow/engine/avalanche/state/serializer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Package state manages the meta-data required by consensus for an avalanche @@ -6,6 +6,7 @@ package state import ( + "context" "errors" "time" @@ -19,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" ) const ( @@ -31,27 +33,27 @@ var ( errWrongChainID = errors.New("wrong ChainID in vertex") ) -var _ vertex.Manager = &Serializer{} +var _ vertex.Manager = (*Serializer)(nil) // Serializer manages the state of multiple vertices type Serializer struct { SerializerConfig versionDB *versiondb.Database state *prefixedState - edge ids.Set + edge set.Set[ids.ID] } type SerializerConfig struct { - ChainID ids.ID - VM vertex.DAGVM - DB database.Database - Log logging.Logger - XChainMigrationTime time.Time + ChainID ids.ID + VM vertex.DAGVM + DB database.Database + Log logging.Logger + CortinaTime time.Time } func NewSerializer(config SerializerConfig) vertex.Manager { versionDB := versiondb.New(config.DB) - dbCache := &cache.LRU{Size: dbCacheSize} + dbCache := &cache.LRU[ids.ID, any]{Size: dbCacheSize} s := Serializer{ SerializerConfig: config, versionDB: versionDB, @@ -70,19 +72,27 @@ func NewSerializer(config SerializerConfig) vertex.Manager { return &s } -func (s *Serializer) ParseVtx(b []byte) (avalanche.Vertex, error) { - return newUniqueVertex(s, b) +func (s *Serializer) ParseVtx(ctx context.Context, b []byte) (avalanche.Vertex, error) { + return newUniqueVertex(ctx, s, b) } -func (s *Serializer) BuildVtx(parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { - return s.buildVtx(parentIDs, txs, false) +func (s *Serializer) BuildVtx( + ctx context.Context, + parentIDs []ids.ID, + txs []snowstorm.Tx, +) (avalanche.Vertex, error) { + return s.buildVtx(ctx, parentIDs, txs, false) } 
-func (s *Serializer) BuildStopVtx(parentIDs []ids.ID) (avalanche.Vertex, error) { - return s.buildVtx(parentIDs, nil, true) +func (s *Serializer) BuildStopVtx( + ctx context.Context, + parentIDs []ids.ID, +) (avalanche.Vertex, error) { + return s.buildVtx(ctx, parentIDs, nil, true) } func (s *Serializer) buildVtx( + ctx context.Context, parentIDs []ids.ID, txs []snowstorm.Tx, stopVtx bool, @@ -98,7 +108,7 @@ func (s *Serializer) buildVtx( if err != nil { return nil, err } - height = math.Max64(height, childHeight) + height = math.Max(height, childHeight) } var ( @@ -133,14 +143,16 @@ func (s *Serializer) buildVtx( } // setVertex handles the case where this vertex already exists even // though we just made it - return uVtx, uVtx.setVertex(vtx) + return uVtx, uVtx.setVertex(ctx, vtx) } -func (s *Serializer) GetVtx(vtxID ids.ID) (avalanche.Vertex, error) { +func (s *Serializer) GetVtx(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { return s.getUniqueVertex(vtxID) } -func (s *Serializer) Edge() []ids.ID { return s.edge.List() } +func (s *Serializer) Edge(context.Context) []ids.ID { + return s.edge.List() +} func (s *Serializer) parseVertex(b []byte) (vertex.StatelessVertex, error) { vtx, err := vertex.Parse(b) @@ -164,8 +176,8 @@ func (s *Serializer) getUniqueVertex(vtxID ids.ID) (*uniqueVertex, error) { return vtx, nil } -func (s *Serializer) StopVertexAccepted() (bool, error) { - edge := s.Edge() +func (s *Serializer) StopVertexAccepted(ctx context.Context) (bool, error) { + edge := s.Edge(ctx) if len(edge) != 1 { return false, nil } diff --git a/avalanchego/snow/engine/avalanche/state/state.go b/avalanchego/snow/engine/avalanche/state/state.go index 82c0aaeb..54bb727c 100644 --- a/avalanchego/snow/engine/avalanche/state/state.go +++ b/avalanchego/snow/engine/avalanche/state/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package state @@ -20,7 +20,7 @@ type state struct { serializer *Serializer log logging.Logger - dbCache cache.Cacher + dbCache cache.Cacher[ids.ID, any] db database.Database } @@ -28,18 +28,13 @@ type state struct { // Returns nil if it's not found. // TODO this should return an error func (s *state) Vertex(id ids.ID) vertex.StatelessVertex { - var ( - vtx vertex.StatelessVertex - bytes []byte - err error - ) - if vtxIntf, found := s.dbCache.Get(id); found { - vtx, _ = vtxIntf.(vertex.StatelessVertex) + vtx, _ := vtxIntf.(vertex.StatelessVertex) return vtx } - if bytes, err = s.db.Get(id[:]); err != nil { + bytes, err := s.db.Get(id[:]) + if err != nil { s.log.Verbo("failed to get vertex from database", zap.Binary("key", id[:]), zap.Error(err), @@ -48,7 +43,8 @@ func (s *state) Vertex(id ids.ID) vertex.StatelessVertex { return nil } - if vtx, err = s.serializer.parseVertex(bytes); err != nil { + vtx, err := s.serializer.parseVertex(bytes) + if err != nil { s.log.Error("failed parsing saved vertex", zap.Binary("key", id[:]), zap.Binary("vertex", bytes), diff --git a/avalanchego/snow/engine/avalanche/state/unique_vertex.go b/avalanchego/snow/engine/avalanche/state/unique_vertex.go index 791a9df7..e3a41ba4 100644 --- a/avalanchego/snow/engine/avalanche/state/unique_vertex.go +++ b/avalanchego/snow/engine/avalanche/state/unique_vertex.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state import ( + "context" "errors" "fmt" "strings" @@ -17,11 +18,12 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/set" ) var ( - _ cache.Evictable = &uniqueVertex{} - _ avalanche.Vertex = &uniqueVertex{} + _ cache.Evictable[ids.ID] = (*uniqueVertex)(nil) + _ avalanche.Vertex = (*uniqueVertex)(nil) ) // uniqueVertex acts as a cache for vertices in the database. @@ -41,7 +43,7 @@ type uniqueVertex struct { // newUniqueVertex returns a uniqueVertex instance from [b] by checking the cache // and then parsing the vertex bytes on a cache miss. -func newUniqueVertex(s *Serializer, b []byte) (*uniqueVertex, error) { +func newUniqueVertex(ctx context.Context, s *Serializer, b []byte) (*uniqueVertex, error) { vtx := &uniqueVertex{ id: hashing.ComputeHash256Array(b), serializer: s, @@ -65,7 +67,7 @@ func newUniqueVertex(s *Serializer, b []byte) (*uniqueVertex, error) { unparsedTxs := innerVertex.Txs() txs := make([]snowstorm.Tx, len(unparsedTxs)) for i, txBytes := range unparsedTxs { - tx, err := vtx.serializer.VM.ParseTx(txBytes) + tx, err := vtx.serializer.VM.ParseTx(ctx, txBytes) if err != nil { return nil, err } @@ -130,7 +132,7 @@ func (vtx *uniqueVertex) Evict() { } } -func (vtx *uniqueVertex) setVertex(innerVtx vertex.StatelessVertex) error { +func (vtx *uniqueVertex) setVertex(ctx context.Context, innerVtx vertex.StatelessVertex) error { vtx.shallowRefresh() vtx.v.vtx = innerVtx @@ -138,7 +140,7 @@ func (vtx *uniqueVertex) setVertex(innerVtx vertex.StatelessVertex) error { return nil } - if _, err := vtx.Txs(); err != nil { + if _, err := vtx.Txs(ctx); err != nil { return err } @@ -165,10 +167,15 @@ func (vtx *uniqueVertex) setStatus(status choices.Status) error { return vtx.serializer.state.SetStatus(vtx.ID(), status) } -func (vtx *uniqueVertex) ID() ids.ID { return vtx.id } -func (vtx 
*uniqueVertex) Key() interface{} { return vtx.id } +func (vtx *uniqueVertex) ID() ids.ID { + return vtx.id +} + +func (vtx *uniqueVertex) Key() ids.ID { + return vtx.id +} -func (vtx *uniqueVertex) Accept() error { +func (vtx *uniqueVertex) Accept(ctx context.Context) error { if err := vtx.setStatus(choices.Accepted); err != nil { return err } @@ -183,7 +190,7 @@ func (vtx *uniqueVertex) Accept() error { vtx.serializer.edge.Remove(parent.ID()) } - if err := vtx.serializer.state.SetEdge(vtx.serializer.Edge()); err != nil { + if err := vtx.serializer.state.SetEdge(vtx.serializer.Edge(ctx)); err != nil { return fmt.Errorf("failed to set edge while accepting vertex %s due to %w", vtx.id, err) } @@ -194,7 +201,7 @@ func (vtx *uniqueVertex) Accept() error { return vtx.serializer.versionDB.Commit() } -func (vtx *uniqueVertex) Reject() error { +func (vtx *uniqueVertex) Reject(context.Context) error { if err := vtx.setStatus(choices.Rejected); err != nil { return err } @@ -209,7 +216,10 @@ func (vtx *uniqueVertex) Reject() error { // TODO: run performance test to see if shallow refreshing // (which will mean that refresh must be called in Bytes and Verify) // improves performance -func (vtx *uniqueVertex) Status() choices.Status { vtx.refresh(); return vtx.v.status } +func (vtx *uniqueVertex) Status() choices.Status { + vtx.refresh() + return vtx.v.status +} func (vtx *uniqueVertex) Parents() ([]avalanche.Vertex, error) { vtx.refresh() @@ -242,7 +252,7 @@ var ( // "uniqueVertex" itself implements "Verify" regardless of whether the underlying vertex // is stop vertex or not. Called before issuing the vertex to the consensus. // No vertex should ever be able to refer to a stop vertex in its transitive closure. 
-func (vtx *uniqueVertex) Verify() error { +func (vtx *uniqueVertex) Verify(ctx context.Context) error { // first verify the underlying stateless vertex if err := vtx.v.vtx.Verify(); err != nil { return err @@ -254,7 +264,7 @@ func (vtx *uniqueVertex) Verify() error { if vtx.time != nil { now = vtx.time() } - allowed := vtx.serializer.XChainMigrationTime + allowed := vtx.serializer.CortinaTime if now.Before(allowed) { return errStopVertexNotAllowedTimestamp } @@ -262,7 +272,7 @@ func (vtx *uniqueVertex) Verify() error { // MUST error if stop vertex has already been accepted (can't be accepted twice) // regardless of whether the underlying vertex is stop vertex or not - stopVtxAccepted, err := vtx.serializer.StopVertexAccepted() + stopVtxAccepted, err := vtx.serializer.StopVertexAccepted(ctx) if err != nil { return err } @@ -294,11 +304,11 @@ func (vtx *uniqueVertex) Verify() error { // 1. check the edge of the transitive paths refers to the accepted frontier // 2. check dependencies of all txs must be subset of transitive paths queue := []avalanche.Vertex{vtx} - visitedVtx := ids.NewSet(0) + visitedVtx := set.NewSet[ids.ID](0) - acceptedFrontier := ids.NewSet(0) - transitivePaths := ids.NewSet(0) - dependencies := ids.NewSet(0) + acceptedFrontier := set.NewSet[ids.ID](0) + transitivePaths := set.NewSet[ids.ID](0) + dependencies := set.NewSet[ids.ID](0) for len(queue) > 0 { // perform BFS cur := queue[0] queue = queue[1:] @@ -319,7 +329,7 @@ func (vtx *uniqueVertex) Verify() error { visitedVtx.Add(curID) transitivePaths.Add(curID) - txs, err := cur.Txs() + txs, err := cur.Txs(ctx) if err != nil { return err } @@ -344,8 +354,8 @@ func (vtx *uniqueVertex) Verify() error { queue = append(queue, parents...) } - acceptedEdges := ids.NewSet(0) - acceptedEdges.Add(vtx.serializer.Edge()...) + acceptedEdges := set.NewSet[ids.ID](0) + acceptedEdges.Add(vtx.serializer.Edge(ctx)...) 
// stop vertex should be able to reach all IDs // that are returned by the "Edge" @@ -369,7 +379,7 @@ func (vtx *uniqueVertex) HasWhitelist() bool { // "uniqueVertex" itself implements "Whitelist" traversal iff its underlying // "vertex.StatelessVertex" is marked as a stop vertex. -func (vtx *uniqueVertex) Whitelist() (ids.Set, error) { +func (vtx *uniqueVertex) Whitelist(ctx context.Context) (set.Set[ids.ID], error) { if !vtx.v.vtx.StopVertex() { return nil, nil } @@ -378,8 +388,8 @@ func (vtx *uniqueVertex) Whitelist() (ids.Set, error) { // represents all processing transaction IDs transitively referenced by the // vertex queue := []avalanche.Vertex{vtx} - whitlist := ids.NewSet(0) - visitedVtx := ids.NewSet(0) + whitlist := set.NewSet[ids.ID](0) + visitedVtx := set.NewSet[ids.ID](0) for len(queue) > 0 { cur := queue[0] queue = queue[1:] @@ -395,7 +405,7 @@ func (vtx *uniqueVertex) Whitelist() (ids.Set, error) { } visitedVtx.Add(curID) - txs, err := cur.Txs() + txs, err := cur.Txs(ctx) if err != nil { return nil, err } @@ -433,7 +443,7 @@ func (vtx *uniqueVertex) Epoch() (uint32, error) { return vtx.v.vtx.Epoch(), nil } -func (vtx *uniqueVertex) Txs() ([]snowstorm.Tx, error) { +func (vtx *uniqueVertex) Txs(ctx context.Context) ([]snowstorm.Tx, error) { vtx.refresh() if vtx.v.vtx == nil { @@ -444,7 +454,7 @@ func (vtx *uniqueVertex) Txs() ([]snowstorm.Tx, error) { if len(txs) != len(vtx.v.txs) { vtx.v.txs = make([]snowstorm.Tx, len(txs)) for i, txBytes := range txs { - tx, err := vtx.serializer.VM.ParseTx(txBytes) + tx, err := vtx.serializer.VM.ParseTx(ctx, txBytes) if err != nil { return nil, err } @@ -455,7 +465,9 @@ func (vtx *uniqueVertex) Txs() ([]snowstorm.Tx, error) { return vtx.v.txs, nil } -func (vtx *uniqueVertex) Bytes() []byte { return vtx.v.vtx.Bytes() } +func (vtx *uniqueVertex) Bytes() []byte { + return vtx.v.vtx.Bytes() +} func (vtx *uniqueVertex) String() string { sb := strings.Builder{} @@ -465,7 +477,7 @@ func (vtx *uniqueVertex) String() string 
{ sb.WriteString(fmt.Sprintf("Vertex(ID = %s, Error=error while retrieving vertex parents: %s)", vtx.ID(), err)) return sb.String() } - txs, err := vtx.Txs() + txs, err := vtx.Txs(context.Background()) if err != nil { sb.WriteString(fmt.Sprintf("Vertex(ID = %s, Error=error while retrieving vertex txs: %s)", vtx.ID(), err)) return sb.String() diff --git a/avalanchego/snow/engine/avalanche/state/unique_vertex_test.go b/avalanchego/snow/engine/avalanche/state/unique_vertex_test.go index 959285cc..c77f2a50 100644 --- a/avalanchego/snow/engine/avalanche/state/unique_vertex_test.go +++ b/avalanchego/snow/engine/avalanche/state/unique_vertex_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state import ( "bytes" + "context" "errors" "testing" "time" @@ -15,11 +16,14 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" + "github.com/ava-labs/avalanchego/utils/compare" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/version" ) -func newTestSerializer(t *testing.T, parse func([]byte) (snowstorm.Tx, error)) *Serializer { +var errUnknownTx = errors.New("unknown tx") + +func newTestSerializer(t *testing.T, parse func(context.Context, []byte) (snowstorm.Tx, error)) *Serializer { vm := vertex.TestVM{} vm.T = t vm.Default(true) @@ -62,7 +66,7 @@ func TestUnknownUniqueVertexErrors(t *testing.T) { t.Fatalf("Height should have produced error for unknown vertex") } - _, err = uVtx.Txs() + _, err = uVtx.Txs(context.Background()) if err == nil { t.Fatalf("Txs should have produced an error for unknown vertex") } @@ -73,7 +77,7 @@ func TestUniqueVertexCacheHit(t *testing.T) { IDV: ids.ID{1}, }} - s := newTestSerializer(t, func(b []byte) (snowstorm.Tx, error) { + s := 
newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { if !bytes.Equal(b, []byte{0}) { t.Fatal("unknown tx") } @@ -99,7 +103,7 @@ func TestUniqueVertexCacheHit(t *testing.T) { id: id, serializer: s, } - if err := uVtx.setVertex(vtx); err != nil { + if err := uVtx.setVertex(context.Background(), vtx); err != nil { t.Fatalf("Failed to set vertex due to: %s", err) } @@ -127,7 +131,7 @@ func TestUniqueVertexCacheHit(t *testing.T) { t.Fatalf("Vertex height should have been %d, but was: %d", height, newHeight) } - txs, err := newUVtx.Txs() + txs, err := newUVtx.Txs(context.Background()) if err != nil { t.Fatalf("Error while retrieving txs of known vertex: %s", err) } @@ -160,7 +164,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { }, BytesV: txBytes, } - parseTx := func(b []byte) (snowstorm.Tx, error) { + parseTx := func(_ context.Context, b []byte) (snowstorm.Tx, error) { if bytes.Equal(txBytesParent, b) { return testTxParent, nil } @@ -174,7 +178,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { s := newTestSerializer(t, parseTx) uvtxParent := newTestUniqueVertex(t, s, nil, [][]byte{txBytesParent}, false) - if err := uvtxParent.Accept(); err != nil { + if err := uvtxParent.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -206,7 +210,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { } // Register cache hit - vtx, err := newUniqueVertex(s, vtxBytes) + vtx, err := newUniqueVertex(context.Background(), s, vtxBytes) if err != nil { t.Fatal(err) } @@ -215,7 +219,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { t.Fatalf("expected status to be processing, but found: %s", status) } - if err := vtx.Verify(); err != nil { + if err := vtx.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -239,7 +243,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { if err != nil { t.Fatalf("Fetching vertex height errored with: %s", err) } - vtxTxs, err := vtx.Txs() + vtxTxs, err := vtx.Txs(context.Background()) if err != nil { 
t.Fatalf("Fetching vertx txs errored with: %s", err) } @@ -267,7 +271,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { validateVertex(vtx, choices.Processing) // Check that a newly parsed vertex refreshed from the cache is valid - vtx, err = newUniqueVertex(s, vtxBytes) + vtx, err = newUniqueVertex(context.Background(), s, vtxBytes) if err != nil { t.Fatal(err) } @@ -284,7 +288,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { validateVertex(vtx, choices.Processing) s.state.uniqueVtx.Flush() - vtx, err = newUniqueVertex(s, vtxBytes) + vtx, err = newUniqueVertex(context.Background(), s, vtxBytes) if err != nil { t.Fatal(err) } @@ -303,14 +307,14 @@ func TestParseVertexWithIncorrectChainID(t *testing.T) { } vtxBytes := statelessVertex.Bytes() - s := newTestSerializer(t, func(b []byte) (snowstorm.Tx, error) { + s := newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { if bytes.Equal(b, []byte{1}) { return &snowstorm.TestTx{}, nil } - return nil, errors.New("invalid tx") + return nil, errUnknownTx }) - if _, err := s.ParseVtx(vtxBytes); err == nil { + if _, err := s.ParseVtx(context.Background(), vtxBytes); err == nil { t.Fatal("should have failed to parse the vertex due to invalid chainID") } } @@ -328,27 +332,25 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { } vtxBytes := statelessVertex.Bytes() - s := newTestSerializer(t, func(b []byte) (snowstorm.Tx, error) { + s := newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { switch { - case bytes.Equal(b, []byte{1}): - return nil, errors.New("invalid tx") case bytes.Equal(b, []byte{2}): return &snowstorm.TestTx{}, nil default: - return nil, errors.New("invalid tx") + return nil, errUnknownTx } }) - if _, err := s.ParseVtx(vtxBytes); err == nil { + if _, err := s.ParseVtx(context.Background(), vtxBytes); err == nil { t.Fatal("should have failed to parse the vertex due to invalid transactions") } - if _, err := s.ParseVtx(vtxBytes); err == nil { + if _, err := 
s.ParseVtx(context.Background(), vtxBytes); err == nil { t.Fatal("should have failed to parse the vertex after previously error on parsing invalid transactions") } id := hashing.ComputeHash256Array(vtxBytes) - if _, err := s.GetVtx(id); err == nil { + if _, err := s.GetVtx(context.Background(), id); err == nil { t.Fatal("should have failed to lookup invalid vertex after previously error on parsing invalid transactions") } @@ -363,7 +365,7 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { } childVtxBytes := childStatelessVertex.Bytes() - childVtx, err := s.ParseVtx(childVtxBytes) + childVtx, err := s.ParseVtx(context.Background(), childVtxBytes) if err != nil { t.Fatal(err) } @@ -390,11 +392,11 @@ func TestStopVertexWhitelistEmpty(t *testing.T) { ts := newTestSerializer(t, parseTx) uvtx := newTestUniqueVertex(t, ts, nil, [][]byte{{'a'}}, true) - if err := uvtx.Accept(); err != nil { + if err := uvtx.Accept(context.Background()); err != nil { t.Fatal(err) } - tsv, err := uvtx.Whitelist() + tsv, err := uvtx.Whitelist(context.Background()) if err != nil { t.Fatalf("failed to get whitelist %v", err) } @@ -418,18 +420,18 @@ func TestStopVertexWhitelistWithParents(t *testing.T) { // ⬉ ⬆ // stop_vertex_5 uvtx1 := newTestUniqueVertex(t, ts, nil, [][]byte{{'a'}, {'b'}}, false) - if err := uvtx1.Accept(); err != nil { + if err := uvtx1.Accept(context.Background()); err != nil { t.Fatal(err) } uvtx2 := newTestUniqueVertex(t, ts, nil, [][]byte{{'c'}, {'d'}}, false) - if err := uvtx2.Accept(); err != nil { + if err := uvtx2.Accept(context.Background()); err != nil { t.Fatal(err) } uvtx3 := newTestUniqueVertex(t, ts, []ids.ID{uvtx1.id, uvtx2.id}, [][]byte{{'e'}, {'f'}}, false) uvtx4 := newTestUniqueVertex(t, ts, []ids.ID{uvtx1.id, uvtx2.id}, [][]byte{{'g'}, {'h'}}, false) svtx5 := newTestUniqueVertex(t, ts, []ids.ID{uvtx3.id, uvtx4.id}, nil, true) - whitelist, err := svtx5.Whitelist() + whitelist, err := svtx5.Whitelist(context.Background()) if err != nil { t.Fatalf("failed 
to get whitelist %v", err) } @@ -443,7 +445,7 @@ func TestStopVertexWhitelistWithParents(t *testing.T) { uvtx4.ID(), svtx5.ID(), } - if !ids.UnsortedEquals(whitelist.List(), expectedWhitelist) { + if !compare.UnsortedEquals(whitelist.List(), expectedWhitelist) { t.Fatalf("whitelist expected %v, got %v", expectedWhitelist, whitelist) } } @@ -459,7 +461,7 @@ func TestStopVertexWhitelistWithLinearChain(t *testing.T) { ts := newTestSerializer(t, parseTx) uvtx5 := newTestUniqueVertex(t, ts, nil, [][]byte{{'e'}}, false) - if err := uvtx5.Accept(); err != nil { + if err := uvtx5.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -469,7 +471,7 @@ func TestStopVertexWhitelistWithLinearChain(t *testing.T) { uvtx1 := newTestUniqueVertex(t, ts, []ids.ID{uvtx2.id}, [][]byte{{'a'}}, false) uvtx0 := newTestUniqueVertex(t, ts, []ids.ID{uvtx1.id}, nil, true) - whitelist, err := uvtx0.Whitelist() + whitelist, err := uvtx0.Whitelist(context.Background()) if err != nil { t.Fatalf("failed to get whitelist %v", err) } @@ -485,7 +487,7 @@ func TestStopVertexWhitelistWithLinearChain(t *testing.T) { uvtx3.ID(), uvtx4.ID(), } - if !ids.UnsortedEquals(whitelist.List(), expectedWhitelist) { + if !compare.UnsortedEquals(whitelist.List(), expectedWhitelist) { t.Fatalf("whitelist expected %v, got %v", expectedWhitelist, whitelist) } } @@ -521,11 +523,11 @@ func TestStopVertexVerifyUnexpectedDependencies(t *testing.T) { txG.DependenciesV = []snowstorm.Tx{txEInf} uvtx1 := newTestUniqueVertex(t, ts, nil, [][]byte{{'a'}, {'b'}}, false) - if err := uvtx1.Accept(); err != nil { + if err := uvtx1.Accept(context.Background()); err != nil { t.Fatal(err) } uvtx2 := newTestUniqueVertex(t, ts, nil, [][]byte{{'c'}, {'d'}}, false) - if err := uvtx2.Accept(); err != nil { + if err := uvtx2.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -533,7 +535,7 @@ func TestStopVertexVerifyUnexpectedDependencies(t *testing.T) { uvtx4 := newTestUniqueVertex(t, ts, []ids.ID{uvtx1.id, uvtx2.id}, 
[][]byte{{'g'}, {'h'}}, false) svtx5 := newTestUniqueVertex(t, ts, []ids.ID{uvtx4.id}, nil, true) - if verr := svtx5.Verify(); !errors.Is(verr, errUnexpectedDependencyStopVtx) { + if verr := svtx5.Verify(context.Background()); !errors.Is(verr, errUnexpectedDependencyStopVtx) { t.Fatalf("stop vertex 'Verify' expected %v, got %v", errUnexpectedDependencyStopVtx, verr) } @@ -545,7 +547,7 @@ func TestStopVertexVerifyUnexpectedDependencies(t *testing.T) { } txE.StatusV = choices.Accepted svtx5 = newTestUniqueVertex(t, ts, []ids.ID{uvtx4.id}, nil, true) - if verr := svtx5.Verify(); verr != nil { + if verr := svtx5.Verify(context.Background()); verr != nil { t.Fatalf("stop vertex 'Verify' expected nil, got %v", verr) } @@ -560,20 +562,20 @@ func TestStopVertexVerifyUnexpectedDependencies(t *testing.T) { // ⬉ ⬆ // stop_vertex_5 svtx5 = newTestUniqueVertex(t, ts, []ids.ID{uvtx3.id, uvtx4.id}, nil, true) - if verr := svtx5.Verify(); verr != nil { + if verr := svtx5.Verify(context.Background()); verr != nil { t.Fatalf("stop vertex 'Verify' expected nil, got %v", verr) } - if err := uvtx3.Accept(); err != nil { + if err := uvtx3.Accept(context.Background()); err != nil { t.Fatal(err) } - if err := uvtx4.Accept(); err != nil { + if err := uvtx4.Accept(context.Background()); err != nil { t.Fatal(err) } - if err := svtx5.Accept(); err != nil { + if err := svtx5.Accept(context.Background()); err != nil { t.Fatal(err) } // stop vertex cannot be issued twice - if verr := svtx5.Verify(); !errors.Is(verr, errStopVertexAlreadyAccepted) { + if verr := svtx5.Verify(context.Background()); !errors.Is(verr, errStopVertexAlreadyAccepted) { t.Fatalf("stop vertex 'Verify' expected %v, got %v", errStopVertexAlreadyAccepted, verr) } @@ -593,7 +595,7 @@ func TestStopVertexVerifyUnexpectedDependencies(t *testing.T) { // [tx_x] // (should fail) uvtx6 := newTestUniqueVertex(t, ts, []ids.ID{svtx5.id}, [][]byte{{'x'}}, false) - if verr := uvtx6.Verify(); !errors.Is(verr, errStopVertexAlreadyAccepted) 
{ + if verr := uvtx6.Verify(context.Background()); !errors.Is(verr, errStopVertexAlreadyAccepted) { t.Fatalf("stop vertex 'Verify' expected %v, got %v", errStopVertexAlreadyAccepted, verr) } } @@ -603,12 +605,14 @@ func TestStopVertexVerifyNotAllowedTimestamp(t *testing.T) { _, parseTx := generateTestTxs('a') ts := newTestSerializer(t, parseTx) - ts.XChainMigrationTime = version.XChainMigrationDefaultTime + ts.CortinaTime = version.CortinaDefaultTime svtx := newTestUniqueVertex(t, ts, nil, nil, true) - svtx.time = func() time.Time { return version.XChainMigrationDefaultTime.Add(-time.Second) } + svtx.time = func() time.Time { + return version.CortinaDefaultTime.Add(-time.Second) + } - if verr := svtx.Verify(); !errors.Is(verr, errStopVertexNotAllowedTimestamp) { + if verr := svtx.Verify(context.Background()); !errors.Is(verr, errStopVertexNotAllowedTimestamp) { t.Fatalf("stop vertex 'Verify' expected %v, got %v", errStopVertexNotAllowedTimestamp, verr) } } @@ -641,14 +645,14 @@ func newTestUniqueVertex( if err != nil { t.Fatal(err) } - uvtx, err := newUniqueVertex(s, vtx.Bytes()) + uvtx, err := newUniqueVertex(context.Background(), s, vtx.Bytes()) if err != nil { t.Fatal(err) } return uvtx } -func generateTestTxs(idSlice ...byte) ([]snowstorm.Tx, func(b []byte) (snowstorm.Tx, error)) { +func generateTestTxs(idSlice ...byte) ([]snowstorm.Tx, func(context.Context, []byte) (snowstorm.Tx, error)) { txs := make([]snowstorm.Tx, len(idSlice)) bytesToTx := make(map[string]snowstorm.Tx, len(idSlice)) for i, b := range idSlice { @@ -660,10 +664,10 @@ func generateTestTxs(idSlice ...byte) ([]snowstorm.Tx, func(b []byte) (snowstorm } bytesToTx[string([]byte{b})] = txs[i] } - parseTx := func(b []byte) (snowstorm.Tx, error) { + parseTx := func(_ context.Context, b []byte) (snowstorm.Tx, error) { tx, ok := bytesToTx[string(b)] if !ok { - return nil, errors.New("unknown tx bytes") + return nil, errUnknownTx } return tx, nil } diff --git 
a/avalanchego/snow/engine/avalanche/test_avalanche_engine.go b/avalanchego/snow/engine/avalanche/test_engine.go similarity index 67% rename from avalanchego/snow/engine/avalanche/test_avalanche_engine.go rename to avalanchego/snow/engine/avalanche/test_engine.go index 19e9dd07..8fe8589c 100644 --- a/avalanchego/snow/engine/avalanche/test_avalanche_engine.go +++ b/avalanchego/snow/engine/avalanche/test_engine.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( + "context" "errors" "github.com/ava-labs/avalanchego/ids" @@ -12,7 +13,7 @@ import ( ) var ( - _ Engine = &EngineTest{} + _ Engine = (*EngineTest)(nil) errGetVtx = errors.New("unexpectedly called GetVtx") ) @@ -22,7 +23,7 @@ type EngineTest struct { common.EngineTest CantGetVtx bool - GetVtxF func(vtxID ids.ID) (avalanche.Vertex, error) + GetVtxF func(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) } func (e *EngineTest) Default(cant bool) { @@ -30,9 +31,9 @@ func (e *EngineTest) Default(cant bool) { e.CantGetVtx = false } -func (e *EngineTest) GetVtx(vtxID ids.ID) (avalanche.Vertex, error) { +func (e *EngineTest) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if e.GetVtxF != nil { - return e.GetVtxF(vtxID) + return e.GetVtxF(ctx, vtxID) } if e.CantGetVtx && e.T != nil { e.T.Fatalf("Unexpectedly called GetVtx") diff --git a/avalanchego/snow/engine/avalanche/traced_engine.go b/avalanchego/snow/engine/avalanche/traced_engine.go new file mode 100644 index 00000000..b35771f8 --- /dev/null +++ b/avalanchego/snow/engine/avalanche/traced_engine.go @@ -0,0 +1,42 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avalanche + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/avalanche" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/trace" +) + +var _ Engine = (*tracedEngine)(nil) + +type tracedEngine struct { + common.Engine + engine Engine + tracer trace.Tracer +} + +func TraceEngine(engine Engine, tracer trace.Tracer) Engine { + return &tracedEngine{ + Engine: common.TraceEngine(engine, tracer), + engine: engine, + tracer: tracer, + } +} + +func (e *tracedEngine) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetVtx", oteltrace.WithAttributes( + attribute.Stringer("vtxID", vtxID), + )) + defer span.End() + + return e.engine.GetVtx(ctx, vtxID) +} diff --git a/avalanchego/snow/engine/avalanche/transitive.go b/avalanchego/snow/engine/avalanche/transitive.go index 08a3416a..81e19008 100644 --- a/avalanchego/snow/engine/avalanche/transitive.go +++ b/avalanchego/snow/engine/avalanche/transitive.go @@ -1,31 +1,40 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avalanche import ( + "context" "fmt" "time" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/consensus/avalanche/poll" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/events" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/sampler" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" ) -var _ Engine = &Transitive{} +var _ Engine = (*Transitive)(nil) -func New(config Config) (Engine, error) { - return newTransitive(config) +func New( + config Config, + startSnowmanConsensus func(ctx context.Context, lastReqID uint32) error, +) (Engine, error) { + return newTransitive(config, startSnowmanConsensus) } // Transitive implements the Engine interface by attempting to fetch all @@ -40,20 +49,25 @@ type Transitive struct { common.AcceptedFrontierHandler common.AcceptedHandler common.AncestorsHandler + common.AppHandler + validators.Connector RequestID uint32 + // acceptedFrontiers of the other validators of this chain + acceptedFrontiers tracker.Accepted + polls poll.Set // track people I have asked for their preference // The set of vertices that have been requested in Get messages but not yet received outstandingVtxReqs common.Requests // missingTxs tracks transaction that are missing - missingTxs ids.Set + missingTxs set.Set[ids.ID] // IDs of vertices that are queued to be added to consensus but haven't yet been // because of missing dependencies - pending ids.Set + pending 
set.Set[ids.ID] // vtxBlocked tracks operations that are blocked on vertices // txBlocked tracks operations that are blocked on transactions @@ -64,15 +78,23 @@ type Transitive struct { // optimal number. pendingTxs []snowstorm.Tx + startSnowmanConsensus func(ctx context.Context, lastReqID uint32) error + // A uniform sampler without replacement uniformSampler sampler.Uniform errs wrappers.Errs } -func newTransitive(config Config) (*Transitive, error) { +func newTransitive( + config Config, + startSnowmanConsensus func(ctx context.Context, lastReqID uint32) error, +) (*Transitive, error) { config.Ctx.Log.Info("initializing consensus engine") + acceptedFrontiers := tracker.NewAccepted() + config.Validators.RegisterCallbackListener(acceptedFrontiers) + factory := poll.NewEarlyTermNoTraversalFactory(config.Params.Alpha) t := &Transitive{ @@ -82,23 +104,37 @@ func newTransitive(config Config) (*Transitive, error) { AcceptedFrontierHandler: common.NewNoOpAcceptedFrontierHandler(config.Ctx.Log), AcceptedHandler: common.NewNoOpAcceptedHandler(config.Ctx.Log), AncestorsHandler: common.NewNoOpAncestorsHandler(config.Ctx.Log), + AppHandler: config.VM, + Connector: config.VM, + acceptedFrontiers: acceptedFrontiers, polls: poll.NewSet(factory, config.Ctx.Log, "", - config.Ctx.Registerer, + config.Ctx.AvalancheRegisterer, ), - uniformSampler: sampler.NewUniform(), + startSnowmanConsensus: startSnowmanConsensus, + uniformSampler: sampler.NewUniform(), } - return t, t.metrics.Initialize("", config.Ctx.Registerer) + return t, t.metrics.Initialize("", config.Ctx.AvalancheRegisterer) } -func (t *Transitive) Put(nodeID ids.NodeID, requestID uint32, vtxBytes []byte) error { +func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxBytes []byte) error { t.Ctx.Log.Verbo("called Put", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) - vtx, err := t.Manager.ParseVtx(vtxBytes) + + // If the chain is linearized, we should immediately drop 
all put messages. + linearized, err := t.Manager.StopVertexAccepted(ctx) + if err != nil { + return err + } + if linearized { + return nil + } + + vtx, err := t.Manager.ParseVtx(ctx, vtxBytes) if err != nil { t.Ctx.Log.Debug("failed to parse vertex", zap.Stringer("nodeID", nodeID), @@ -111,7 +147,7 @@ func (t *Transitive) Put(nodeID ids.NodeID, requestID uint32, vtxBytes []byte) e zap.Binary("vertex", vtxBytes), zap.Error(err), ) - return t.GetFailed(nodeID, requestID) + return t.GetFailed(ctx, nodeID, requestID) } actualVtxID := vtx.ID() @@ -127,20 +163,30 @@ func (t *Transitive) Put(nodeID ids.NodeID, requestID uint32, vtxBytes []byte) e ) // We assume that [vtx] is useless because it doesn't match what we // expected. - return t.GetFailed(nodeID, requestID) + return t.GetFailed(ctx, nodeID, requestID) } if t.Consensus.VertexIssued(vtx) || t.pending.Contains(actualVtxID) { t.metrics.numUselessPutBytes.Add(float64(len(vtxBytes))) } - if _, err := t.issueFrom(nodeID, vtx); err != nil { + if _, err := t.issueFrom(ctx, nodeID, vtx); err != nil { return err } - return t.attemptToIssueTxs() + return t.attemptToIssueTxs(ctx) } -func (t *Transitive) GetFailed(nodeID ids.NodeID, requestID uint32) error { +func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + // If the chain is linearized, we don't care that a get request failed, we + // have already moved into snowman consensus. 
+ linearized, err := t.Manager.StopVertexAccepted(ctx) + if err != nil { + return err + } + if linearized { + return nil + } + vtxID, ok := t.outstandingVtxReqs.Remove(nodeID, requestID) if !ok { t.Ctx.Log.Debug("unexpected GetFailed", @@ -150,11 +196,11 @@ func (t *Transitive) GetFailed(nodeID ids.NodeID, requestID uint32) error { return nil } - t.vtxBlocked.Abandon(vtxID) + t.vtxBlocked.Abandon(ctx, vtxID) if t.outstandingVtxReqs.Len() == 0 { for txID := range t.missingTxs { - t.txBlocked.Abandon(txID) + t.txBlocked.Abandon(ctx, txID) } t.missingTxs.Clear() } @@ -164,27 +210,59 @@ func (t *Transitive) GetFailed(nodeID ids.NodeID, requestID uint32) error { t.metrics.numMissingTxs.Set(float64(t.missingTxs.Len())) t.metrics.blockerVtxs.Set(float64(t.vtxBlocked.Len())) t.metrics.blockerTxs.Set(float64(t.txBlocked.Len())) - return t.attemptToIssueTxs() + return t.attemptToIssueTxs(ctx) } -func (t *Transitive) PullQuery(nodeID ids.NodeID, requestID uint32, vtxID ids.ID) error { +func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxID ids.ID) error { + // If the chain is linearized, we don't care to attempt to issue any new + // vertices. + linearized, err := t.Manager.StopVertexAccepted(ctx) + if err != nil { + return err + } + if linearized { + // Immediately respond to the query with the stop vertex. + // + // Invariant: This is done here, because the Consensus instance may have + // never been initialized if bootstrapping accepted the stop vertex. + edge := t.Manager.Edge(ctx) + t.Sender.SendChits(ctx, nodeID, requestID, edge, edge) + return nil + } + // Immediately respond to the query with the current consensus preferences. - t.Sender.SendChits(nodeID, requestID, t.Consensus.Preferences().List()) + t.Sender.SendChits(ctx, nodeID, requestID, t.Consensus.Preferences().List(), t.Manager.Edge(ctx)) // If we have [vtxID], attempt to put it into consensus, if we haven't // already. 
If we don't not have [vtxID], fetch it from [nodeID]. - if _, err := t.issueFromByID(nodeID, vtxID); err != nil { + if _, err := t.issueFromByID(ctx, nodeID, vtxID); err != nil { return err } - return t.attemptToIssueTxs() + return t.attemptToIssueTxs(ctx) } -func (t *Transitive) PushQuery(nodeID ids.NodeID, requestID uint32, vtxBytes []byte) error { +func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, vtxBytes []byte) error { + // If the chain is linearized, we don't care to attempt to issue any new + // vertices. + linearized, err := t.Manager.StopVertexAccepted(ctx) + if err != nil { + return err + } + if linearized { + // Immediately respond to the query with the stop vertex. + // + // Invariant: This is done here, because the Consensus instance may have + // never been initialized if bootstrapping accepted the stop vertex. + edge := t.Manager.Edge(ctx) + t.Sender.SendChits(ctx, nodeID, requestID, edge, edge) + return nil + } + // Immediately respond to the query with the current consensus preferences. 
- t.Sender.SendChits(nodeID, requestID, t.Consensus.Preferences().List()) + t.Sender.SendChits(ctx, nodeID, requestID, t.Consensus.Preferences().List(), t.Manager.Edge(ctx)) - vtx, err := t.Manager.ParseVtx(vtxBytes) + vtx, err := t.Manager.ParseVtx(ctx, vtxBytes) if err != nil { t.Ctx.Log.Debug("failed to parse vertex", zap.Stringer("nodeID", nodeID), @@ -204,14 +282,25 @@ func (t *Transitive) PushQuery(nodeID ids.NodeID, requestID uint32, vtxBytes []b t.metrics.numUselessPushQueryBytes.Add(float64(len(vtxBytes))) } - if _, err := t.issueFrom(nodeID, vtx); err != nil { + if _, err := t.issueFrom(ctx, nodeID, vtx); err != nil { return err } - return t.attemptToIssueTxs() + return t.attemptToIssueTxs(ctx) } -func (t *Transitive) Chits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) error { +func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) error { + // If the chain is linearized, we don't care to apply any votes. + linearized, err := t.Manager.StopVertexAccepted(ctx) + if err != nil { + return err + } + if linearized { + return nil + } + + t.acceptedFrontiers.SetAcceptedFrontier(nodeID, accepted) + v := &voter{ t: t, vdr: nodeID, @@ -219,54 +308,38 @@ func (t *Transitive) Chits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) response: votes, } for _, vote := range votes { - if added, err := t.issueFromByID(nodeID, vote); err != nil { + if added, err := t.issueFromByID(ctx, nodeID, vote); err != nil { return err } else if !added { v.deps.Add(vote) } } - t.vtxBlocked.Register(v) + t.vtxBlocked.Register(ctx, v) t.metrics.blockerVtxs.Set(float64(t.vtxBlocked.Len())) - return t.attemptToIssueTxs() -} - -func (t *Transitive) QueryFailed(nodeID ids.NodeID, requestID uint32) error { - return t.Chits(nodeID, requestID, nil) + return t.attemptToIssueTxs(ctx) } -func (t *Transitive) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - // Notify the VM of 
this request - return t.VM.AppRequest(nodeID, requestID, deadline, request) -} - -func (t *Transitive) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { - // Notify the VM that a request it made failed - return t.VM.AppRequestFailed(nodeID, requestID) -} - -func (t *Transitive) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { - // Notify the VM of a response to its request - return t.VM.AppResponse(nodeID, requestID, response) -} - -func (t *Transitive) AppGossip(nodeID ids.NodeID, msg []byte) error { - // Notify the VM of this message which has been gossiped to it - return t.VM.AppGossip(nodeID, msg) -} +func (t *Transitive) QueryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + // If the chain is linearized, we don't care to apply any votes. + linearized, err := t.Manager.StopVertexAccepted(ctx) + if err != nil { + return err + } + if linearized { + return nil + } -func (t *Transitive) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { - return t.VM.Connected(nodeID, nodeVersion) + lastAccepted := t.acceptedFrontiers.AcceptedFrontier(nodeID) + return t.Chits(ctx, nodeID, requestID, lastAccepted, lastAccepted) } -func (t *Transitive) Disconnected(nodeID ids.NodeID) error { - return t.VM.Disconnected(nodeID) +func (*Transitive) Timeout(context.Context) error { + return nil } -func (t *Transitive) Timeout() error { return nil } - -func (t *Transitive) Gossip() error { - edge := t.Manager.Edge() +func (t *Transitive) Gossip(ctx context.Context) error { + edge := t.Manager.Edge(ctx) if len(edge) == 0 { t.Ctx.Log.Verbo("dropping gossip request as no vertices have been accepted") return nil @@ -280,7 +353,7 @@ func (t *Transitive) Gossip() error { return err // Also should never really happen because the edge has positive length } vtxID := edge[int(indices[0])] - vtx, err := t.Manager.GetVtx(vtxID) + vtx, err := t.Manager.GetVtx(ctx, vtxID) if err != nil { t.Ctx.Log.Warn("dropping gossip 
request", zap.String("reason", "couldn't load vertex"), @@ -293,27 +366,43 @@ func (t *Transitive) Gossip() error { t.Ctx.Log.Verbo("gossiping accepted vertex to the network", zap.Stringer("vtxID", vtxID), ) - t.Sender.SendGossip(vtx.Bytes()) + t.Sender.SendGossip(ctx, vtx.Bytes()) return nil } -func (t *Transitive) Halt() {} +func (*Transitive) Halt(context.Context) {} -func (t *Transitive) Shutdown() error { +func (t *Transitive) Shutdown(ctx context.Context) error { t.Ctx.Log.Info("shutting down consensus engine") - return t.VM.Shutdown() + return t.VM.Shutdown(ctx) } -func (t *Transitive) Notify(msg common.Message) error { +func (t *Transitive) Notify(ctx context.Context, msg common.Message) error { + // If the chain is linearized, we shouldn't be processing any messages from + // the VM anymore. + linearized, err := t.Manager.StopVertexAccepted(ctx) + if err != nil { + return err + } + if linearized { + return nil + } + switch msg { case common.PendingTxs: - t.pendingTxs = append(t.pendingTxs, t.VM.PendingTxs()...) + // After the linearization, we shouldn't be building any new vertices + if cortinaTime, ok := version.CortinaTimes[t.Ctx.NetworkID]; ok && time.Now().After(cortinaTime) { + return nil + } + + txs := t.VM.PendingTxs(ctx) + t.pendingTxs = append(t.pendingTxs, txs...) t.metrics.pendingTxs.Set(float64(len(t.pendingTxs))) - return t.attemptToIssueTxs() + return t.attemptToIssueTxs(ctx) case common.StopVertex: // stop vertex doesn't have any txs, issue directly! 
- return t.issueStopVtx() + return t.issueStopVtx(ctx) default: t.Ctx.Log.Warn("received an unexpected message from the VM", @@ -327,13 +416,13 @@ func (t *Transitive) Context() *snow.ConsensusContext { return t.Ctx } -func (t *Transitive) Start(startReqID uint32) error { +func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { t.RequestID = startReqID // Load the vertices that were last saved as the accepted frontier - edge := t.Manager.Edge() + edge := t.Manager.Edge(ctx) frontier := make([]avalanche.Vertex, 0, len(edge)) for _, vtxID := range edge { - if vtx, err := t.Manager.GetVtx(vtxID); err == nil { + if vtx, err := t.Manager.GetVtx(ctx, vtxID); err == nil { frontier = append(frontier, vtx) } else { t.Ctx.Log.Error("failed to load vertex from the frontier", @@ -348,17 +437,20 @@ func (t *Transitive) Start(startReqID uint32) error { ) t.metrics.bootstrapFinished.Set(1) - t.Ctx.SetState(snow.NormalOp) - if err := t.VM.SetState(snow.NormalOp); err != nil { + t.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + State: snow.NormalOp, + }) + if err := t.VM.SetState(ctx, snow.NormalOp); err != nil { return fmt.Errorf("failed to notify VM that consensus has started: %w", err) } - return t.Consensus.Initialize(t.Ctx, t.Params, frontier) + return t.Consensus.Initialize(ctx, t.Ctx, t.Params, frontier) } -func (t *Transitive) HealthCheck() (interface{}, error) { - consensusIntf, consensusErr := t.Consensus.HealthCheck() - vmIntf, vmErr := t.VM.HealthCheck() +func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { + consensusIntf, consensusErr := t.Consensus.HealthCheck(ctx) + vmIntf, vmErr := t.VM.HealthCheck(ctx) intf := map[string]interface{}{ "consensus": consensusIntf, "vm": vmIntf, @@ -369,26 +461,26 @@ func (t *Transitive) HealthCheck() (interface{}, error) { if vmErr == nil { return intf, consensusErr } - return intf, fmt.Errorf("vm: %s ; consensus: %s", vmErr, consensusErr) + return intf, 
fmt.Errorf("vm: %w ; consensus: %v", vmErr, consensusErr) } func (t *Transitive) GetVM() common.VM { return t.VM } -func (t *Transitive) GetVtx(vtxID ids.ID) (avalanche.Vertex, error) { +func (t *Transitive) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) { // GetVtx returns a vertex by its ID. // Returns database.ErrNotFound if unknown. - return t.Manager.GetVtx(vtxID) + return t.Manager.GetVtx(ctx, vtxID) } -func (t *Transitive) attemptToIssueTxs() error { +func (t *Transitive) attemptToIssueTxs(ctx context.Context) error { err := t.errs.Err if err != nil { return err } - t.pendingTxs, err = t.batch(t.pendingTxs, batchOption{limit: true}) + t.pendingTxs, err = t.batch(ctx, t.pendingTxs, batchOption{limit: true}) t.metrics.pendingTxs.Set(float64(len(t.pendingTxs))) return err } @@ -396,29 +488,29 @@ func (t *Transitive) attemptToIssueTxs() error { // If there are pending transactions from the VM, issue them. // If we're not already at the limit for number of concurrent polls, issue a new // query. -func (t *Transitive) repoll() { +func (t *Transitive) repoll(ctx context.Context) { for i := t.polls.Len(); i < t.Params.ConcurrentRepolls && !t.errs.Errored(); i++ { - t.issueRepoll() + t.issueRepoll(ctx) } } // issueFromByID issues the branch ending with vertex [vtxID] to consensus. // Fetches [vtxID] if we don't have it locally. // Returns true if [vtx] has been added to consensus (now or previously) -func (t *Transitive) issueFromByID(nodeID ids.NodeID, vtxID ids.ID) (bool, error) { - vtx, err := t.Manager.GetVtx(vtxID) +func (t *Transitive) issueFromByID(ctx context.Context, nodeID ids.NodeID, vtxID ids.ID) (bool, error) { + vtx, err := t.Manager.GetVtx(ctx, vtxID) if err != nil { // We don't have [vtxID]. Request it. - t.sendRequest(nodeID, vtxID) + t.sendRequest(ctx, nodeID, vtxID) return false, nil } - return t.issueFrom(nodeID, vtx) + return t.issueFrom(ctx, nodeID, vtx) } // issueFrom issues the branch ending with [vtx] to consensus. 
// Assumes we have [vtx] locally // Returns true if [vtx] has been added to consensus (now or previously) -func (t *Transitive) issueFrom(nodeID ids.NodeID, vtx avalanche.Vertex) (bool, error) { +func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, vtx avalanche.Vertex) (bool, error) { issued := true // Before we issue [vtx] into consensus, we have to issue its ancestors. // Go through [vtx] and its ancestors. issue each ancestor that hasn't yet been issued. @@ -446,7 +538,7 @@ func (t *Transitive) issueFrom(nodeID ids.NodeID, vtx avalanche.Vertex) (bool, e for _, parent := range parents { if !parent.Status().Fetched() { // We don't have the parent. Request it. - t.sendRequest(nodeID, parent.ID()) + t.sendRequest(ctx, nodeID, parent.ID()) // We're missing an ancestor so we can't have issued the vtx in this method's argument issued = false } else { @@ -456,7 +548,7 @@ func (t *Transitive) issueFrom(nodeID ids.NodeID, vtx avalanche.Vertex) (bool, e } // Queue up this vertex to be issued once its dependencies are met - if err := t.issue(vtx); err != nil { + if err := t.issue(ctx, vtx); err != nil { return false, err } } @@ -465,7 +557,7 @@ func (t *Transitive) issueFrom(nodeID ids.NodeID, vtx avalanche.Vertex) (bool, e // issue queues [vtx] to be put into consensus after its dependencies are met. // Assumes we have [vtx]. 
-func (t *Transitive) issue(vtx avalanche.Vertex) error { +func (t *Transitive) issue(ctx context.Context, vtx avalanche.Vertex) error { vtxID := vtx.ID() // Add to set of vertices that have been queued up to be issued but haven't been yet @@ -489,11 +581,11 @@ func (t *Transitive) issue(vtx avalanche.Vertex) error { } } - txs, err := vtx.Txs() + txs, err := vtx.Txs(ctx) if err != nil { return err } - txIDs := ids.NewSet(len(txs)) + txIDs := set.NewSet[ids.ID](len(txs)) for _, tx := range txs { txIDs.Add(tx.ID()) } @@ -520,14 +612,14 @@ func (t *Transitive) issue(vtx avalanche.Vertex) error { ) // Wait until all the parents of [vtx] are added to consensus before adding [vtx] - t.vtxBlocked.Register(&vtxIssuer{i: i}) + t.vtxBlocked.Register(ctx, &vtxIssuer{i: i}) // Wait until all the parents of [tx] are added to consensus before adding [vtx] - t.txBlocked.Register(&txIssuer{i: i}) + t.txBlocked.Register(ctx, &txIssuer{i: i}) if t.outstandingVtxReqs.Len() == 0 { // There are no outstanding vertex requests but we don't have these transactions, so we're not getting them. for txID := range t.missingTxs { - t.txBlocked.Abandon(txID) + t.txBlocked.Abandon(ctx, txID) } t.missingTxs.Clear() } @@ -549,26 +641,26 @@ type batchOption struct { limit bool } -// Batchs [txs] into vertices and issue them. -func (t *Transitive) batch(txs []snowstorm.Tx, opt batchOption) ([]snowstorm.Tx, error) { +// Batches [txs] into vertices and issue them. +func (t *Transitive) batch(ctx context.Context, txs []snowstorm.Tx, opt batchOption) ([]snowstorm.Tx, error) { if len(txs) == 0 { return nil, nil } if opt.limit && t.Params.OptimalProcessing <= t.Consensus.NumProcessing() { return txs, nil } - issuedTxs := ids.Set{} - consumed := ids.Set{} + issuedTxs := set.Set[ids.ID]{} + consumed := set.Set[ids.ID]{} orphans := t.Consensus.Orphans() start := 0 end := 0 for end < len(txs) { tx := txs[end] - inputs := ids.Set{} + inputs := set.Set[ids.ID]{} inputs.Add(tx.InputIDs()...) 
overlaps := consumed.Overlaps(inputs) if end-start >= t.Params.BatchSize || (opt.force && overlaps) { - if err := t.issueBatch(txs[start:end]); err != nil { + if err := t.issueBatch(ctx, txs[start:end]); err != nil { return nil, err } if opt.limit && t.Params.OptimalProcessing <= t.Consensus.NumProcessing() { @@ -595,13 +687,13 @@ func (t *Transitive) batch(txs []snowstorm.Tx, opt batchOption) ([]snowstorm.Tx, } if end > start { - return txs[end:], t.issueBatch(txs[start:end]) + return txs[end:], t.issueBatch(ctx, txs[start:end]) } return txs[end:], nil } // Issues a new poll for a preferred vertex in order to move consensus along -func (t *Transitive) issueRepoll() { +func (t *Transitive) issueRepoll(ctx context.Context) { preferredIDs := t.Consensus.Preferences() if preferredIDs.Len() == 0 { t.Ctx.Log.Error("re-query attempt was dropped due to no pending vertices") @@ -609,7 +701,7 @@ func (t *Transitive) issueRepoll() { } vtxID := preferredIDs.CappedList(1)[0] - vdrs, err := t.Validators.Sample(t.Params.K) // Validators to sample + vdrIDs, err := t.Validators.Sample(t.Params.K) // Validators to sample if err != nil { t.Ctx.Log.Error("dropped re-query", zap.String("reason", "insufficient number of validators"), @@ -619,24 +711,22 @@ func (t *Transitive) issueRepoll() { return } - vdrBag := ids.NodeIDBag{} // IDs of validators to be sampled - for _, vdr := range vdrs { - vdrBag.Add(vdr.ID()) - } + vdrBag := bag.Bag[ids.NodeID]{} // IDs of validators to be sampled + vdrBag.Add(vdrIDs...) vdrList := vdrBag.List() - vdrSet := ids.NewNodeIDSet(len(vdrList)) + vdrSet := set.NewSet[ids.NodeID](len(vdrList)) vdrSet.Add(vdrList...) // Poll the network t.RequestID++ if t.polls.Add(t.RequestID, vdrBag) { - t.Sender.SendPullQuery(vdrSet, t.RequestID, vtxID) + t.Sender.SendPullQuery(ctx, vdrSet, t.RequestID, vtxID) } } // Puts a batch of transactions into a vertex and issues it into consensus. 
-func (t *Transitive) issueBatch(txs []snowstorm.Tx) error { +func (t *Transitive) issueBatch(ctx context.Context, txs []snowstorm.Tx) error { t.Ctx.Log.Verbo("batching transactions into a new vertex", zap.Int("numTxs", len(txs)), ) @@ -658,7 +748,7 @@ func (t *Transitive) issueBatch(txs []snowstorm.Tx) error { parentIDs[i] = virtuousIDs[int(index)] } - vtx, err := t.Manager.BuildVtx(parentIDs, txs) + vtx, err := t.Manager.BuildVtx(ctx, parentIDs, txs) if err != nil { t.Ctx.Log.Warn("error building new vertex", zap.Int("numParents", len(parentIDs)), @@ -668,14 +758,14 @@ func (t *Transitive) issueBatch(txs []snowstorm.Tx) error { return nil } - return t.issue(vtx) + return t.issue(ctx, vtx) } // to be triggered via X-Chain API -func (t *Transitive) issueStopVtx() error { +func (t *Transitive) issueStopVtx(ctx context.Context) error { // use virtuous frontier (accepted) as parents virtuousSet := t.Consensus.Virtuous() - vtx, err := t.Manager.BuildStopVtx(virtuousSet.List()) + vtx, err := t.Manager.BuildStopVtx(ctx, virtuousSet.List()) if err != nil { t.Ctx.Log.Warn("error building new stop vertex", zap.Int("numParents", virtuousSet.Len()), @@ -683,11 +773,11 @@ func (t *Transitive) issueStopVtx() error { ) return nil } - return t.issue(vtx) + return t.issue(ctx, vtx) } // Send a request to [vdr] asking them to send us vertex [vtxID] -func (t *Transitive) sendRequest(nodeID ids.NodeID, vtxID ids.ID) { +func (t *Transitive) sendRequest(ctx context.Context, nodeID ids.NodeID, vtxID ids.ID) { if t.outstandingVtxReqs.Contains(vtxID) { t.Ctx.Log.Debug("not sending request for vertex", zap.String("reason", "existing outstanding request"), @@ -697,6 +787,6 @@ func (t *Transitive) sendRequest(nodeID ids.NodeID, vtxID ids.ID) { } t.RequestID++ t.outstandingVtxReqs.Add(nodeID, t.RequestID, vtxID) // Mark that there is an outstanding request for this vertex - t.Sender.SendGet(nodeID, t.RequestID, vtxID) + t.Sender.SendGet(ctx, nodeID, t.RequestID, vtxID) 
t.metrics.numVtxRequests.Set(float64(t.outstandingVtxReqs.Len())) // Tracks performance statistics } diff --git a/avalanchego/snow/engine/avalanche/transitive_test.go b/avalanchego/snow/engine/avalanche/transitive_test.go index 4688d78b..605e5505 100644 --- a/avalanchego/snow/engine/avalanche/transitive_test.go +++ b/avalanchego/snow/engine/avalanche/transitive_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( "bytes" + "context" "errors" "fmt" "testing" @@ -13,6 +14,8 @@ import ( "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" @@ -22,9 +25,11 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/tracker" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" @@ -35,15 +40,20 @@ var ( errUnknownVertex = errors.New("unknown vertex") errFailedParsing = errors.New("failed parsing") errMissing = errors.New("missing") + errTest = errors.New("non-nil error") ) type dummyHandler struct { - startEngineF func(startReqID uint32) error + startEngineF func(ctx context.Context, startReqID uint32) error } -func (dh *dummyHandler) onDoneBootstrapping(lastReqID uint32) error { +func (dh *dummyHandler) onDoneBootstrapping(ctx context.Context, lastReqID uint32) error { lastReqID++ - return dh.startEngineF(lastReqID) + return dh.startEngineF(ctx, 
lastReqID) +} + +func noopStarter(context.Context, uint32) error { + return nil } func TestEngineShutdown(t *testing.T) { @@ -52,14 +62,17 @@ func TestEngineShutdown(t *testing.T) { vmShutdownCalled := false vm := &vertex.TestVM{} vm.T = t - vm.ShutdownF = func() error { vmShutdownCalled = true; return nil } + vm.ShutdownF = func(context.Context) error { + vmShutdownCalled = true + return nil + } engCfg.VM = vm - transitive, err := newTransitive(engCfg) + transitive, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := transitive.Shutdown(); err != nil { + if err := transitive.Shutdown(context.Background()); err != nil { t.Fatal(err) } if !vmShutdownCalled { @@ -74,7 +87,7 @@ func TestEngineAdd(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -91,12 +104,12 @@ func TestEngineAdd(t *testing.T) { manager.CantEdge = false - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -120,7 +133,7 @@ func TestEngineAdd(t *testing.T) { asked := new(bool) reqID := new(uint32) - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, vtxID ids.ID) { *reqID = requestID if *asked { t.Fatalf("Asked multiple times") @@ -134,14 +147,14 @@ func TestEngineAdd(t *testing.T) { } } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if !bytes.Equal(b, vtx.Bytes()) { t.Fatalf("Wrong bytes") } return vtx, nil } - if err := te.Put(vdr, 0, vtx.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, vtx.Bytes()); err != nil { 
t.Fatal(err) } @@ -155,9 +168,11 @@ func TestEngineAdd(t *testing.T) { t.Fatalf("Should have been blocking on request") } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { return nil, errFailedParsing } + manager.ParseVtxF = func(context.Context, []byte) (avalanche.Vertex, error) { + return nil, errFailedParsing + } - if err := te.Put(vdr, *reqID, nil); err != nil { + if err := te.Put(context.Background(), vdr, *reqID, nil); err != nil { t.Fatal(err) } @@ -175,7 +190,7 @@ func TestEngineQuery(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -219,8 +234,10 @@ func TestEngineQuery(t *testing.T) { BytesV: []byte{0, 1, 2, 3}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -232,17 +249,17 @@ func TestEngineQuery(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } vertexed := new(bool) - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if *vertexed { t.Fatalf("Sent multiple requests") } @@ -254,7 +271,7 @@ func TestEngineQuery(t *testing.T) { } chitted := new(bool) - sender.SendChitsF = func(inVdr ids.NodeID, _ uint32, prefs []ids.ID) { + sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, _ uint32, prefs []ids.ID, _ []ids.ID) { if *chitted { t.Fatalf("Sent 
multiple chits") } @@ -265,7 +282,7 @@ func TestEngineQuery(t *testing.T) { } asked := new(bool) - sender.SendGetF = func(inVdr ids.NodeID, _ uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, _ uint32, vtxID ids.ID) { if *asked { t.Fatalf("Asked multiple times") } @@ -280,7 +297,7 @@ func TestEngineQuery(t *testing.T) { // After receiving the pull query for [vtx0] we will first request [vtx0] // from the peer, because it is currently unknown to the engine. - if err := te.PullQuery(vdr, 0, vtx0.ID()); err != nil { + if err := te.PullQuery(context.Background(), vdr, 0, vtx0.ID()); err != nil { t.Fatal(err) } @@ -293,13 +310,13 @@ func TestEngineQuery(t *testing.T) { queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, vtx []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -309,7 +326,7 @@ func TestEngineQuery(t *testing.T) { } } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if !bytes.Equal(b, vtx0.Bytes()) { t.Fatalf("Wrong bytes") } @@ -318,7 +335,7 @@ func TestEngineQuery(t *testing.T) { // Once the peer returns [vtx0], we will respond to its query and then issue // our own push query for [vtx0]. 
- if err := te.Put(vdr, 0, vtx0.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, vtx0.Bytes()); err != nil { t.Fatal(err) } manager.ParseVtxF = nil @@ -341,7 +358,7 @@ func TestEngineQuery(t *testing.T) { BytesV: []byte{5, 4, 3, 2, 1, 9}, } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtx0.ID() { return &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ @@ -357,7 +374,7 @@ func TestEngineQuery(t *testing.T) { } *asked = false - sender.SendGetF = func(inVdr ids.NodeID, _ uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, _ uint32, vtxID ids.ID) { if *asked { t.Fatalf("Asked multiple times") } @@ -372,18 +389,18 @@ func TestEngineQuery(t *testing.T) { // The peer returned [vtx1] from our query for [vtx0], which means we will // need to request the missing [vtx1]. - if err := te.Chits(vdr, *queryRequestID, []ids.ID{vtx1.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{vtx1.ID()}, nil); err != nil { t.Fatal(err) } *queried = false - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, vtx []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -393,12 +410,12 @@ func TestEngineQuery(t *testing.T) { } } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if !bytes.Equal(b, vtx1.Bytes()) { t.Fatalf("Wrong bytes") } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ 
context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtx0.ID() { return &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ @@ -419,7 +436,7 @@ func TestEngineQuery(t *testing.T) { // Once the peer returns [vtx1], the poll that was issued for [vtx0] will be // able to terminate. Additionally the node will issue a push query with // [vtx1]. - if err := te.Put(vdr, 0, vtx1.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, vtx1.Bytes()); err != nil { t.Fatal(err) } manager.ParseVtxF = nil @@ -445,7 +462,7 @@ func TestEngineQuery(t *testing.T) { sender.CantSendPullQuery = false // Abandon the query for [vtx1]. This will result in a re-query for [vtx0]. - if err := te.QueryFailed(vdr, *queryRequestID); err != nil { + if err := te.QueryFailed(context.Background(), vdr, *queryRequestID); err != nil { t.Fatal(err) } if len(te.vtxBlocked) != 0 { @@ -481,9 +498,9 @@ func TestEngineMultipleQuery(t *testing.T) { errs := wrappers.Errs{} errs.Add( - vals.AddWeight(vdr0, 1), - vals.AddWeight(vdr1, 1), - vals.AddWeight(vdr2, 1), + vals.Add(vdr0, nil, ids.Empty, 1), + vals.Add(vdr1, nil, ids.Empty, 1), + vals.Add(vdr2, nil, ids.Empty, 1), ) if errs.Errored() { t.Fatal(errs.Err) @@ -510,8 +527,10 @@ func TestEngineMultipleQuery(t *testing.T) { vts := []avalanche.Vertex{gVtx, mVtx} utxos := []ids.ID{ids.GenerateTestID()} - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -538,24 +557,24 @@ func TestEngineMultipleQuery(t *testing.T) { TxsV: []snowstorm.Tx{tx0}, } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if 
err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, vtx []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -565,7 +584,7 @@ func TestEngineMultipleQuery(t *testing.T) { } } - if err := te.issue(vtx0); err != nil { + if err := te.issue(context.Background(), vtx0); err != nil { t.Fatal(err) } @@ -579,7 +598,7 @@ func TestEngineMultipleQuery(t *testing.T) { TxsV: []snowstorm.Tx{tx0}, } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -596,7 +615,7 @@ func TestEngineMultipleQuery(t *testing.T) { asked := new(bool) reqID := new(uint32) - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, vtxID ids.ID) { *reqID = requestID if *asked { t.Fatalf("Asked multiple times") @@ -614,22 +633,22 @@ func TestEngineMultipleQuery(t *testing.T) { s2 := []ids.ID{vtx0.ID()} - if err := te.Chits(vdr0, *queryRequestID, s0); err != nil { + if err := te.Chits(context.Background(), vdr0, *queryRequestID, s0, nil); err != nil { t.Fatal(err) } - if err := te.QueryFailed(vdr1, *queryRequestID); err != nil { + if err := te.QueryFailed(context.Background(), vdr1, *queryRequestID); err != nil { t.Fatal(err) } - if err := te.Chits(vdr2, *queryRequestID, s2); err != nil { + if err := te.Chits(context.Background(), vdr2, *queryRequestID, s2, nil); err != nil { t.Fatal(err) } // Should be 
dropped because the query was marked as failed - if err := te.Chits(vdr1, *queryRequestID, s0); err != nil { + if err := te.Chits(context.Background(), vdr1, *queryRequestID, s0, nil); err != nil { t.Fatal(err) } - if err := te.GetFailed(vdr0, *reqID); err != nil { + if err := te.GetFailed(context.Background(), vdr0, *reqID); err != nil { t.Fatal(err) } @@ -648,7 +667,7 @@ func TestEngineBlockedIssue(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -698,21 +717,21 @@ func TestEngineBlockedIssue(t *testing.T) { TxsV: []snowstorm.Tx{tx0}, } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := te.issue(vtx1); err != nil { + if err := te.issue(context.Background(), vtx1); err != nil { t.Fatal(err) } vtx1.ParentsV[0] = vtx0 - if err := te.issue(vtx0); err != nil { + if err := te.issue(context.Background(), vtx0); err != nil { t.Fatal(err) } @@ -728,7 +747,7 @@ func TestEngineAbandonResponse(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -768,27 +787,29 @@ func TestEngineAbandonResponse(t *testing.T) { TxsV: []snowstorm.Tx{tx0}, } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { return nil, errUnknownVertex } + manager.GetVtxF = func(context.Context, ids.ID) (avalanche.Vertex, error) { + return nil, errUnknownVertex + } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } reqID := new(uint32) - 
sender.SendGetF = func(vID ids.NodeID, requestID uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, vID ids.NodeID, requestID uint32, vtxID ids.ID) { *reqID = requestID } sender.CantSendChits = false - if err := te.PullQuery(vdr, 0, vtx.ID()); err != nil { + if err := te.PullQuery(context.Background(), vdr, 0, vtx.ID()); err != nil { t.Fatal(err) } - if err := te.GetFailed(vdr, *reqID); err != nil { + if err := te.GetFailed(context.Background(), vdr, *reqID); err != nil { t.Fatal(err) } @@ -804,7 +825,7 @@ func TestEngineScheduleRepoll(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -848,35 +869,35 @@ func TestEngineScheduleRepoll(t *testing.T) { sender.Default(true) sender.CantSendGetAcceptedFrontier = false - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } requestID := new(uint32) - sender.SendPushQueryF = func(_ ids.NodeIDSet, reqID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], reqID uint32, _ []byte) { *requestID = reqID } - if err := te.issue(vtx); err != nil { + if err := te.issue(context.Background(), vtx); err != nil { t.Fatal(err) } sender.SendPushQueryF = nil repolled := new(bool) - sender.SendPullQueryF = func(_ ids.NodeIDSet, _ uint32, vtxID ids.ID) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, vtxID ids.ID) { *repolled = true if vtxID != vtx.ID() { t.Fatalf("Wrong vertex queried") } } - if err := te.QueryFailed(vdr, *requestID); err != nil { + if err := te.QueryFailed(context.Background(), vdr, *requestID); err != nil { t.Fatal(err) } @@ -900,7 +921,7 @@ func TestEngineRejectDoubleSpendTx(t *testing.T) { engCfg.Validators = vals 
vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -908,7 +929,7 @@ func TestEngineRejectDoubleSpendTx(t *testing.T) { engCfg.Manager = manager manager.Default(true) - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} engCfg.VM = vm vm.Default(true) @@ -946,8 +967,10 @@ func TestEngineRejectDoubleSpendTx(t *testing.T) { } tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - manager.EdgeF = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID(), mVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -957,7 +980,7 @@ func TestEngineRejectDoubleSpendTx(t *testing.T) { t.Fatalf("Unknown vertex") panic("Should have errored") } - manager.BuildVtxF = func(_ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { + manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { return &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -971,19 +994,21 @@ func TestEngineRejectDoubleSpendTx(t *testing.T) { } vm.CantSetState = false - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } vm.CantSetState = true sender.CantSendPushQuery = false - vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx0, tx1} } - if err := te.Notify(common.PendingTxs); err != nil { + vm.PendingTxsF = func(context.Context) []snowstorm.Tx { + return []snowstorm.Tx{tx0, tx1} + } + if err := te.Notify(context.Background(), 
common.PendingTxs); err != nil { t.Fatal(err) } } @@ -1002,7 +1027,7 @@ func TestEngineRejectDoubleSpendIssuedTx(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1010,7 +1035,7 @@ func TestEngineRejectDoubleSpendIssuedTx(t *testing.T) { engCfg.Manager = manager manager.Default(true) - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} engCfg.VM = vm vm.Default(true) @@ -1048,8 +1073,10 @@ func TestEngineRejectDoubleSpendIssuedTx(t *testing.T) { } tx1.InputIDsV = append(tx1.InputIDsV, utxos[0]) - manager.EdgeF = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID(), mVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1061,17 +1088,17 @@ func TestEngineRejectDoubleSpendIssuedTx(t *testing.T) { } vm.CantSetState = false - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } vm.CantSetState = true - manager.BuildVtxF = func(_ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { + manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { return &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -1086,13 +1113,17 @@ func TestEngineRejectDoubleSpendIssuedTx(t *testing.T) { sender.CantSendPushQuery = false - vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx0} } - if err := te.Notify(common.PendingTxs); err != nil { + vm.PendingTxsF = 
func(context.Context) []snowstorm.Tx { + return []snowstorm.Tx{tx0} + } + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } - vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx1} } - if err := te.Notify(common.PendingTxs); err != nil { + vm.PendingTxsF = func(context.Context) []snowstorm.Tx { + return []snowstorm.Tx{tx1} + } + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } } @@ -1111,7 +1142,7 @@ func TestEngineIssueRepoll(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1128,8 +1159,10 @@ func TestEngineIssueRepoll(t *testing.T) { StatusV: choices.Accepted, }} - manager.EdgeF = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID(), mVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1140,17 +1173,17 @@ func TestEngineIssueRepoll(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - sender.SendPullQueryF = func(vdrs ids.NodeIDSet, _ uint32, vtxID ids.ID) { - vdrSet := ids.NodeIDSet{} + sender.SendPullQueryF = func(_ context.Context, vdrs set.Set[ids.NodeID], _ uint32, vtxID ids.ID) { + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !vdrs.Equals(vdrSet) { t.Fatalf("Wrong query recipients") @@ -1160,7 +1193,7 @@ func TestEngineIssueRepoll(t *testing.T) { } } - te.repoll() + te.repoll(context.Background()) if err := te.errs.Err; err != nil { t.Fatal(err) } @@ 
-1182,7 +1215,7 @@ func TestEngineReissue(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1190,7 +1223,7 @@ func TestEngineReissue(t *testing.T) { manager.Default(true) engCfg.Manager = manager - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} vm.Default(true) engCfg.VM = vm @@ -1257,8 +1290,10 @@ func TestEngineReissue(t *testing.T) { BytesV: []byte{42}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID(), mVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1272,18 +1307,18 @@ func TestEngineReissue(t *testing.T) { } vm.CantSetState = false - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } vm.CantSetState = true lastVtx := new(avalanche.TestVertex) - manager.BuildVtxF = func(_ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { + manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { lastVtx = &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -1297,7 +1332,7 @@ func TestEngineReissue(t *testing.T) { return lastVtx, nil } - vm.GetTxF = func(id ids.ID) (snowstorm.Tx, error) { + vm.GetTxF = func(_ context.Context, id ids.ID) (snowstorm.Tx, error) { if id != tx0.ID() { t.Fatalf("Wrong tx") } @@ -1305,16 +1340,18 @@ func TestEngineReissue(t *testing.T) { } queryRequestID := new(uint32) - sender.SendPushQueryF = func(_ 
ids.NodeIDSet, requestID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { *queryRequestID = requestID } - vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx0, tx1} } - if err := te.Notify(common.PendingTxs); err != nil { + vm.PendingTxsF = func(context.Context) []snowstorm.Tx { + return []snowstorm.Tx{tx0, tx1} + } + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if !bytes.Equal(b, vtx.Bytes()) { t.Fatalf("Wrong bytes") } @@ -1323,22 +1360,24 @@ func TestEngineReissue(t *testing.T) { // must vote on the first poll for the second one to settle // *queryRequestID is 1 - if err := te.Chits(vdr, *queryRequestID, []ids.ID{vtx.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{vtx.ID()}, nil); err != nil { t.Fatal(err) } - if err := te.Put(vdr, 0, vtx.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, vtx.Bytes()); err != nil { t.Fatal(err) } manager.ParseVtxF = nil - vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx3} } - if err := te.Notify(common.PendingTxs); err != nil { + vm.PendingTxsF = func(context.Context) []snowstorm.Tx { + return []snowstorm.Tx{tx3} + } + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } // vote on second poll, *queryRequestID is 2 - if err := te.Chits(vdr, *queryRequestID, []ids.ID{vtx.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{vtx.ID()}, nil); err != nil { t.Fatal(err) } @@ -1364,7 +1403,7 @@ func TestEngineLargeIssue(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { 
t.Fatal(err) } @@ -1372,7 +1411,7 @@ func TestEngineLargeIssue(t *testing.T) { manager.Default(true) engCfg.Manager = manager - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} vm.Default(true) engCfg.VM = vm @@ -1410,8 +1449,10 @@ func TestEngineLargeIssue(t *testing.T) { } tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) - manager.EdgeF = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID(), mVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1423,18 +1464,18 @@ func TestEngineLargeIssue(t *testing.T) { } vm.CantSetState = false - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } vm.CantSetState = true lastVtx := new(avalanche.TestVertex) - manager.BuildVtxF = func(_ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { + manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { lastVtx = &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -1450,8 +1491,10 @@ func TestEngineLargeIssue(t *testing.T) { sender.CantSendPushQuery = false - vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx0, tx1} } - if err := te.Notify(common.PendingTxs); err != nil { + vm.PendingTxsF = func(context.Context) []snowstorm.Tx { + return []snowstorm.Tx{tx0, tx1} + } + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } @@ -1468,7 +1511,7 @@ func TestEngineGetVertex(t *testing.T) { sender.CantSendGetAcceptedFrontier = false engCfg.Sender = sender - vdr := 
validators.GenerateRandomValidator(1) + vdrID := ids.GenerateTestNodeID() manager := vertex.NewTestManager(t) manager.Default(true) @@ -1488,8 +1531,10 @@ func TestEngineGetVertex(t *testing.T) { StatusV: choices.Accepted, }} - manager.EdgeF = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID(), mVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1500,17 +1545,17 @@ func TestEngineGetVertex(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - sender.SendPutF = func(v ids.NodeID, _ uint32, vtx []byte) { - if v != vdr.ID() { + sender.SendPutF = func(_ context.Context, v ids.NodeID, _ uint32, vtx []byte) { + if v != vdrID { t.Fatalf("Wrong validator") } if !bytes.Equal(mVtx.Bytes(), vtx) { @@ -1518,7 +1563,7 @@ func TestEngineGetVertex(t *testing.T) { } } - if err := te.Get(vdr.ID(), 0, mVtx.ID()); err != nil { + if err := te.Get(context.Background(), vdrID, 0, mVtx.ID()); err != nil { t.Fatal(err) } } @@ -1559,8 +1604,10 @@ func TestEngineInsufficientValidators(t *testing.T) { BytesV: []byte{0, 1, 2, 3}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1571,21 +1618,21 @@ func TestEngineInsufficientValidators(t *testing.T) { panic("Should have errored") } - te, err := 
newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } queried := new(bool) - sender.SendPushQueryF = func(ids.NodeIDSet, uint32, []byte) { + sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { *queried = true } - if err := te.issue(vtx); err != nil { + if err := te.issue(context.Background(), vtx); err != nil { t.Fatal(err) } @@ -1601,7 +1648,7 @@ func TestEnginePushGossip(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1635,8 +1682,10 @@ func TestEnginePushGossip(t *testing.T) { BytesV: []byte{0, 1, 2, 3}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1649,21 +1698,21 @@ func TestEnginePushGossip(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } requested := new(bool) - sender.SendGetF = func(vdr ids.NodeID, _ uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, vdr ids.NodeID, _ uint32, vtxID ids.ID) { *requested = true } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtx.BytesV) { return vtx, nil } @@ -1673,7 +1722,7 @@ func TestEnginePushGossip(t 
*testing.T) { sender.CantSendPushQuery = false sender.CantSendChits = false - if err := te.PushQuery(vdr, 0, vtx.Bytes()); err != nil { + if err := te.PushQuery(context.Background(), vdr, 0, vtx.Bytes()); err != nil { t.Fatal(err) } @@ -1689,7 +1738,7 @@ func TestEngineSingleQuery(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1723,8 +1772,10 @@ func TestEngineSingleQuery(t *testing.T) { BytesV: []byte{0, 1, 2, 3}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1737,19 +1788,19 @@ func TestEngineSingleQuery(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } sender.CantSendPushQuery = false sender.CantSendPullQuery = false - if err := te.issue(vtx); err != nil { + if err := te.issue(context.Background(), vtx); err != nil { t.Fatal(err) } } @@ -1761,7 +1812,7 @@ func TestEngineParentBlockingInsert(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1815,8 +1866,10 @@ func TestEngineParentBlockingInsert(t *testing.T) { BytesV: []byte{0, 1, 2, 3}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) 
[]ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1827,19 +1880,19 @@ func TestEngineParentBlockingInsert(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := te.issue(parentVtx); err != nil { + if err := te.issue(context.Background(), parentVtx); err != nil { t.Fatal(err) } - if err := te.issue(blockingVtx); err != nil { + if err := te.issue(context.Background(), blockingVtx); err != nil { t.Fatal(err) } @@ -1850,7 +1903,7 @@ func TestEngineParentBlockingInsert(t *testing.T) { sender.CantSendPushQuery = false missingVtx.StatusV = choices.Processing - if err := te.issue(missingVtx); err != nil { + if err := te.issue(context.Background(), missingVtx); err != nil { t.Fatal(err) } @@ -1859,6 +1912,204 @@ func TestEngineParentBlockingInsert(t *testing.T) { } } +func TestEngineAbandonChit(t *testing.T) { + require := require.New(t) + + _, _, engCfg := DefaultConfig() + + vals := validators.NewSet() + engCfg.Validators = vals + + vdr := ids.GenerateTestNodeID() + err := vals.Add(vdr, nil, ids.Empty, 1) + require.NoError(err) + + sender := &common.SenderTest{T: t} + sender.Default(true) + sender.CantSendGetAcceptedFrontier = false + engCfg.Sender = sender + + manager := vertex.NewTestManager(t) + manager.Default(true) + engCfg.Manager = manager + + gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }} + mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }} + + vts := []avalanche.Vertex{gVtx, mVtx} + + vtx := &avalanche.TestVertex{ + TestDecidable: 
choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + BytesV: []byte{0, 1, 2, 3}, + } + + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { + switch id { + case gVtx.ID(): + return gVtx, nil + case mVtx.ID(): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te, err := newTransitive(engCfg, noopStarter) + require.NoError(err) + + err = te.Start(context.Background(), 0) + require.NoError(err) + + var reqID uint32 + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { + reqID = requestID + } + + err = te.issue(context.Background(), vtx) + require.NoError(err) + + fakeVtxID := ids.GenerateTestID() + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { + require.Equal(fakeVtxID, id) + return nil, errMissing + } + + sender.SendGetF = func(_ context.Context, _ ids.NodeID, requestID uint32, _ ids.ID) { + reqID = requestID + } + + // Register a voter dependency on an unknown vertex. 
+ err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeVtxID}, nil) + require.NoError(err) + require.Len(te.vtxBlocked, 1) + + sender.CantSendPullQuery = false + + err = te.GetFailed(context.Background(), vdr, reqID) + require.NoError(err) + require.Empty(te.vtxBlocked) +} + +func TestEngineAbandonChitWithUnexpectedPutVertex(t *testing.T) { + require := require.New(t) + + _, _, engCfg := DefaultConfig() + + vals := validators.NewSet() + engCfg.Validators = vals + + vdr := ids.GenerateTestNodeID() + err := vals.Add(vdr, nil, ids.Empty, 1) + require.NoError(err) + + sender := &common.SenderTest{T: t} + sender.Default(true) + sender.CantSendGetAcceptedFrontier = false + engCfg.Sender = sender + + manager := vertex.NewTestManager(t) + manager.Default(true) + engCfg.Manager = manager + + gVtx := &avalanche.TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }, + BytesV: []byte{0}, + } + mVtx := &avalanche.TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }, + BytesV: []byte{1}, + } + + vts := []avalanche.Vertex{gVtx, mVtx} + + vtx := &avalanche.TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: vts, + HeightV: 1, + BytesV: []byte{0, 1, 2, 3}, + } + + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { + switch id { + case gVtx.ID(): + return gVtx, nil + case mVtx.ID(): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te, err := newTransitive(engCfg, noopStarter) + require.NoError(err) + + err = te.Start(context.Background(), 0) + require.NoError(err) + + var reqID uint32 + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { + reqID = requestID + } + 
+ err = te.issue(context.Background(), vtx) + require.NoError(err) + + fakeVtxID := ids.GenerateTestID() + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { + require.Equal(fakeVtxID, id) + return nil, errMissing + } + + sender.SendGetF = func(_ context.Context, _ ids.NodeID, requestID uint32, _ ids.ID) { + reqID = requestID + } + + // Register a voter dependency on an unknown vertex. + err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeVtxID}, nil) + require.NoError(err) + require.Len(te.vtxBlocked, 1) + + sender.CantSendPullQuery = false + + gVtxBytes := gVtx.Bytes() + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { + require.Equal(gVtxBytes, b) + return gVtx, nil + } + + // Respond with an unexpected vertex and verify that the request is + // correctly cleared. + err = te.Put(context.Background(), vdr, reqID, gVtxBytes) + require.NoError(err) + require.Empty(te.vtxBlocked) +} + func TestEngineBlockingChitRequest(t *testing.T) { _, _, engCfg := DefaultConfig() @@ -1866,7 +2117,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1920,8 +2171,10 @@ func TestEngineBlockingChitRequest(t *testing.T) { BytesV: []byte{2, 1, 2, 3}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -1932,27 +2185,27 @@ func TestEngineBlockingChitRequest(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) 
} - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := te.issue(parentVtx); err != nil { + if err := te.issue(context.Background(), parentVtx); err != nil { t.Fatal(err) } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == blockingVtx.ID() { return blockingVtx, nil } t.Fatalf("Unknown vertex") panic("Should have errored") } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, blockingVtx.Bytes()) { return blockingVtx, nil } @@ -1961,7 +2214,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { } sender.CantSendChits = false - if err := te.PushQuery(vdr, 0, blockingVtx.Bytes()); err != nil { + if err := te.PushQuery(context.Background(), vdr, 0, blockingVtx.Bytes()); err != nil { t.Fatal(err) } @@ -1972,7 +2225,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { sender.CantSendPushQuery = false missingVtx.StatusV = choices.Processing - if err := te.issue(missingVtx); err != nil { + if err := te.issue(context.Background(), missingVtx); err != nil { t.Fatal(err) } @@ -1988,7 +2241,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -2042,8 +2295,10 @@ func TestEngineBlockingChitResponse(t *testing.T) { BytesV: []byte{2, 1, 2, 3}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): 
return gVtx, nil @@ -2054,23 +2309,23 @@ func TestEngineBlockingChitResponse(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := te.issue(blockingVtx); err != nil { + if err := te.issue(context.Background(), blockingVtx); err != nil { t.Fatal(err) } queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, vtx []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -2080,11 +2335,11 @@ func TestEngineBlockingChitResponse(t *testing.T) { } } - if err := te.issue(issuedVtx); err != nil { + if err := te.issue(context.Background(), issuedVtx); err != nil { t.Fatal(err) } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { if id == blockingVtx.ID() { return blockingVtx, nil } @@ -2092,7 +2347,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { panic("Should have errored") } - if err := te.Chits(vdr, *queryRequestID, []ids.ID{blockingVtx.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blockingVtx.ID()}, nil); err != nil { t.Fatal(err) } @@ -2105,7 +2360,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { sender.CantSendChits = false missingVtx.StatusV = choices.Processing - if err := te.issue(missingVtx); err != nil { + if err := te.issue(context.Background(), missingVtx); err != nil { t.Fatal(err) } @@ -2121,7 +2376,7 @@ func TestEngineMissingTx(t *testing.T) { engCfg.Validators = vals vdr := 
ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -2175,8 +2430,10 @@ func TestEngineMissingTx(t *testing.T) { BytesV: []byte{2, 1, 2, 3}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -2187,23 +2444,23 @@ func TestEngineMissingTx(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := te.issue(blockingVtx); err != nil { + if err := te.issue(context.Background(), blockingVtx); err != nil { t.Fatal(err) } queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, vtx []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -2213,11 +2470,11 @@ func TestEngineMissingTx(t *testing.T) { } } - if err := te.issue(issuedVtx); err != nil { + if err := te.issue(context.Background(), issuedVtx); err != nil { t.Fatal(err) } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { if id == blockingVtx.ID() { return blockingVtx, nil } @@ -2225,7 +2482,7 @@ func TestEngineMissingTx(t *testing.T) { panic("Should have errored") } - if err := te.Chits(vdr, 
*queryRequestID, []ids.ID{blockingVtx.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blockingVtx.ID()}, nil); err != nil { t.Fatal(err) } @@ -2238,7 +2495,7 @@ func TestEngineMissingTx(t *testing.T) { sender.CantSendChits = false missingVtx.StatusV = choices.Processing - if err := te.issue(missingVtx); err != nil { + if err := te.issue(context.Background(), missingVtx); err != nil { t.Fatal(err) } @@ -2254,7 +2511,7 @@ func TestEngineIssueBlockingTx(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -2294,16 +2551,16 @@ func TestEngineIssueBlockingTx(t *testing.T) { TxsV: []snowstorm.Tx{tx0, tx1}, } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := te.issue(vtx); err != nil { + if err := te.issue(context.Background(), vtx); err != nil { t.Fatal(err) } @@ -2319,7 +2576,7 @@ func TestEngineReissueAbortedVertex(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -2330,6 +2587,7 @@ func TestEngineReissueAbortedVertex(t *testing.T) { manager := vertex.NewTestManager(t) manager.Default(true) + manager.TestStorage.CantEdge = false engCfg.Manager = manager gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ @@ -2364,11 +2622,11 @@ func TestEngineReissueAbortedVertex(t *testing.T) { BytesV: vtxBytes1, } - manager.EdgeF = func() []ids.ID { + manager.EdgeF = func(context.Context) []ids.ID { return []ids.ID{gVtx.ID()} } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID 
ids.ID) (avalanche.Vertex, error) { if vtxID == gVtx.ID() { return gVtx, nil } @@ -2376,12 +2634,12 @@ func TestEngineReissueAbortedVertex(t *testing.T) { panic("Unknown vertex requested") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -2389,18 +2647,18 @@ func TestEngineReissueAbortedVertex(t *testing.T) { manager.GetVtxF = nil requestID := new(uint32) - sender.SendGetF = func(vID ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, vID ids.NodeID, reqID uint32, vtxID ids.ID) { *requestID = reqID } sender.CantSendChits = false - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtxBytes1) { return vtx1, nil } t.Fatalf("Unknown bytes provided") panic("Unknown bytes provided") } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtxID1 { return vtx1, nil } @@ -2408,24 +2666,24 @@ func TestEngineReissueAbortedVertex(t *testing.T) { panic("Unknown bytes provided") } - if err := te.PushQuery(vdr, 0, vtx1.Bytes()); err != nil { + if err := te.PushQuery(context.Background(), vdr, 0, vtx1.Bytes()); err != nil { t.Fatal(err) } sender.SendGetF = nil manager.ParseVtxF = nil - if err := te.GetFailed(vdr, *requestID); err != nil { + if err := te.GetFailed(context.Background(), vdr, *requestID); err != nil { t.Fatal(err) } requested := new(bool) - sender.SendGetF = func(_ ids.NodeID, _ uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, _ ids.NodeID, _ uint32, vtxID ids.ID) { if vtxID == vtxID0 { *requested = true } } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ 
context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtxID1 { return vtx1, nil } @@ -2433,7 +2691,7 @@ func TestEngineReissueAbortedVertex(t *testing.T) { panic("Unknown bytes provided") } - if err := te.PullQuery(vdr, 0, vtxID1); err != nil { + if err := te.PullQuery(context.Background(), vdr, 0, vtxID1); err != nil { t.Fatal(err) } @@ -2447,7 +2705,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { vals := validators.NewSet() vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -2456,7 +2714,6 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { vals.RegisterCallbackListener(startup) bootCfg.Beacons = vals - bootCfg.Validators = vals bootCfg.StartupTracker = startup engCfg.Validators = vals @@ -2469,10 +2726,11 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { manager := vertex.NewTestManager(t) manager.Default(true) + manager.TestStorage.CantEdge = false bootCfg.Manager = manager engCfg.Manager = manager - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} vm.Default(true) bootCfg.VM = vm engCfg.VM = vm @@ -2535,7 +2793,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { requested := new(bool) requestID := new(uint32) - sender.SendGetAcceptedFrontierF = func(vdrs ids.NodeIDSet, reqID uint32) { + sender.SendGetAcceptedFrontierF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } @@ -2548,24 +2806,26 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { dh := &dummyHandler{} bootstrapper, err := bootstrap.New( + context.Background(), bootCfg, dh.onDoneBootstrapping, + noopStarter, ) if err != nil { t.Fatal(err) } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } 
dh.startEngineF = te.Start - if err := bootstrapper.Start(0); err != nil { + if err := bootstrapper.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := bootstrapper.Connected(vdr, version.CurrentApp); err != nil { + if err := bootstrapper.Connected(context.Background(), vdr, version.CurrentApp); err != nil { t.Fatal(err) } @@ -2578,21 +2838,21 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { acceptedFrontier := []ids.ID{vtxID0} *requested = false - sender.SendGetAcceptedF = func(vdrs ids.NodeIDSet, reqID uint32, proposedAccepted []ids.ID) { + sender.SendGetAcceptedF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32, proposedAccepted []ids.ID) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } if !vdrs.Contains(vdr) { t.Fatalf("Should have requested from %s", vdr) } - if !ids.Equals(acceptedFrontier, proposedAccepted) { + if !slices.Equal(acceptedFrontier, proposedAccepted) { t.Fatalf("Wrong proposedAccepted vertices.\nExpected: %s\nGot: %s", acceptedFrontier, proposedAccepted) } *requested = true *requestID = reqID } - if err := bootstrapper.AcceptedFrontier(vdr, *requestID, acceptedFrontier); err != nil { + if err := bootstrapper.AcceptedFrontier(context.Background(), vdr, *requestID, acceptedFrontier); err != nil { t.Fatal(err) } @@ -2600,7 +2860,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { t.Fatalf("Should have requested from the validators during AcceptedFrontier") } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtxID0 { return nil, errMissing } @@ -2608,7 +2868,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { panic("Unknown vertex requested") } - sender.SendGetAncestorsF = func(inVdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, inVdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != 
inVdr { t.Fatalf("Asking wrong validator for vertex") } @@ -2618,31 +2878,31 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { *requestID = reqID } - if err := bootstrapper.Accepted(vdr, *requestID, acceptedFrontier); err != nil { + if err := bootstrapper.Accepted(context.Background(), vdr, *requestID, acceptedFrontier); err != nil { t.Fatal(err) } manager.GetVtxF = nil sender.SendGetF = nil - vm.ParseTxF = func(b []byte) (snowstorm.Tx, error) { + vm.ParseTxF = func(_ context.Context, b []byte) (snowstorm.Tx, error) { if bytes.Equal(b, txBytes0) { return tx0, nil } t.Fatalf("Unknown bytes provided") panic("Unknown bytes provided") } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtxBytes0) { return vtx0, nil } t.Fatalf("Unknown bytes provided") panic("Unknown bytes provided") } - manager.EdgeF = func() []ids.ID { + manager.EdgeF = func(context.Context) []ids.ID { return []ids.ID{vtxID0} } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtxID0 { return vtx0, nil } @@ -2650,7 +2910,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } - if err := bootstrapper.Ancestors(vdr, *requestID, [][]byte{vtxBytes0}); err != nil { + if err := bootstrapper.Ancestors(context.Background(), vdr, *requestID, [][]byte{vtxBytes0}); err != nil { t.Fatal(err) } @@ -2666,25 +2926,25 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { t.Fatalf("Should have accepted %s", vtxID0) } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtxBytes1) { return vtx1, nil } t.Fatalf("Unknown bytes provided") panic("Unknown bytes provided") } - sender.SendChitsF = func(inVdr ids.NodeID, _ 
uint32, chits []ids.ID) { + sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, _ uint32, chits []ids.ID, _ []ids.ID) { if inVdr != vdr { t.Fatalf("Sent to the wrong validator") } expected := []ids.ID{vtxID0} - if !ids.Equals(expected, chits) { + if !slices.Equal(expected, chits) { t.Fatalf("Returned wrong chits") } } - sender.SendPushQueryF = func(vdrs ids.NodeIDSet, _ uint32, vtx []byte) { + sender.SendPushQueryF = func(_ context.Context, vdrs set.Set[ids.NodeID], _ uint32, vtx []byte) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } @@ -2696,7 +2956,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { t.Fatalf("Sent wrong query bytes") } } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtxID1 { return vtx1, nil } @@ -2704,7 +2964,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } - if err := te.PushQuery(vdr, 0, vtxBytes1); err != nil { + if err := te.PushQuery(context.Background(), vdr, 0, vtxBytes1); err != nil { t.Fatal(err) } @@ -2722,7 +2982,7 @@ func TestEngineReBootstrapFails(t *testing.T) { vals := validators.NewSet() vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -2731,7 +2991,6 @@ func TestEngineReBootstrapFails(t *testing.T) { vals.RegisterCallbackListener(startup) bootCfg.Beacons = vals - bootCfg.Validators = vals bootCfg.StartupTracker = startup engCfg.Validators = vals @@ -2747,7 +3006,7 @@ func TestEngineReBootstrapFails(t *testing.T) { bootCfg.Manager = manager engCfg.Manager = manager - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} vm.Default(true) bootCfg.VM = vm engCfg.VM = vm @@ -2783,18 +3042,18 @@ func TestEngineReBootstrapFails(t *testing.T) { 
requested := new(bool) requestID := new(uint32) - sender.SendGetAcceptedFrontierF = func(vdrs ids.NodeIDSet, reqID uint32) { + sender.SendGetAcceptedFrontierF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32) { // instead of triggering the timeout here, we'll just invoke the GetAcceptedFrontierFailed func // - // s.router.GetAcceptedFrontierFailed(vID, s.ctx.ChainID, requestID) - // -> chain.GetAcceptedFrontierFailed(validatorID, requestID) + // s.router.GetAcceptedFrontierFailed(context.Background(), vID, s.ctx.ChainID, requestID) + // -> chain.GetAcceptedFrontierFailed(context.Background(), validatorID, requestID) // ---> h.sendReliableMsg(message{ // messageType: constants.GetAcceptedFrontierFailedMsg, // validatorID: validatorID, // requestID: requestID, // }) - // -----> h.engine.GetAcceptedFrontierFailed(msg.validatorID, msg.requestID) - // -------> return b.AcceptedFrontier(validatorID, requestID, nil) + // -----> h.engine.GetAcceptedFrontierFailed(context.Background(), msg.validatorID, msg.requestID) + // -------> return b.AcceptedFrontier(context.Background(), validatorID, requestID, nil) // ensure the request is made to the correct validators if vdrs.Len() != 1 { @@ -2809,14 +3068,16 @@ func TestEngineReBootstrapFails(t *testing.T) { dh := &dummyHandler{} bootstrapper, err := bootstrap.New( + context.Background(), bootCfg, dh.onDoneBootstrapping, + noopStarter, ) if err != nil { t.Fatal(err) } - if err := bootstrapper.Start(0); err != nil { + if err := bootstrapper.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -2826,7 +3087,7 @@ func TestEngineReBootstrapFails(t *testing.T) { // reset requested *requested = false - sender.SendGetAcceptedF = func(vdrs ids.NodeIDSet, reqID uint32, proposedAccepted []ids.ID) { + sender.SendGetAcceptedF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32, proposedAccepted []ids.ID) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } @@ -2839,36 +3100,38 
@@ func TestEngineReBootstrapFails(t *testing.T) { // mimic a GetAcceptedFrontierFailedMsg // only validator that was requested timed out on the request - if err := bootstrapper.GetAcceptedFrontierFailed(vdr, *requestID); err != nil { + if err := bootstrapper.GetAcceptedFrontierFailed(context.Background(), vdr, *requestID); err != nil { t.Fatal(err) } // mimic a GetAcceptedFrontierFailedMsg // only validator that was requested timed out on the request - if err := bootstrapper.GetAcceptedFrontierFailed(vdr, *requestID); err != nil { + if err := bootstrapper.GetAcceptedFrontierFailed(context.Background(), vdr, *requestID); err != nil { t.Fatal(err) } - bootCfg.Ctx.Registerer = prometheus.NewRegistry() + bootCfg.Ctx.AvalancheRegisterer = prometheus.NewRegistry() // re-register the Transitive bootstrapper2, err := bootstrap.New( + context.Background(), bootCfg, dh.onDoneBootstrapping, + noopStarter, ) if err != nil { t.Fatal(err) } - if err := bootstrapper2.Start(0); err != nil { + if err := bootstrapper2.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := bootstrapper2.GetAcceptedFailed(vdr, *requestID); err != nil { + if err := bootstrapper2.GetAcceptedFailed(context.Background(), vdr, *requestID); err != nil { t.Fatal(err) } - if err := bootstrapper2.GetAcceptedFailed(vdr, *requestID); err != nil { + if err := bootstrapper2.GetAcceptedFailed(context.Background(), vdr, *requestID); err != nil { t.Fatal(err) } @@ -2885,7 +3148,7 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { vals := validators.NewSet() vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -2894,7 +3157,6 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { vals.RegisterCallbackListener(startup) bootCfg.Beacons = vals - bootCfg.Validators = vals bootCfg.StartupTracker = startup engCfg.Validators = vals @@ -2910,7 +3172,7 @@ func 
TestEngineReBootstrappingIntoConsensus(t *testing.T) { bootCfg.Manager = manager engCfg.Manager = manager - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} vm.Default(true) bootCfg.VM = vm engCfg.VM = vm @@ -2973,7 +3235,7 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { requested := new(bool) requestID := new(uint32) - sender.SendGetAcceptedFrontierF = func(vdrs ids.NodeIDSet, reqID uint32) { + sender.SendGetAcceptedFrontierF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } @@ -2986,34 +3248,36 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { dh := &dummyHandler{} bootstrapper, err := bootstrap.New( + context.Background(), bootCfg, dh.onDoneBootstrapping, + noopStarter, ) if err != nil { t.Fatal(err) } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } dh.startEngineF = te.Start - if err := bootstrapper.Start(0); err != nil { + if err := bootstrapper.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := bootstrapper.Connected(vdr, version.CurrentApp); err != nil { + if err := bootstrapper.Connected(context.Background(), vdr, version.CurrentApp); err != nil { t.Fatal(err) } // fail the AcceptedFrontier - if err := bootstrapper.GetAcceptedFrontierFailed(vdr, *requestID); err != nil { + if err := bootstrapper.GetAcceptedFrontierFailed(context.Background(), vdr, *requestID); err != nil { t.Fatal(err) } // fail the GetAcceptedFailed - if err := bootstrapper.GetAcceptedFailed(vdr, *requestID); err != nil { + if err := bootstrapper.GetAcceptedFailed(context.Background(), vdr, *requestID); err != nil { t.Fatal(err) } @@ -3024,21 +3288,21 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { acceptedFrontier := []ids.ID{vtxID0} *requested = false - sender.SendGetAcceptedF = func(vdrs 
ids.NodeIDSet, reqID uint32, proposedAccepted []ids.ID) { + sender.SendGetAcceptedF = func(_ context.Context, vdrs set.Set[ids.NodeID], reqID uint32, proposedAccepted []ids.ID) { if vdrs.Len() != 1 { t.Fatalf("Should have requested from the validators") } if !vdrs.Contains(vdr) { t.Fatalf("Should have requested from %s", vdr) } - if !ids.Equals(acceptedFrontier, proposedAccepted) { + if !slices.Equal(acceptedFrontier, proposedAccepted) { t.Fatalf("Wrong proposedAccepted vertices.\nExpected: %s\nGot: %s", acceptedFrontier, proposedAccepted) } *requested = true *requestID = reqID } - if err := bootstrapper.AcceptedFrontier(vdr, *requestID, acceptedFrontier); err != nil { + if err := bootstrapper.AcceptedFrontier(context.Background(), vdr, *requestID, acceptedFrontier); err != nil { t.Fatal(err) } @@ -3046,7 +3310,7 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { t.Fatalf("Should have requested from the validators during AcceptedFrontier") } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtxID0 { return nil, errMissing } @@ -3054,7 +3318,7 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { panic("Unknown vertex requested") } - sender.SendGetAncestorsF = func(inVdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, inVdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != inVdr { t.Fatalf("Asking wrong validator for vertex") } @@ -3064,30 +3328,30 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { *requestID = reqID } - if err := bootstrapper.Accepted(vdr, *requestID, acceptedFrontier); err != nil { + if err := bootstrapper.Accepted(context.Background(), vdr, *requestID, acceptedFrontier); err != nil { t.Fatal(err) } manager.GetVtxF = nil - vm.ParseTxF = func(b []byte) (snowstorm.Tx, error) { + vm.ParseTxF = func(_ context.Context, b []byte) (snowstorm.Tx, error) { if 
bytes.Equal(b, txBytes0) { return tx0, nil } t.Fatalf("Unknown bytes provided") panic("Unknown bytes provided") } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtxBytes0) { return vtx0, nil } t.Fatalf("Unknown bytes provided") panic("Unknown bytes provided") } - manager.EdgeF = func() []ids.ID { + manager.EdgeF = func(context.Context) []ids.ID { return []ids.ID{vtxID0} } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtxID0 { return vtx0, nil } @@ -3095,7 +3359,7 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } - if err := bootstrapper.Ancestors(vdr, *requestID, [][]byte{vtxBytes0}); err != nil { + if err := bootstrapper.Ancestors(context.Background(), vdr, *requestID, [][]byte{vtxBytes0}); err != nil { t.Fatal(err) } @@ -3113,25 +3377,25 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { t.Fatalf("Should have accepted %s", vtxID0) } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtxBytes1) { return vtx1, nil } t.Fatalf("Unknown bytes provided") panic("Unknown bytes provided") } - sender.SendChitsF = func(inVdr ids.NodeID, _ uint32, chits []ids.ID) { + sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, _ uint32, chits []ids.ID, _ []ids.ID) { if inVdr != vdr { t.Fatalf("Sent to the wrong validator") } expected := []ids.ID{vtxID1} - if !ids.Equals(expected, chits) { + if !slices.Equal(expected, chits) { t.Fatalf("Returned wrong chits") } } - sender.SendPushQueryF = func(vdrs ids.NodeIDSet, _ uint32, vtx []byte) { + sender.SendPushQueryF = func(_ context.Context, vdrs set.Set[ids.NodeID], _ uint32, vtx []byte) { if vdrs.Len() != 1 { 
t.Fatalf("Should have requested from the validators") } @@ -3143,7 +3407,7 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { t.Fatalf("Sent wrong query bytes") } } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == vtxID1 { return vtx1, nil } @@ -3151,7 +3415,7 @@ func TestEngineReBootstrappingIntoConsensus(t *testing.T) { panic("Unknown bytes provided") } - if err := bootstrapper.PushQuery(vdr, 0, vtxBytes1); err != nil { + if err := bootstrapper.PushQuery(context.Background(), vdr, 0, vtxBytes1); err != nil { t.Fatal(err) } @@ -3168,7 +3432,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -3194,7 +3458,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - VerifyV: errors.New(""), + VerifyV: errTest, } tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) @@ -3217,12 +3481,12 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { TxsV: []snowstorm.Tx{tx1}, } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -3230,33 +3494,33 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { te.Sender = sender reqID := new(uint32) - sender.SendPushQueryF = func(_ ids.NodeIDSet, requestID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { *reqID = requestID } - if err := te.issue(vtx0); err != nil { + if err := te.issue(context.Background(), vtx0); err != nil { t.Fatal(err) } - sender.SendPushQueryF = func(ids.NodeIDSet, uint32, 
[]byte) { + sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { t.Fatalf("should have failed verification") } - if err := te.issue(vtx1); err != nil { + if err := te.issue(context.Background(), vtx1); err != nil { t.Fatal(err) } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { switch vtxID { case vtx0.ID(): return vtx0, nil case vtx1.ID(): return vtx1, nil } - return nil, errors.New("Unknown vtx") + return nil, errUnknownVertex } - if err := te.Chits(vdr, *reqID, []ids.ID{vtx1.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *reqID, []ids.ID{vtx1.ID()}, nil); err != nil { t.Fatal(err) } @@ -3272,7 +3536,7 @@ func TestEnginePartiallyValidVertex(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -3298,7 +3562,7 @@ func TestEnginePartiallyValidVertex(t *testing.T) { IDV: ids.GenerateTestID(), StatusV: choices.Processing, }, - VerifyV: errors.New(""), + VerifyV: errTest, } tx1.InputIDsV = append(tx1.InputIDsV, utxos[1]) @@ -3312,17 +3576,17 @@ func TestEnginePartiallyValidVertex(t *testing.T) { TxsV: []snowstorm.Tx{tx0, tx1}, } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } expectedVtxBytes := []byte{1} - manager.BuildVtxF = func(_ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { + manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { return &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -3338,13 +3602,13 @@ func TestEnginePartiallyValidVertex(t *testing.T) { sender := &common.SenderTest{T: 
t} te.Sender = sender - sender.SendPushQueryF = func(_ ids.NodeIDSet, _ uint32, vtx []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, vtx []byte) { if !bytes.Equal(expectedVtxBytes, vtx) { t.Fatalf("wrong vertex queried") } } - if err := te.issue(vtx); err != nil { + if err := te.issue(context.Background(), vtx); err != nil { t.Fatal(err) } } @@ -3364,17 +3628,19 @@ func TestEngineGossip(t *testing.T) { StatusV: choices.Accepted, }} - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - manager.EdgeF = func() []ids.ID { return []ids.ID{gVtx.ID()} } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if vtxID == gVtx.ID() { return gVtx, nil } @@ -3383,14 +3649,14 @@ func TestEngineGossip(t *testing.T) { } called := new(bool) - sender.SendGossipF = func(vtxBytes []byte) { + sender.SendGossipF = func(_ context.Context, vtxBytes []byte) { *called = true if !bytes.Equal(vtxBytes, gVtx.Bytes()) { t.Fatal(errUnknownVertex) } } - if err := te.Gossip(); err != nil { + if err := te.Gossip(context.Background()); err != nil { t.Fatal(err) } @@ -3408,10 +3674,10 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { vdr := ids.GenerateTestNodeID() secondVdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } - if err := vals.AddWeight(secondVdr, 1); err != nil { + if err := vals.Add(secondVdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -3465,17 +3731,17 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { BytesV: []byte{2}, } - te, err := 
newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } parsed := new(bool) - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtx1.Bytes()) { *parsed = true return vtx1, nil @@ -3483,7 +3749,7 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { return nil, errUnknownVertex } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if !*parsed { return nil, errUnknownVertex } @@ -3495,7 +3761,7 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { } reqID := new(uint32) - sender.SendGetF = func(reqVdr ids.NodeID, requestID uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, vtxID ids.ID) { *reqID = requestID if reqVdr != vdr { t.Fatalf("Wrong validator requested") @@ -3505,16 +3771,16 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { } } - if err := te.PushQuery(vdr, 0, vtx1.Bytes()); err != nil { + if err := te.PushQuery(context.Background(), vdr, 0, vtx1.Bytes()); err != nil { t.Fatal(err) } - if err := te.Put(secondVdr, *reqID, []byte{3}); err != nil { + if err := te.Put(context.Background(), secondVdr, *reqID, []byte{3}); err != nil { t.Fatal(err) } *parsed = false - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtx0.Bytes()) { *parsed = true return vtx0, nil @@ -3522,7 +3788,7 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { return nil, errUnknownVertex } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = 
func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if !*parsed { return nil, errUnknownVertex } @@ -3537,7 +3803,7 @@ func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) { vtx0.StatusV = choices.Processing - if err := te.Put(vdr, *reqID, vtx0.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, *reqID, vtx0.Bytes()); err != nil { t.Fatal(err) } @@ -3554,7 +3820,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -3609,17 +3875,17 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { BytesV: []byte{2}, } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } parsed := new(bool) - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtx1.Bytes()) { *parsed = true return vtx1, nil @@ -3627,7 +3893,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { return nil, errUnknownVertex } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if !*parsed { return nil, errUnknownVertex } @@ -3639,7 +3905,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } reqID := new(uint32) - sender.SendGetF = func(reqVdr ids.NodeID, requestID uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, vtxID ids.ID) { *reqID = requestID if reqVdr != vdr { t.Fatalf("Wrong validator requested") @@ -3649,19 +3915,19 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } } - if err := te.PushQuery(vdr, 
0, vtx1.Bytes()); err != nil { + if err := te.PushQuery(context.Background(), vdr, 0, vtx1.Bytes()); err != nil { t.Fatal(err) } sender.SendGetF = nil sender.CantSendGet = false - if err := te.PushQuery(vdr, *reqID, []byte{3}); err != nil { + if err := te.PushQuery(context.Background(), vdr, *reqID, []byte{3}); err != nil { t.Fatal(err) } *parsed = false - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtx0.Bytes()) { *parsed = true return vtx0, nil @@ -3669,7 +3935,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { return nil, errUnknownVertex } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if !*parsed { return nil, errUnknownVertex } @@ -3684,7 +3950,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { vtx0.StatusV = choices.Processing - if err := te.Put(vdr, *reqID, vtx0.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, *reqID, vtx0.Bytes()); err != nil { t.Fatal(err) } @@ -3704,7 +3970,7 @@ func TestEngineAggressivePolling(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -3714,7 +3980,7 @@ func TestEngineAggressivePolling(t *testing.T) { manager := vertex.NewTestManager(t) engCfg.Manager = manager - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} vm.Default(true) engCfg.VM = vm @@ -3753,18 +4019,18 @@ func TestEngineAggressivePolling(t *testing.T) { } vm.CantSetState = false - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != 
nil { t.Fatal(err) } vm.CantSetState = true parsed := new(bool) - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtx.Bytes()) { *parsed = true return vtx, nil @@ -3772,7 +4038,7 @@ func TestEngineAggressivePolling(t *testing.T) { return nil, errUnknownVertex } - manager.GetVtxF = func(vtxID ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if !*parsed { return nil, errUnknownVertex } @@ -3784,14 +4050,18 @@ func TestEngineAggressivePolling(t *testing.T) { } numPushQueries := new(int) - sender.SendPushQueryF = func(ids.NodeIDSet, uint32, []byte) { *numPushQueries++ } + sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { + *numPushQueries++ + } numPullQueries := new(int) - sender.SendPullQueryF = func(ids.NodeIDSet, uint32, ids.ID) { *numPullQueries++ } + sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID) { + *numPullQueries++ + } vm.CantPendingTxs = false - if err := te.Put(vdr, 0, vtx.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, vtx.Bytes()); err != nil { t.Fatal(err) } @@ -3818,7 +4088,7 @@ func TestEngineDuplicatedIssuance(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -3827,7 +4097,7 @@ func TestEngineDuplicatedIssuance(t *testing.T) { manager.Default(true) - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} vm.Default(true) engCfg.VM = vm @@ -3856,8 +4126,10 @@ func TestEngineDuplicatedIssuance(t *testing.T) { } tx.InputIDsV = append(tx.InputIDsV, utxos[0]) - manager.EdgeF = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } - manager.GetVtxF = func(id 
ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID(), mVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -3869,18 +4141,18 @@ func TestEngineDuplicatedIssuance(t *testing.T) { } vm.CantSetState = false - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } vm.CantSetState = true lastVtx := new(avalanche.TestVertex) - manager.BuildVtxF = func(_ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { + manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { lastVtx = &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -3896,8 +4168,10 @@ func TestEngineDuplicatedIssuance(t *testing.T) { sender.CantSendPushQuery = false - vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx} } - if err := te.Notify(common.PendingTxs); err != nil { + vm.PendingTxsF = func(context.Context) []snowstorm.Tx { + return []snowstorm.Tx{tx} + } + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } @@ -3905,12 +4179,12 @@ func TestEngineDuplicatedIssuance(t *testing.T) { t.Fatalf("Should have issued txs differently") } - manager.BuildVtxF = func([]ids.ID, []snowstorm.Tx) (avalanche.Vertex, error) { + manager.BuildVtxF = func(context.Context, []ids.ID, []snowstorm.Tx) (avalanche.Vertex, error) { t.Fatalf("shouldn't have attempted to issue a duplicated tx") return nil, nil } - if err := te.Notify(common.PendingTxs); err != nil { + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } } @@ -3928,10 +4202,10 @@ func TestEngineDoubleChit(t *testing.T) { vdr0 := ids.GenerateTestNodeID() vdr1 := 
ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr0, 1); err != nil { + if err := vals.Add(vdr0, nil, ids.Empty, 1); err != nil { t.Fatal(err) } - if err := vals.AddWeight(vdr1, 1); err != nil { + if err := vals.Add(vdr1, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -3973,8 +4247,10 @@ func TestEngineDoubleChit(t *testing.T) { BytesV: []byte{1, 1, 2, 3}, } - manager.EdgeF = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{vts[0].ID(), vts[1].ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -3985,17 +4261,17 @@ func TestEngineDoubleChit(t *testing.T) { panic("Should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } reqID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, vtxBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtxBytes []byte) { *reqID = requestID if inVdrs.Len() != 2 { t.Fatalf("Wrong number of validators") @@ -4004,7 +4280,7 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong vertex requested") } } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { if id == vtx.ID() { return vtx, nil } @@ -4012,7 +4288,7 @@ func TestEngineDoubleChit(t *testing.T) { panic("Should have errored") } - if err := te.issue(vtx); err != nil { + if err := te.issue(context.Background(), vtx); err != nil { t.Fatal(err) } @@ -4022,7 +4298,7 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong tx status: %s ; expected: %s", status, 
choices.Processing) } - if err := te.Chits(vdr0, *reqID, votes); err != nil { + if err := te.Chits(context.Background(), vdr0, *reqID, votes, nil); err != nil { t.Fatal(err) } @@ -4030,7 +4306,7 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) } - if err := te.Chits(vdr0, *reqID, votes); err != nil { + if err := te.Chits(context.Background(), vdr0, *reqID, votes, nil); err != nil { t.Fatal(err) } @@ -4038,7 +4314,7 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) } - if err := te.Chits(vdr1, *reqID, votes); err != nil { + if err := te.Chits(context.Background(), vdr1, *reqID, votes, nil); err != nil { t.Fatal(err) } @@ -4054,7 +4330,7 @@ func TestEngineBubbleVotes(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - err := vals.AddWeight(vdr, 1) + err := vals.Add(vdr, nil, ids.Empty, 1) require.NoError(t, err) sender := &common.SenderTest{T: t} @@ -4131,8 +4407,10 @@ func TestEngineBubbleVotes(t *testing.T) { BytesV: []byte{2}, } - manager.EdgeF = func() []ids.ID { return nil } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return nil + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case vtx.ID(): return vtx, nil @@ -4147,18 +4425,18 @@ func TestEngineBubbleVotes(t *testing.T) { panic("should have errored") } - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } queryReqID := new(uint32) queried := new(bool) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, vtxBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtxBytes []byte) { 
require.Len(t, inVdrs, 1, "wrong number of validators") *queryReqID = requestID require.Equal(t, vtx.Bytes(), vtxBytes, "wrong vertex requested") @@ -4167,14 +4445,14 @@ func TestEngineBubbleVotes(t *testing.T) { getReqID := new(uint32) fetched := new(bool) - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, vtxID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, vtxID ids.ID) { require.Equal(t, vdr, inVdr, "wrong validator") *getReqID = requestID require.Equal(t, missingVtx.ID(), vtxID, "wrong vertex requested") *fetched = true } - issued, err := te.issueFrom(vdr, pendingVtx1) + issued, err := te.issueFrom(context.Background(), vdr, pendingVtx1) require.NoError(t, err) require.False(t, issued, "shouldn't have been able to issue %s", pendingVtx1.ID()) require.True(t, *queried, "should have queried for %s", vtx.ID()) @@ -4182,13 +4460,13 @@ func TestEngineBubbleVotes(t *testing.T) { // can't apply votes yet because pendingVtx0 isn't issued because missingVtx // is missing - err = te.Chits(vdr, *queryReqID, []ids.ID{pendingVtx1.ID()}) + err = te.Chits(context.Background(), vdr, *queryReqID, []ids.ID{pendingVtx1.ID()}, nil) require.NoError(t, err) require.Equal(t, choices.Processing, tx0.Status(), "wrong tx status") require.Equal(t, choices.Processing, tx1.Status(), "wrong tx status") // vote for pendingVtx1 should be bubbled up to pendingVtx0 and then to vtx - err = te.GetFailed(vdr, *getReqID) + err = te.GetFailed(context.Background(), vdr, *getReqID) require.NoError(t, err) require.Equal(t, choices.Accepted, tx0.Status(), "wrong tx status") require.Equal(t, choices.Processing, tx1.Status(), "wrong tx status") @@ -4210,7 +4488,7 @@ func TestEngineIssue(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -4218,7 +4496,7 @@ func TestEngineIssue(t *testing.T) { 
manager.Default(true) engCfg.Manager = manager - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} vm.Default(true) engCfg.VM = vm @@ -4255,8 +4533,10 @@ func TestEngineIssue(t *testing.T) { InputIDsV: utxos[1:], } - manager.EdgeF = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID(), mVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -4268,19 +4548,19 @@ func TestEngineIssue(t *testing.T) { } vm.CantSetState = false - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } vm.CantSetState = true numBuilt := 0 vtxID := ids.GenerateTestID() - manager.BuildVtxF = func(_ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { + manager.BuildVtxF = func(_ context.Context, _ []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { numBuilt++ vtx := &avalanche.TestVertex{ TestDecidable: choices.TestDecidable{ @@ -4293,7 +4573,7 @@ func TestEngineIssue(t *testing.T) { BytesV: []byte{1}, } - manager.GetVtxF = func(id ids.ID) (avalanche.Vertex, error) { + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { switch id { case gVtx.ID(): return gVtx, nil @@ -4310,12 +4590,14 @@ func TestEngineIssue(t *testing.T) { } var queryRequestID uint32 - sender.SendPushQueryF = func(_ ids.NodeIDSet, requestID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { queryRequestID = requestID } - vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx0, tx1} } - if err := te.Notify(common.PendingTxs); err 
!= nil { + vm.PendingTxsF = func(context.Context) []snowstorm.Tx { + return []snowstorm.Tx{tx0, tx1} + } + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } @@ -4323,7 +4605,7 @@ func TestEngineIssue(t *testing.T) { t.Fatalf("Should have issued txs differently") } - if err := te.Chits(vdr, queryRequestID, []ids.ID{vtxID}); err != nil { + if err := te.Chits(context.Background(), vdr, queryRequestID, []ids.ID{vtxID}, nil); err != nil { t.Fatal(err) } @@ -4352,7 +4634,7 @@ func TestAbandonTx(t *testing.T) { engCfg.Validators = validators.NewSet() vdr := ids.GenerateTestNodeID() - if err := engCfg.Validators.AddWeight(vdr, 1); err != nil { + if err := engCfg.Validators.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -4362,18 +4644,18 @@ func TestAbandonTx(t *testing.T) { manager.CantGetVtx = false engCfg.Manager = manager - vm := &vertex.TestVM{TestVM: common.TestVM{T: t}} + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} vm.Default(true) vm.CantSetState = false engCfg.VM = vm - te, err := newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -4395,7 +4677,7 @@ func TestAbandonTx(t *testing.T) { DependenciesV: []snowstorm.Tx{gTx}, InputIDsV: []ids.ID{gTx.ID()}, BytesV: utils.RandomBytes(32), - VerifyV: errors.New(""), + VerifyV: errTest, } tx1 := &snowstorm.TestTx{ // Depends on tx0 @@ -4433,24 +4715,24 @@ func TestAbandonTx(t *testing.T) { // Cause the engine to send a Get request for vtx1, vtx0, and some other vtx that doesn't exist sender.CantSendGet = false sender.CantSendChits = false - err = te.PullQuery(vdr, 0, vtx1.ID()) + err = te.PullQuery(context.Background(), vdr, 0, vtx1.ID()) require.NoError(err) - err = te.PullQuery(vdr, 0, vtx0.ID()) + err = te.PullQuery(context.Background(), vdr, 0, vtx0.ID()) require.NoError(err) - 
err = te.PullQuery(vdr, 0, ids.GenerateTestID()) + err = te.PullQuery(context.Background(), vdr, 0, ids.GenerateTestID()) require.NoError(err) // Give the engine vtx1. It should wait to issue vtx1 // until tx0 is issued, because tx1 depends on tx0. - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtx1.BytesV) { vtx1.StatusV = choices.Processing return vtx1, nil } require.FailNow("should have asked to parse vtx1") - return nil, errors.New("should have asked to parse vtx1") + return nil, nil } - err = te.Put(vdr, 0, vtx1.Bytes()) + err = te.Put(context.Background(), vdr, 0, vtx1.Bytes()) require.NoError(err) // Verify that vtx1 is waiting to be issued. @@ -4458,15 +4740,15 @@ func TestAbandonTx(t *testing.T) { // Give the engine vtx0. It should try to issue vtx0 // but then abandon it because tx0 fails verification. - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { if bytes.Equal(b, vtx0.BytesV) { vtx0.StatusV = choices.Processing return vtx0, nil } require.FailNow("should have asked to parse vtx0") - return nil, errors.New("should have asked to parse vtx0") + return nil, nil } - err = te.Put(vdr, 0, vtx0.Bytes()) + err = te.Put(context.Background(), vdr, 0, vtx0.Bytes()) require.NoError(err) // Despite the fact that there is still an outstanding vertex request, @@ -4492,7 +4774,6 @@ func TestSendMixedQuery(t *testing.T) { sender := &common.SenderTest{T: t} engCfg.Sender = sender sender.Default(true) - vdrSet := engCfg.Validators manager := vertex.NewTestManager(t) engCfg.Manager = manager // Override the parameters k, MixedQueryNumPushVdr, MixedQueryNumPushNonVdr, @@ -4501,28 +4782,31 @@ func TestSendMixedQuery(t *testing.T) { engCfg.Params.Alpha = 12 engCfg.Params.MixedQueryNumPushVdr = 12 engCfg.Params.MixedQueryNumPushNonVdr = 11 - te, err := 
newTransitive(engCfg) + te, err := newTransitive(engCfg, noopStarter) if err != nil { t.Fatal(err) } startReqID := uint32(0) - if err := te.Start(startReqID); err != nil { + if err := te.Start(context.Background(), startReqID); err != nil { t.Fatal(err) } - vdrsList := []validators.Validator{} - vdrs := ids.NodeIDSet{} - for i := 0; i < te.Config.Params.K; i++ { - vdr := ids.GenerateTestNodeID() - vdrs.Add(vdr) - vdrsList = append(vdrsList, validators.NewValidator(vdr, 1)) + vdrs := set.Set[ids.NodeID]{} + te.Validators = validators.NewSet() + for i := 0; i < engCfg.Params.K; i++ { + vdrID := ids.GenerateTestNodeID() + vdrs.Add(vdrID) + err := te.Validators.Add(vdrID, nil, ids.Empty, 1) + if err != nil { + t.Fatal(err) + } } if tt.isVdr { - vdrs.Add(te.Ctx.NodeID) - vdrsList = append(vdrsList, validators.NewValidator(te.Ctx.NodeID, 1)) - } - if err := vdrSet.Set(vdrsList); err != nil { - t.Fatal(err) + vdrs.Add(engCfg.Ctx.NodeID) + err := te.Validators.Add(engCfg.Ctx.NodeID, nil, ids.Empty, 1) + if err != nil { + t.Fatal(err) + } } // [blk1] is a child of [gBlk] and passes verification @@ -4540,7 +4824,7 @@ func TestSendMixedQuery(t *testing.T) { BytesV: []byte{1}, } - manager.ParseVtxF = func(b []byte) (avalanche.Vertex, error) { + manager.ParseVtxF = func(_ context.Context, b []byte) (avalanche.Vertex, error) { switch { case bytes.Equal(b, vtx1.Bytes()): return vtx1, nil @@ -4552,8 +4836,8 @@ func TestSendMixedQuery(t *testing.T) { pullQuerySent := new(bool) pullQueryReqID := new(uint32) - pullQueriedVdrs := ids.NodeIDSet{} - sender.SendPullQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, vtxID ids.ID) { + pullQueriedVdrs := set.Set[ids.NodeID]{} + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtxID ids.ID) { switch { case *pullQuerySent: t.Fatalf("Asked multiple times") @@ -4567,8 +4851,8 @@ func TestSendMixedQuery(t *testing.T) { pushQuerySent := new(bool) pushQueryReqID := new(uint32) - pushQueriedVdrs := 
ids.NodeIDSet{} - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, vtx []byte) { + pushQueriedVdrs := set.Set[ids.NodeID]{} + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { switch { case *pushQuerySent: t.Fatal("Asked multiple times") @@ -4582,7 +4866,7 @@ func TestSendMixedQuery(t *testing.T) { // Give the engine vtx1. It should insert it into consensus and send a mixed query // consisting of 12 pull queries and 8 push queries. - if err := te.Put(vdrSet.List()[0].ID(), constants.GossipMsgRequestID, vtx1.Bytes()); err != nil { + if err := te.Put(context.Background(), te.Validators.List()[0].NodeID, constants.GossipMsgRequestID, vtx1.Bytes()); err != nil { t.Fatal(err) } @@ -4610,3 +4894,125 @@ func TestSendMixedQuery(t *testing.T) { }) } } + +func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { + require := require.New(t) + + _, _, engCfg := DefaultConfig() + engCfg.Params.BatchSize = 1 + engCfg.Params.BetaVirtuous = 2 + engCfg.Params.BetaRogue = 2 + engCfg.Params.OptimalProcessing = 1 + + sender := &common.SenderTest{T: t} + sender.Default(true) + sender.CantSendGetAcceptedFrontier = false + engCfg.Sender = sender + + vals := validators.NewSet() + engCfg.Validators = vals + + vdr := ids.GenerateTestNodeID() + require.NoError(vals.Add(vdr, nil, ids.Empty, 1)) + + manager := vertex.NewTestManager(t) + manager.Default(true) + engCfg.Manager = manager + + vm := &vertex.TestVM{TestVM: block.TestVM{TestVM: common.TestVM{T: t}}} + vm.Default(true) + engCfg.VM = vm + + gVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }} + mVtx := &avalanche.TestVertex{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }} + + gTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }} + + utxos := 
[]ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + + tx := &snowstorm.TestTx{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + DependenciesV: []snowstorm.Tx{gTx}, + InputIDsV: utxos[:1], + } + + manager.EdgeF = func(context.Context) []ids.ID { + return []ids.ID{gVtx.ID(), mVtx.ID()} + } + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { + switch id { + case gVtx.ID(): + return gVtx, nil + case mVtx.ID(): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + vm.CantSetState = false + te, err := newTransitive(engCfg, noopStarter) + require.NoError(err) + require.NoError(te.Start(context.Background(), 0)) + + vtx := &avalanche.TestVertex{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentsV: []avalanche.Vertex{gVtx, mVtx}, + TxsV: []snowstorm.Tx{tx}, + BytesV: utils.RandomBytes(32), + } + + queryRequestID := new(uint32) + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtxBytes []byte) { + require.Contains(inVdrs, vdr) + require.Equal(vtx.Bytes(), vtxBytes) + *queryRequestID = requestID + } + + require.NoError(te.issue(context.Background(), vtx)) + + manager.GetVtxF = func(_ context.Context, id ids.ID) (avalanche.Vertex, error) { + switch id { + case gVtx.ID(): + return gVtx, nil + case mVtx.ID(): + return mVtx, nil + case vtx.ID(): + return vtx, nil + } + t.Fatalf("unknown vertex") + panic("Should have errored") + } + + require.Equal(choices.Processing, vtx.Status()) + + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, vtxID ids.ID) { + require.Contains(inVdrs, vdr) + require.Equal(vtx.ID(), vtxID) + *queryRequestID = requestID + } + + vtxIDs := []ids.ID{vtx.ID()} + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, vtxIDs, vtxIDs)) + + require.Equal(choices.Processing, 
vtx.Status()) + + require.NoError(te.QueryFailed(context.Background(), vdr, *queryRequestID)) + + require.Equal(choices.Accepted, vtx.Status()) +} diff --git a/avalanchego/snow/engine/avalanche/vertex/builder.go b/avalanchego/snow/engine/avalanche/vertex/builder.go index d719ed84..506b2132 100644 --- a/avalanchego/snow/engine/avalanche/vertex/builder.go +++ b/avalanchego/snow/engine/avalanche/vertex/builder.go @@ -1,21 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" ) // Builder builds a vertex given a set of parentIDs and transactions. type Builder interface { // Build a new vertex from the contents of a vertex - BuildVtx(parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) + BuildVtx(ctx context.Context, parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) // Build a new stop vertex from the parents - BuildStopVtx(parentIDs []ids.ID) (avalanche.Vertex, error) + BuildStopVtx(ctx context.Context, parentIDs []ids.ID) (avalanche.Vertex, error) } // Build a new stateless vertex from the contents of a vertex @@ -59,8 +62,8 @@ func buildVtx( verifyFunc func(innerStatelessVertex) error, stopVertex bool, ) (StatelessVertex, error) { - ids.SortIDs(parentIDs) - SortHashOf(txs) + utils.Sort(parentIDs) + utils.SortByHash(txs) codecVer := codecVersion if stopVertex { diff --git a/avalanchego/snow/engine/avalanche/vertex/builder_test.go b/avalanchego/snow/engine/avalanche/vertex/builder_test.go index 25cee635..7ed6a9a5 100644 --- a/avalanchego/snow/engine/avalanche/vertex/builder_test.go +++ 
b/avalanchego/snow/engine/avalanche/vertex/builder_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/codec.go b/avalanchego/snow/engine/avalanche/vertex/codec.go index 203a713b..564d699a 100644 --- a/avalanchego/snow/engine/avalanche/vertex/codec.go +++ b/avalanchego/snow/engine/avalanche/vertex/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/heap.go b/avalanchego/snow/engine/avalanche/vertex/heap.go index 4dbf4f66..fa9a0a83 100644 --- a/avalanchego/snow/engine/avalanche/vertex/heap.go +++ b/avalanchego/snow/engine/avalanche/vertex/heap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex @@ -8,16 +8,19 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" + "github.com/ava-labs/avalanchego/utils/set" ) var ( - _ Heap = &maxHeightVertexHeap{} - _ heap.Interface = &priorityQueue{} + _ Heap = (*maxHeightVertexHeap)(nil) + _ heap.Interface = (*priorityQueue)(nil) ) type priorityQueue []avalanche.Vertex -func (pq priorityQueue) Len() int { return len(pq) } +func (pq priorityQueue) Len() int { + return len(pq) +} // Returns true if the vertex at index i has greater height than the vertex at // index j. 
@@ -87,11 +90,13 @@ type Heap interface { } // NewHeap returns an empty Heap -func NewHeap() Heap { return &maxHeightVertexHeap{} } +func NewHeap() Heap { + return &maxHeightVertexHeap{} +} type maxHeightVertexHeap struct { heap priorityQueue - elementIDs ids.Set + elementIDs set.Set[ids.ID] } func (vh *maxHeightVertexHeap) Clear() { @@ -121,6 +126,10 @@ func (vh *maxHeightVertexHeap) Pop() avalanche.Vertex { return vtx } -func (vh *maxHeightVertexHeap) Len() int { return vh.heap.Len() } +func (vh *maxHeightVertexHeap) Len() int { + return vh.heap.Len() +} -func (vh *maxHeightVertexHeap) Contains(vtxID ids.ID) bool { return vh.elementIDs.Contains(vtxID) } +func (vh *maxHeightVertexHeap) Contains(vtxID ids.ID) bool { + return vh.elementIDs.Contains(vtxID) +} diff --git a/avalanchego/snow/engine/avalanche/vertex/heap_test.go b/avalanchego/snow/engine/avalanche/vertex/heap_test.go index b09fff3e..b4e049b5 100644 --- a/avalanchego/snow/engine/avalanche/vertex/heap_test.go +++ b/avalanchego/snow/engine/avalanche/vertex/heap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/manager.go b/avalanchego/snow/engine/avalanche/vertex/manager.go index 4da2d7ae..cf206742 100644 --- a/avalanchego/snow/engine/avalanche/vertex/manager.go +++ b/avalanchego/snow/engine/avalanche/vertex/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/mock_vm.go b/avalanchego/snow/engine/avalanche/vertex/mock_vm.go new file mode 100644 index 00000000..2597d64e --- /dev/null +++ b/avalanchego/snow/engine/avalanche/vertex/mock_vm.go @@ -0,0 +1,406 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex (interfaces: LinearizableVM) + +// Package vertex is a generated GoMock package. +package vertex + +import ( + context "context" + reflect "reflect" + time "time" + + manager "github.com/ava-labs/avalanchego/database/manager" + ids "github.com/ava-labs/avalanchego/ids" + snow "github.com/ava-labs/avalanchego/snow" + snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" + snowstorm "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + common "github.com/ava-labs/avalanchego/snow/engine/common" + version "github.com/ava-labs/avalanchego/version" + gomock "github.com/golang/mock/gomock" +) + +// MockLinearizableVM is a mock of LinearizableVM interface. +type MockLinearizableVM struct { + ctrl *gomock.Controller + recorder *MockLinearizableVMMockRecorder +} + +// MockLinearizableVMMockRecorder is the mock recorder for MockLinearizableVM. +type MockLinearizableVMMockRecorder struct { + mock *MockLinearizableVM +} + +// NewMockLinearizableVM creates a new mock instance. +func NewMockLinearizableVM(ctrl *gomock.Controller) *MockLinearizableVM { + mock := &MockLinearizableVM{ctrl: ctrl} + mock.recorder = &MockLinearizableVMMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLinearizableVM) EXPECT() *MockLinearizableVMMockRecorder { + return m.recorder +} + +// AppGossip mocks base method. 
+func (m *MockLinearizableVM) AppGossip(arg0 context.Context, arg1 ids.NodeID, arg2 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppGossip", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AppGossip indicates an expected call of AppGossip. +func (mr *MockLinearizableVMMockRecorder) AppGossip(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockLinearizableVM)(nil).AppGossip), arg0, arg1, arg2) +} + +// AppRequest mocks base method. +func (m *MockLinearizableVM) AppRequest(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 time.Time, arg4 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppRequest", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// AppRequest indicates an expected call of AppRequest. +func (mr *MockLinearizableVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockLinearizableVM)(nil).AppRequest), arg0, arg1, arg2, arg3, arg4) +} + +// AppRequestFailed mocks base method. +func (m *MockLinearizableVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// AppRequestFailed indicates an expected call of AppRequestFailed. +func (mr *MockLinearizableVMMockRecorder) AppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).AppRequestFailed), arg0, arg1, arg2) +} + +// AppResponse mocks base method. 
+func (m *MockLinearizableVM) AppResponse(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AppResponse", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// AppResponse indicates an expected call of AppResponse. +func (mr *MockLinearizableVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockLinearizableVM)(nil).AppResponse), arg0, arg1, arg2, arg3) +} + +// BuildBlock mocks base method. +func (m *MockLinearizableVM) BuildBlock(arg0 context.Context) (snowman.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BuildBlock", arg0) + ret0, _ := ret[0].(snowman.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BuildBlock indicates an expected call of BuildBlock. +func (mr *MockLinearizableVMMockRecorder) BuildBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlock", reflect.TypeOf((*MockLinearizableVM)(nil).BuildBlock), arg0) +} + +// Connected mocks base method. +func (m *MockLinearizableVM) Connected(arg0 context.Context, arg1 ids.NodeID, arg2 *version.Application) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Connected", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// Connected indicates an expected call of Connected. +func (mr *MockLinearizableVMMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockLinearizableVM)(nil).Connected), arg0, arg1, arg2) +} + +// CreateHandlers mocks base method. 
+func (m *MockLinearizableVM) CreateHandlers(arg0 context.Context) (map[string]*common.HTTPHandler, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateHandlers", arg0) + ret0, _ := ret[0].(map[string]*common.HTTPHandler) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateHandlers indicates an expected call of CreateHandlers. +func (mr *MockLinearizableVMMockRecorder) CreateHandlers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHandlers", reflect.TypeOf((*MockLinearizableVM)(nil).CreateHandlers), arg0) +} + +// CreateStaticHandlers mocks base method. +func (m *MockLinearizableVM) CreateStaticHandlers(arg0 context.Context) (map[string]*common.HTTPHandler, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateStaticHandlers", arg0) + ret0, _ := ret[0].(map[string]*common.HTTPHandler) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateStaticHandlers indicates an expected call of CreateStaticHandlers. +func (mr *MockLinearizableVMMockRecorder) CreateStaticHandlers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateStaticHandlers", reflect.TypeOf((*MockLinearizableVM)(nil).CreateStaticHandlers), arg0) +} + +// CrossChainAppRequest mocks base method. +func (m *MockLinearizableVM) CrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 time.Time, arg4 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CrossChainAppRequest", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// CrossChainAppRequest indicates an expected call of CrossChainAppRequest. 
+func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequest", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppRequest), arg0, arg1, arg2, arg3, arg4) +} + +// CrossChainAppRequestFailed mocks base method. +func (m *MockLinearizableVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CrossChainAppRequestFailed indicates an expected call of CrossChainAppRequestFailed. +func (mr *MockLinearizableVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2) +} + +// CrossChainAppResponse mocks base method. +func (m *MockLinearizableVM) CrossChainAppResponse(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CrossChainAppResponse", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// CrossChainAppResponse indicates an expected call of CrossChainAppResponse. +func (mr *MockLinearizableVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppResponse", reflect.TypeOf((*MockLinearizableVM)(nil).CrossChainAppResponse), arg0, arg1, arg2, arg3) +} + +// Disconnected mocks base method. 
+func (m *MockLinearizableVM) Disconnected(arg0 context.Context, arg1 ids.NodeID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Disconnected", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Disconnected indicates an expected call of Disconnected. +func (mr *MockLinearizableVMMockRecorder) Disconnected(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockLinearizableVM)(nil).Disconnected), arg0, arg1) +} + +// GetBlock mocks base method. +func (m *MockLinearizableVM) GetBlock(arg0 context.Context, arg1 ids.ID) (snowman.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlock", arg0, arg1) + ret0, _ := ret[0].(snowman.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlock indicates an expected call of GetBlock. +func (mr *MockLinearizableVMMockRecorder) GetBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockLinearizableVM)(nil).GetBlock), arg0, arg1) +} + +// GetTx mocks base method. +func (m *MockLinearizableVM) GetTx(arg0 context.Context, arg1 ids.ID) (snowstorm.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTx", arg0, arg1) + ret0, _ := ret[0].(snowstorm.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTx indicates an expected call of GetTx. +func (mr *MockLinearizableVMMockRecorder) GetTx(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockLinearizableVM)(nil).GetTx), arg0, arg1) +} + +// HealthCheck mocks base method. 
+func (m *MockLinearizableVM) HealthCheck(arg0 context.Context) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HealthCheck", arg0) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HealthCheck indicates an expected call of HealthCheck. +func (mr *MockLinearizableVMMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockLinearizableVM)(nil).HealthCheck), arg0) +} + +// Initialize mocks base method. +func (m *MockLinearizableVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 manager.Manager, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + ret0, _ := ret[0].(error) + return ret0 +} + +// Initialize indicates an expected call of Initialize. +func (mr *MockLinearizableVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockLinearizableVM)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) +} + +// LastAccepted mocks base method. +func (m *MockLinearizableVM) LastAccepted(arg0 context.Context) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LastAccepted", arg0) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LastAccepted indicates an expected call of LastAccepted. +func (mr *MockLinearizableVMMockRecorder) LastAccepted(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockLinearizableVM)(nil).LastAccepted), arg0) +} + +// Linearize mocks base method. 
+func (m *MockLinearizableVM) Linearize(arg0 context.Context, arg1 ids.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Linearize", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// Linearize indicates an expected call of Linearize. +func (mr *MockLinearizableVMMockRecorder) Linearize(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Linearize", reflect.TypeOf((*MockLinearizableVM)(nil).Linearize), arg0, arg1) +} + +// ParseBlock mocks base method. +func (m *MockLinearizableVM) ParseBlock(arg0 context.Context, arg1 []byte) (snowman.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParseBlock", arg0, arg1) + ret0, _ := ret[0].(snowman.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParseBlock indicates an expected call of ParseBlock. +func (mr *MockLinearizableVMMockRecorder) ParseBlock(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseBlock", reflect.TypeOf((*MockLinearizableVM)(nil).ParseBlock), arg0, arg1) +} + +// ParseTx mocks base method. +func (m *MockLinearizableVM) ParseTx(arg0 context.Context, arg1 []byte) (snowstorm.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ParseTx", arg0, arg1) + ret0, _ := ret[0].(snowstorm.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ParseTx indicates an expected call of ParseTx. +func (mr *MockLinearizableVMMockRecorder) ParseTx(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseTx", reflect.TypeOf((*MockLinearizableVM)(nil).ParseTx), arg0, arg1) +} + +// PendingTxs mocks base method. 
+func (m *MockLinearizableVM) PendingTxs(arg0 context.Context) []snowstorm.Tx { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PendingTxs", arg0) + ret0, _ := ret[0].([]snowstorm.Tx) + return ret0 +} + +// PendingTxs indicates an expected call of PendingTxs. +func (mr *MockLinearizableVMMockRecorder) PendingTxs(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingTxs", reflect.TypeOf((*MockLinearizableVM)(nil).PendingTxs), arg0) +} + +// SetPreference mocks base method. +func (m *MockLinearizableVM) SetPreference(arg0 context.Context, arg1 ids.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetPreference", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetPreference indicates an expected call of SetPreference. +func (mr *MockLinearizableVMMockRecorder) SetPreference(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockLinearizableVM)(nil).SetPreference), arg0, arg1) +} + +// SetState mocks base method. +func (m *MockLinearizableVM) SetState(arg0 context.Context, arg1 snow.State) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetState", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetState indicates an expected call of SetState. +func (mr *MockLinearizableVMMockRecorder) SetState(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetState", reflect.TypeOf((*MockLinearizableVM)(nil).SetState), arg0, arg1) +} + +// Shutdown mocks base method. +func (m *MockLinearizableVM) Shutdown(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Shutdown", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Shutdown indicates an expected call of Shutdown. 
+func (mr *MockLinearizableVMMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockLinearizableVM)(nil).Shutdown), arg0) +} + +// Version mocks base method. +func (m *MockLinearizableVM) Version(arg0 context.Context) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version", arg0) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockLinearizableVMMockRecorder) Version(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockLinearizableVM)(nil).Version), arg0) +} diff --git a/avalanchego/snow/engine/avalanche/vertex/mocks/dag_vm.go b/avalanchego/snow/engine/avalanche/vertex/mocks/dag_vm.go deleted file mode 100644 index bd1e6b73..00000000 --- a/avalanchego/snow/engine/avalanche/vertex/mocks/dag_vm.go +++ /dev/null @@ -1,303 +0,0 @@ -// Code generated by mockery v2.9.4. DO NOT EDIT. 
- -package mocks - -import ( - ids "github.com/ava-labs/avalanchego/ids" - common "github.com/ava-labs/avalanchego/snow/engine/common" - - manager "github.com/ava-labs/avalanchego/database/manager" - - mock "github.com/stretchr/testify/mock" - - snow "github.com/ava-labs/avalanchego/snow" - - snowstorm "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - - time "time" - - version "github.com/ava-labs/avalanchego/version" -) - -// DAGVM is an autogenerated mock type for the DAGVM type -type DAGVM struct { - mock.Mock -} - -// AppGossip provides a mock function with given fields: nodeID, msg -func (_m *DAGVM) AppGossip(nodeID ids.NodeID, msg []byte) error { - ret := _m.Called(nodeID, msg) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, []byte) error); ok { - r0 = rf(nodeID, msg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppRequest provides a mock function with given fields: nodeID, requestID, deadline, request -func (_m *DAGVM) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - ret := _m.Called(nodeID, requestID, deadline, request) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, time.Time, []byte) error); ok { - r0 = rf(nodeID, requestID, deadline, request) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppRequestFailed provides a mock function with given fields: nodeID, requestID -func (_m *DAGVM) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { - ret := _m.Called(nodeID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(nodeID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppResponse provides a mock function with given fields: nodeID, requestID, response -func (_m *DAGVM) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { - ret := _m.Called(nodeID, requestID, response) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []byte) 
error); ok { - r0 = rf(nodeID, requestID, response) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Connected provides a mock function with given fields: id, nodeVersion -func (_m *DAGVM) Connected(id ids.NodeID, nodeVersion version.Application) error { - ret := _m.Called(id, nodeVersion) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, version.Application) error); ok { - r0 = rf(id, nodeVersion) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// CreateHandlers provides a mock function with given fields: -func (_m *DAGVM) CreateHandlers() (map[string]*common.HTTPHandler, error) { - ret := _m.Called() - - var r0 map[string]*common.HTTPHandler - if rf, ok := ret.Get(0).(func() map[string]*common.HTTPHandler); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]*common.HTTPHandler) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CreateStaticHandlers provides a mock function with given fields: -func (_m *DAGVM) CreateStaticHandlers() (map[string]*common.HTTPHandler, error) { - ret := _m.Called() - - var r0 map[string]*common.HTTPHandler - if rf, ok := ret.Get(0).(func() map[string]*common.HTTPHandler); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]*common.HTTPHandler) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Disconnected provides a mock function with given fields: id -func (_m *DAGVM) Disconnected(id ids.NodeID) error { - ret := _m.Called(id) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID) error); ok { - r0 = rf(id) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetTx provides a mock function with given fields: _a0 -func (_m *DAGVM) GetTx(_a0 ids.ID) (snowstorm.Tx, error) { - ret := _m.Called(_a0) - - var r0 snowstorm.Tx - if rf, ok := 
ret.Get(0).(func(ids.ID) snowstorm.Tx); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(snowstorm.Tx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(ids.ID) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HealthCheck provides a mock function with given fields: -func (_m *DAGVM) HealthCheck() (interface{}, error) { - ret := _m.Called() - - var r0 interface{} - if rf, ok := ret.Get(0).(func() interface{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Initialize provides a mock function with given fields: ctx, dbManager, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender -func (_m *DAGVM) Initialize(ctx *snow.Context, dbManager manager.Manager, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, toEngine chan<- common.Message, fxs []*common.Fx, appSender common.AppSender) error { - ret := _m.Called(ctx, dbManager, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) - - var r0 error - if rf, ok := ret.Get(0).(func(*snow.Context, manager.Manager, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender) error); ok { - r0 = rf(ctx, dbManager, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// ParseTx provides a mock function with given fields: tx -func (_m *DAGVM) ParseTx(tx []byte) (snowstorm.Tx, error) { - ret := _m.Called(tx) - - var r0 snowstorm.Tx - if rf, ok := ret.Get(0).(func([]byte) snowstorm.Tx); ok { - r0 = rf(tx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(snowstorm.Tx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(tx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// PendingTxs provides a mock function 
with given fields: -func (_m *DAGVM) PendingTxs() []snowstorm.Tx { - ret := _m.Called() - - var r0 []snowstorm.Tx - if rf, ok := ret.Get(0).(func() []snowstorm.Tx); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]snowstorm.Tx) - } - } - - return r0 -} - -// SetState provides a mock function with given fields: state -func (_m *DAGVM) SetState(state snow.State) error { - ret := _m.Called(state) - - var r0 error - if rf, ok := ret.Get(0).(func(snow.State) error); ok { - r0 = rf(state) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Shutdown provides a mock function with given fields: -func (_m *DAGVM) Shutdown() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Version provides a mock function with given fields: -func (_m *DAGVM) Version() (string, error) { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/avalanchego/snow/engine/avalanche/vertex/parser.go b/avalanchego/snow/engine/avalanche/vertex/parser.go index 60409fcf..cd409c7e 100644 --- a/avalanchego/snow/engine/avalanche/vertex/parser.go +++ b/avalanchego/snow/engine/avalanche/vertex/parser.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex import ( + "context" + "github.com/ava-labs/avalanchego/snow/consensus/avalanche" "github.com/ava-labs/avalanchego/utils/hashing" ) @@ -11,7 +13,7 @@ import ( // Parser parses bytes into a vertex. 
type Parser interface { // Parse a vertex from a slice of bytes - ParseVtx(vertex []byte) (avalanche.Vertex, error) + ParseVtx(ctx context.Context, vertex []byte) (avalanche.Vertex, error) } // Parse parses the provided vertex bytes into a stateless vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/parser_test.go b/avalanchego/snow/engine/avalanche/vertex/parser_test.go index fd2104c9..8f1f2e9e 100644 --- a/avalanchego/snow/engine/avalanche/vertex/parser_test.go +++ b/avalanchego/snow/engine/avalanche/vertex/parser_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/sorting.go b/avalanchego/snow/engine/avalanche/vertex/sorting.go deleted file mode 100644 index ed8f350c..00000000 --- a/avalanchego/snow/engine/avalanche/vertex/sorting.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package vertex - -import ( - "bytes" - "sort" - - "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/hashing" -) - -type sortHashOfData [][]byte - -func (d sortHashOfData) Less(i, j int) bool { - return bytes.Compare( - hashing.ComputeHash256(d[i]), - hashing.ComputeHash256(d[j]), - ) == -1 -} -func (d sortHashOfData) Len() int { return len(d) } -func (d sortHashOfData) Swap(i, j int) { d[j], d[i] = d[i], d[j] } - -func SortHashOf(bytesSlice [][]byte) { sort.Sort(sortHashOfData(bytesSlice)) } -func IsSortedAndUniqueHashOf(bytesSlice [][]byte) bool { - return utils.IsSortedAndUnique(sortHashOfData(bytesSlice)) -} diff --git a/avalanchego/snow/engine/avalanche/vertex/stateless_vertex.go b/avalanchego/snow/engine/avalanche/vertex/stateless_vertex.go index c4d688fe..f87996a4 100644 --- a/avalanchego/snow/engine/avalanche/vertex/stateless_vertex.go +++ b/avalanchego/snow/engine/avalanche/vertex/stateless_vertex.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex @@ -8,6 +8,7 @@ import ( "fmt" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/components/verify" ) @@ -55,17 +56,41 @@ type statelessVertex struct { bytes []byte } -func (v statelessVertex) ID() ids.ID { return v.id } -func (v statelessVertex) Bytes() []byte { return v.bytes } -func (v statelessVertex) Version() uint16 { return v.innerStatelessVertex.Version } -func (v statelessVertex) ChainID() ids.ID { return v.innerStatelessVertex.ChainID } +func (v statelessVertex) ID() ids.ID { + return v.id +} + +func (v statelessVertex) Bytes() []byte { + return v.bytes +} + +func (v statelessVertex) Version() uint16 { + return v.innerStatelessVertex.Version +} + +func (v statelessVertex) ChainID() ids.ID { + return v.innerStatelessVertex.ChainID +} + func (v statelessVertex) StopVertex() bool { return v.innerStatelessVertex.Version == codecVersionWithStopVtx } -func (v statelessVertex) Height() uint64 { return v.innerStatelessVertex.Height } -func (v statelessVertex) Epoch() uint32 { return v.innerStatelessVertex.Epoch } -func (v statelessVertex) ParentIDs() []ids.ID { return v.innerStatelessVertex.ParentIDs } -func (v statelessVertex) Txs() [][]byte { return v.innerStatelessVertex.Txs } + +func (v statelessVertex) Height() uint64 { + return v.innerStatelessVertex.Height +} + +func (v statelessVertex) Epoch() uint32 { + return v.innerStatelessVertex.Epoch +} + +func (v statelessVertex) ParentIDs() []ids.ID { + return v.innerStatelessVertex.ParentIDs +} + +func (v statelessVertex) Txs() [][]byte { + return v.innerStatelessVertex.Txs +} type innerStatelessVertex struct { Version uint16 `json:"version"` @@ -95,9 +120,9 @@ func (v innerStatelessVertex) verify() error { return errNoOperations case len(v.Txs) > maxTxsPerVtx: return errTooManyTxs - case !ids.IsSortedAndUniqueIDs(v.ParentIDs): + case !utils.IsSortedAndUniqueSortable(v.ParentIDs): return errInvalidParents - case 
!IsSortedAndUniqueHashOf(v.Txs): + case !utils.IsSortedAndUniqueByHash(v.Txs): return errInvalidTxs default: return nil @@ -114,7 +139,7 @@ func (v innerStatelessVertex) verifyStopVertex() error { return errTooManyparentIDs case len(v.Txs) != 0: return errTooManyTxs - case !ids.IsSortedAndUniqueIDs(v.ParentIDs): + case !utils.IsSortedAndUniqueSortable(v.ParentIDs): return errInvalidParents default: return nil diff --git a/avalanchego/snow/engine/avalanche/vertex/stateless_vertex_test.go b/avalanchego/snow/engine/avalanche/vertex/stateless_vertex_test.go index c1d60a81..af9819da 100644 --- a/avalanchego/snow/engine/avalanche/vertex/stateless_vertex_test.go +++ b/avalanchego/snow/engine/avalanche/vertex/stateless_vertex_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex diff --git a/avalanchego/snow/engine/avalanche/vertex/storage.go b/avalanchego/snow/engine/avalanche/vertex/storage.go index 0a3ce368..40ec863d 100644 --- a/avalanchego/snow/engine/avalanche/vertex/storage.go +++ b/avalanchego/snow/engine/avalanche/vertex/storage.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" ) @@ -12,9 +14,9 @@ import ( // engine. type Storage interface { // Get a vertex by its hash from storage. - GetVtx(vtxID ids.ID) (avalanche.Vertex, error) + GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) // Edge returns a list of accepted vertex IDs with no accepted children. - Edge() (vtxIDs []ids.ID) + Edge(ctx context.Context) (vtxIDs []ids.ID) // Returns "true" if accepted frontier ("Edge") is stop vertex. 
- StopVertexAccepted() (bool, error) + StopVertexAccepted(ctx context.Context) (bool, error) } diff --git a/avalanchego/snow/engine/avalanche/vertex/test_builder.go b/avalanchego/snow/engine/avalanche/vertex/test_builder.go index 25ff4ce5..a0811527 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_builder.go +++ b/avalanchego/snow/engine/avalanche/vertex/test_builder.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex import ( + "context" "errors" "testing" @@ -15,21 +16,23 @@ import ( var ( errBuild = errors.New("unexpectedly called Build") - _ Builder = &TestBuilder{} + _ Builder = (*TestBuilder)(nil) ) type TestBuilder struct { T *testing.T CantBuildVtx bool - BuildVtxF func(parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) - BuildStopVtxF func(parentIDs []ids.ID) (avalanche.Vertex, error) + BuildVtxF func(ctx context.Context, parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) + BuildStopVtxF func(ctx context.Context, parentIDs []ids.ID) (avalanche.Vertex, error) } -func (b *TestBuilder) Default(cant bool) { b.CantBuildVtx = cant } +func (b *TestBuilder) Default(cant bool) { + b.CantBuildVtx = cant +} -func (b *TestBuilder) BuildVtx(parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { +func (b *TestBuilder) BuildVtx(ctx context.Context, parentIDs []ids.ID, txs []snowstorm.Tx) (avalanche.Vertex, error) { if b.BuildVtxF != nil { - return b.BuildVtxF(parentIDs, txs) + return b.BuildVtxF(ctx, parentIDs, txs) } if b.CantBuildVtx && b.T != nil { b.T.Fatal(errBuild) @@ -37,9 +40,9 @@ func (b *TestBuilder) BuildVtx(parentIDs []ids.ID, txs []snowstorm.Tx) (avalanch return nil, errBuild } -func (b *TestBuilder) BuildStopVtx(parentIDs []ids.ID) (avalanche.Vertex, error) { +func (b *TestBuilder) BuildStopVtx(ctx context.Context, parentIDs []ids.ID) 
(avalanche.Vertex, error) { if b.BuildStopVtxF != nil { - return b.BuildStopVtxF(parentIDs) + return b.BuildStopVtxF(ctx, parentIDs) } if b.CantBuildVtx && b.T != nil { b.T.Fatal(errBuild) diff --git a/avalanchego/snow/engine/avalanche/vertex/test_manager.go b/avalanchego/snow/engine/avalanche/vertex/test_manager.go index 6d5fe33c..a2f55ee7 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_manager.go +++ b/avalanchego/snow/engine/avalanche/vertex/test_manager.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex import "testing" -var _ Manager = &TestManager{} +var _ Manager = (*TestManager)(nil) type TestManager struct { TestBuilder diff --git a/avalanchego/snow/engine/avalanche/vertex/test_parser.go b/avalanchego/snow/engine/avalanche/vertex/test_parser.go index c7b97896..ef680ee8 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_parser.go +++ b/avalanchego/snow/engine/avalanche/vertex/test_parser.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex import ( + "context" "errors" "testing" @@ -13,20 +14,22 @@ import ( var ( errParse = errors.New("unexpectedly called Parse") - _ Parser = &TestParser{} + _ Parser = (*TestParser)(nil) ) type TestParser struct { T *testing.T CantParseVtx bool - ParseVtxF func([]byte) (avalanche.Vertex, error) + ParseVtxF func(context.Context, []byte) (avalanche.Vertex, error) } -func (p *TestParser) Default(cant bool) { p.CantParseVtx = cant } +func (p *TestParser) Default(cant bool) { + p.CantParseVtx = cant +} -func (p *TestParser) ParseVtx(b []byte) (avalanche.Vertex, error) { +func (p *TestParser) ParseVtx(ctx context.Context, b []byte) (avalanche.Vertex, error) { if p.ParseVtxF != nil { - return p.ParseVtxF(b) + return p.ParseVtxF(ctx, b) } if p.CantParseVtx && p.T != nil { p.T.Fatal(errParse) diff --git a/avalanchego/snow/engine/avalanche/vertex/test_storage.go b/avalanchego/snow/engine/avalanche/vertex/test_storage.go index 2ed40c53..10403a92 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_storage.go +++ b/avalanchego/snow/engine/avalanche/vertex/test_storage.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package vertex import ( + "context" "errors" "testing" @@ -16,15 +17,15 @@ var ( errEdge = errors.New("unexpectedly called Edge") errStopVertexAccepted = errors.New("unexpectedly called StopVertexAccepted") - _ Storage = &TestStorage{} + _ Storage = (*TestStorage)(nil) ) type TestStorage struct { T *testing.T CantGetVtx, CantEdge, CantStopVertexAccepted bool - GetVtxF func(ids.ID) (avalanche.Vertex, error) - EdgeF func() []ids.ID - StopVertexAcceptedF func() (bool, error) + GetVtxF func(context.Context, ids.ID) (avalanche.Vertex, error) + EdgeF func(context.Context) []ids.ID + StopVertexAcceptedF func(context.Context) (bool, error) } func (s *TestStorage) Default(cant bool) { @@ -32,9 +33,9 @@ func (s *TestStorage) Default(cant bool) { s.CantEdge = cant } -func (s *TestStorage) GetVtx(id ids.ID) (avalanche.Vertex, error) { +func (s *TestStorage) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Vertex, error) { if s.GetVtxF != nil { - return s.GetVtxF(id) + return s.GetVtxF(ctx, vtxID) } if s.CantGetVtx && s.T != nil { s.T.Fatal(errGet) @@ -42,9 +43,9 @@ func (s *TestStorage) GetVtx(id ids.ID) (avalanche.Vertex, error) { return nil, errGet } -func (s *TestStorage) Edge() []ids.ID { +func (s *TestStorage) Edge(ctx context.Context) []ids.ID { if s.EdgeF != nil { - return s.EdgeF() + return s.EdgeF(ctx) } if s.CantEdge && s.T != nil { s.T.Fatal(errEdge) @@ -52,9 +53,9 @@ func (s *TestStorage) Edge() []ids.ID { return nil } -func (s *TestStorage) StopVertexAccepted() (bool, error) { +func (s *TestStorage) StopVertexAccepted(ctx context.Context) (bool, error) { if s.StopVertexAcceptedF != nil { - return s.StopVertexAcceptedF() + return s.StopVertexAcceptedF(ctx) } if s.CantStopVertexAccepted && s.T != nil { s.T.Fatal(errStopVertexAccepted) diff --git a/avalanchego/snow/engine/avalanche/vertex/test_vm.go b/avalanchego/snow/engine/avalanche/vertex/test_vm.go index 17ef7cf6..576cfba1 100644 --- a/avalanchego/snow/engine/avalanche/vertex/test_vm.go +++ 
b/avalanchego/snow/engine/avalanche/vertex/test_vm.go @@ -1,30 +1,33 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex import ( + "context" "errors" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" - "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) var ( - errPending = errors.New("unexpectedly called Pending") + errPending = errors.New("unexpectedly called Pending") + errLinearize = errors.New("unexpectedly called Linearize") - _ DAGVM = &TestVM{} + _ LinearizableVM = (*TestVM)(nil) ) type TestVM struct { - common.TestVM + block.TestVM - CantPendingTxs, CantParse, CantGet bool + CantLinearize, CantPendingTxs, CantParse, CantGet bool - PendingTxsF func() []snowstorm.Tx - ParseTxF func([]byte) (snowstorm.Tx, error) - GetTxF func(ids.ID) (snowstorm.Tx, error) + LinearizeF func(context.Context, ids.ID) error + PendingTxsF func(context.Context) []snowstorm.Tx + ParseTxF func(context.Context, []byte) (snowstorm.Tx, error) + GetTxF func(context.Context, ids.ID) (snowstorm.Tx, error) } func (vm *TestVM) Default(cant bool) { @@ -35,9 +38,19 @@ func (vm *TestVM) Default(cant bool) { vm.CantGet = cant } -func (vm *TestVM) PendingTxs() []snowstorm.Tx { +func (vm *TestVM) Linearize(ctx context.Context, stopVertexID ids.ID) error { + if vm.LinearizeF != nil { + return vm.LinearizeF(ctx, stopVertexID) + } + if vm.CantLinearize && vm.T != nil { + vm.T.Fatal(errLinearize) + } + return errLinearize +} + +func (vm *TestVM) PendingTxs(ctx context.Context) []snowstorm.Tx { if vm.PendingTxsF != nil { - return vm.PendingTxsF() + return vm.PendingTxsF(ctx) } if vm.CantPendingTxs && vm.T != nil { vm.T.Fatal(errPending) @@ -45,9 +58,9 @@ func (vm *TestVM) PendingTxs() []snowstorm.Tx { return nil } -func (vm *TestVM) ParseTx(b []byte) 
(snowstorm.Tx, error) { +func (vm *TestVM) ParseTx(ctx context.Context, b []byte) (snowstorm.Tx, error) { if vm.ParseTxF != nil { - return vm.ParseTxF(b) + return vm.ParseTxF(ctx, b) } if vm.CantParse && vm.T != nil { vm.T.Fatal(errParse) @@ -55,9 +68,9 @@ func (vm *TestVM) ParseTx(b []byte) (snowstorm.Tx, error) { return nil, errParse } -func (vm *TestVM) GetTx(txID ids.ID) (snowstorm.Tx, error) { +func (vm *TestVM) GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) { if vm.GetTxF != nil { - return vm.GetTxF(txID) + return vm.GetTxF(ctx, txID) } if vm.CantGet && vm.T != nil { vm.T.Fatal(errGet) diff --git a/avalanchego/snow/engine/avalanche/vertex/vm.go b/avalanchego/snow/engine/avalanche/vertex/vm.go index 52289419..710dfad7 100644 --- a/avalanchego/snow/engine/avalanche/vertex/vm.go +++ b/avalanchego/snow/engine/avalanche/vertex/vm.go @@ -1,29 +1,72 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vertex import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) +type LinearizableVMWithEngine interface { + DAGVM + + // Linearize is called after [Initialize] and after the DAG has been + // finalized. After Linearize is called: + // + // - PendingTxs will never be called again + // - GetTx will never be called again + // - ParseTx may still be called + // - All the block based functions of the [block.ChainVM] must work as + // expected. + // + // Linearize is part of the VM initialization, and will be called at most + // once per VM instantiation. This means that Linearize should be called + // every time the chain restarts after the DAG has finalized. 
+ Linearize( + ctx context.Context, + stopVertexID ids.ID, + toEngine chan<- common.Message, + ) error +} + +type LinearizableVM interface { + DAGVM + + // Linearize is called after [Initialize] and after the DAG has been + // finalized. After Linearize is called: + // + // - PendingTxs will never be called again + // - GetTx will never be called again + // - ParseTx may still be called + // - All the block based functions of the [block.ChainVM] must work as + // expected. + // + // Linearize is part of the VM initialization, and will be called at most + // once per VM instantiation. This means that Linearize should be called + // every time the chain restarts after the DAG has finalized. + Linearize(ctx context.Context, stopVertexID ids.ID) error +} + // DAGVM defines the minimum functionality that an avalanche VM must // implement type DAGVM interface { - common.VM + block.ChainVM Getter // Return any transactions that have not been sent to consensus yet - PendingTxs() []snowstorm.Tx + PendingTxs(ctx context.Context) []snowstorm.Tx // Convert a stream of bytes to a transaction or return an error - ParseTx(tx []byte) (snowstorm.Tx, error) + ParseTx(ctx context.Context, txBytes []byte) (snowstorm.Tx, error) } // Getter defines the functionality for fetching a tx/block by its ID. type Getter interface { // Retrieve a transaction that was submitted previously - GetTx(ids.ID) (snowstorm.Tx, error) + GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) } diff --git a/avalanchego/snow/engine/avalanche/voter.go b/avalanchego/snow/engine/avalanche/voter.go index c0af5e6b..e9aa585d 100644 --- a/avalanchego/snow/engine/avalanche/voter.go +++ b/avalanchego/snow/engine/avalanche/voter.go @@ -1,14 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avalanche import ( + "context" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" ) // Voter records chits received from [vdr] once its dependencies are met. @@ -17,21 +21,25 @@ type voter struct { vdr ids.NodeID requestID uint32 response []ids.ID - deps ids.Set + deps set.Set[ids.ID] } -func (v *voter) Dependencies() ids.Set { return v.deps } +func (v *voter) Dependencies() set.Set[ids.ID] { + return v.deps +} // Mark that a dependency has been met. -func (v *voter) Fulfill(id ids.ID) { +func (v *voter) Fulfill(ctx context.Context, id ids.ID) { v.deps.Remove(id) - v.Update() + v.Update(ctx) } // Abandon this attempt to record chits. -func (v *voter) Abandon(id ids.ID) { v.Fulfill(id) } +func (v *voter) Abandon(ctx context.Context, id ids.ID) { + v.Fulfill(ctx, id) +} -func (v *voter) Update() { +func (v *voter) Update(ctx context.Context) { if v.deps.Len() != 0 || v.t.errs.Errored() { return } @@ -40,30 +48,70 @@ func (v *voter) Update() { if len(results) == 0 { return } + + previouslyLinearized, err := v.t.Manager.StopVertexAccepted(ctx) + if err != nil { + v.t.errs.Add(err) + return + } + for _, result := range results { - _, err := v.bubbleVotes(result) + result := result + v.t.Ctx.Log.Debug("filtering poll results", + zap.Stringer("result", &result), + ) + + _, err := v.bubbleVotes(ctx, result) if err != nil { v.t.errs.Add(err) return } - } - - for _, result := range results { - result := result v.t.Ctx.Log.Debug("finishing poll", zap.Stringer("result", &result), ) - if err := v.t.Consensus.RecordPoll(result); err != nil { + if err := v.t.Consensus.RecordPoll(ctx, result); err != nil { v.t.errs.Add(err) return } } + linearized, err := v.t.Manager.StopVertexAccepted(ctx) + if err != nil { + v.t.errs.Add(err) + return + } + + if 
linearized { + // We guard here to ensure we only call the underlying vm.Linearize and + // startSnowmanConsensus calls once. + if !previouslyLinearized { + // After the chain has been linearized, we will not be issuing any new + // vertices. + v.t.pendingTxs = nil + v.t.metrics.pendingTxs.Set(0) + + // Invariant: The edge should only be the stop vertex after the + // linearization. + edge := v.t.Manager.Edge(ctx) + stopVertexID := edge[0] + if err := v.t.VM.Linearize(ctx, stopVertexID); err != nil { + v.t.errs.Add(err) + return + } + if err := v.t.startSnowmanConsensus(ctx, v.t.RequestID); err != nil { + v.t.errs.Add(err) + } + } + // If the chain has been linearized, there can't be any orphans, so we + // can exit here. + return + } + orphans := v.t.Consensus.Orphans() txs := make([]snowstorm.Tx, 0, orphans.Len()) for orphanID := range orphans { - if tx, err := v.t.VM.GetTx(orphanID); err == nil { + if tx, err := v.t.VM.GetTx(ctx, orphanID); err == nil { txs = append(txs, tx) } else { v.t.Ctx.Log.Warn("failed to fetch tx during attempted re-issuance", @@ -77,7 +125,7 @@ func (v *voter) Update() { zap.Int("numTxs", len(txs)), ) } - if _, err := v.t.batch(txs, batchOption{force: true}); err != nil { + if _, err := v.t.batch(ctx, txs, batchOption{force: true}); err != nil { v.t.errs.Add(err) return } @@ -88,13 +136,13 @@ func (v *voter) Update() { } v.t.Ctx.Log.Debug("avalanche engine can't quiesce") - v.t.repoll() + v.t.repoll(ctx) } -func (v *voter) bubbleVotes(votes ids.UniqueBag) (ids.UniqueBag, error) { +func (v *voter) bubbleVotes(ctx context.Context, votes bag.UniqueBag[ids.ID]) (bag.UniqueBag[ids.ID], error) { vertexHeap := vertex.NewHeap() for vote, set := range votes { - vtx, err := v.t.Manager.GetVtx(vote) + vtx, err := v.t.Manager.GetVtx(ctx, vote) if err != nil { v.t.Ctx.Log.Debug("dropping vote(s)", zap.String("reason", "failed to fetch vertex"), diff --git a/avalanchego/snow/engine/avalanche/voter_test.go 
b/avalanchego/snow/engine/avalanche/voter_test.go index 94d14882..2429b025 100644 --- a/avalanchego/snow/engine/avalanche/voter_test.go +++ b/avalanchego/snow/engine/avalanche/voter_test.go @@ -1,31 +1,34 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avalanche import ( + "context" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" ) func TestVotingFinishesWithAbandonedDep(t *testing.T) { _, _, engCfg := DefaultConfig() mngr := vertex.NewTestManager(t) engCfg.Manager = mngr - transitive, err := newTransitive(engCfg) + transitive, err := newTransitive(engCfg, noopStarter) require.NoError(t, err) - require.NoError(t, transitive.Start( /*startReqID*/ 0)) + require.NoError(t, transitive.Start(context.Background(), 0 /*=startReqID*/)) // prepare 3 validators vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} vdr3 := ids.NodeID{3} - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -34,7 +37,7 @@ func TestVotingFinishesWithAbandonedDep(t *testing.T) { // add poll for request 1 transitive.polls.Add(1, vdrs) - vdrs = ids.NodeIDBag{} + vdrs = bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr3, @@ -54,7 +57,7 @@ func TestVotingFinishesWithAbandonedDep(t *testing.T) { t: transitive, requestID: 2, response: []ids.ID{vote2}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr1, } @@ -62,12 +65,12 @@ func TestVotingFinishesWithAbandonedDep(t *testing.T) { t: transitive, requestID: 2, response: []ids.ID{vote2}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr3, } - voter1.Update() - voter3.Update() + voter1.Update(context.Background()) + voter3.Update(context.Background()) // still expect 2 pending polls since 
request 1 voting is still pending require.Equal(t, 2, transitive.polls.Len()) @@ -75,7 +78,7 @@ func TestVotingFinishesWithAbandonedDep(t *testing.T) { // vote on request 1 // add dependency to voter1's vote which has to be fulfilled prior to finishing voter1Dep := ids.GenerateTestID() - voter1DepSet := ids.NewSet(1) + voter1DepSet := set.NewSet[ids.ID](1) voter1DepSet.Add(voter1Dep) voter1 = &voter{ @@ -90,14 +93,14 @@ func TestVotingFinishesWithAbandonedDep(t *testing.T) { t: transitive, requestID: 1, response: []ids.ID{vote1}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr2, } - voter1.Update() // does nothing because the dependency is still pending - voter2.Update() // voter1 is still remaining with the pending dependency + voter1.Update(context.Background()) // does nothing because the dependency is still pending + voter2.Update(context.Background()) // voter1 is still remaining with the pending dependency - voter1.Abandon(voter1Dep) // voter1 abandons dep1 + voter1.Abandon(context.Background(), voter1Dep) // voter1 abandons dep1 // expect all polls to have finished require.Equal(t, 0, transitive.polls.Len()) @@ -107,16 +110,16 @@ func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { _, _, engCfg := DefaultConfig() mngr := vertex.NewTestManager(t) engCfg.Manager = mngr - transitive, err := newTransitive(engCfg) + transitive, err := newTransitive(engCfg, noopStarter) require.NoError(t, err) - require.NoError(t, transitive.Start( /*startReqID*/ 0)) + require.NoError(t, transitive.Start(context.Background(), 0 /*=startReqID*/)) // prepare 3 validators vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} vdr3 := ids.NodeID{3} - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -125,7 +128,7 @@ func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { // add poll for request 1 transitive.polls.Add(1, vdrs) - vdrs = ids.NodeIDBag{} + vdrs = bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr3, @@ -134,7 +137,7 @@ 
func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { // add poll for request 2 transitive.polls.Add(2, vdrs) - vdrs = ids.NodeIDBag{} + vdrs = bag.Bag[ids.NodeID]{} vdrs.Add( vdr2, vdr3, @@ -155,7 +158,7 @@ func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { t: transitive, requestID: 3, response: []ids.ID{vote3}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr3, } @@ -163,12 +166,12 @@ func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { t: transitive, requestID: 3, response: []ids.ID{vote3}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr2, } - req3Voter1.Update() - req3Voter2.Update() + req3Voter1.Update(context.Background()) + req3Voter2.Update(context.Background()) // expect 3 pending polls since 2 and 1 are still pending require.Equal(t, 3, transitive.polls.Len()) @@ -176,14 +179,14 @@ func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { // vote on request 2 // add dependency to req2/voter3's vote which has to be fulfilled prior to finishing req2Voter2Dep := ids.GenerateTestID() - req2Voter2DepSet := ids.NewSet(1) + req2Voter2DepSet := set.NewSet[ids.ID](1) req2Voter2DepSet.Add(req2Voter2Dep) req2Voter1 := &voter{ t: transitive, requestID: 2, response: []ids.ID{vote2}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr1, } @@ -195,8 +198,8 @@ func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { vdr: vdr3, } - req2Voter1.Update() // does nothing because dep is unfulfilled - req2Voter2.Update() + req2Voter1.Update(context.Background()) // does nothing because dep is unfulfilled + req2Voter2.Update(context.Background()) // still expect 3 pending polls since request 1 voting is still pending require.Equal(t, 3, transitive.polls.Len()) @@ -204,7 +207,7 @@ func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { // vote on request 1 // add dependency to voter1's vote which has to be fulfilled prior to finishing req1Voter1Dep := 
ids.GenerateTestID() - req1Voter1DepSet := ids.NewSet(1) + req1Voter1DepSet := set.NewSet[ids.ID](1) req1Voter1DepSet.Add(req1Voter1Dep) req1Voter1 := &voter{ t: transitive, @@ -218,20 +221,20 @@ func TestVotingFinishesWithAbandonDepMiddleRequest(t *testing.T) { t: transitive, requestID: 1, response: []ids.ID{vote1}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr2, } - req1Voter1.Update() // does nothing because the req2/voter1 dependency is still pending - req1Voter2.Update() // voter1 is still remaining with the pending dependency + req1Voter1.Update(context.Background()) // does nothing because the req2/voter1 dependency is still pending + req1Voter2.Update(context.Background()) // voter1 is still remaining with the pending dependency // abandon dep on voter3 - req2Voter2.Abandon(req2Voter2Dep) // voter3 abandons dep1 + req2Voter2.Abandon(context.Background(), req2Voter2Dep) // voter3 abandons dep1 // expect polls to be pending as req1/voter1's dep is still unfulfilled require.Equal(t, 3, transitive.polls.Len()) - req1Voter1.Abandon(req1Voter1Dep) + req1Voter1.Abandon(context.Background(), req1Voter1Dep) // expect all polls to have finished require.Equal(t, 0, transitive.polls.Len()) @@ -241,16 +244,16 @@ func TestSharedDependency(t *testing.T) { _, _, engCfg := DefaultConfig() mngr := vertex.NewTestManager(t) engCfg.Manager = mngr - transitive, err := newTransitive(engCfg) + transitive, err := newTransitive(engCfg, noopStarter) require.NoError(t, err) - require.NoError(t, transitive.Start( /*startReqID*/ 0)) + require.NoError(t, transitive.Start(context.Background(), 0 /*=startReqID*/)) // prepare 3 validators vdr1 := ids.NodeID{1} vdr2 := ids.NodeID{2} vdr3 := ids.NodeID{3} - vdrs := ids.NodeIDBag{} + vdrs := bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr2, @@ -259,7 +262,7 @@ func TestSharedDependency(t *testing.T) { // add poll for request 1 transitive.polls.Add(1, vdrs) - vdrs = ids.NodeIDBag{} + vdrs = bag.Bag[ids.NodeID]{} vdrs.Add( vdr1, vdr3, 
@@ -268,7 +271,7 @@ func TestSharedDependency(t *testing.T) { // add poll for request 2 transitive.polls.Add(2, vdrs) - vdrs = ids.NodeIDBag{} + vdrs = bag.Bag[ids.NodeID]{} vdrs.Add( vdr2, vdr3, @@ -290,28 +293,28 @@ func TestSharedDependency(t *testing.T) { t: transitive, requestID: 3, response: []ids.ID{vote3}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr3, } - req3Voter1.Update() + req3Voter1.Update(context.Background()) req3Voter2 := &voter{ t: transitive, requestID: 3, response: []ids.ID{vote3}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr2, } - req3Voter2.Update() + req3Voter2.Update(context.Background()) // 3 polls pending because req 2 and 1 have not voted require.Equal(t, 3, transitive.polls.Len()) // setup common dependency dep := ids.GenerateTestID() - depSet := ids.NewSet(1) + depSet := set.NewSet[ids.ID](1) depSet.Add(dep) req2Voter1 := &voter{ @@ -323,17 +326,17 @@ func TestSharedDependency(t *testing.T) { } // does nothing because dependency is unfulfilled - req2Voter1.Update() + req2Voter1.Update(context.Background()) req2Voter2 := &voter{ t: transitive, requestID: 2, response: []ids.ID{vote2}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr3, } - req2Voter2.Update() + req2Voter2.Update(context.Background()) // 3 polls pending as req 2 dependency is unfulfilled and 1 has not voted require.Equal(t, 3, transitive.polls.Len()) @@ -347,24 +350,24 @@ func TestSharedDependency(t *testing.T) { } // does nothing because dependency is unfulfilled - req1Voter1.Update() + req1Voter1.Update(context.Background()) req1Voter2 := &voter{ t: transitive, requestID: 1, response: []ids.ID{vote1}, - deps: ids.NewSet(0), + deps: set.NewSet[ids.ID](0), vdr: vdr2, } - req1Voter2.Update() + req1Voter2.Update(context.Background()) // 3 polls pending as req2 and req 1 dependencies are unfulfilled require.Equal(t, 3, transitive.polls.Len()) // abandon dependency - req1Voter1.Abandon(dep) - req2Voter1.Abandon(dep) + 
req1Voter1.Abandon(context.Background(), dep) + req2Voter1.Abandon(context.Background(), dep) // expect no pending polls require.Equal(t, 0, transitive.polls.Len()) diff --git a/avalanchego/snow/engine/common/appsender/appsender_client.go b/avalanchego/snow/engine/common/appsender/appsender_client.go index f95ceb0b..a816dd68 100644 --- a/avalanchego/snow/engine/common/appsender/appsender_client.go +++ b/avalanchego/snow/engine/common/appsender/appsender_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package appsender @@ -8,11 +8,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/set" appsenderpb "github.com/ava-labs/avalanchego/proto/pb/appsender" ) -var _ common.AppSender = &Client{} +var _ common.AppSender = (*Client)(nil) type Client struct { client appsenderpb.AppSenderClient @@ -23,7 +24,31 @@ func NewClient(client appsenderpb.AppSenderClient) *Client { return &Client{client: client} } -func (c *Client) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, request []byte) error { +func (c *Client) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { + _, err := c.client.SendCrossChainAppRequest( + ctx, + &appsenderpb.SendCrossChainAppRequestMsg{ + ChainId: chainID[:], + RequestId: requestID, + Request: appRequestBytes, + }, + ) + return err +} + +func (c *Client) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error { + _, err := c.client.SendCrossChainAppResponse( + ctx, + &appsenderpb.SendCrossChainAppResponseMsg{ + ChainId: chainID[:], + RequestId: requestID, + Response: appResponseBytes, + }, + ) + return err +} + +func (c *Client) SendAppRequest(ctx context.Context, nodeIDs 
set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { @@ -32,7 +57,7 @@ func (c *Client) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, request i++ } _, err := c.client.SendAppRequest( - context.Background(), + ctx, &appsenderpb.SendAppRequestMsg{ NodeIds: nodeIDsBytes, RequestId: requestID, @@ -42,9 +67,9 @@ func (c *Client) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, request return err } -func (c *Client) SendAppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { +func (c *Client) SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { _, err := c.client.SendAppResponse( - context.Background(), + ctx, &appsenderpb.SendAppResponseMsg{ NodeId: nodeID[:], RequestId: requestID, @@ -54,9 +79,9 @@ func (c *Client) SendAppResponse(nodeID ids.NodeID, requestID uint32, response [ return err } -func (c *Client) SendAppGossip(msg []byte) error { +func (c *Client) SendAppGossip(ctx context.Context, msg []byte) error { _, err := c.client.SendAppGossip( - context.Background(), + ctx, &appsenderpb.SendAppGossipMsg{ Msg: msg, }, @@ -64,7 +89,7 @@ func (c *Client) SendAppGossip(msg []byte) error { return err } -func (c *Client) SendAppGossipSpecific(nodeIDs ids.NodeIDSet, msg []byte) error { +func (c *Client) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], msg []byte) error { nodeIDsBytes := make([][]byte, nodeIDs.Len()) i := 0 for nodeID := range nodeIDs { @@ -73,7 +98,7 @@ func (c *Client) SendAppGossipSpecific(nodeIDs ids.NodeIDSet, msg []byte) error i++ } _, err := c.client.SendAppGossipSpecific( - context.Background(), + ctx, &appsenderpb.SendAppGossipSpecificMsg{ NodeIds: nodeIDsBytes, Msg: msg, diff --git a/avalanchego/snow/engine/common/appsender/appsender_server.go b/avalanchego/snow/engine/common/appsender/appsender_server.go index 03267026..3583940d 100644 --- 
a/avalanchego/snow/engine/common/appsender/appsender_server.go +++ b/avalanchego/snow/engine/common/appsender/appsender_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package appsender @@ -10,11 +10,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/set" appsenderpb "github.com/ava-labs/avalanchego/proto/pb/appsender" ) -var _ appsenderpb.AppSenderServer = &Server{} +var _ appsenderpb.AppSenderServer = (*Server)(nil) type Server struct { appsenderpb.UnsafeAppSenderServer @@ -26,8 +27,26 @@ func NewServer(appSender common.AppSender) *Server { return &Server{appSender: appSender} } -func (s *Server) SendAppRequest(_ context.Context, req *appsenderpb.SendAppRequestMsg) (*emptypb.Empty, error) { - nodeIDs := ids.NewNodeIDSet(len(req.NodeIds)) +func (s *Server) SendCrossChainAppRequest(ctx context.Context, msg *appsenderpb.SendCrossChainAppRequestMsg) (*emptypb.Empty, error) { + chainID, err := ids.ToID(msg.ChainId) + if err != nil { + return &emptypb.Empty{}, err + } + + return &emptypb.Empty{}, s.appSender.SendCrossChainAppRequest(ctx, chainID, msg.RequestId, msg.Request) +} + +func (s *Server) SendCrossChainAppResponse(ctx context.Context, msg *appsenderpb.SendCrossChainAppResponseMsg) (*emptypb.Empty, error) { + chainID, err := ids.ToID(msg.ChainId) + if err != nil { + return &emptypb.Empty{}, err + } + + return &emptypb.Empty{}, s.appSender.SendCrossChainAppResponse(ctx, chainID, msg.RequestId, msg.Response) +} + +func (s *Server) SendAppRequest(ctx context.Context, req *appsenderpb.SendAppRequestMsg) (*emptypb.Empty, error) { + nodeIDs := set.NewSet[ids.NodeID](len(req.NodeIds)) for _, nodeIDBytes := range req.NodeIds { nodeID, err := ids.ToNodeID(nodeIDBytes) if err != nil { @@ -35,26 +54,26 @@ func (s *Server) 
SendAppRequest(_ context.Context, req *appsenderpb.SendAppReque } nodeIDs.Add(nodeID) } - err := s.appSender.SendAppRequest(nodeIDs, req.RequestId, req.Request) + err := s.appSender.SendAppRequest(ctx, nodeIDs, req.RequestId, req.Request) return &emptypb.Empty{}, err } -func (s *Server) SendAppResponse(_ context.Context, req *appsenderpb.SendAppResponseMsg) (*emptypb.Empty, error) { +func (s *Server) SendAppResponse(ctx context.Context, req *appsenderpb.SendAppResponseMsg) (*emptypb.Empty, error) { nodeID, err := ids.ToNodeID(req.NodeId) if err != nil { return nil, err } - err = s.appSender.SendAppResponse(nodeID, req.RequestId, req.Response) + err = s.appSender.SendAppResponse(ctx, nodeID, req.RequestId, req.Response) return &emptypb.Empty{}, err } -func (s *Server) SendAppGossip(_ context.Context, req *appsenderpb.SendAppGossipMsg) (*emptypb.Empty, error) { - err := s.appSender.SendAppGossip(req.Msg) +func (s *Server) SendAppGossip(ctx context.Context, req *appsenderpb.SendAppGossipMsg) (*emptypb.Empty, error) { + err := s.appSender.SendAppGossip(ctx, req.Msg) return &emptypb.Empty{}, err } -func (s *Server) SendAppGossipSpecific(_ context.Context, req *appsenderpb.SendAppGossipSpecificMsg) (*emptypb.Empty, error) { - nodeIDs := ids.NewNodeIDSet(len(req.NodeIds)) +func (s *Server) SendAppGossipSpecific(ctx context.Context, req *appsenderpb.SendAppGossipSpecificMsg) (*emptypb.Empty, error) { + nodeIDs := set.NewSet[ids.NodeID](len(req.NodeIds)) for _, nodeIDBytes := range req.NodeIds { nodeID, err := ids.ToNodeID(nodeIDBytes) if err != nil { @@ -62,6 +81,6 @@ func (s *Server) SendAppGossipSpecific(_ context.Context, req *appsenderpb.SendA } nodeIDs.Add(nodeID) } - err := s.appSender.SendAppGossipSpecific(nodeIDs, req.Msg) + err := s.appSender.SendAppGossipSpecific(ctx, nodeIDs, req.Msg) return &emptypb.Empty{}, err } diff --git a/avalanchego/snow/engine/common/bootstrap_tracker.go b/avalanchego/snow/engine/common/bootstrap_tracker.go new file mode 100644 index 
00000000..bf81644b --- /dev/null +++ b/avalanchego/snow/engine/common/bootstrap_tracker.go @@ -0,0 +1,20 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package common + +import ( + "github.com/ava-labs/avalanchego/ids" +) + +// BootstrapTracker describes the standard interface for tracking the status of +// a subnet bootstrapping +type BootstrapTracker interface { + // Returns true iff done bootstrapping + IsBootstrapped() bool + + // Bootstrapped marks the named chain as being bootstrapped + Bootstrapped(chainID ids.ID) + + OnBootstrapCompleted() chan struct{} +} diff --git a/avalanchego/snow/engine/common/bootstrapable.go b/avalanchego/snow/engine/common/bootstrapable.go index 0636b785..f18b3295 100644 --- a/avalanchego/snow/engine/common/bootstrapable.go +++ b/avalanchego/snow/engine/common/bootstrapable.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common import ( + "context" + "github.com/ava-labs/avalanchego/ids" ) @@ -16,7 +18,7 @@ type BootstrapableEngine interface { type Bootstrapable interface { // Force the provided containers to be accepted. Only returns fatal errors // if they occur. - ForceAccepted(acceptedContainerIDs []ids.ID) error + ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error // Clear removes all containers to be processed upon bootstrapping Clear() error diff --git a/avalanchego/snow/engine/common/bootstrapper.go b/avalanchego/snow/engine/common/bootstrapper.go index 57363e9e..82ef874d 100644 --- a/avalanchego/snow/engine/common/bootstrapper.go +++ b/avalanchego/snow/engine/common/bootstrapper.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common import ( + "context" + stdmath "math" "go.uber.org/zap" @@ -11,6 +13,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" ) const ( @@ -27,14 +30,14 @@ const ( MaxOutstandingBroadcastRequests = 50 ) -var _ Bootstrapper = &bootstrapper{} +var _ Bootstrapper = (*bootstrapper)(nil) type Bootstrapper interface { AcceptedFrontierHandler AcceptedHandler Haltable - Startup() error - Restart(reset bool) error + Startup(context.Context) error + Restart(ctx context.Context, reset bool) error } // It collects mechanisms common to both snowman and avalanche bootstrappers @@ -45,23 +48,23 @@ type bootstrapper struct { // Holds the beacons that were sampled for the accepted frontier sampledBeacons validators.Set // IDs of validators we should request an accepted frontier from - pendingSendAcceptedFrontier ids.NodeIDSet + pendingSendAcceptedFrontier set.Set[ids.NodeID] // IDs of validators we requested an accepted frontier from but haven't // received a reply yet - pendingReceiveAcceptedFrontier ids.NodeIDSet + pendingReceiveAcceptedFrontier set.Set[ids.NodeID] // IDs of validators that failed to respond with their accepted frontier - failedAcceptedFrontier ids.NodeIDSet + failedAcceptedFrontier set.Set[ids.NodeID] // IDs of all the returned accepted frontiers - acceptedFrontierSet ids.Set + acceptedFrontierSet set.Set[ids.ID] // IDs of validators we should request filtering the accepted frontier from - pendingSendAccepted ids.NodeIDSet + pendingSendAccepted set.Set[ids.NodeID] // IDs of validators we requested filtering the accepted frontier from but // haven't received a reply yet - pendingReceiveAccepted ids.NodeIDSet + pendingReceiveAccepted set.Set[ids.NodeID] // IDs of validators that failed to respond with their filtered accepted // frontier - failedAccepted ids.NodeIDSet + failedAccepted set.Set[ids.NodeID] // IDs of 
the returned accepted containers and the stake weight that has // marked them as accepted acceptedVotes map[ids.ID]uint64 @@ -77,7 +80,7 @@ func NewCommonBootstrapper(config Config) Bootstrapper { } } -func (b *bootstrapper) AcceptedFrontier(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (b *bootstrapper) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { // ignores any late responses if requestID != b.Config.SharedCfg.RequestID { b.Ctx.Log.Debug("received out-of-sync AcceptedFrontier message", @@ -102,7 +105,7 @@ func (b *bootstrapper) AcceptedFrontier(nodeID ids.NodeID, requestID uint32, con // frontier we got from others b.acceptedFrontierSet.Add(containerIDs...) - b.sendGetAcceptedFrontiers() + b.sendGetAcceptedFrontiers(ctx) // still waiting on requests if b.pendingReceiveAcceptedFrontier.Len() != 0 { @@ -112,19 +115,15 @@ func (b *bootstrapper) AcceptedFrontier(nodeID ids.NodeID, requestID uint32, con // We've received the accepted frontier from every bootstrap validator // Ask each bootstrap validator to filter the list of containers that we were // told are on the accepted frontier such that the list only contains containers - // they think are accepted - var err error - + // they think are accepted. 
+ // // Create a newAlpha taking using the sampled beacon // Keep the proportion of b.Alpha in the newAlpha // newAlpha := totalSampledWeight * b.Alpha / totalWeight newAlpha := float64(b.sampledBeacons.Weight()*b.Alpha) / float64(b.Beacons.Weight()) - failedBeaconWeight, err := b.Beacons.SubsetWeight(b.failedAcceptedFrontier) - if err != nil { - return err - } + failedBeaconWeight := b.Beacons.SubsetWeight(b.failedAcceptedFrontier) // fail the bootstrap if the weight is not enough to bootstrap if float64(b.sampledBeacons.Weight())-newAlpha < float64(failedBeaconWeight) { @@ -135,7 +134,7 @@ func (b *bootstrapper) AcceptedFrontier(nodeID ids.NodeID, requestID uint32, con zap.Int("numFailedBootstrappers", b.failedAcceptedFrontier.Len()), zap.Int("numBootstrapAttemps", b.bootstrapAttempts), ) - return b.Restart(false) + return b.Restart(ctx, false) } b.Ctx.Log.Debug("didn't receive enough frontiers", @@ -147,11 +146,11 @@ func (b *bootstrapper) AcceptedFrontier(nodeID ids.NodeID, requestID uint32, con b.Config.SharedCfg.RequestID++ b.acceptedFrontier = b.acceptedFrontierSet.List() - b.sendGetAccepted() + b.sendGetAccepted(ctx) return nil } -func (b *bootstrapper) GetAcceptedFrontierFailed(nodeID ids.NodeID, requestID uint32) error { +func (b *bootstrapper) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { // ignores any late responses if requestID != b.Config.SharedCfg.RequestID { b.Ctx.Log.Debug("received out-of-sync GetAcceptedFrontierFailed message", @@ -165,10 +164,10 @@ func (b *bootstrapper) GetAcceptedFrontierFailed(nodeID ids.NodeID, requestID ui // If we can't get a response from [nodeID], act as though they said their // accepted frontier is empty and we add the validator to the failed list b.failedAcceptedFrontier.Add(nodeID) - return b.AcceptedFrontier(nodeID, requestID, nil) + return b.AcceptedFrontier(ctx, nodeID, requestID, nil) } -func (b *bootstrapper) Accepted(nodeID ids.NodeID, requestID uint32, 
containerIDs []ids.ID) error { +func (b *bootstrapper) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { // ignores any late responses if requestID != b.Config.SharedCfg.RequestID { b.Ctx.Log.Debug("received out-of-sync Accepted message", @@ -188,11 +187,7 @@ func (b *bootstrapper) Accepted(nodeID ids.NodeID, requestID uint32, containerID // Mark that we received a response from [nodeID] b.pendingReceiveAccepted.Remove(nodeID) - weight := uint64(0) - if w, ok := b.Beacons.GetWeight(nodeID); ok { - weight = w - } - + weight := b.Beacons.GetWeight(nodeID) for _, containerID := range containerIDs { previousWeight := b.acceptedVotes[containerID] newWeight, err := math.Add64(weight, previousWeight) @@ -207,7 +202,7 @@ func (b *bootstrapper) Accepted(nodeID ids.NodeID, requestID uint32, containerID b.acceptedVotes[containerID] = newWeight } - b.sendGetAccepted() + b.sendGetAccepted(ctx) // wait on pending responses if b.pendingReceiveAccepted.Len() != 0 { @@ -223,24 +218,25 @@ func (b *bootstrapper) Accepted(nodeID ids.NodeID, requestID uint32, containerID } } - // if we don't have enough weight for the bootstrap to be accepted then retry or fail the bootstrap + // if we don't have enough weight for the bootstrap to be accepted then + // retry or fail the bootstrap size := len(accepted) if size == 0 && b.Beacons.Len() > 0 { - // retry the bootstrap if the weight is not enough to bootstrap - failedBeaconWeight, err := b.Beacons.SubsetWeight(b.failedAccepted) - if err != nil { - return err - } - - // in a zero network there will be no accepted votes but the voting weight will be greater than the failed weight - if b.Config.RetryBootstrap && b.Beacons.Weight()-b.Alpha < failedBeaconWeight { + // if we had too many timeouts when asking for validator votes, we + // should restart bootstrap hoping for the network problems to go away; + // otherwise, we received enough (>= b.Alpha) responses, but no frontier + // was supported by 
a majority of validators (i.e. votes are split + // between minorities supporting different frontiers). + failedBeaconWeight := b.Beacons.SubsetWeight(b.failedAccepted) + votingStakes := b.Beacons.Weight() - failedBeaconWeight + if b.Config.RetryBootstrap && votingStakes < b.Alpha { b.Ctx.Log.Debug("restarting bootstrap", zap.String("reason", "not enough votes received"), zap.Int("numBeacons", b.Beacons.Len()), zap.Int("numFailedBootstrappers", b.failedAccepted.Len()), zap.Int("numBootstrapAttempts", b.bootstrapAttempts), ) - return b.Restart(false) + return b.Restart(ctx, false) } } @@ -254,10 +250,10 @@ func (b *bootstrapper) Accepted(nodeID ids.NodeID, requestID uint32, containerID ) } - return b.Bootstrapable.ForceAccepted(accepted) + return b.Bootstrapable.ForceAccepted(ctx, accepted) } -func (b *bootstrapper) GetAcceptedFailed(nodeID ids.NodeID, requestID uint32) error { +func (b *bootstrapper) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { // ignores any late responses if requestID != b.Config.SharedCfg.RequestID { b.Ctx.Log.Debug("received out-of-sync GetAcceptedFailed message", @@ -272,25 +268,28 @@ func (b *bootstrapper) GetAcceptedFailed(nodeID ids.NodeID, requestID uint32) er // they think none of the containers we sent them in GetAccepted are // accepted b.failedAccepted.Add(nodeID) - return b.Accepted(nodeID, requestID, nil) + return b.Accepted(ctx, nodeID, requestID, nil) } -func (b *bootstrapper) Startup() error { - beacons, err := b.Beacons.Sample(b.Config.SampleK) +func (b *bootstrapper) Startup(ctx context.Context) error { + beaconIDs, err := b.Beacons.Sample(b.Config.SampleK) if err != nil { return err } b.sampledBeacons = validators.NewSet() - err = b.sampledBeacons.Set(beacons) - if err != nil { - return err - } - b.pendingSendAcceptedFrontier.Clear() - for _, vdr := range beacons { - vdrID := vdr.ID() - b.pendingSendAcceptedFrontier.Add(vdrID) + for _, nodeID := range beaconIDs { + if 
!b.sampledBeacons.Contains(nodeID) { + // Invariant: We never use the TxID or BLS keys populated here. + err = b.sampledBeacons.Add(nodeID, nil, ids.Empty, 1) + } else { + err = b.sampledBeacons.AddWeight(nodeID, 1) + } + if err != nil { + return err + } + b.pendingSendAcceptedFrontier.Add(nodeID) } b.pendingReceiveAcceptedFrontier.Clear() @@ -299,8 +298,7 @@ func (b *bootstrapper) Startup() error { b.pendingSendAccepted.Clear() for _, vdr := range b.Beacons.List() { - vdrID := vdr.ID() - b.pendingSendAccepted.Add(vdrID) + b.pendingSendAccepted.Add(vdr.NodeID) } b.pendingReceiveAccepted.Clear() @@ -312,15 +310,15 @@ func (b *bootstrapper) Startup() error { b.Ctx.Log.Info("bootstrapping skipped", zap.String("reason", "no provided bootstraps"), ) - return b.Bootstrapable.ForceAccepted(nil) + return b.Bootstrapable.ForceAccepted(ctx, nil) } b.Config.SharedCfg.RequestID++ - b.sendGetAcceptedFrontiers() + b.sendGetAcceptedFrontiers(ctx) return nil } -func (b *bootstrapper) Restart(reset bool) error { +func (b *bootstrapper) Restart(ctx context.Context, reset bool) error { // resets the attempts when we're pulling blocks/vertices we don't want to // fail the bootstrap at that stage if reset { @@ -336,13 +334,13 @@ func (b *bootstrapper) Restart(reset bool) error { ) } - return b.Startup() + return b.Startup(ctx) } // Ask up to [MaxOutstandingBroadcastRequests] bootstrap validators to send // their accepted frontier with the current accepted frontier -func (b *bootstrapper) sendGetAcceptedFrontiers() { - vdrs := ids.NewNodeIDSet(1) +func (b *bootstrapper) sendGetAcceptedFrontiers(ctx context.Context) { + vdrs := set.NewSet[ids.NodeID](1) for b.pendingSendAcceptedFrontier.Len() > 0 && b.pendingReceiveAcceptedFrontier.Len() < MaxOutstandingBroadcastRequests { vdr, _ := b.pendingSendAcceptedFrontier.Pop() // Add the validator to the set to send the messages to @@ -352,14 +350,14 @@ func (b *bootstrapper) sendGetAcceptedFrontiers() { } if vdrs.Len() > 0 { - 
b.Sender.SendGetAcceptedFrontier(vdrs, b.Config.SharedCfg.RequestID) + b.Sender.SendGetAcceptedFrontier(ctx, vdrs, b.Config.SharedCfg.RequestID) } } // Ask up to [MaxOutstandingBroadcastRequests] bootstrap validators to send // their filtered accepted frontier -func (b *bootstrapper) sendGetAccepted() { - vdrs := ids.NewNodeIDSet(1) +func (b *bootstrapper) sendGetAccepted(ctx context.Context) { + vdrs := set.NewSet[ids.NodeID](1) for b.pendingSendAccepted.Len() > 0 && b.pendingReceiveAccepted.Len() < MaxOutstandingBroadcastRequests { vdr, _ := b.pendingSendAccepted.Pop() // Add the validator to the set to send the messages to @@ -373,6 +371,6 @@ func (b *bootstrapper) sendGetAccepted() { zap.Int("numSent", vdrs.Len()), zap.Int("numPending", b.pendingSendAccepted.Len()), ) - b.Sender.SendGetAccepted(vdrs, b.Config.SharedCfg.RequestID, b.acceptedFrontier) + b.Sender.SendGetAccepted(ctx, vdrs, b.Config.SharedCfg.RequestID, b.acceptedFrontier) } } diff --git a/avalanchego/snow/engine/common/config.go b/avalanchego/snow/engine/common/config.go index 803bf736..57507e7e 100644 --- a/avalanchego/snow/engine/common/config.go +++ b/avalanchego/snow/engine/common/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common @@ -14,17 +14,16 @@ import ( // Config wraps the common configurations that are needed by a Snow consensus // engine type Config struct { - Ctx *snow.ConsensusContext - Validators validators.Set - Beacons validators.Set - - SampleK int - Alpha uint64 - StartupTracker tracker.Startup - Sender Sender - Bootstrapable Bootstrapable - Subnet Subnet - Timer Timer + Ctx *snow.ConsensusContext + Beacons validators.Set + + SampleK int + Alpha uint64 + StartupTracker tracker.Startup + Sender Sender + Bootstrapable Bootstrapable + BootstrapTracker BootstrapTracker + Timer Timer // Should Bootstrap be retried RetryBootstrap bool @@ -46,10 +45,14 @@ type Config struct { SharedCfg *SharedConfig } -func (c *Config) Context() *snow.ConsensusContext { return c.Ctx } +func (c *Config) Context() *snow.ConsensusContext { + return c.Ctx +} // IsBootstrapped returns true iff this chain is done bootstrapping -func (c *Config) IsBootstrapped() bool { return c.Ctx.GetState() == snow.NormalOp } +func (c *Config) IsBootstrapped() bool { + return c.Ctx.State.Get().State == snow.NormalOp +} // Shared among common.bootstrapper and snowman/avalanche bootstrapper type SharedConfig struct { diff --git a/avalanchego/snow/engine/common/engine.go b/avalanchego/snow/engine/common/engine.go index 97c4e919..142351b2 100644 --- a/avalanchego/snow/engine/common/engine.go +++ b/avalanchego/snow/engine/common/engine.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common import ( + "context" "time" "github.com/ava-labs/avalanchego/api/health" @@ -20,7 +21,7 @@ type Engine interface { Context() *snow.ConsensusContext // Start engine operations from given request ID - Start(startReqID uint32) error + Start(ctx context.Context, startReqID uint32) error // Returns nil if the engine is healthy. 
// Periodically called and reported through the health API @@ -66,7 +67,7 @@ type GetStateSummaryFrontierHandler interface { // // This engine should respond with an StateSummaryFrontier message with the // same requestID, and the engine's current state summary frontier. - GetStateSummaryFrontier(validatorID ids.NodeID, requestID uint32) error + GetStateSummaryFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error } // StateSummaryFrontierHandler defines how a consensus engine reacts to a state @@ -79,7 +80,7 @@ type StateSummaryFrontierHandler interface { // this message is in response to a GetStateSummaryFrontier message, is // utilizing a unique requestID, or that the summary bytes are from a valid // state summary. - StateSummaryFrontier(validatorID ids.NodeID, requestID uint32, summary []byte) error + StateSummaryFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32, summary []byte) error // Notify this engine that a get state summary frontier request it issued // has failed. @@ -91,7 +92,7 @@ type StateSummaryFrontierHandler interface { // // The validatorID, and requestID, are assumed to be the same as those sent // in the GetStateSummaryFrontier message. - GetStateSummaryFrontierFailed(validatorID ids.NodeID, requestID uint32) error + GetStateSummaryFrontierFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error } // GetAcceptedStateSummaryHandler defines how a consensus engine reacts to a get @@ -108,7 +109,7 @@ type GetAcceptedStateSummaryHandler interface { // This engine should respond with an AcceptedStateSummary message with the // same requestID, and the subset of the state summaries that this node has // locally available. 
- GetAcceptedStateSummary(validatorID ids.NodeID, requestID uint32, keys []uint64) error + GetAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, keys []uint64) error } // AcceptedStateSummaryHandler defines how a consensus engine reacts to an @@ -121,7 +122,7 @@ type AcceptedStateSummaryHandler interface { // this message is in response to a GetAcceptedStateSummary message, // is utilizing a unique requestID, or that the summaryIDs are a subset of the // state summaries requested by key from a GetAcceptedStateSummary message. - AcceptedStateSummary(validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error + AcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error // Notify this engine that a get accepted state summary request it issued has // failed. @@ -133,7 +134,7 @@ type AcceptedStateSummaryHandler interface { // // The validatorID, and requestID, are assumed to be the same as those sent // in the GetAcceptedStateSummary message. - GetAcceptedStateSummaryFailed(validatorID ids.NodeID, requestID uint32) error + GetAcceptedStateSummaryFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error } // GetAcceptedFrontierHandler defines how a consensus engine reacts to a get @@ -150,7 +151,7 @@ type GetAcceptedFrontierHandler interface { // // This engine should respond with an AcceptedFrontier message with the same // requestID, and the engine's current accepted frontier. - GetAcceptedFrontier(validatorID ids.NodeID, requestID uint32) error + GetAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error } // AcceptedFrontierHandler defines how a consensus engine reacts to accepted @@ -163,6 +164,7 @@ type AcceptedFrontierHandler interface { // utilizing a unique requestID, or that the containerIDs from a valid // frontier. 
AcceptedFrontier( + ctx context.Context, validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID, @@ -178,7 +180,7 @@ type AcceptedFrontierHandler interface { // // The validatorID, and requestID, are assumed to be the same as those sent // in the GetAcceptedFrontier message. - GetAcceptedFrontierFailed(validatorID ids.NodeID, requestID uint32) error + GetAcceptedFrontierFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error } // GetAcceptedHandler defines how a consensus engine reacts to a get accepted @@ -194,6 +196,7 @@ type GetAcceptedHandler interface { // requestID, and the subset of the containerIDs that this node has decided // are accepted. GetAccepted( + ctx context.Context, validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID, @@ -211,6 +214,7 @@ type AcceptedHandler interface { // unique requestID, or that the containerIDs are a subset of the // containerIDs from a GetAccepted message. Accepted( + ctx context.Context, validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID, @@ -225,7 +229,7 @@ type AcceptedHandler interface { // // The validatorID, and requestID, are assumed to be the same as those sent // in the GetAccepted message. - GetAcceptedFailed(validatorID ids.NodeID, requestID uint32) error + GetAcceptedFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error } // GetAncestorsHandler defines how a consensus engine reacts to a get ancestors @@ -247,7 +251,7 @@ type GetAncestorsHandler interface { // If this engine doesn't have some ancestors, it should reply with its best // effort attempt at getting them. If this engine doesn't have [containerID] // it can ignore this message. 
- GetAncestors(validatorID ids.NodeID, requestID uint32, containerID ids.ID) error + GetAncestors(ctx context.Context, validatorID ids.NodeID, requestID uint32, containerID ids.ID) error } // AncestorsHandler defines how a consensus engine reacts to bootstrapping @@ -269,6 +273,7 @@ type AncestorsHandler interface { // message, that this message has a unique requestID or that any of the // containers in [containers] are valid. Ancestors( + ctx context.Context, validatorID ids.NodeID, requestID uint32, containers [][]byte, @@ -283,7 +288,7 @@ type AncestorsHandler interface { // // The validatorID and requestID are assumed to be the same as those sent in // the GetAncestors message. - GetAncestorsFailed(validatorID ids.NodeID, requestID uint32) error + GetAncestorsFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error } // GetHandler defines how a consensus engine reacts to get message from another @@ -302,7 +307,7 @@ type GetHandler interface { // This engine should respond with a Put message with the same requestID if // the container was locally available. Otherwise, the message can be safely // dropped. - Get(validatorID ids.NodeID, requestID uint32, containerID ids.ID) error + Get(ctx context.Context, validatorID ids.NodeID, requestID uint32, containerID ids.ID) error } // PutHandler defines how a consensus engine reacts to put messages from other @@ -313,6 +318,7 @@ type PutHandler interface { // This function can be called by any validator. It is not safe to assume // this message is utilizing a unique requestID. Put( + ctx context.Context, validatorID ids.NodeID, requestID uint32, container []byte, @@ -326,7 +332,7 @@ type PutHandler interface { // // The validatorID and requestID are assumed to be the same as those sent in // the Get message. 
- GetFailed(validatorID ids.NodeID, requestID uint32) error + GetFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error } // QueryHandler defines how a consensus engine reacts to query messages from @@ -344,6 +350,7 @@ type QueryHandler interface { // preferences in a Chits message. The Chits message should have the same // requestID that was passed in here. PullQuery( + ctx context.Context, validatorID ids.NodeID, requestID uint32, containerID ids.ID, @@ -364,6 +371,7 @@ type QueryHandler interface { // in a Chits message. The Chits message should have the same requestID that // was passed in here. PushQuery( + ctx context.Context, validatorID ids.NodeID, requestID uint32, container []byte, @@ -378,7 +386,13 @@ type ChitsHandler interface { // This function can be called by any validator. It is not safe to assume // this message is in response to a PullQuery or a PushQuery message. // However, the validatorID is assumed to be authenticated. - Chits(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) error + Chits( + ctx context.Context, + validatorID ids.NodeID, + requestID uint32, + preferredContainerIDs []ids.ID, + acceptedContainerIDs []ids.ID, + ) error // Notify this engine that a query it issued has failed. // @@ -389,12 +403,14 @@ type ChitsHandler interface { // // The validatorID and the requestID are assumed to be the same as those // sent in the Query message. - QueryFailed(validatorID ids.NodeID, requestID uint32) error + QueryFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error } -// AppHandler defines how a consensus engine reacts to app specific messages. +// NetworkAppHandler defines how a consensus engine reacts to app specific +// messages from the network. +// // Functions only return fatal errors. -type AppHandler interface { +type NetworkAppHandler interface { // Notify this engine of a request for data from [nodeID]. 
// // The meaning of [request], and what should be sent in response to it, is @@ -406,7 +422,7 @@ type AppHandler interface { // This node should typically send an AppResponse to [nodeID] in response to // a valid message using the same request ID before the deadline. However, // the VM may arbitrarily choose to not send a response to this request. - AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error + AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error // Notify this engine that an AppRequest message it sent to [nodeID] with // request ID [requestID] failed. @@ -418,7 +434,7 @@ type AppHandler interface { // * This engine sent a request to [nodeID] with ID [requestID]. // * AppRequestFailed([nodeID], [requestID]) has not already been called. // * AppResponse([nodeID], [requestID]) has not already been called. - AppRequestFailed(nodeID ids.NodeID, requestID uint32) error + AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error // Notify this engine of a response to the AppRequest message it sent to // [nodeID] with request ID [requestID]. @@ -437,7 +453,7 @@ type AppHandler interface { // If [response] is invalid or not the expected response, the VM chooses how // to react. For example, the VM may send another AppRequest, or it may give // up trying to get the requested information. - AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error + AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error // Notify this engine of a gossip message from [nodeID]. // @@ -449,7 +465,73 @@ type AppHandler interface { // // A node may gossip the same message multiple times. That is, // AppGossip([nodeID], [msg]) may be called multiple times. 
- AppGossip(nodeID ids.NodeID, msg []byte) error + AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error +} + +// CrossChainAppHandler defines how a consensus engine reacts to cross-chain app +// specific messages. +// +// Functions only return fatal errors. +type CrossChainAppHandler interface { + // CrossChainAppRequest Notify this engine of a request for data from + // [chainID]. + // + // The meaning of [request], and what should be sent in response to it, is + // application (VM) specific. + // + // Guarantees surrounding the request are specific to the implementation of + // the requesting VM. For example, the request may or may not be guaranteed + // to be well-formed/valid depending on the implementation of the requesting + // VM. + // + // This node should typically send a CrossChainAppResponse to [chainID] in + // response to a valid message using the same request ID before the + // deadline. However, the VM may arbitrarily choose to not send a response + // to this request. + CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error + // CrossChainAppRequestFailed notifies this engine that a + // CrossChainAppRequest message it sent to [chainID] with request ID + // [requestID] failed. + // + // This may be because the request timed out or because the message couldn't + // be sent to [chainID]. + // + // It is guaranteed that: + // * This engine sent a request to [chainID] with ID [requestID]. + // * CrossChainAppRequestFailed([chainID], [requestID]) has not already been + // called. + // * CrossChainAppResponse([chainID], [requestID]) has not already been + // called. + CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error + // CrossChainAppResponse notifies this engine of a response to the + // CrossChainAppRequest message it sent to [chainID] with request ID + // [requestID]. + // + // The meaning of [response] is application (VM) specific. 
+ // + // It is guaranteed that: + // * This engine sent a request to [chainID] with ID [requestID]. + // * CrossChainAppRequestFailed([chainID], [requestID]) has not already been + // called. + // * CrossChainAppResponse([chainID], [requestID]) has not already been + // called. + // + // Guarantees surrounding the response are specific to the implementation of + // the responding VM. For example, the response may or may not be guaranteed + // to be well-formed/valid depending on the implementation of the requesting + // VM. + // + // If [response] is invalid or not the expected response, the VM chooses how + // to react. For example, the VM may send another CrossChainAppRequest, or + // it may give up trying to get the requested information. + CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error +} + +// AppHandler defines how a consensus engine reacts to app specific messages. +// Functions only return fatal errors. +type AppHandler interface { + NetworkAppHandler + CrossChainAppHandler } // InternalHandler defines how this consensus engine reacts to messages from @@ -460,23 +542,24 @@ type InternalHandler interface { validators.Connector // Notify this engine that a registered timeout has fired. - Timeout() error + Timeout(context.Context) error // Gossip to the network a container on the accepted frontier - Gossip() error + Gossip(context.Context) error // Halt this engine. // // This function will be called before the environment starts exiting. This - // function is slightly special, in that it does not expect the chain's - // context lock to be held before calling this function. - Halt() + // function is special, in that it does not expect the chain's context lock + // to be held before calling this function. This function also does not + // require the engine to have been started. + Halt(context.Context) // Shutdown this engine. // // This function will be called when the environment is exiting. 
- Shutdown() error + Shutdown(context.Context) error // Notify this engine of a message from the virtual machine. - Notify(Message) error + Notify(context.Context, Message) error } diff --git a/avalanchego/snow/engine/common/fetcher.go b/avalanchego/snow/engine/common/fetcher.go index 8084023d..9e90da3d 100644 --- a/avalanchego/snow/engine/common/fetcher.go +++ b/avalanchego/snow/engine/common/fetcher.go @@ -1,12 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common +import "context" + type Fetcher struct { // tracks which validators were asked for which containers in which requests OutstandingRequests Requests // Called when bootstrapping is done on a specific chain - OnFinished func(lastReqID uint32) error + OnFinished func(ctx context.Context, lastReqID uint32) error } diff --git a/avalanchego/snow/engine/common/fx.go b/avalanchego/snow/engine/common/fx.go index cfac66c1..414b5a7d 100644 --- a/avalanchego/snow/engine/common/fx.go +++ b/avalanchego/snow/engine/common/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/avalanchego/snow/engine/common/halter.go b/avalanchego/snow/engine/common/halter.go index f67eb3c7..bdfe3c9d 100644 --- a/avalanchego/snow/engine/common/halter.go +++ b/avalanchego/snow/engine/common/halter.go @@ -1,16 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common import ( + "context" "sync/atomic" ) -var _ Haltable = &Halter{} +var _ Haltable = (*Halter)(nil) type Haltable interface { - Halt() + Halt(context.Context) Halted() bool } @@ -18,7 +19,7 @@ type Halter struct { halted uint32 } -func (h *Halter) Halt() { +func (h *Halter) Halt(context.Context) { atomic.StoreUint32(&h.halted, 1) } diff --git a/avalanchego/snow/engine/common/http_handler.go b/avalanchego/snow/engine/common/http_handler.go index 35ed954c..48724a2b 100644 --- a/avalanchego/snow/engine/common/http_handler.go +++ b/avalanchego/snow/engine/common/http_handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/avalanchego/snow/engine/common/message.go b/avalanchego/snow/engine/common/message.go index 7209ea8b..e46238bc 100644 --- a/avalanchego/snow/engine/common/message.go +++ b/avalanchego/snow/engine/common/message.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -16,7 +16,7 @@ const ( // PendingTxs notifies a consensus engine that // its VM has pending transactions // (i.e. it would like to add a new block/vertex to consensus) - PendingTxs Message = iota + PendingTxs Message = iota + 1 // StateSyncDone notifies the state syncer engine that the VM has finishing // syncing the requested state summary. diff --git a/avalanchego/snow/engine/common/mixed_query.go b/avalanchego/snow/engine/common/mixed_query.go index 56a259dc..653297ce 100644 --- a/avalanchego/snow/engine/common/mixed_query.go +++ b/avalanchego/snow/engine/common/mixed_query.go @@ -1,9 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package common -import "github.com/ava-labs/avalanchego/ids" +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) // Send a query composed partially of push queries and partially of pull queries. // The validators in [vdrs] will be queried. @@ -12,6 +17,7 @@ import "github.com/ava-labs/avalanchego/ids" // [containerID] and [container] are the ID and body of the container being queried. // [sender] is used to actually send the queries. func SendMixedQuery( + ctx context.Context, sender Sender, vdrs []ids.NodeID, numPushTo int, @@ -23,13 +29,13 @@ func SendMixedQuery( numPushTo = len(vdrs) } if numPushTo > 0 { - sendPushQueryTo := ids.NewNodeIDSet(numPushTo) + sendPushQueryTo := set.NewSet[ids.NodeID](numPushTo) sendPushQueryTo.Add(vdrs[:numPushTo]...) - sender.SendPushQuery(sendPushQueryTo, reqID, container) + sender.SendPushQuery(ctx, sendPushQueryTo, reqID, container) } if numPullTo := len(vdrs) - numPushTo; numPullTo > 0 { - sendPullQueryTo := ids.NewNodeIDSet(numPullTo) + sendPullQueryTo := set.NewSet[ids.NodeID](numPullTo) sendPullQueryTo.Add(vdrs[numPushTo:]...) - sender.SendPullQuery(sendPullQueryTo, reqID, containerID) + sender.SendPullQuery(ctx, sendPullQueryTo, reqID, containerID) } } diff --git a/avalanchego/snow/engine/common/mixed_query_test.go b/avalanchego/snow/engine/common/mixed_query_test.go index 8147e790..4d488dc2 100644 --- a/avalanchego/snow/engine/common/mixed_query_test.go +++ b/avalanchego/snow/engine/common/mixed_query_test.go @@ -1,14 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common import ( + "context" "fmt" "testing" - "github.com/ava-labs/avalanchego/ids" "github.com/golang/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) func TestSendMixedQuery(t *testing.T) { @@ -29,7 +32,8 @@ func TestSendMixedQuery(t *testing.T) { senderF: func() *MockSender { s := NewMockSender(ctrl) s.EXPECT().SendPushQuery( - ids.NodeIDSet{vdr1: struct{}{}, vdr2: struct{}{}, vdr3: struct{}{}}, + gomock.Any(), + set.Set[ids.NodeID]{vdr1: struct{}{}, vdr2: struct{}{}, vdr3: struct{}{}}, reqID, containerBytes, ).Times(1) @@ -37,6 +41,7 @@ func TestSendMixedQuery(t *testing.T) { gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), ).Times(0) return s }, @@ -47,12 +52,14 @@ func TestSendMixedQuery(t *testing.T) { senderF: func() *MockSender { s := NewMockSender(ctrl) s.EXPECT().SendPushQuery( - ids.NodeIDSet{vdr1: struct{}{}}, + gomock.Any(), + set.Set[ids.NodeID]{vdr1: struct{}{}}, reqID, containerBytes, ).Times(1) s.EXPECT().SendPullQuery( - ids.NodeIDSet{vdr2: struct{}{}, vdr3: struct{}{}}, + gomock.Any(), + set.Set[ids.NodeID]{vdr2: struct{}{}, vdr3: struct{}{}}, reqID, containerID, ).Times(1) @@ -65,7 +72,8 @@ func TestSendMixedQuery(t *testing.T) { senderF: func() *MockSender { s := NewMockSender(ctrl) s.EXPECT().SendPushQuery( - ids.NodeIDSet{vdr1: struct{}{}, vdr2: struct{}{}}, + gomock.Any(), + set.Set[ids.NodeID]{vdr1: struct{}{}, vdr2: struct{}{}}, reqID, containerBytes, ).Times(1) @@ -73,6 +81,7 @@ func TestSendMixedQuery(t *testing.T) { gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), ).Times(0) return s }, @@ -83,12 +92,14 @@ func TestSendMixedQuery(t *testing.T) { senderF: func() *MockSender { s := NewMockSender(ctrl) s.EXPECT().SendPushQuery( + gomock.Any(), gomock.Any(), reqID, containerBytes, ).Times(0) s.EXPECT().SendPullQuery( - ids.NodeIDSet{vdr1: struct{}{}}, + gomock.Any(), + set.Set[ids.NodeID]{vdr1: struct{}{}}, reqID, containerID, ).Times(1) @@ -101,7 +112,8 @@ 
func TestSendMixedQuery(t *testing.T) { senderF: func() *MockSender { s := NewMockSender(ctrl) s.EXPECT().SendPushQuery( - ids.NodeIDSet{vdr1: struct{}{}, vdr2: struct{}{}}, + gomock.Any(), + set.Set[ids.NodeID]{vdr1: struct{}{}, vdr2: struct{}{}}, reqID, containerBytes, ).Times(1) @@ -109,6 +121,7 @@ func TestSendMixedQuery(t *testing.T) { gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), ).Times(0) return s }, @@ -123,6 +136,7 @@ func TestSendMixedQuery(t *testing.T) { func(t *testing.T) { sender := tt.senderF() SendMixedQuery( + context.Background(), sender, tt.vdrs, tt.numPushTo, diff --git a/avalanchego/snow/engine/common/mock_sender.go b/avalanchego/snow/engine/common/mock_sender.go index abab0b14..c599077d 100644 --- a/avalanchego/snow/engine/common/mock_sender.go +++ b/avalanchego/snow/engine/common/mock_sender.go @@ -1,13 +1,19 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: snow/engine/common/sender.go +// Source: github.com/ava-labs/avalanchego/snow/engine/common (interfaces: Sender) +// Package common is a generated GoMock package. package common import ( + context "context" reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" + set "github.com/ava-labs/avalanchego/utils/set" gomock "github.com/golang/mock/gomock" ) @@ -35,695 +41,291 @@ func (m *MockSender) EXPECT() *MockSenderMockRecorder { } // Accept mocks base method. -func (m *MockSender) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container []byte) error { +func (m *MockSender) Accept(arg0 *snow.ConsensusContext, arg1 ids.ID, arg2 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Accept", ctx, containerID, container) + ret := m.ctrl.Call(m, "Accept", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // Accept indicates an expected call of Accept. 
-func (mr *MockSenderMockRecorder) Accept(ctx, containerID, container interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) Accept(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockSender)(nil).Accept), ctx, containerID, container) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockSender)(nil).Accept), arg0, arg1, arg2) } // SendAccepted mocks base method. -func (m *MockSender) SendAccepted(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { +func (m *MockSender) SendAccepted(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAccepted", nodeID, requestID, containerIDs) + m.ctrl.Call(m, "SendAccepted", arg0, arg1, arg2, arg3) } // SendAccepted indicates an expected call of SendAccepted. -func (mr *MockSenderMockRecorder) SendAccepted(nodeID, requestID, containerIDs interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAccepted(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAccepted", reflect.TypeOf((*MockSender)(nil).SendAccepted), nodeID, requestID, containerIDs) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAccepted", reflect.TypeOf((*MockSender)(nil).SendAccepted), arg0, arg1, arg2, arg3) } // SendAcceptedFrontier mocks base method. -func (m *MockSender) SendAcceptedFrontier(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { +func (m *MockSender) SendAcceptedFrontier(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAcceptedFrontier", nodeID, requestID, containerIDs) + m.ctrl.Call(m, "SendAcceptedFrontier", arg0, arg1, arg2, arg3) } // SendAcceptedFrontier indicates an expected call of SendAcceptedFrontier. 
-func (mr *MockSenderMockRecorder) SendAcceptedFrontier(nodeID, requestID, containerIDs interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAcceptedFrontier(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendAcceptedFrontier), nodeID, requestID, containerIDs) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendAcceptedFrontier), arg0, arg1, arg2, arg3) } // SendAcceptedStateSummary mocks base method. -func (m *MockSender) SendAcceptedStateSummary(nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { +func (m *MockSender) SendAcceptedStateSummary(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAcceptedStateSummary", nodeID, requestID, summaryIDs) + m.ctrl.Call(m, "SendAcceptedStateSummary", arg0, arg1, arg2, arg3) } // SendAcceptedStateSummary indicates an expected call of SendAcceptedStateSummary. -func (mr *MockSenderMockRecorder) SendAcceptedStateSummary(nodeID, requestID, summaryIDs interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAcceptedStateSummary(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendAcceptedStateSummary), nodeID, requestID, summaryIDs) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendAcceptedStateSummary), arg0, arg1, arg2, arg3) } // SendAncestors mocks base method. 
-func (m *MockSender) SendAncestors(nodeID ids.NodeID, requestID uint32, containers [][]byte) { +func (m *MockSender) SendAncestors(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 [][]byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAncestors", nodeID, requestID, containers) + m.ctrl.Call(m, "SendAncestors", arg0, arg1, arg2, arg3) } // SendAncestors indicates an expected call of SendAncestors. -func (mr *MockSenderMockRecorder) SendAncestors(nodeID, requestID, containers interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAncestors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAncestors", reflect.TypeOf((*MockSender)(nil).SendAncestors), nodeID, requestID, containers) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAncestors", reflect.TypeOf((*MockSender)(nil).SendAncestors), arg0, arg1, arg2, arg3) } // SendAppGossip mocks base method. -func (m *MockSender) SendAppGossip(appGossipBytes []byte) error { +func (m *MockSender) SendAppGossip(arg0 context.Context, arg1 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossip", appGossipBytes) + ret := m.ctrl.Call(m, "SendAppGossip", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // SendAppGossip indicates an expected call of SendAppGossip. -func (mr *MockSenderMockRecorder) SendAppGossip(appGossipBytes interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppGossip(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossip", reflect.TypeOf((*MockSender)(nil).SendAppGossip), appGossipBytes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossip", reflect.TypeOf((*MockSender)(nil).SendAppGossip), arg0, arg1) } // SendAppGossipSpecific mocks base method. 
-func (m *MockSender) SendAppGossipSpecific(nodeIDs ids.NodeIDSet, appGossipBytes []byte) error { +func (m *MockSender) SendAppGossipSpecific(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossipSpecific", nodeIDs, appGossipBytes) + ret := m.ctrl.Call(m, "SendAppGossipSpecific", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // SendAppGossipSpecific indicates an expected call of SendAppGossipSpecific. -func (mr *MockSenderMockRecorder) SendAppGossipSpecific(nodeIDs, appGossipBytes interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppGossipSpecific(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossipSpecific", reflect.TypeOf((*MockSender)(nil).SendAppGossipSpecific), nodeIDs, appGossipBytes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossipSpecific", reflect.TypeOf((*MockSender)(nil).SendAppGossipSpecific), arg0, arg1, arg2) } // SendAppRequest mocks base method. -func (m *MockSender) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, appRequestBytes []byte) error { +func (m *MockSender) SendAppRequest(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppRequest", nodeIDs, requestID, appRequestBytes) + ret := m.ctrl.Call(m, "SendAppRequest", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // SendAppRequest indicates an expected call of SendAppRequest. 
-func (mr *MockSenderMockRecorder) SendAppRequest(nodeIDs, requestID, appRequestBytes interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppRequest", reflect.TypeOf((*MockSender)(nil).SendAppRequest), nodeIDs, requestID, appRequestBytes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppRequest", reflect.TypeOf((*MockSender)(nil).SendAppRequest), arg0, arg1, arg2, arg3) } // SendAppResponse mocks base method. -func (m *MockSender) SendAppResponse(nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { +func (m *MockSender) SendAppResponse(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppResponse", nodeID, requestID, appResponseBytes) + ret := m.ctrl.Call(m, "SendAppResponse", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // SendAppResponse indicates an expected call of SendAppResponse. -func (mr *MockSenderMockRecorder) SendAppResponse(nodeID, requestID, appResponseBytes interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppResponse", reflect.TypeOf((*MockSender)(nil).SendAppResponse), nodeID, requestID, appResponseBytes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppResponse", reflect.TypeOf((*MockSender)(nil).SendAppResponse), arg0, arg1, arg2, arg3) } // SendChits mocks base method. 
-func (m *MockSender) SendChits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) { +func (m *MockSender) SendChits(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3, arg4 []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendChits", nodeID, requestID, votes) + m.ctrl.Call(m, "SendChits", arg0, arg1, arg2, arg3, arg4) } // SendChits indicates an expected call of SendChits. -func (mr *MockSenderMockRecorder) SendChits(nodeID, requestID, votes interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendChits(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendChits", reflect.TypeOf((*MockSender)(nil).SendChits), arg0, arg1, arg2, arg3, arg4) +} + +// SendCrossChainAppRequest mocks base method. +func (m *MockSender) SendCrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendCrossChainAppRequest", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendCrossChainAppRequest indicates an expected call of SendCrossChainAppRequest. +func (mr *MockSenderMockRecorder) SendCrossChainAppRequest(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendChits", reflect.TypeOf((*MockSender)(nil).SendChits), nodeID, requestID, votes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppRequest", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppRequest), arg0, arg1, arg2, arg3) +} + +// SendCrossChainAppResponse mocks base method. +func (m *MockSender) SendCrossChainAppResponse(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendCrossChainAppResponse", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendCrossChainAppResponse indicates an expected call of SendCrossChainAppResponse. 
+func (mr *MockSenderMockRecorder) SendCrossChainAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCrossChainAppResponse", reflect.TypeOf((*MockSender)(nil).SendCrossChainAppResponse), arg0, arg1, arg2, arg3) } // SendGet mocks base method. -func (m *MockSender) SendGet(nodeID ids.NodeID, requestID uint32, containerID ids.ID) { +func (m *MockSender) SendGet(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGet", nodeID, requestID, containerID) + m.ctrl.Call(m, "SendGet", arg0, arg1, arg2, arg3) } // SendGet indicates an expected call of SendGet. -func (mr *MockSenderMockRecorder) SendGet(nodeID, requestID, containerID interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGet", reflect.TypeOf((*MockSender)(nil).SendGet), nodeID, requestID, containerID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGet", reflect.TypeOf((*MockSender)(nil).SendGet), arg0, arg1, arg2, arg3) } // SendGetAccepted mocks base method. -func (m *MockSender) SendGetAccepted(nodeIDs ids.NodeIDSet, requestID uint32, containerIDs []ids.ID) { +func (m *MockSender) SendGetAccepted(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAccepted", nodeIDs, requestID, containerIDs) + m.ctrl.Call(m, "SendGetAccepted", arg0, arg1, arg2, arg3) } // SendGetAccepted indicates an expected call of SendGetAccepted. 
-func (mr *MockSenderMockRecorder) SendGetAccepted(nodeIDs, requestID, containerIDs interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAccepted(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAccepted", reflect.TypeOf((*MockSender)(nil).SendGetAccepted), nodeIDs, requestID, containerIDs) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAccepted", reflect.TypeOf((*MockSender)(nil).SendGetAccepted), arg0, arg1, arg2, arg3) } // SendGetAcceptedFrontier mocks base method. -func (m *MockSender) SendGetAcceptedFrontier(nodeIDs ids.NodeIDSet, requestID uint32) { +func (m *MockSender) SendGetAcceptedFrontier(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAcceptedFrontier", nodeIDs, requestID) + m.ctrl.Call(m, "SendGetAcceptedFrontier", arg0, arg1, arg2) } // SendGetAcceptedFrontier indicates an expected call of SendGetAcceptedFrontier. -func (mr *MockSenderMockRecorder) SendGetAcceptedFrontier(nodeIDs, requestID interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAcceptedFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedFrontier), nodeIDs, requestID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedFrontier", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedFrontier), arg0, arg1, arg2) } // SendGetAcceptedStateSummary mocks base method. 
-func (m *MockSender) SendGetAcceptedStateSummary(nodeIDs ids.NodeIDSet, requestID uint32, heights []uint64) { +func (m *MockSender) SendGetAcceptedStateSummary(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []uint64) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAcceptedStateSummary", nodeIDs, requestID, heights) + m.ctrl.Call(m, "SendGetAcceptedStateSummary", arg0, arg1, arg2, arg3) } // SendGetAcceptedStateSummary indicates an expected call of SendGetAcceptedStateSummary. -func (mr *MockSenderMockRecorder) SendGetAcceptedStateSummary(nodeIDs, requestID, heights interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAcceptedStateSummary(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedStateSummary), nodeIDs, requestID, heights) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedStateSummary", reflect.TypeOf((*MockSender)(nil).SendGetAcceptedStateSummary), arg0, arg1, arg2, arg3) } // SendGetAncestors mocks base method. -func (m *MockSender) SendGetAncestors(nodeID ids.NodeID, requestID uint32, containerID ids.ID) { +func (m *MockSender) SendGetAncestors(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAncestors", nodeID, requestID, containerID) + m.ctrl.Call(m, "SendGetAncestors", arg0, arg1, arg2, arg3) } // SendGetAncestors indicates an expected call of SendGetAncestors. 
-func (mr *MockSenderMockRecorder) SendGetAncestors(nodeID, requestID, containerID interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetAncestors(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAncestors", reflect.TypeOf((*MockSender)(nil).SendGetAncestors), nodeID, requestID, containerID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAncestors", reflect.TypeOf((*MockSender)(nil).SendGetAncestors), arg0, arg1, arg2, arg3) } // SendGetStateSummaryFrontier mocks base method. -func (m *MockSender) SendGetStateSummaryFrontier(nodeIDs ids.NodeIDSet, requestID uint32) { +func (m *MockSender) SendGetStateSummaryFrontier(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetStateSummaryFrontier", nodeIDs, requestID) + m.ctrl.Call(m, "SendGetStateSummaryFrontier", arg0, arg1, arg2) } // SendGetStateSummaryFrontier indicates an expected call of SendGetStateSummaryFrontier. -func (mr *MockSenderMockRecorder) SendGetStateSummaryFrontier(nodeIDs, requestID interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGetStateSummaryFrontier(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendGetStateSummaryFrontier), nodeIDs, requestID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendGetStateSummaryFrontier), arg0, arg1, arg2) } // SendGossip mocks base method. -func (m *MockSender) SendGossip(container []byte) { +func (m *MockSender) SendGossip(arg0 context.Context, arg1 []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGossip", container) + m.ctrl.Call(m, "SendGossip", arg0, arg1) } // SendGossip indicates an expected call of SendGossip. 
-func (mr *MockSenderMockRecorder) SendGossip(container interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendGossip(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGossip", reflect.TypeOf((*MockSender)(nil).SendGossip), container) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGossip", reflect.TypeOf((*MockSender)(nil).SendGossip), arg0, arg1) } // SendPullQuery mocks base method. -func (m *MockSender) SendPullQuery(nodeIDs ids.NodeIDSet, requestID uint32, containerID ids.ID) { +func (m *MockSender) SendPullQuery(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPullQuery", nodeIDs, requestID, containerID) + m.ctrl.Call(m, "SendPullQuery", arg0, arg1, arg2, arg3) } // SendPullQuery indicates an expected call of SendPullQuery. -func (mr *MockSenderMockRecorder) SendPullQuery(nodeIDs, requestID, containerID interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendPullQuery(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPullQuery", reflect.TypeOf((*MockSender)(nil).SendPullQuery), nodeIDs, requestID, containerID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPullQuery", reflect.TypeOf((*MockSender)(nil).SendPullQuery), arg0, arg1, arg2, arg3) } // SendPushQuery mocks base method. -func (m *MockSender) SendPushQuery(nodeIDs ids.NodeIDSet, requestID uint32, container []byte) { +func (m *MockSender) SendPushQuery(arg0 context.Context, arg1 set.Set[ids.NodeID], arg2 uint32, arg3 []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPushQuery", nodeIDs, requestID, container) + m.ctrl.Call(m, "SendPushQuery", arg0, arg1, arg2, arg3) } // SendPushQuery indicates an expected call of SendPushQuery. 
-func (mr *MockSenderMockRecorder) SendPushQuery(nodeIDs, requestID, container interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendPushQuery(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPushQuery", reflect.TypeOf((*MockSender)(nil).SendPushQuery), nodeIDs, requestID, container) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPushQuery", reflect.TypeOf((*MockSender)(nil).SendPushQuery), arg0, arg1, arg2, arg3) } // SendPut mocks base method. -func (m *MockSender) SendPut(nodeID ids.NodeID, requestID uint32, container []byte) { +func (m *MockSender) SendPut(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPut", nodeID, requestID, container) + m.ctrl.Call(m, "SendPut", arg0, arg1, arg2, arg3) } // SendPut indicates an expected call of SendPut. -func (mr *MockSenderMockRecorder) SendPut(nodeID, requestID, container interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendPut(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPut", reflect.TypeOf((*MockSender)(nil).SendPut), nodeID, requestID, container) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPut", reflect.TypeOf((*MockSender)(nil).SendPut), arg0, arg1, arg2, arg3) } // SendStateSummaryFrontier mocks base method. -func (m *MockSender) SendStateSummaryFrontier(nodeID ids.NodeID, requestID uint32, summary []byte) { +func (m *MockSender) SendStateSummaryFrontier(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) { m.ctrl.T.Helper() - m.ctrl.Call(m, "SendStateSummaryFrontier", nodeID, requestID, summary) + m.ctrl.Call(m, "SendStateSummaryFrontier", arg0, arg1, arg2, arg3) } // SendStateSummaryFrontier indicates an expected call of SendStateSummaryFrontier. 
-func (mr *MockSenderMockRecorder) SendStateSummaryFrontier(nodeID, requestID, summary interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendStateSummaryFrontier), nodeID, requestID, summary) -} - -// MockStateSummarySender is a mock of StateSummarySender interface. -type MockStateSummarySender struct { - ctrl *gomock.Controller - recorder *MockStateSummarySenderMockRecorder -} - -// MockStateSummarySenderMockRecorder is the mock recorder for MockStateSummarySender. -type MockStateSummarySenderMockRecorder struct { - mock *MockStateSummarySender -} - -// NewMockStateSummarySender creates a new mock instance. -func NewMockStateSummarySender(ctrl *gomock.Controller) *MockStateSummarySender { - mock := &MockStateSummarySender{ctrl: ctrl} - mock.recorder = &MockStateSummarySenderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStateSummarySender) EXPECT() *MockStateSummarySenderMockRecorder { - return m.recorder -} - -// SendGetStateSummaryFrontier mocks base method. -func (m *MockStateSummarySender) SendGetStateSummaryFrontier(nodeIDs ids.NodeIDSet, requestID uint32) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetStateSummaryFrontier", nodeIDs, requestID) -} - -// SendGetStateSummaryFrontier indicates an expected call of SendGetStateSummaryFrontier. -func (mr *MockStateSummarySenderMockRecorder) SendGetStateSummaryFrontier(nodeIDs, requestID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetStateSummaryFrontier", reflect.TypeOf((*MockStateSummarySender)(nil).SendGetStateSummaryFrontier), nodeIDs, requestID) -} - -// SendStateSummaryFrontier mocks base method. 
-func (m *MockStateSummarySender) SendStateSummaryFrontier(nodeID ids.NodeID, requestID uint32, summary []byte) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendStateSummaryFrontier", nodeID, requestID, summary) -} - -// SendStateSummaryFrontier indicates an expected call of SendStateSummaryFrontier. -func (mr *MockStateSummarySenderMockRecorder) SendStateSummaryFrontier(nodeID, requestID, summary interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendStateSummaryFrontier", reflect.TypeOf((*MockStateSummarySender)(nil).SendStateSummaryFrontier), nodeID, requestID, summary) -} - -// MockAcceptedStateSummarySender is a mock of AcceptedStateSummarySender interface. -type MockAcceptedStateSummarySender struct { - ctrl *gomock.Controller - recorder *MockAcceptedStateSummarySenderMockRecorder -} - -// MockAcceptedStateSummarySenderMockRecorder is the mock recorder for MockAcceptedStateSummarySender. -type MockAcceptedStateSummarySenderMockRecorder struct { - mock *MockAcceptedStateSummarySender -} - -// NewMockAcceptedStateSummarySender creates a new mock instance. -func NewMockAcceptedStateSummarySender(ctrl *gomock.Controller) *MockAcceptedStateSummarySender { - mock := &MockAcceptedStateSummarySender{ctrl: ctrl} - mock.recorder = &MockAcceptedStateSummarySenderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockAcceptedStateSummarySender) EXPECT() *MockAcceptedStateSummarySenderMockRecorder { - return m.recorder -} - -// SendAcceptedStateSummary mocks base method. -func (m *MockAcceptedStateSummarySender) SendAcceptedStateSummary(nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAcceptedStateSummary", nodeID, requestID, summaryIDs) -} - -// SendAcceptedStateSummary indicates an expected call of SendAcceptedStateSummary. 
-func (mr *MockAcceptedStateSummarySenderMockRecorder) SendAcceptedStateSummary(nodeID, requestID, summaryIDs interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedStateSummary", reflect.TypeOf((*MockAcceptedStateSummarySender)(nil).SendAcceptedStateSummary), nodeID, requestID, summaryIDs) -} - -// SendGetAcceptedStateSummary mocks base method. -func (m *MockAcceptedStateSummarySender) SendGetAcceptedStateSummary(nodeIDs ids.NodeIDSet, requestID uint32, heights []uint64) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAcceptedStateSummary", nodeIDs, requestID, heights) -} - -// SendGetAcceptedStateSummary indicates an expected call of SendGetAcceptedStateSummary. -func (mr *MockAcceptedStateSummarySenderMockRecorder) SendGetAcceptedStateSummary(nodeIDs, requestID, heights interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedStateSummary", reflect.TypeOf((*MockAcceptedStateSummarySender)(nil).SendGetAcceptedStateSummary), nodeIDs, requestID, heights) -} - -// MockFrontierSender is a mock of FrontierSender interface. -type MockFrontierSender struct { - ctrl *gomock.Controller - recorder *MockFrontierSenderMockRecorder -} - -// MockFrontierSenderMockRecorder is the mock recorder for MockFrontierSender. -type MockFrontierSenderMockRecorder struct { - mock *MockFrontierSender -} - -// NewMockFrontierSender creates a new mock instance. -func NewMockFrontierSender(ctrl *gomock.Controller) *MockFrontierSender { - mock := &MockFrontierSender{ctrl: ctrl} - mock.recorder = &MockFrontierSenderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockFrontierSender) EXPECT() *MockFrontierSenderMockRecorder { - return m.recorder -} - -// SendAcceptedFrontier mocks base method. 
-func (m *MockFrontierSender) SendAcceptedFrontier(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAcceptedFrontier", nodeID, requestID, containerIDs) -} - -// SendAcceptedFrontier indicates an expected call of SendAcceptedFrontier. -func (mr *MockFrontierSenderMockRecorder) SendAcceptedFrontier(nodeID, requestID, containerIDs interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAcceptedFrontier", reflect.TypeOf((*MockFrontierSender)(nil).SendAcceptedFrontier), nodeID, requestID, containerIDs) -} - -// SendGetAcceptedFrontier mocks base method. -func (m *MockFrontierSender) SendGetAcceptedFrontier(nodeIDs ids.NodeIDSet, requestID uint32) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAcceptedFrontier", nodeIDs, requestID) -} - -// SendGetAcceptedFrontier indicates an expected call of SendGetAcceptedFrontier. -func (mr *MockFrontierSenderMockRecorder) SendGetAcceptedFrontier(nodeIDs, requestID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAcceptedFrontier", reflect.TypeOf((*MockFrontierSender)(nil).SendGetAcceptedFrontier), nodeIDs, requestID) -} - -// MockAcceptedSender is a mock of AcceptedSender interface. -type MockAcceptedSender struct { - ctrl *gomock.Controller - recorder *MockAcceptedSenderMockRecorder -} - -// MockAcceptedSenderMockRecorder is the mock recorder for MockAcceptedSender. -type MockAcceptedSenderMockRecorder struct { - mock *MockAcceptedSender -} - -// NewMockAcceptedSender creates a new mock instance. -func NewMockAcceptedSender(ctrl *gomock.Controller) *MockAcceptedSender { - mock := &MockAcceptedSender{ctrl: ctrl} - mock.recorder = &MockAcceptedSenderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockAcceptedSender) EXPECT() *MockAcceptedSenderMockRecorder { - return m.recorder -} - -// SendAccepted mocks base method. -func (m *MockAcceptedSender) SendAccepted(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAccepted", nodeID, requestID, containerIDs) -} - -// SendAccepted indicates an expected call of SendAccepted. -func (mr *MockAcceptedSenderMockRecorder) SendAccepted(nodeID, requestID, containerIDs interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAccepted", reflect.TypeOf((*MockAcceptedSender)(nil).SendAccepted), nodeID, requestID, containerIDs) -} - -// SendGetAccepted mocks base method. -func (m *MockAcceptedSender) SendGetAccepted(nodeIDs ids.NodeIDSet, requestID uint32, containerIDs []ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAccepted", nodeIDs, requestID, containerIDs) -} - -// SendGetAccepted indicates an expected call of SendGetAccepted. -func (mr *MockAcceptedSenderMockRecorder) SendGetAccepted(nodeIDs, requestID, containerIDs interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAccepted", reflect.TypeOf((*MockAcceptedSender)(nil).SendGetAccepted), nodeIDs, requestID, containerIDs) -} - -// MockFetchSender is a mock of FetchSender interface. -type MockFetchSender struct { - ctrl *gomock.Controller - recorder *MockFetchSenderMockRecorder -} - -// MockFetchSenderMockRecorder is the mock recorder for MockFetchSender. -type MockFetchSenderMockRecorder struct { - mock *MockFetchSender -} - -// NewMockFetchSender creates a new mock instance. -func NewMockFetchSender(ctrl *gomock.Controller) *MockFetchSender { - mock := &MockFetchSender{ctrl: ctrl} - mock.recorder = &MockFetchSenderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockFetchSender) EXPECT() *MockFetchSenderMockRecorder { - return m.recorder -} - -// SendAncestors mocks base method. -func (m *MockFetchSender) SendAncestors(nodeID ids.NodeID, requestID uint32, containers [][]byte) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendAncestors", nodeID, requestID, containers) -} - -// SendAncestors indicates an expected call of SendAncestors. -func (mr *MockFetchSenderMockRecorder) SendAncestors(nodeID, requestID, containers interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAncestors", reflect.TypeOf((*MockFetchSender)(nil).SendAncestors), nodeID, requestID, containers) -} - -// SendGet mocks base method. -func (m *MockFetchSender) SendGet(nodeID ids.NodeID, requestID uint32, containerID ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGet", nodeID, requestID, containerID) -} - -// SendGet indicates an expected call of SendGet. -func (mr *MockFetchSenderMockRecorder) SendGet(nodeID, requestID, containerID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGet", reflect.TypeOf((*MockFetchSender)(nil).SendGet), nodeID, requestID, containerID) -} - -// SendGetAncestors mocks base method. -func (m *MockFetchSender) SendGetAncestors(nodeID ids.NodeID, requestID uint32, containerID ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGetAncestors", nodeID, requestID, containerID) -} - -// SendGetAncestors indicates an expected call of SendGetAncestors. -func (mr *MockFetchSenderMockRecorder) SendGetAncestors(nodeID, requestID, containerID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGetAncestors", reflect.TypeOf((*MockFetchSender)(nil).SendGetAncestors), nodeID, requestID, containerID) -} - -// SendPut mocks base method. 
-func (m *MockFetchSender) SendPut(nodeID ids.NodeID, requestID uint32, container []byte) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPut", nodeID, requestID, container) -} - -// SendPut indicates an expected call of SendPut. -func (mr *MockFetchSenderMockRecorder) SendPut(nodeID, requestID, container interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPut", reflect.TypeOf((*MockFetchSender)(nil).SendPut), nodeID, requestID, container) -} - -// MockQuerySender is a mock of QuerySender interface. -type MockQuerySender struct { - ctrl *gomock.Controller - recorder *MockQuerySenderMockRecorder -} - -// MockQuerySenderMockRecorder is the mock recorder for MockQuerySender. -type MockQuerySenderMockRecorder struct { - mock *MockQuerySender -} - -// NewMockQuerySender creates a new mock instance. -func NewMockQuerySender(ctrl *gomock.Controller) *MockQuerySender { - mock := &MockQuerySender{ctrl: ctrl} - mock.recorder = &MockQuerySenderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockQuerySender) EXPECT() *MockQuerySenderMockRecorder { - return m.recorder -} - -// SendChits mocks base method. -func (m *MockQuerySender) SendChits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendChits", nodeID, requestID, votes) -} - -// SendChits indicates an expected call of SendChits. -func (mr *MockQuerySenderMockRecorder) SendChits(nodeID, requestID, votes interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendChits", reflect.TypeOf((*MockQuerySender)(nil).SendChits), nodeID, requestID, votes) -} - -// SendPullQuery mocks base method. 
-func (m *MockQuerySender) SendPullQuery(nodeIDs ids.NodeIDSet, requestID uint32, containerID ids.ID) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPullQuery", nodeIDs, requestID, containerID) -} - -// SendPullQuery indicates an expected call of SendPullQuery. -func (mr *MockQuerySenderMockRecorder) SendPullQuery(nodeIDs, requestID, containerID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPullQuery", reflect.TypeOf((*MockQuerySender)(nil).SendPullQuery), nodeIDs, requestID, containerID) -} - -// SendPushQuery mocks base method. -func (m *MockQuerySender) SendPushQuery(nodeIDs ids.NodeIDSet, requestID uint32, container []byte) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendPushQuery", nodeIDs, requestID, container) -} - -// SendPushQuery indicates an expected call of SendPushQuery. -func (mr *MockQuerySenderMockRecorder) SendPushQuery(nodeIDs, requestID, container interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPushQuery", reflect.TypeOf((*MockQuerySender)(nil).SendPushQuery), nodeIDs, requestID, container) -} - -// MockGossiper is a mock of Gossiper interface. -type MockGossiper struct { - ctrl *gomock.Controller - recorder *MockGossiperMockRecorder -} - -// MockGossiperMockRecorder is the mock recorder for MockGossiper. -type MockGossiperMockRecorder struct { - mock *MockGossiper -} - -// NewMockGossiper creates a new mock instance. -func NewMockGossiper(ctrl *gomock.Controller) *MockGossiper { - mock := &MockGossiper{ctrl: ctrl} - mock.recorder = &MockGossiperMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGossiper) EXPECT() *MockGossiperMockRecorder { - return m.recorder -} - -// SendGossip mocks base method. 
-func (m *MockGossiper) SendGossip(container []byte) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "SendGossip", container) -} - -// SendGossip indicates an expected call of SendGossip. -func (mr *MockGossiperMockRecorder) SendGossip(container interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendGossip", reflect.TypeOf((*MockGossiper)(nil).SendGossip), container) -} - -// MockAppSender is a mock of AppSender interface. -type MockAppSender struct { - ctrl *gomock.Controller - recorder *MockAppSenderMockRecorder -} - -// MockAppSenderMockRecorder is the mock recorder for MockAppSender. -type MockAppSenderMockRecorder struct { - mock *MockAppSender -} - -// NewMockAppSender creates a new mock instance. -func NewMockAppSender(ctrl *gomock.Controller) *MockAppSender { - mock := &MockAppSender{ctrl: ctrl} - mock.recorder = &MockAppSenderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockAppSender) EXPECT() *MockAppSenderMockRecorder { - return m.recorder -} - -// SendAppGossip mocks base method. -func (m *MockAppSender) SendAppGossip(appGossipBytes []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossip", appGossipBytes) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendAppGossip indicates an expected call of SendAppGossip. -func (mr *MockAppSenderMockRecorder) SendAppGossip(appGossipBytes interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossip", reflect.TypeOf((*MockAppSender)(nil).SendAppGossip), appGossipBytes) -} - -// SendAppGossipSpecific mocks base method. 
-func (m *MockAppSender) SendAppGossipSpecific(nodeIDs ids.NodeIDSet, appGossipBytes []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppGossipSpecific", nodeIDs, appGossipBytes) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendAppGossipSpecific indicates an expected call of SendAppGossipSpecific. -func (mr *MockAppSenderMockRecorder) SendAppGossipSpecific(nodeIDs, appGossipBytes interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppGossipSpecific", reflect.TypeOf((*MockAppSender)(nil).SendAppGossipSpecific), nodeIDs, appGossipBytes) -} - -// SendAppRequest mocks base method. -func (m *MockAppSender) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, appRequestBytes []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppRequest", nodeIDs, requestID, appRequestBytes) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendAppRequest indicates an expected call of SendAppRequest. -func (mr *MockAppSenderMockRecorder) SendAppRequest(nodeIDs, requestID, appRequestBytes interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppRequest", reflect.TypeOf((*MockAppSender)(nil).SendAppRequest), nodeIDs, requestID, appRequestBytes) -} - -// SendAppResponse mocks base method. -func (m *MockAppSender) SendAppResponse(nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SendAppResponse", nodeID, requestID, appResponseBytes) - ret0, _ := ret[0].(error) - return ret0 -} - -// SendAppResponse indicates an expected call of SendAppResponse. 
-func (mr *MockAppSenderMockRecorder) SendAppResponse(nodeID, requestID, appResponseBytes interface{}) *gomock.Call { +func (mr *MockSenderMockRecorder) SendStateSummaryFrontier(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendAppResponse", reflect.TypeOf((*MockAppSender)(nil).SendAppResponse), nodeID, requestID, appResponseBytes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendStateSummaryFrontier", reflect.TypeOf((*MockSender)(nil).SendStateSummaryFrontier), arg0, arg1, arg2, arg3) } diff --git a/avalanchego/snow/engine/common/no_ops_handlers.go b/avalanchego/snow/engine/common/no_ops_handlers.go index 501cac99..c2f2c211 100644 --- a/avalanchego/snow/engine/common/no_ops_handlers.go +++ b/avalanchego/snow/engine/common/no_ops_handlers.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common import ( + "context" "time" "go.uber.org/zap" @@ -15,15 +16,15 @@ import ( ) var ( - _ StateSummaryFrontierHandler = &noOpStateSummaryFrontierHandler{} - _ AcceptedStateSummaryHandler = &noOpAcceptedStateSummaryHandler{} - _ AcceptedFrontierHandler = &noOpAcceptedFrontierHandler{} - _ AcceptedHandler = &noOpAcceptedHandler{} - _ AncestorsHandler = &noOpAncestorsHandler{} - _ PutHandler = &noOpPutHandler{} - _ QueryHandler = &noOpQueryHandler{} - _ ChitsHandler = &noOpChitsHandler{} - _ AppHandler = &noOpAppHandler{} + _ StateSummaryFrontierHandler = (*noOpStateSummaryFrontierHandler)(nil) + _ AcceptedStateSummaryHandler = (*noOpAcceptedStateSummaryHandler)(nil) + _ AcceptedFrontierHandler = (*noOpAcceptedFrontierHandler)(nil) + _ AcceptedHandler = (*noOpAcceptedHandler)(nil) + _ AncestorsHandler = (*noOpAncestorsHandler)(nil) + _ PutHandler = (*noOpPutHandler)(nil) + _ QueryHandler = (*noOpQueryHandler)(nil) + _ ChitsHandler = (*noOpChitsHandler)(nil) + _ AppHandler = (*noOpAppHandler)(nil) ) type noOpStateSummaryFrontierHandler struct { @@ -34,20 +35,20 @@ func NewNoOpStateSummaryFrontierHandler(log logging.Logger) StateSummaryFrontier return &noOpStateSummaryFrontierHandler{log: log} } -func (nop *noOpStateSummaryFrontierHandler) StateSummaryFrontier(nodeID ids.NodeID, requestID uint32, _ []byte) error { +func (nop *noOpStateSummaryFrontierHandler) StateSummaryFrontier(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []byte) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.StateSummaryFrontier), + zap.Stringer("messageOp", message.StateSummaryFrontierOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (nop *noOpStateSummaryFrontierHandler) GetStateSummaryFrontierFailed(nodeID ids.NodeID, requestID uint32) error { +func (nop *noOpStateSummaryFrontierHandler) GetStateSummaryFrontierFailed(_ context.Context, nodeID 
ids.NodeID, requestID uint32) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.GetStateSummaryFrontierFailed), + zap.Stringer("messageOp", message.GetStateSummaryFrontierFailedOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -62,20 +63,20 @@ func NewNoOpAcceptedStateSummaryHandler(log logging.Logger) AcceptedStateSummary return &noOpAcceptedStateSummaryHandler{log: log} } -func (nop *noOpAcceptedStateSummaryHandler) AcceptedStateSummary(nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedStateSummaryHandler) AcceptedStateSummary(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.AcceptedStateSummary), + zap.Stringer("messageOp", message.AcceptedStateSummaryOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (nop *noOpAcceptedStateSummaryHandler) GetAcceptedStateSummaryFailed(nodeID ids.NodeID, requestID uint32) error { +func (nop *noOpAcceptedStateSummaryHandler) GetAcceptedStateSummaryFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.GetAcceptedStateSummaryFailed), + zap.Stringer("messageOp", message.GetAcceptedStateSummaryFailedOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -90,20 +91,20 @@ func NewNoOpAcceptedFrontierHandler(log logging.Logger) AcceptedFrontierHandler return &noOpAcceptedFrontierHandler{log: log} } -func (nop *noOpAcceptedFrontierHandler) AcceptedFrontier(nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedFrontierHandler) AcceptedFrontier(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { 
nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.AcceptedFrontier), + zap.Stringer("messageOp", message.AcceptedFrontierOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (nop *noOpAcceptedFrontierHandler) GetAcceptedFrontierFailed(nodeID ids.NodeID, requestID uint32) error { +func (nop *noOpAcceptedFrontierHandler) GetAcceptedFrontierFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.GetAcceptedFrontierFailed), + zap.Stringer("messageOp", message.GetAcceptedFrontierFailedOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -118,20 +119,20 @@ func NewNoOpAcceptedHandler(log logging.Logger) AcceptedHandler { return &noOpAcceptedHandler{log: log} } -func (nop *noOpAcceptedHandler) Accepted(nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpAcceptedHandler) Accepted(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.Accepted), + zap.Stringer("messageOp", message.AcceptedOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (nop *noOpAcceptedHandler) GetAcceptedFailed(nodeID ids.NodeID, requestID uint32) error { +func (nop *noOpAcceptedHandler) GetAcceptedFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.GetAcceptedFailed), + zap.Stringer("messageOp", message.GetAcceptedFailedOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -146,20 +147,20 @@ func NewNoOpAncestorsHandler(log logging.Logger) AncestorsHandler { return 
&noOpAncestorsHandler{log: log} } -func (nop *noOpAncestorsHandler) Ancestors(nodeID ids.NodeID, requestID uint32, _ [][]byte) error { +func (nop *noOpAncestorsHandler) Ancestors(_ context.Context, nodeID ids.NodeID, requestID uint32, _ [][]byte) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.Ancestors), + zap.Stringer("messageOp", message.AncestorsOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (nop *noOpAncestorsHandler) GetAncestorsFailed(nodeID ids.NodeID, requestID uint32) error { +func (nop *noOpAncestorsHandler) GetAncestorsFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.GetAncestorsFailed), + zap.Stringer("messageOp", message.GetAncestorsFailedOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -174,18 +175,18 @@ func NewNoOpPutHandler(log logging.Logger) PutHandler { return &noOpPutHandler{log: log} } -func (nop *noOpPutHandler) Put(nodeID ids.NodeID, requestID uint32, _ []byte) error { +func (nop *noOpPutHandler) Put(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []byte) error { if requestID == constants.GossipMsgRequestID { nop.log.Verbo("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) } else { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -193,10 +194,10 @@ func (nop *noOpPutHandler) Put(nodeID ids.NodeID, requestID uint32, _ []byte) er return nil } -func (nop *noOpPutHandler) GetFailed(nodeID 
ids.NodeID, requestID uint32) error { +func (nop *noOpPutHandler) GetFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.GetFailed), + zap.Stringer("messageOp", message.GetFailedOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -211,20 +212,20 @@ func NewNoOpQueryHandler(log logging.Logger) QueryHandler { return &noOpQueryHandler{log: log} } -func (nop *noOpQueryHandler) PullQuery(nodeID ids.NodeID, requestID uint32, _ ids.ID) error { +func (nop *noOpQueryHandler) PullQuery(_ context.Context, nodeID ids.NodeID, requestID uint32, _ ids.ID) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.PullQuery), + zap.Stringer("messageOp", message.PullQueryOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (nop *noOpQueryHandler) PushQuery(nodeID ids.NodeID, requestID uint32, _ []byte) error { +func (nop *noOpQueryHandler) PushQuery(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []byte) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.PushQuery), + zap.Stringer("messageOp", message.PushQueryOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -239,20 +240,20 @@ func NewNoOpChitsHandler(log logging.Logger) ChitsHandler { return &noOpChitsHandler{log: log} } -func (nop *noOpChitsHandler) Chits(nodeID ids.NodeID, requestID uint32, _ []ids.ID) error { +func (nop *noOpChitsHandler) Chits(_ context.Context, nodeID ids.NodeID, requestID uint32, _, _ []ids.ID) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.Chits), + zap.Stringer("messageOp", message.ChitsOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", 
requestID), ) return nil } -func (nop *noOpChitsHandler) QueryFailed(nodeID ids.NodeID, requestID uint32) error { +func (nop *noOpChitsHandler) QueryFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.QueryFailed), + zap.Stringer("messageOp", message.QueryFailedOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) @@ -267,40 +268,70 @@ func NewNoOpAppHandler(log logging.Logger) AppHandler { return &noOpAppHandler{log: log} } -func (nop *noOpAppHandler) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, _ []byte) error { +func (nop *noOpAppHandler) CrossChainAppRequest(_ context.Context, chainID ids.ID, requestID uint32, _ time.Time, _ []byte) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.AppRequest), + zap.Stringer("messageOp", message.CrossChainAppRequestOp), + zap.Stringer("chainID", chainID), + zap.Uint32("requestID", requestID), + ) + return nil +} + +func (nop *noOpAppHandler) CrossChainAppRequestFailed(_ context.Context, chainID ids.ID, requestID uint32) error { + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.CrossChainAppRequestFailedOp), + zap.Stringer("chainID", chainID), + zap.Uint32("requestID", requestID), + ) + return nil +} + +func (nop *noOpAppHandler) CrossChainAppResponse(_ context.Context, chainID ids.ID, requestID uint32, _ []byte) error { + nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.CrossChainAppResponseOp), + zap.Stringer("chainID", chainID), + zap.Uint32("requestID", requestID), + ) + return nil +} + +func (nop *noOpAppHandler) AppRequest(_ context.Context, nodeID ids.NodeID, requestID uint32, _ time.Time, _ []byte) error { + 
nop.log.Debug("dropping request", + zap.String("reason", "unhandled by this gear"), + zap.Stringer("messageOp", message.AppRequestOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (nop *noOpAppHandler) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { +func (nop *noOpAppHandler) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.AppRequestFailed), + zap.Stringer("messageOp", message.AppRequestFailedOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (nop *noOpAppHandler) AppResponse(nodeID ids.NodeID, requestID uint32, _ []byte) error { +func (nop *noOpAppHandler) AppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, _ []byte) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.AppResponse), + zap.Stringer("messageOp", message.AppResponseOp), zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) return nil } -func (nop *noOpAppHandler) AppGossip(nodeID ids.NodeID, _ []byte) error { +func (nop *noOpAppHandler) AppGossip(_ context.Context, nodeID ids.NodeID, _ []byte) error { nop.log.Debug("dropping request", zap.String("reason", "unhandled by this gear"), - zap.Stringer("messageOp", message.AppGossip), + zap.Stringer("messageOp", message.AppGossipOp), zap.Stringer("nodeID", nodeID), ) return nil diff --git a/avalanchego/snow/engine/common/queue/job.go b/avalanchego/snow/engine/common/queue/job.go index 5f8f61d3..4ac5a60f 100644 --- a/avalanchego/snow/engine/common/queue/job.go +++ b/avalanchego/snow/engine/common/queue/job.go @@ -1,18 +1,21 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package queue import ( + "context" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) // Job defines the interface required to be placed on the job queue. type Job interface { ID() ids.ID - MissingDependencies() (ids.Set, error) + MissingDependencies(context.Context) (set.Set[ids.ID], error) // Returns true if this job has at least 1 missing dependency - HasMissingDependencies() (bool, error) - Execute() error + HasMissingDependencies(context.Context) (bool, error) + Execute(context.Context) error Bytes() []byte } diff --git a/avalanchego/snow/engine/common/queue/jobs.go b/avalanchego/snow/engine/common/queue/jobs.go index 621911d2..728edcc9 100644 --- a/avalanchego/snow/engine/common/queue/jobs.go +++ b/avalanchego/snow/engine/common/queue/jobs.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue import ( + "context" "fmt" "time" @@ -16,15 +17,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/wrappers" ) -const ( - // StatusUpdateFrequency is how many containers should be processed between - // logs - StatusUpdateFrequency = 2500 -) +const progressUpdateFrequency = 30 * time.Second // Jobs tracks a series of jobs that form a DAG of dependencies. type Jobs struct { @@ -62,16 +60,23 @@ func New( } // SetParser tells this job queue how to parse jobs from the database. 
-func (j *Jobs) SetParser(parser Parser) error { j.state.parser = parser; return nil } +func (j *Jobs) SetParser(parser Parser) error { + j.state.parser = parser + return nil +} -func (j *Jobs) Has(jobID ids.ID) (bool, error) { return j.state.HasJob(jobID) } +func (j *Jobs) Has(jobID ids.ID) (bool, error) { + return j.state.HasJob(jobID) +} // Returns how many pending jobs are waiting in the queue. -func (j *Jobs) PendingJobs() uint64 { return j.state.numJobs } +func (j *Jobs) PendingJobs() uint64 { + return j.state.numJobs +} // Push adds a new job to the queue. Returns true if [job] was added to the queue and false // if [job] was already in the queue. -func (j *Jobs) Push(job Job) (bool, error) { +func (j *Jobs) Push(ctx context.Context, job Job) (bool, error) { jobID := job.ID() if has, err := j.state.HasJob(jobID); err != nil { return false, fmt.Errorf("failed to check for existing job %s due to %w", jobID, err) @@ -79,7 +84,7 @@ func (j *Jobs) Push(job Job) (bool, error) { return false, nil } - deps, err := job.MissingDependencies() + deps, err := job.MissingDependencies(ctx) if err != nil { return false, err } @@ -105,13 +110,20 @@ func (j *Jobs) Push(job Job) (bool, error) { return true, nil } -func (j *Jobs) ExecuteAll(ctx *snow.ConsensusContext, halter common.Haltable, restarted bool, acceptors ...snow.Acceptor) (int, error) { - ctx.Executing(true) - defer ctx.Executing(false) +func (j *Jobs) ExecuteAll( + ctx context.Context, + chainCtx *snow.ConsensusContext, + halter common.Haltable, + restarted bool, + acceptors ...snow.Acceptor, +) (int, error) { + chainCtx.Executing.Set(true) + defer chainCtx.Executing.Set(false) numExecuted := 0 numToExecute := j.state.numJobs startTime := time.Now() + lastProgressUpdate := startTime // Disable and clear state caches to prevent us from attempting to execute // a vertex that was previously parsed, but not saved to the VM. 
Some VMs @@ -123,13 +135,13 @@ func (j *Jobs) ExecuteAll(ctx *snow.ConsensusContext, halter common.Haltable, re j.state.DisableCaching() for { if halter.Halted() { - ctx.Log.Info("interrupted execution", + chainCtx.Log.Info("interrupted execution", zap.Int("numExecuted", numExecuted), ) return numExecuted, nil } - job, err := j.state.RemoveRunnableJob() + job, err := j.state.RemoveRunnableJob(ctx) if err == database.ErrNotFound { break } @@ -138,18 +150,18 @@ func (j *Jobs) ExecuteAll(ctx *snow.ConsensusContext, halter common.Haltable, re } jobID := job.ID() - ctx.Log.Debug("executing", + chainCtx.Log.Debug("executing", zap.Stringer("jobID", jobID), ) jobBytes := job.Bytes() // Note that acceptor.Accept must be called before executing [job] to // honor Acceptor.Accept's invariant. for _, acceptor := range acceptors { - if err := acceptor.Accept(ctx, jobID, jobBytes); err != nil { + if err := acceptor.Accept(chainCtx, jobID, jobBytes); err != nil { return numExecuted, err } } - if err := job.Execute(); err != nil { + if err := job.Execute(ctx); err != nil { return 0, fmt.Errorf("failed to execute job %s due to %w", jobID, err) } @@ -159,11 +171,11 @@ func (j *Jobs) ExecuteAll(ctx *snow.ConsensusContext, halter common.Haltable, re } for _, dependentID := range dependentIDs { - job, err := j.state.GetJob(dependentID) + job, err := j.state.GetJob(ctx, dependentID) if err != nil { return 0, fmt.Errorf("failed to get job %s from blocking jobs due to %w", dependentID, err) } - hasMissingDeps, err := job.HasMissingDependencies() + hasMissingDeps, err := job.HasMissingDependencies(ctx) if err != nil { return 0, fmt.Errorf("failed to get missing dependencies for %s due to %w", dependentID, err) } @@ -179,7 +191,7 @@ func (j *Jobs) ExecuteAll(ctx *snow.ConsensusContext, halter common.Haltable, re } numExecuted++ - if numExecuted%StatusUpdateFrequency == 0 { // Periodically print progress + if time.Since(lastProgressUpdate) > progressUpdateFrequency { // Periodically print 
progress eta := timer.EstimateETA( startTime, uint64(numExecuted), @@ -188,18 +200,20 @@ func (j *Jobs) ExecuteAll(ctx *snow.ConsensusContext, halter common.Haltable, re j.etaMetric.Set(float64(eta)) if !restarted { - ctx.Log.Info("executing operations", + chainCtx.Log.Info("executing operations", zap.Int("numExecuted", numExecuted), zap.Uint64("numToExecute", numToExecute), zap.Duration("eta", eta), ) } else { - ctx.Log.Debug("executing operations", + chainCtx.Log.Debug("executing operations", zap.Int("numExecuted", numExecuted), zap.Uint64("numToExecute", numToExecute), zap.Duration("eta", eta), ) } + + lastProgressUpdate = time.Now() } } @@ -207,11 +221,11 @@ func (j *Jobs) ExecuteAll(ctx *snow.ConsensusContext, halter common.Haltable, re j.etaMetric.Set(0) if !restarted { - ctx.Log.Info("executed operations", + chainCtx.Log.Info("executed operations", zap.Int("numExecuted", numExecuted), ) } else { - ctx.Log.Debug("executed operations", + chainCtx.Log.Debug("executed operations", zap.Int("numExecuted", numExecuted), ) } @@ -232,8 +246,8 @@ type JobsWithMissing struct { // keep the missing ID set in memory to avoid unnecessary database reads and // writes. - missingIDs ids.Set - removeFromMissingIDs, addToMissingIDs ids.Set + missingIDs set.Set[ids.ID] + removeFromMissingIDs, addToMissingIDs set.Set[ids.ID] } func NewWithMissing( @@ -256,9 +270,9 @@ func NewWithMissing( } // SetParser tells this job queue how to parse jobs from the database. -func (jm *JobsWithMissing) SetParser(parser Parser) error { +func (jm *JobsWithMissing) SetParser(ctx context.Context, parser Parser) error { jm.state.parser = parser - return jm.cleanRunnableStack() + return jm.cleanRunnableStack(ctx) } func (jm *JobsWithMissing) Clear() error { @@ -283,7 +297,7 @@ func (jm *JobsWithMissing) Has(jobID ids.ID) (bool, error) { // Push adds a new job to the queue. Returns true if [job] was added to the queue and false // if [job] was already in the queue. 
-func (jm *JobsWithMissing) Push(job Job) (bool, error) { +func (jm *JobsWithMissing) Push(ctx context.Context, job Job) (bool, error) { jobID := job.ID() if has, err := jm.Has(jobID); err != nil { return false, fmt.Errorf("failed to check for existing job %s due to %w", jobID, err) @@ -291,7 +305,7 @@ func (jm *JobsWithMissing) Push(job Job) (bool, error) { return false, nil } - deps, err := job.MissingDependencies() + deps, err := job.MissingDependencies(ctx) if err != nil { return false, err } @@ -339,9 +353,13 @@ func (jm *JobsWithMissing) RemoveMissingID(jobIDs ...ids.ID) { } } -func (jm *JobsWithMissing) MissingIDs() []ids.ID { return jm.missingIDs.List() } +func (jm *JobsWithMissing) MissingIDs() []ids.ID { + return jm.missingIDs.List() +} -func (jm *JobsWithMissing) NumMissingIDs() int { return jm.missingIDs.Len() } +func (jm *JobsWithMissing) NumMissingIDs() int { + return jm.missingIDs.Len() +} // Commit the versionDB to the underlying database. func (jm *JobsWithMissing) Commit() error { @@ -370,7 +388,7 @@ func (jm *JobsWithMissing) Commit() error { // without writing the state transition to the VM's database. When the node restarts, the // VM will not have marked the first block (the proposal block as accepted), but it could // have already been removed from the jobs queue. cleanRunnableStack handles this case. 
-func (jm *JobsWithMissing) cleanRunnableStack() error { +func (jm *JobsWithMissing) cleanRunnableStack(ctx context.Context) error { runnableJobsIter := jm.state.runnableJobIDs.NewIterator() defer runnableJobsIter.Release() @@ -381,11 +399,11 @@ func (jm *JobsWithMissing) cleanRunnableStack() error { return fmt.Errorf("failed to convert jobID bytes into ID due to: %w", err) } - job, err := jm.state.GetJob(jobID) + job, err := jm.state.GetJob(ctx, jobID) if err != nil { return fmt.Errorf("failed to retrieve job on runnnable stack due to: %w", err) } - deps, err := job.MissingDependencies() + deps, err := job.MissingDependencies(ctx) if err != nil { return fmt.Errorf("failed to retrieve missing dependencies of job on runnable stack due to: %w", err) } diff --git a/avalanchego/snow/engine/common/queue/jobs_test.go b/avalanchego/snow/engine/common/queue/jobs_test.go index 0ef9f295..296266ee 100644 --- a/avalanchego/snow/engine/common/queue/jobs_test.go +++ b/avalanchego/snow/engine/common/queue/jobs_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package queue import ( "bytes" + "context" "testing" "github.com/prometheus/client_golang/prometheus" @@ -16,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/set" ) // Magic value that comes from the size in bytes of a serialized key-value bootstrap checkpoint in a database + @@ -24,27 +26,31 @@ const bootstrapProgressCheckpointSize = 55 func testJob(t *testing.T, jobID ids.ID, executed *bool, parentID ids.ID, parentExecuted *bool) *TestJob { return &TestJob{ - T: t, - IDF: func() ids.ID { return jobID }, - MissingDependenciesF: func() (ids.Set, error) { + T: t, + IDF: func() ids.ID { + return jobID + }, + MissingDependenciesF: func(context.Context) (set.Set[ids.ID], error) { if parentID != ids.Empty && !*parentExecuted { - return ids.Set{parentID: struct{}{}}, nil + return set.Set[ids.ID]{parentID: struct{}{}}, nil } - return ids.Set{}, nil + return set.Set[ids.ID]{}, nil }, - HasMissingDependenciesF: func() (bool, error) { + HasMissingDependenciesF: func(context.Context) (bool, error) { if parentID != ids.Empty && !*parentExecuted { return true, nil } return false, nil }, - ExecuteF: func() error { + ExecuteF: func(context.Context) error { if executed != nil { *executed = true } return nil }, - BytesF: func() []byte { return []byte{0} }, + BytesF: func() []byte { + return []byte{0} + }, } } @@ -90,7 +96,7 @@ func TestPushAndExecute(t *testing.T) { require.NoError(err) require.False(has) - pushed, err := jobs.Push(job) + pushed, err := jobs.Push(context.Background(), job) require.True(pushed) require.NoError(err) @@ -115,12 +121,12 @@ func TestPushAndExecute(t *testing.T) { require.NoError(err) require.True(hasNext) - parser.ParseF = func(b []byte) (Job, error) { + parser.ParseF = func(_ context.Context, b []byte) (Job, error) { require.Equal([]byte{0}, b) return job, nil } - count, err := 
jobs.ExecuteAll(snow.DefaultConsensusContextTest(), &common.Halter{}, false) + count, err := jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) require.NoError(err) require.Equal(1, count) @@ -158,9 +164,11 @@ func TestRemoveDependency(t *testing.T) { job0 := testJob(t, job0ID, &executed0, ids.Empty, nil) job1 := testJob(t, job1ID, &executed1, job0ID, &executed0) - job1.BytesF = func() []byte { return []byte{1} } + job1.BytesF = func() []byte { + return []byte{1} + } - pushed, err := jobs.Push(job1) + pushed, err := jobs.Push(context.Background(), job1) require.True(pushed) require.NoError(err) @@ -168,7 +176,7 @@ func TestRemoveDependency(t *testing.T) { require.NoError(err) require.False(hasNext) - pushed, err = jobs.Push(job0) + pushed, err = jobs.Push(context.Background(), job0) require.True(pushed) require.NoError(err) @@ -176,7 +184,7 @@ func TestRemoveDependency(t *testing.T) { require.NoError(err) require.True(hasNext) - parser.ParseF = func(b []byte) (Job, error) { + parser.ParseF = func(_ context.Context, b []byte) (Job, error) { switch { case bytes.Equal(b, []byte{0}): return job0, nil @@ -188,7 +196,7 @@ func TestRemoveDependency(t *testing.T) { } } - count, err := jobs.ExecuteAll(snow.DefaultConsensusContextTest(), &common.Halter{}, false) + count, err := jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) require.NoError(err) require.Equal(2, count) require.True(executed0) @@ -217,11 +225,11 @@ func TestDuplicatedExecutablePush(t *testing.T) { jobID := ids.GenerateTestID() job := testJob(t, jobID, nil, ids.Empty, nil) - pushed, err := jobs.Push(job) + pushed, err := jobs.Push(context.Background(), job) require.True(pushed) require.NoError(err) - pushed, err = jobs.Push(job) + pushed, err = jobs.Push(context.Background(), job) require.False(pushed) require.NoError(err) @@ -231,7 +239,7 @@ func TestDuplicatedExecutablePush(t *testing.T) { jobs, err = New(db, 
"", prometheus.NewRegistry()) require.NoError(err) - pushed, err = jobs.Push(job) + pushed, err = jobs.Push(context.Background(), job) require.False(pushed) require.NoError(err) } @@ -251,11 +259,11 @@ func TestDuplicatedNotExecutablePush(t *testing.T) { job1ID := ids.GenerateTestID() job1 := testJob(t, job1ID, nil, job0ID, &executed0) - pushed, err := jobs.Push(job1) + pushed, err := jobs.Push(context.Background(), job1) require.True(pushed) require.NoError(err) - pushed, err = jobs.Push(job1) + pushed, err = jobs.Push(context.Background(), job1) require.False(pushed) require.NoError(err) @@ -265,7 +273,7 @@ func TestDuplicatedNotExecutablePush(t *testing.T) { jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) - pushed, err = jobs.Push(job1) + pushed, err = jobs.Push(context.Background(), job1) require.False(pushed) require.NoError(err) } @@ -278,7 +286,7 @@ func TestMissingJobs(t *testing.T) { jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - if err := jobs.SetParser(parser); err != nil { + if err := jobs.SetParser(context.Background(), parser); err != nil { t.Fatal(err) } @@ -294,7 +302,7 @@ func TestMissingJobs(t *testing.T) { numMissingIDs := jobs.NumMissingIDs() require.Equal(2, numMissingIDs) - missingIDSet := ids.Set{} + missingIDSet := set.Set[ids.ID]{} missingIDSet.Add(jobs.MissingIDs()...) containsJob0ID := missingIDSet.Contains(job0ID) @@ -310,11 +318,11 @@ func TestMissingJobs(t *testing.T) { jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - if err := jobs.SetParser(parser); err != nil { + if err := jobs.SetParser(context.Background(), parser); err != nil { t.Fatal(err) } - missingIDSet = ids.Set{} + missingIDSet = set.Set[ids.ID]{} missingIDSet.Add(jobs.MissingIDs()...) 
containsJob0ID = missingIDSet.Contains(job0ID) @@ -334,7 +342,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { if err != nil { t.Fatal(err) } - if err := jobs.SetParser(parser); err != nil { + if err := jobs.SetParser(context.Background(), parser); err != nil { t.Fatal(err) } @@ -342,10 +350,16 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { job1ID, executed1 := ids.GenerateTestID(), false job0 := testJob(t, job0ID, &executed0, ids.Empty, nil) job1 := testJob(t, job1ID, &executed1, job0ID, &executed0) - job1.ExecuteF = func() error { return database.ErrClosed } // job1 fails to execute the first time due to a closed database - job1.BytesF = func() []byte { return []byte{1} } - pushed, err := jobs.Push(job1) + // job1 fails to execute the first time due to a closed database + job1.ExecuteF = func(context.Context) error { + return database.ErrClosed + } + job1.BytesF = func() []byte { + return []byte{1} + } + + pushed, err := jobs.Push(context.Background(), job1) require.True(pushed) require.NoError(err) @@ -353,7 +367,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { require.NoError(err) require.False(hasNext) - pushed, err = jobs.Push(job0) + pushed, err = jobs.Push(context.Background(), job0) require.True(pushed) require.NoError(err) @@ -361,7 +375,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { require.NoError(err) require.True(hasNext) - parser.ParseF = func(b []byte) (Job, error) { + parser.ParseF = func(_ context.Context, b []byte) (Job, error) { switch { case bytes.Equal(b, []byte{0}): return job0, nil @@ -373,7 +387,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { } } - _, err = jobs.ExecuteAll(snow.DefaultConsensusContextTest(), &common.Halter{}, false) + _, err = jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) // Assert that the database closed error on job1 causes 
ExecuteAll // to fail in the middle of execution. require.Error(err) @@ -381,7 +395,10 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { require.False(executed1) executed0 = false - job1.ExecuteF = func() error { executed1 = true; return nil } // job1 succeeds the second time + job1.ExecuteF = func(context.Context) error { + executed1 = true // job1 succeeds the second time + return nil + } // Create jobs queue from the same database and ensure that the jobs queue // recovers correctly. @@ -389,7 +406,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { if err != nil { t.Fatal(err) } - if err := jobs.SetParser(parser); err != nil { + if err := jobs.SetParser(context.Background(), parser); err != nil { t.Fatal(err) } @@ -398,7 +415,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { require.Equal(missingIDs[0], job0.ID()) - pushed, err = jobs.Push(job0) + pushed, err = jobs.Push(context.Background(), job0) require.NoError(err) require.True(pushed) @@ -406,7 +423,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { require.NoError(err) require.True(hasNext) - count, err := jobs.ExecuteAll(snow.DefaultConsensusContextTest(), &common.Halter{}, false) + count, err := jobs.ExecuteAll(context.Background(), snow.DefaultConsensusContextTest(), &common.Halter{}, false) require.NoError(err) require.Equal(2, count) require.True(executed1) @@ -422,7 +439,7 @@ func TestInitializeNumJobs(t *testing.T) { if err != nil { t.Fatal(err) } - if err := jobs.SetParser(parser); err != nil { + if err := jobs.SetParser(context.Background(), parser); err != nil { t.Fatal(err) } @@ -432,26 +449,42 @@ func TestInitializeNumJobs(t *testing.T) { job0 := &TestJob{ T: t, - IDF: func() ids.ID { return job0ID }, - MissingDependenciesF: func() (ids.Set, error) { return nil, nil }, - HasMissingDependenciesF: func() (bool, error) { return false, nil }, - BytesF: func() []byte { return []byte{0} }, + IDF: 
func() ids.ID { + return job0ID + }, + MissingDependenciesF: func(context.Context) (set.Set[ids.ID], error) { + return nil, nil + }, + HasMissingDependenciesF: func(context.Context) (bool, error) { + return false, nil + }, + BytesF: func() []byte { + return []byte{0} + }, } job1 := &TestJob{ T: t, - IDF: func() ids.ID { return job1ID }, - MissingDependenciesF: func() (ids.Set, error) { return nil, nil }, - HasMissingDependenciesF: func() (bool, error) { return false, nil }, - BytesF: func() []byte { return []byte{1} }, + IDF: func() ids.ID { + return job1ID + }, + MissingDependenciesF: func(context.Context) (set.Set[ids.ID], error) { + return nil, nil + }, + HasMissingDependenciesF: func(context.Context) (bool, error) { + return false, nil + }, + BytesF: func() []byte { + return []byte{1} + }, } - pushed, err := jobs.Push(job0) + pushed, err := jobs.Push(context.Background(), job0) require.True(pushed) require.NoError(err) require.EqualValues(1, jobs.state.numJobs) - pushed, err = jobs.Push(job1) + pushed, err = jobs.Push(context.Background(), job1) require.True(pushed) require.NoError(err) require.EqualValues(2, jobs.state.numJobs) @@ -482,24 +515,26 @@ func TestClearAll(t *testing.T) { if err != nil { t.Fatal(err) } - if err := jobs.SetParser(parser); err != nil { + if err := jobs.SetParser(context.Background(), parser); err != nil { t.Fatal(err) } job0ID, executed0 := ids.GenerateTestID(), false job1ID, executed1 := ids.GenerateTestID(), false job0 := testJob(t, job0ID, &executed0, ids.Empty, nil) job1 := testJob(t, job1ID, &executed1, job0ID, &executed0) - job1.BytesF = func() []byte { return []byte{1} } + job1.BytesF = func() []byte { + return []byte{1} + } - pushed, err := jobs.Push(job0) + pushed, err := jobs.Push(context.Background(), job0) require.NoError(err) require.True(pushed) - pushed, err = jobs.Push(job1) + pushed, err = jobs.Push(context.Background(), job1) require.True(pushed) require.NoError(err) - parser.ParseF = func(b []byte) (Job, error) { + 
parser.ParseF = func(_ context.Context, b []byte) (Job, error) { switch { case bytes.Equal(b, []byte{0}): return job0, nil diff --git a/avalanchego/snow/engine/common/queue/parser.go b/avalanchego/snow/engine/common/queue/parser.go index b808b2f3..ee8f3980 100644 --- a/avalanchego/snow/engine/common/queue/parser.go +++ b/avalanchego/snow/engine/common/queue/parser.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue +import "context" + // Parser allows parsing a job from bytes. type Parser interface { - Parse([]byte) (Job, error) + Parse(context.Context, []byte) (Job, error) } diff --git a/avalanchego/snow/engine/common/queue/state.go b/avalanchego/snow/engine/common/queue/state.go index 01e22896..5e5ccb23 100644 --- a/avalanchego/snow/engine/common/queue/state.go +++ b/avalanchego/snow/engine/common/queue/state.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue import ( + "context" "fmt" "github.com/prometheus/client_golang/prometheus" @@ -14,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/database/linkeddb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -35,7 +37,7 @@ type state struct { parser Parser runnableJobIDs linkeddb.LinkedDB cachingEnabled bool - jobsCache cache.Cacher + jobsCache cache.Cacher[ids.ID, Job] jobsDB database.Database // Should be prefixed with the jobID that we are attempting to find the // dependencies of. 
This prefixdb.Database should then be wrapped in a @@ -43,7 +45,7 @@ type state struct { dependenciesDB database.Database // This is a cache that tracks LinkedDB iterators that have recently been // made. - dependentsCache cache.Cacher + dependentsCache cache.Cacher[ids.ID, linkeddb.LinkedDB] missingJobIDs linkeddb.LinkedDB // This tracks the summary values of this state. Currently, this only // contains the last known checkpoint of how many jobs are currently in the @@ -60,7 +62,13 @@ func newState( metricsRegisterer prometheus.Registerer, ) (*state, error) { jobsCacheMetricsNamespace := fmt.Sprintf("%s_jobs_cache", metricsNamespace) - jobsCache, err := metercacher.New(jobsCacheMetricsNamespace, metricsRegisterer, &cache.LRU{Size: jobsCacheSize}) + jobsCache, err := metercacher.New[ids.ID, Job]( + jobsCacheMetricsNamespace, + metricsRegisterer, + &cache.LRU[ids.ID, Job]{ + Size: jobsCacheSize, + }, + ) if err != nil { return nil, fmt.Errorf("couldn't create metered cache: %w", err) } @@ -77,7 +85,7 @@ func newState( jobsCache: jobsCache, jobsDB: jobs, dependenciesDB: prefixdb.New(dependenciesPrefix, db), - dependentsCache: &cache.LRU{Size: dependentsCacheSize}, + dependentsCache: &cache.LRU[ids.ID, linkeddb.LinkedDB]{Size: dependentsCacheSize}, missingJobIDs: linkeddb.NewDefault(prefixdb.New(missingJobIDsPrefix, db)), metadataDB: metadataDB, numJobs: numJobs, @@ -166,7 +174,7 @@ func (s *state) HasRunnableJob() (bool, error) { } // RemoveRunnableJob fetches and deletes the next job from the runnable queue -func (s *state) RemoveRunnableJob() (Job, error) { +func (s *state) RemoveRunnableJob(ctx context.Context) (Job, error) { jobIDBytes, err := s.runnableJobIDs.HeadKey() if err != nil { return nil, err @@ -179,7 +187,7 @@ func (s *state) RemoveRunnableJob() (Job, error) { if err != nil { return nil, fmt.Errorf("couldn't convert job ID bytes to job ID: %w", err) } - job, err := s.GetJob(jobID) + job, err := s.GetJob(ctx, jobID) if err != nil { return nil, err } @@ 
-223,17 +231,17 @@ func (s *state) HasJob(id ids.ID) (bool, error) { } // GetJob returns the job [id] -func (s *state) GetJob(id ids.ID) (Job, error) { +func (s *state) GetJob(ctx context.Context, id ids.ID) (Job, error) { if s.cachingEnabled { if job, exists := s.jobsCache.Get(id); exists { - return job.(Job), nil + return job, nil } } jobBytes, err := s.jobsDB.Get(id[:]) if err != nil { return nil, err } - job, err := s.parser.Parse(jobBytes) + job, err := s.parser.Parse(ctx, jobBytes) if err == nil && s.cachingEnabled { s.jobsCache.Put(id, job) } @@ -274,7 +282,7 @@ func (s *state) DisableCaching() { s.cachingEnabled = false } -func (s *state) AddMissingJobIDs(missingIDs ids.Set) error { +func (s *state) AddMissingJobIDs(missingIDs set.Set[ids.ID]) error { for missingID := range missingIDs { missingID := missingID if err := s.missingJobIDs.Put(missingID[:], nil); err != nil { @@ -284,7 +292,7 @@ func (s *state) AddMissingJobIDs(missingIDs ids.Set) error { return nil } -func (s *state) RemoveMissingJobIDs(missingIDs ids.Set) error { +func (s *state) RemoveMissingJobIDs(missingIDs set.Set[ids.ID]) error { for missingID := range missingIDs { missingID := missingID if err := s.missingJobIDs.Delete(missingID[:]); err != nil { @@ -311,8 +319,8 @@ func (s *state) MissingJobIDs() ([]ids.ID, error) { func (s *state) getDependentsDB(dependency ids.ID) linkeddb.LinkedDB { if s.cachingEnabled { - if dependentsDBIntf, ok := s.dependentsCache.Get(dependency); ok { - return dependentsDBIntf.(linkeddb.LinkedDB) + if dependentsDB, ok := s.dependentsCache.Get(dependency); ok { + return dependentsDB } } dependencyDB := prefixdb.New(dependency[:], s.dependenciesDB) diff --git a/avalanchego/snow/engine/common/queue/test_job.go b/avalanchego/snow/engine/common/queue/test_job.go index 83abd2ec..09e51855 100644 --- a/avalanchego/snow/engine/common/queue/test_job.go +++ b/avalanchego/snow/engine/common/queue/test_job.go @@ -1,13 +1,15 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue import ( + "context" "errors" "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) var ( @@ -26,10 +28,10 @@ type TestJob struct { CantHasMissingDependencies bool IDF func() ids.ID - MissingDependenciesF func() (ids.Set, error) - ExecuteF func() error + MissingDependenciesF func(context.Context) (set.Set[ids.ID], error) + ExecuteF func(context.Context) error BytesF func() []byte - HasMissingDependenciesF func() (bool, error) + HasMissingDependenciesF func(context.Context) (bool, error) } func (j *TestJob) Default(cant bool) { @@ -50,19 +52,19 @@ func (j *TestJob) ID() ids.ID { return ids.ID{} } -func (j *TestJob) MissingDependencies() (ids.Set, error) { +func (j *TestJob) MissingDependencies(ctx context.Context) (set.Set[ids.ID], error) { if j.MissingDependenciesF != nil { - return j.MissingDependenciesF() + return j.MissingDependenciesF(ctx) } if j.CantMissingDependencies && j.T != nil { j.T.Fatalf("Unexpectedly called MissingDependencies") } - return ids.Set{}, nil + return set.Set[ids.ID]{}, nil } -func (j *TestJob) Execute() error { +func (j *TestJob) Execute(ctx context.Context) error { if j.ExecuteF != nil { - return j.ExecuteF() + return j.ExecuteF(ctx) } if j.CantExecute && j.T != nil { j.T.Fatal(errExecute) @@ -80,9 +82,9 @@ func (j *TestJob) Bytes() []byte { return nil } -func (j *TestJob) HasMissingDependencies() (bool, error) { +func (j *TestJob) HasMissingDependencies(ctx context.Context) (bool, error) { if j.HasMissingDependenciesF != nil { - return j.HasMissingDependenciesF() + return j.HasMissingDependenciesF(ctx) } if j.CantHasMissingDependencies && j.T != nil { j.T.Fatal(errHasMissingDependencies) diff --git a/avalanchego/snow/engine/common/queue/test_parser.go b/avalanchego/snow/engine/common/queue/test_parser.go index b28a288a..1e7fa9cd 100644 --- 
a/avalanchego/snow/engine/common/queue/test_parser.go +++ b/avalanchego/snow/engine/common/queue/test_parser.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package queue import ( + "context" "errors" "testing" ) @@ -16,14 +17,16 @@ type TestParser struct { CantParse bool - ParseF func([]byte) (Job, error) + ParseF func(context.Context, []byte) (Job, error) } -func (p *TestParser) Default(cant bool) { p.CantParse = cant } +func (p *TestParser) Default(cant bool) { + p.CantParse = cant +} -func (p *TestParser) Parse(b []byte) (Job, error) { +func (p *TestParser) Parse(ctx context.Context, b []byte) (Job, error) { if p.ParseF != nil { - return p.ParseF(b) + return p.ParseF(ctx, b) } if p.CantParse && p.T != nil { p.T.Fatal(errParse) diff --git a/avalanchego/snow/engine/common/requests.go b/avalanchego/snow/engine/common/requests.go index dbf64c25..ce66585e 100644 --- a/avalanchego/snow/engine/common/requests.go +++ b/avalanchego/snow/engine/common/requests.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -86,7 +86,9 @@ func (r *Requests) RemoveAny(containerID ids.ID) bool { } // Len returns the total number of outstanding requests. -func (r *Requests) Len() int { return len(r.idToReq) } +func (r *Requests) Len() int { + return len(r.idToReq) +} // Contains returns true if there is an outstanding request for the container // ID. diff --git a/avalanchego/snow/engine/common/requests_test.go b/avalanchego/snow/engine/common/requests_test.go index 7984a7f8..4d779a64 100644 --- a/avalanchego/snow/engine/common/requests_test.go +++ b/avalanchego/snow/engine/common/requests_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/avalanchego/snow/engine/common/sender.go b/avalanchego/snow/engine/common/sender.go index 2ae5ce91..1c657daa 100644 --- a/avalanchego/snow/engine/common/sender.go +++ b/avalanchego/snow/engine/common/sender.go @@ -1,11 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" ) // Sender defines how a consensus engine sends messages and requests to other @@ -28,22 +31,22 @@ type Sender interface { type StateSummarySender interface { // SendGetStateSummaryFrontier requests that every node in [nodeIDs] sends a // StateSummaryFrontier message. - SendGetStateSummaryFrontier(nodeIDs ids.NodeIDSet, requestID uint32) + SendGetStateSummaryFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) // SendStateSummaryFrontier responds to a StateSummaryFrontier message with this // engine's current state summary frontier. - SendStateSummaryFrontier(nodeID ids.NodeID, requestID uint32, summary []byte) + SendStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) } type AcceptedStateSummarySender interface { // SendGetAcceptedStateSummary requests that every node in [nodeIDs] sends an // AcceptedStateSummary message with all the state summary IDs referenced by [heights] // that the node thinks are accepted. 
- SendGetAcceptedStateSummary(nodeIDs ids.NodeIDSet, requestID uint32, heights []uint64) + SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, heights []uint64) // SendAcceptedStateSummary responds to a AcceptedStateSummary message with a // set of summary ids that are accepted. - SendAcceptedStateSummary(nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) + SendAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) } // FrontierSender defines how a consensus engine sends frontier messages to @@ -51,11 +54,12 @@ type AcceptedStateSummarySender interface { type FrontierSender interface { // SendGetAcceptedFrontier requests that every node in [nodeIDs] sends an // AcceptedFrontier message. - SendGetAcceptedFrontier(nodeIDs ids.NodeIDSet, requestID uint32) + SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) // SendAcceptedFrontier responds to a AcceptedFrontier message with this // engine's current accepted frontier. SendAcceptedFrontier( + ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID, @@ -69,14 +73,15 @@ type AcceptedSender interface { // message with all the IDs in [containerIDs] that the node thinks are // accepted. SendGetAccepted( - nodeIDs ids.NodeIDSet, + ctx context.Context, + nodeIDs set.Set[ids.NodeID], requestID uint32, containerIDs []ids.ID, ) // SendAccepted responds to a GetAccepted message with a set of IDs of // containers that are accepted. - SendAccepted(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) + SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) } // FetchSender defines how a consensus engine sends retrieval messages to other @@ -84,18 +89,18 @@ type AcceptedSender interface { type FetchSender interface { // Request that the specified node send the specified container to this // node. 
- SendGet(nodeID ids.NodeID, requestID uint32, containerID ids.ID) + SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) // SendGetAncestors requests that node [nodeID] send container [containerID] // and its ancestors. - SendGetAncestors(nodeID ids.NodeID, requestID uint32, containerID ids.ID) + SendGetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) // Tell the specified node about [container]. - SendPut(nodeID ids.NodeID, requestID uint32, container []byte) + SendPut(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) // Give the specified node several containers at once. Should be in response // to a GetAncestors message with request ID [requestID] from the node. - SendAncestors(nodeID ids.NodeID, requestID uint32, containers [][]byte) + SendAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) } // QuerySender defines how a consensus engine sends query messages to other @@ -105,26 +110,25 @@ type QuerySender interface { // existence of the specified container. // This is the same as PullQuery, except that this message includes the body // of the container rather than its ID. - SendPushQuery(nodeIDs ids.NodeIDSet, requestID uint32, container []byte) + SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, container []byte) // Request from the specified nodes their preferred frontier, given the // existence of the specified container. 
- SendPullQuery(nodeIDs ids.NodeIDSet, requestID uint32, containerID ids.ID) + SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID) // Send chits to the specified node - SendChits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) + SendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) } // Gossiper defines how a consensus engine gossips a container on the accepted // frontier to other nodes type Gossiper interface { // Gossip the provided container throughout the network - SendGossip(container []byte) + SendGossip(ctx context.Context, container []byte) } -// AppSender sends application (VM) level messages. -// See also common.AppHandler. -type AppSender interface { +// NetworkAppSender sends VM-level messages to nodes in the network. +type NetworkAppSender interface { // Send an application-level request. // A nil return value guarantees that for each nodeID in [nodeIDs], // the VM corresponding to this AppSender eventually receives either: @@ -132,14 +136,44 @@ type AppSender interface { // * An AppRequestFailed from nodeID with ID [requestID] // Exactly one of the above messages will eventually be received per nodeID. // A non-nil error should be considered fatal. - SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, appRequestBytes []byte) error + SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error // Send an application-level response to a request. // This response must be in response to an AppRequest that the VM corresponding // to this AppSender received from [nodeID] with ID [requestID]. // A non-nil error should be considered fatal. - SendAppResponse(nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error + SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error // Gossip an application-level message. 
// A non-nil error should be considered fatal. - SendAppGossip(appGossipBytes []byte) error - SendAppGossipSpecific(nodeIDs ids.NodeIDSet, appGossipBytes []byte) error + SendAppGossip(ctx context.Context, appGossipBytes []byte) error + SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error +} + +// CrossChainAppSender sends local VM-level messages to another VM. +type CrossChainAppSender interface { + // SendCrossChainAppRequest sends an application-level request to a + // specific chain. + // + // A nil return value guarantees that the VM corresponding to this + // CrossChainAppSender eventually receives either: + // * A CrossChainAppResponse from [chainID] with ID [requestID] + // * A CrossChainAppRequestFailed from [chainID] with ID [requestID] + // Exactly one of the above messages will eventually be received from + // [chainID]. + // A non-nil error should be considered fatal. + SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error + // SendCrossChainAppResponse sends an application-level response to a + // specific chain + // + // This response must be in response to a CrossChainAppRequest that the VM + // corresponding to this CrossChainAppSender received from [chainID] with ID + // [requestID]. + // A non-nil error should be considered fatal. + SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error +} + +// AppSender sends application (VM) level messages. +// See also common.AppHandler. +type AppSender interface { + NetworkAppSender + CrossChainAppSender } diff --git a/avalanchego/snow/engine/common/state_syncer.go b/avalanchego/snow/engine/common/state_syncer.go index 47bd1234..e23ad126 100644 --- a/avalanchego/snow/engine/common/state_syncer.go +++ b/avalanchego/snow/engine/common/state_syncer.go @@ -1,8 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common +import "context" + // StateSyncer controls the selection and verification of state summaries // to drive VM state syncing. It collects the latest state summaries and elicit // votes on them, making sure that a qualified majority of nodes support the @@ -12,5 +14,5 @@ type StateSyncer interface { // IsEnabled returns true if the underlying VM wants to perform state sync. // Any returned error will be considered fatal. - IsEnabled() (bool, error) + IsEnabled(context.Context) (bool, error) } diff --git a/avalanchego/snow/engine/common/subnet.go b/avalanchego/snow/engine/common/subnet.go deleted file mode 100644 index 7f75ab53..00000000 --- a/avalanchego/snow/engine/common/subnet.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package common - -import ( - "github.com/ava-labs/avalanchego/ids" -) - -// Subnet describes the standard interface of a subnet description -type Subnet interface { - // Returns true iff the subnet is done bootstrapping - IsBootstrapped() bool - - // Bootstrapped marks the named chain as being bootstrapped - Bootstrapped(chainID ids.ID) -} diff --git a/avalanchego/snow/engine/common/subnet_tracker.go b/avalanchego/snow/engine/common/subnet_tracker.go deleted file mode 100644 index 6708feb1..00000000 --- a/avalanchego/snow/engine/common/subnet_tracker.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package common - -import ( - "github.com/ava-labs/avalanchego/ids" -) - -// SubnetTracker describes the interface for checking if a node is tracking a -// subnet, namely if a node has whitelisted a subnet. 
-type SubnetTracker interface { - // TracksSubnet returns true if [nodeID] tracks [subnetID] - TracksSubnet(nodeID ids.NodeID, subnetID ids.ID) bool -} diff --git a/avalanchego/snow/engine/common/test_subnet.go b/avalanchego/snow/engine/common/test_bootstrap_tracker.go similarity index 58% rename from avalanchego/snow/engine/common/test_subnet.go rename to avalanchego/snow/engine/common/test_bootstrap_tracker.go index 5d220811..5c5ec4d7 100644 --- a/avalanchego/snow/engine/common/test_subnet.go +++ b/avalanchego/snow/engine/common/test_bootstrap_tracker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -9,26 +9,29 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -// SubnetTest is a test subnet -type SubnetTest struct { +// BootstrapTrackerTest is a test subnet +type BootstrapTrackerTest struct { T *testing.T - CantIsBootstrapped, CantBootstrapped bool + CantIsBootstrapped, CantBootstrapped, CantOnBootstrapCompleted bool IsBootstrappedF func() bool BootstrappedF func(ids.ID) + + OnBootstrapCompletedF func() chan struct{} } // Default set the default callable value to [cant] -func (s *SubnetTest) Default(cant bool) { +func (s *BootstrapTrackerTest) Default(cant bool) { s.CantIsBootstrapped = cant s.CantBootstrapped = cant + s.CantOnBootstrapCompleted = cant } // IsBootstrapped calls IsBootstrappedF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. Defaults to returning false. -func (s *SubnetTest) IsBootstrapped() bool { +func (s *BootstrapTrackerTest) IsBootstrapped() bool { if s.IsBootstrappedF != nil { return s.IsBootstrappedF() } @@ -41,10 +44,19 @@ func (s *SubnetTest) IsBootstrapped() bool { // Bootstrapped calls BootstrappedF if it was initialized. 
If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SubnetTest) Bootstrapped(chainID ids.ID) { +func (s *BootstrapTrackerTest) Bootstrapped(chainID ids.ID) { if s.BootstrappedF != nil { s.BootstrappedF(chainID) } else if s.CantBootstrapped && s.T != nil { s.T.Fatalf("Unexpectedly called Bootstrapped") } } + +func (s *BootstrapTrackerTest) OnBootstrapCompleted() chan struct{} { + if s.OnBootstrapCompletedF != nil { + return s.OnBootstrapCompletedF() + } else if s.CantOnBootstrapCompleted && s.T != nil { + s.T.Fatalf("Unexpectedly called OnBootstrapCompleted") + } + return nil +} diff --git a/avalanchego/snow/engine/common/test_bootstrapable.go b/avalanchego/snow/engine/common/test_bootstrapable.go index 9be078eb..ddc67b48 100644 --- a/avalanchego/snow/engine/common/test_bootstrapable.go +++ b/avalanchego/snow/engine/common/test_bootstrapable.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common import ( + "context" "errors" "testing" @@ -11,7 +12,7 @@ import ( ) var ( - _ Bootstrapable = &BootstrapableTest{} + _ Bootstrapable = (*BootstrapableTest)(nil) errForceAccepted = errors.New("unexpectedly called ForceAccepted") errClear = errors.New("unexpectedly called Clear") @@ -24,7 +25,7 @@ type BootstrapableTest struct { CantForceAccepted, CantClear bool ClearF func() error - ForceAcceptedF func(acceptedContainerIDs []ids.ID) error + ForceAcceptedF func(ctx context.Context, acceptedContainerIDs []ids.ID) error } // Default sets the default on call handling @@ -44,9 +45,9 @@ func (b *BootstrapableTest) Clear() error { return nil } -func (b *BootstrapableTest) ForceAccepted(containerIDs []ids.ID) error { +func (b *BootstrapableTest) ForceAccepted(ctx context.Context, containerIDs []ids.ID) error { if b.ForceAcceptedF != nil { - return b.ForceAcceptedF(containerIDs) + return b.ForceAcceptedF(ctx, containerIDs) } else if b.CantForceAccepted { if b.T != nil { b.T.Fatalf("Unexpectedly called ForceAccepted") diff --git a/avalanchego/snow/engine/common/test_bootstrapper.go b/avalanchego/snow/engine/common/test_bootstrapper.go index 79b7fb90..1f8fd59b 100644 --- a/avalanchego/snow/engine/common/test_bootstrapper.go +++ b/avalanchego/snow/engine/common/test_bootstrapper.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common var ( - _ Engine = &BootstrapperTest{} - _ Bootstrapable = &BootstrapperTest{} + _ Engine = (*BootstrapperTest)(nil) + _ Bootstrapable = (*BootstrapperTest)(nil) ) // EngineTest is a test engine diff --git a/avalanchego/snow/engine/common/test_config.go b/avalanchego/snow/engine/common/test_config.go index e6b4304a..ceca80f2 100644 --- a/avalanchego/snow/engine/common/test_config.go +++ b/avalanchego/snow/engine/common/test_config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -13,9 +13,13 @@ import ( // DefaultConfigTest returns a test configuration func DefaultConfigTest() Config { isBootstrapped := false - subnet := &SubnetTest{ - IsBootstrappedF: func() bool { return isBootstrapped }, - BootstrappedF: func(ids.ID) { isBootstrapped = true }, + bootstrapTracker := &BootstrapTrackerTest{ + IsBootstrappedF: func() bool { + return isBootstrapped + }, + BootstrappedF: func(ids.ID) { + isBootstrapped = true + }, } beacons := validators.NewSet() @@ -26,12 +30,11 @@ func DefaultConfigTest() Config { return Config{ Ctx: snow.DefaultConsensusContextTest(), - Validators: validators.NewSet(), Beacons: beacons, StartupTracker: startupTracker, Sender: &SenderTest{}, Bootstrapable: &BootstrapableTest{}, - Subnet: subnet, + BootstrapTracker: bootstrapTracker, Timer: &TimerTest{}, AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, diff --git a/avalanchego/snow/engine/common/test_engine.go b/avalanchego/snow/engine/common/test_engine.go index 81cc8937..6645efad 100644 --- a/avalanchego/snow/engine/common/test_engine.go +++ b/avalanchego/snow/engine/common/test_engine.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common import ( + "context" "errors" "testing" "time" @@ -41,7 +42,7 @@ var ( errChits = errors.New("unexpectedly called Chits") errStart = errors.New("unexpectedly called Start") - _ Engine = &EngineTest{} + _ Engine = (*EngineTest)(nil) ) // EngineTest is a test engine @@ -93,6 +94,10 @@ type EngineTest struct { CantHealth, + CantCrossChainAppRequest, + CantCrossChainAppRequestFailed, + CantCrossChainAppResponse, + CantAppRequest, CantAppResponse, CantAppGossip, @@ -100,28 +105,34 @@ type EngineTest struct { CantGetVM bool - StartF func(startReqID uint32) error - IsBootstrappedF func() bool - ContextF func() *snow.ConsensusContext - HaltF func() - TimeoutF, GossipF, ShutdownF func() error - NotifyF func(Message) error - GetF, GetAncestorsF, PullQueryF func(nodeID ids.NodeID, requestID uint32, containerID ids.ID) error - PutF, PushQueryF func(nodeID ids.NodeID, requestID uint32, container []byte) error - AncestorsF func(nodeID ids.NodeID, requestID uint32, containers [][]byte) error - AcceptedFrontierF, GetAcceptedF, AcceptedF, ChitsF func(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error + StartF func(ctx context.Context, startReqID uint32) error + IsBootstrappedF func() bool + ContextF func() *snow.ConsensusContext + HaltF func(context.Context) + TimeoutF, GossipF, ShutdownF func(context.Context) error + NotifyF func(context.Context, Message) error + GetF, GetAncestorsF, PullQueryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error + PutF, PushQueryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) error + AncestorsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) error + AcceptedFrontierF, GetAcceptedF, AcceptedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID) error + ChitsF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID, acceptedIDs []ids.ID) error 
GetStateSummaryFrontierF, GetStateSummaryFrontierFailedF, GetAcceptedStateSummaryFailedF, GetAcceptedFrontierF, GetFailedF, GetAncestorsFailedF, - QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF, AppRequestFailedF func(nodeID ids.NodeID, requestID uint32) error - StateSummaryFrontierF func(nodeID ids.NodeID, requestID uint32, summary []byte) error - GetAcceptedStateSummaryF func(nodeID ids.NodeID, requestID uint32, keys []uint64) error - AcceptedStateSummaryF func(nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error - ConnectedF func(nodeID ids.NodeID, nodeVersion *version.Application) error - DisconnectedF func(nodeID ids.NodeID) error - HealthF func() (interface{}, error) - GetVMF func() VM - AppRequestF, AppResponseF func(nodeID ids.NodeID, requestID uint32, msg []byte) error - AppGossipF func(nodeID ids.NodeID, msg []byte) error + QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error + AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error + StateSummaryFrontierF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) error + GetAcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, keys []uint64) error + AcceptedStateSummaryF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error + ConnectedF func(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error + DisconnectedF func(ctx context.Context, nodeID ids.NodeID) error + HealthF func(context.Context) (interface{}, error) + GetVMF func() VM + AppRequestF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, msg []byte) error + AppResponseF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, msg []byte) error + AppGossipF func(ctx context.Context, nodeID ids.NodeID, msg []byte) error + CrossChainAppRequestF func(ctx context.Context, 
chainID ids.ID, requestID uint32, deadline time.Time, msg []byte) error + CrossChainAppResponseF func(ctx context.Context, chainID ids.ID, requestID uint32, msg []byte) error + CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32) error } func (e *EngineTest) Default(cant bool) { @@ -163,13 +174,19 @@ func (e *EngineTest) Default(cant bool) { e.CantAppResponse = cant e.CantAppGossip = cant e.CantGetVM = cant + e.CantCrossChainAppRequest = cant + e.CantCrossChainAppRequestFailed = cant + e.CantCrossChainAppResponse = cant } -func (e *EngineTest) Start(startReqID uint32) error { +func (e *EngineTest) Start(ctx context.Context, startReqID uint32) error { if e.StartF != nil { - return e.StartF(startReqID) + return e.StartF(ctx, startReqID) + } + if !e.CantStart { + return nil } - if e.CantStart && e.T != nil { + if e.T != nil { e.T.Fatalf("Unexpectedly called Start") } return errStart @@ -179,15 +196,18 @@ func (e *EngineTest) Context() *snow.ConsensusContext { if e.ContextF != nil { return e.ContextF() } - if e.CantContext && e.T != nil { + if !e.CantContext { + return nil + } + if e.T != nil { e.T.Fatalf("Unexpectedly called Context") } return nil } -func (e *EngineTest) Timeout() error { +func (e *EngineTest) Timeout(ctx context.Context) error { if e.TimeoutF != nil { - return e.TimeoutF() + return e.TimeoutF(ctx) } if !e.CantTimeout { return nil @@ -198,9 +218,9 @@ func (e *EngineTest) Timeout() error { return errTimeout } -func (e *EngineTest) Gossip() error { +func (e *EngineTest) Gossip(ctx context.Context) error { if e.GossipF != nil { - return e.GossipF() + return e.GossipF(ctx) } if !e.CantGossip { return nil @@ -211,17 +231,19 @@ func (e *EngineTest) Gossip() error { return errGossip } -func (e *EngineTest) Halt() { +func (e *EngineTest) Halt(ctx context.Context) { if e.HaltF != nil { - e.HaltF() - } else if e.CantHalt && e.T != nil { + e.HaltF(ctx) + return + } + if e.CantHalt && e.T != nil { e.T.Fatalf("Unexpectedly called 
Halt") } } -func (e *EngineTest) Shutdown() error { +func (e *EngineTest) Shutdown(ctx context.Context) error { if e.ShutdownF != nil { - return e.ShutdownF() + return e.ShutdownF(ctx) } if !e.CantShutdown { return nil @@ -232,9 +254,9 @@ func (e *EngineTest) Shutdown() error { return errShutdown } -func (e *EngineTest) Notify(msg Message) error { +func (e *EngineTest) Notify(ctx context.Context, msg Message) error { if e.NotifyF != nil { - return e.NotifyF(msg) + return e.NotifyF(ctx, msg) } if !e.CantNotify { return nil @@ -245,69 +267,87 @@ func (e *EngineTest) Notify(msg Message) error { return errNotify } -func (e *EngineTest) GetStateSummaryFrontier(validatorID ids.NodeID, requestID uint32) error { +func (e *EngineTest) GetStateSummaryFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32) error { if e.GetStateSummaryFrontierF != nil { - return e.GetStateSummaryFrontierF(validatorID, requestID) + return e.GetStateSummaryFrontierF(ctx, validatorID, requestID) + } + if !e.CantGetStateSummaryFrontier { + return nil } - if e.CantGetStateSummaryFrontier && e.T != nil { + if e.T != nil { e.T.Fatalf("Unexpectedly called GetStateSummaryFrontier") } return errGetStateSummaryFrontier } -func (e *EngineTest) StateSummaryFrontier(validatorID ids.NodeID, requestID uint32, summary []byte) error { +func (e *EngineTest) StateSummaryFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32, summary []byte) error { if e.StateSummaryFrontierF != nil { - return e.StateSummaryFrontierF(validatorID, requestID, summary) + return e.StateSummaryFrontierF(ctx, validatorID, requestID, summary) + } + if !e.CantStateSummaryFrontier { + return nil } - if e.CantGetStateSummaryFrontier && e.T != nil { + if e.T != nil { e.T.Fatalf("Unexpectedly called CantStateSummaryFrontier") } return errStateSummaryFrontier } -func (e *EngineTest) GetStateSummaryFrontierFailed(validatorID ids.NodeID, requestID uint32) error { +func (e *EngineTest) 
GetStateSummaryFrontierFailed(ctx context.Context, validatorID ids.NodeID, requestID uint32) error { if e.GetStateSummaryFrontierFailedF != nil { - return e.GetStateSummaryFrontierFailedF(validatorID, requestID) + return e.GetStateSummaryFrontierFailedF(ctx, validatorID, requestID) + } + if !e.CantGetStateSummaryFrontierFailed { + return nil } - if e.CantGetStateSummaryFrontierFailed && e.T != nil { + if e.T != nil { e.T.Fatalf("Unexpectedly called GetStateSummaryFrontierFailed") } return errGetStateSummaryFrontierFailed } -func (e *EngineTest) GetAcceptedStateSummary(validatorID ids.NodeID, requestID uint32, keys []uint64) error { +func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, keys []uint64) error { if e.GetAcceptedStateSummaryF != nil { - return e.GetAcceptedStateSummaryF(validatorID, requestID, keys) + return e.GetAcceptedStateSummaryF(ctx, validatorID, requestID, keys) + } + if !e.CantGetAcceptedStateSummary { + return nil } - if e.CantGetAcceptedStateSummary && e.T != nil { + if e.T != nil { e.T.Fatalf("Unexpectedly called GetAcceptedStateSummary") } return errGetAcceptedStateSummary } -func (e *EngineTest) AcceptedStateSummary(validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { if e.AcceptedStateSummaryF != nil { - return e.AcceptedStateSummaryF(validatorID, requestID, summaryIDs) + return e.AcceptedStateSummaryF(ctx, validatorID, requestID, summaryIDs) + } + if !e.CantAcceptedStateSummary { + return nil } - if e.CantAcceptedStateSummary && e.T != nil { + if e.T != nil { e.T.Fatalf("Unexpectedly called AcceptedStateSummary") } return errAcceptedStateSummary } -func (e *EngineTest) GetAcceptedStateSummaryFailed(validatorID ids.NodeID, requestID uint32) error { +func (e *EngineTest) GetAcceptedStateSummaryFailed(ctx context.Context, validatorID 
ids.NodeID, requestID uint32) error { if e.GetAcceptedStateSummaryFailedF != nil { - return e.GetAcceptedStateSummaryFailedF(validatorID, requestID) + return e.GetAcceptedStateSummaryFailedF(ctx, validatorID, requestID) + } + if !e.CantGetAcceptedStateSummaryFailed { + return nil } - if e.CantGetAcceptedStateSummaryFailed && e.T != nil { + if e.T != nil { e.T.Fatalf("Unexpectedly called GetAcceptedStateSummaryFailed") } return errGetAcceptedStateSummaryFailed } -func (e *EngineTest) GetAcceptedFrontier(nodeID ids.NodeID, requestID uint32) error { +func (e *EngineTest) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { if e.GetAcceptedFrontierF != nil { - return e.GetAcceptedFrontierF(nodeID, requestID) + return e.GetAcceptedFrontierF(ctx, nodeID, requestID) } if !e.CantGetAcceptedFrontier { return nil @@ -318,9 +358,9 @@ func (e *EngineTest) GetAcceptedFrontier(nodeID ids.NodeID, requestID uint32) er return errGetAcceptedFrontier } -func (e *EngineTest) GetAcceptedFrontierFailed(nodeID ids.NodeID, requestID uint32) error { +func (e *EngineTest) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { if e.GetAcceptedFrontierFailedF != nil { - return e.GetAcceptedFrontierFailedF(nodeID, requestID) + return e.GetAcceptedFrontierFailedF(ctx, nodeID, requestID) } if !e.CantGetAcceptedFrontierFailed { return nil @@ -331,9 +371,9 @@ func (e *EngineTest) GetAcceptedFrontierFailed(nodeID ids.NodeID, requestID uint return errGetAcceptedFrontierFailed } -func (e *EngineTest) AcceptedFrontier(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { if e.AcceptedFrontierF != nil { - return e.AcceptedFrontierF(nodeID, requestID, containerIDs) + return e.AcceptedFrontierF(ctx, nodeID, requestID, containerIDs) } if !e.CantAcceptedFrontier { return nil @@ -344,9 +384,9 @@ 
func (e *EngineTest) AcceptedFrontier(nodeID ids.NodeID, requestID uint32, conta return errAcceptedFrontier } -func (e *EngineTest) GetAccepted(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { if e.GetAcceptedF != nil { - return e.GetAcceptedF(nodeID, requestID, containerIDs) + return e.GetAcceptedF(ctx, nodeID, requestID, containerIDs) } if !e.CantGetAccepted { return nil @@ -357,9 +397,9 @@ func (e *EngineTest) GetAccepted(nodeID ids.NodeID, requestID uint32, containerI return errGetAccepted } -func (e *EngineTest) GetAcceptedFailed(nodeID ids.NodeID, requestID uint32) error { +func (e *EngineTest) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { if e.GetAcceptedFailedF != nil { - return e.GetAcceptedFailedF(nodeID, requestID) + return e.GetAcceptedFailedF(ctx, nodeID, requestID) } if !e.CantGetAcceptedFailed { return nil @@ -370,9 +410,9 @@ func (e *EngineTest) GetAcceptedFailed(nodeID ids.NodeID, requestID uint32) erro return errGetAcceptedFailed } -func (e *EngineTest) Accepted(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { if e.AcceptedF != nil { - return e.AcceptedF(nodeID, requestID, containerIDs) + return e.AcceptedF(ctx, nodeID, requestID, containerIDs) } if !e.CantAccepted { return nil @@ -383,9 +423,9 @@ func (e *EngineTest) Accepted(nodeID ids.NodeID, requestID uint32, containerIDs return errAccepted } -func (e *EngineTest) Get(nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { +func (e *EngineTest) Get(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { if e.GetF != nil { - return e.GetF(nodeID, requestID, containerID) + return e.GetF(ctx, nodeID, requestID, containerID) } if !e.CantGet { 
return nil @@ -396,9 +436,9 @@ func (e *EngineTest) Get(nodeID ids.NodeID, requestID uint32, containerID ids.ID return errGet } -func (e *EngineTest) GetAncestors(nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { +func (e *EngineTest) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { if e.GetAncestorsF != nil { - return e.GetAncestorsF(nodeID, requestID, containerID) + return e.GetAncestorsF(ctx, nodeID, requestID, containerID) } if !e.CantGetAncestors { return nil @@ -409,9 +449,9 @@ func (e *EngineTest) GetAncestors(nodeID ids.NodeID, requestID uint32, container return errGetAncestors } -func (e *EngineTest) GetFailed(nodeID ids.NodeID, requestID uint32) error { +func (e *EngineTest) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { if e.GetFailedF != nil { - return e.GetFailedF(nodeID, requestID) + return e.GetFailedF(ctx, nodeID, requestID) } if !e.CantGetFailed { return nil @@ -422,9 +462,9 @@ func (e *EngineTest) GetFailed(nodeID ids.NodeID, requestID uint32) error { return errGetFailed } -func (e *EngineTest) GetAncestorsFailed(nodeID ids.NodeID, requestID uint32) error { +func (e *EngineTest) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { if e.GetAncestorsFailedF != nil { - return e.GetAncestorsFailedF(nodeID, requestID) + return e.GetAncestorsFailedF(ctx, nodeID, requestID) } if e.CantGetAncestorsFailed { return nil @@ -435,9 +475,9 @@ func (e *EngineTest) GetAncestorsFailed(nodeID ids.NodeID, requestID uint32) err return errGetAncestorsFailed } -func (e *EngineTest) Put(nodeID ids.NodeID, requestID uint32, container []byte) error { +func (e *EngineTest) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) error { if e.PutF != nil { - return e.PutF(nodeID, requestID, container) + return e.PutF(ctx, nodeID, requestID, container) } if !e.CantPut { return nil @@ -448,9 +488,9 @@ func (e *EngineTest) 
Put(nodeID ids.NodeID, requestID uint32, container []byte) return errPut } -func (e *EngineTest) Ancestors(nodeID ids.NodeID, requestID uint32, containers [][]byte) error { +func (e *EngineTest) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) error { if e.AncestorsF != nil { - return e.AncestorsF(nodeID, requestID, containers) + return e.AncestorsF(ctx, nodeID, requestID, containers) } if !e.CantAncestors { return nil @@ -461,9 +501,9 @@ func (e *EngineTest) Ancestors(nodeID ids.NodeID, requestID uint32, containers [ return errAncestors } -func (e *EngineTest) PushQuery(nodeID ids.NodeID, requestID uint32, container []byte) error { +func (e *EngineTest) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) error { if e.PushQueryF != nil { - return e.PushQueryF(nodeID, requestID, container) + return e.PushQueryF(ctx, nodeID, requestID, container) } if !e.CantPushQuery { return nil @@ -474,9 +514,9 @@ func (e *EngineTest) PushQuery(nodeID ids.NodeID, requestID uint32, container [] return errPushQuery } -func (e *EngineTest) PullQuery(nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { +func (e *EngineTest) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { if e.PullQueryF != nil { - return e.PullQueryF(nodeID, requestID, containerID) + return e.PullQueryF(ctx, nodeID, requestID, containerID) } if !e.CantPullQuery { return nil @@ -487,9 +527,9 @@ func (e *EngineTest) PullQuery(nodeID ids.NodeID, requestID uint32, containerID return errPullQuery } -func (e *EngineTest) QueryFailed(nodeID ids.NodeID, requestID uint32) error { +func (e *EngineTest) QueryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { if e.QueryFailedF != nil { - return e.QueryFailedF(nodeID, requestID) + return e.QueryFailedF(ctx, nodeID, requestID) } if !e.CantQueryFailed { return nil @@ -500,9 +540,48 @@ func (e *EngineTest) QueryFailed(nodeID 
ids.NodeID, requestID uint32) error { return errQueryFailed } -func (e *EngineTest) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { +func (e *EngineTest) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + if e.CrossChainAppRequestF != nil { + return e.CrossChainAppRequestF(ctx, chainID, requestID, deadline, request) + } + if !e.CantCrossChainAppRequest { + return nil + } + if e.T != nil { + e.T.Fatal(errCrossChainAppRequest) + } + return errCrossChainAppRequest +} + +func (e *EngineTest) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { + if e.CrossChainAppRequestFailedF != nil { + return e.CrossChainAppRequestFailedF(ctx, chainID, requestID) + } + if !e.CantCrossChainAppRequestFailed { + return nil + } + if e.T != nil { + e.T.Fatal(errCrossChainAppRequestFailed) + } + return errCrossChainAppRequestFailed +} + +func (e *EngineTest) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { + if e.CrossChainAppResponseF != nil { + return e.CrossChainAppResponseF(ctx, chainID, requestID, response) + } + if !e.CantCrossChainAppResponse { + return nil + } + if e.T != nil { + e.T.Fatal(errCrossChainAppResponse) + } + return errCrossChainAppResponse +} + +func (e *EngineTest) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { if e.AppRequestF != nil { - return e.AppRequestF(nodeID, requestID, request) + return e.AppRequestF(ctx, nodeID, requestID, deadline, request) } if !e.CantAppRequest { return nil @@ -513,9 +592,9 @@ func (e *EngineTest) AppRequest(nodeID ids.NodeID, requestID uint32, deadline ti return errAppRequest } -func (e *EngineTest) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { +func (e *EngineTest) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response 
[]byte) error { if e.AppResponseF != nil { - return e.AppResponseF(nodeID, requestID, response) + return e.AppResponseF(ctx, nodeID, requestID, response) } if !e.CantAppResponse { return nil @@ -526,9 +605,9 @@ func (e *EngineTest) AppResponse(nodeID ids.NodeID, requestID uint32, response [ return errAppResponse } -func (e *EngineTest) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { +func (e *EngineTest) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { if e.AppRequestFailedF != nil { - return e.AppRequestFailedF(nodeID, requestID) + return e.AppRequestFailedF(ctx, nodeID, requestID) } if !e.CantAppRequestFailed { return nil @@ -539,9 +618,9 @@ func (e *EngineTest) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error return errAppRequestFailed } -func (e *EngineTest) AppGossip(nodeID ids.NodeID, msg []byte) error { +func (e *EngineTest) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { if e.AppGossipF != nil { - return e.AppGossipF(nodeID, msg) + return e.AppGossipF(ctx, nodeID, msg) } if !e.CantAppGossip { return nil @@ -552,9 +631,9 @@ func (e *EngineTest) AppGossip(nodeID ids.NodeID, msg []byte) error { return errAppGossip } -func (e *EngineTest) Chits(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (e *EngineTest) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID, acceptedIDs []ids.ID) error { if e.ChitsF != nil { - return e.ChitsF(nodeID, requestID, containerIDs) + return e.ChitsF(ctx, nodeID, requestID, preferredIDs, acceptedIDs) } if !e.CantChits { return nil @@ -565,9 +644,9 @@ func (e *EngineTest) Chits(nodeID ids.NodeID, requestID uint32, containerIDs []i return errChits } -func (e *EngineTest) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { +func (e *EngineTest) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { if e.ConnectedF != nil { - return 
e.ConnectedF(nodeID, nodeVersion) + return e.ConnectedF(ctx, nodeID, nodeVersion) } if !e.CantConnected { return nil @@ -578,9 +657,9 @@ func (e *EngineTest) Connected(nodeID ids.NodeID, nodeVersion *version.Applicati return errConnected } -func (e *EngineTest) Disconnected(nodeID ids.NodeID) error { +func (e *EngineTest) Disconnected(ctx context.Context, nodeID ids.NodeID) error { if e.DisconnectedF != nil { - return e.DisconnectedF(nodeID) + return e.DisconnectedF(ctx, nodeID) } if !e.CantDisconnected { return nil @@ -591,11 +670,14 @@ func (e *EngineTest) Disconnected(nodeID ids.NodeID) error { return errDisconnected } -func (e *EngineTest) HealthCheck() (interface{}, error) { +func (e *EngineTest) HealthCheck(ctx context.Context) (interface{}, error) { if e.HealthF != nil { - return e.HealthF() + return e.HealthF(ctx) + } + if !e.CantHealth { + return nil, nil } - if e.CantHealth && e.T != nil { + if e.T != nil { e.T.Fatal(errHealthCheck) } return nil, errHealthCheck diff --git a/avalanchego/snow/engine/common/test_sender.go b/avalanchego/snow/engine/common/test_sender.go index cb03619f..0a32dcc0 100644 --- a/avalanchego/snow/engine/common/test_sender.go +++ b/avalanchego/snow/engine/common/test_sender.go @@ -1,18 +1,20 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common import ( + "context" "errors" "testing" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" ) var ( - _ Sender = &SenderTest{} + _ Sender = (*SenderTest)(nil) errAccept = errors.New("unexpectedly called Accept") errSendAppRequest = errors.New("unexpectedly called SendAppRequest") @@ -33,29 +35,32 @@ type SenderTest struct { CantSendGet, CantSendGetAncestors, CantSendPut, CantSendAncestors, CantSendPullQuery, CantSendPushQuery, CantSendChits, CantSendGossip, - CantSendAppRequest, CantSendAppResponse, CantSendAppGossip, CantSendAppGossipSpecific bool + CantSendAppRequest, CantSendAppResponse, CantSendAppGossip, CantSendAppGossipSpecific, + CantSendCrossChainAppRequest, CantSendCrossChainAppResponse bool AcceptF func(*snow.ConsensusContext, ids.ID, []byte) error - SendGetStateSummaryFrontierF func(ids.NodeIDSet, uint32) - SendStateSummaryFrontierF func(ids.NodeID, uint32, []byte) - SendGetAcceptedStateSummaryF func(ids.NodeIDSet, uint32, []uint64) - SendAcceptedStateSummaryF func(ids.NodeID, uint32, []ids.ID) - SendGetAcceptedFrontierF func(ids.NodeIDSet, uint32) - SendAcceptedFrontierF func(ids.NodeID, uint32, []ids.ID) - SendGetAcceptedF func(ids.NodeIDSet, uint32, []ids.ID) - SendAcceptedF func(ids.NodeID, uint32, []ids.ID) - SendGetF func(ids.NodeID, uint32, ids.ID) - SendGetAncestorsF func(ids.NodeID, uint32, ids.ID) - SendPutF func(ids.NodeID, uint32, []byte) - SendAncestorsF func(ids.NodeID, uint32, [][]byte) - SendPushQueryF func(ids.NodeIDSet, uint32, []byte) - SendPullQueryF func(ids.NodeIDSet, uint32, ids.ID) - SendChitsF func(ids.NodeID, uint32, []ids.ID) - SendGossipF func([]byte) - SendAppRequestF func(ids.NodeIDSet, uint32, []byte) error - SendAppResponseF func(ids.NodeID, uint32, []byte) error - SendAppGossipF func([]byte) error - SendAppGossipSpecificF func(ids.NodeIDSet, []byte) error + SendGetStateSummaryFrontierF func(context.Context, set.Set[ids.NodeID], uint32) 
+ SendStateSummaryFrontierF func(context.Context, ids.NodeID, uint32, []byte) + SendGetAcceptedStateSummaryF func(context.Context, set.Set[ids.NodeID], uint32, []uint64) + SendAcceptedStateSummaryF func(context.Context, ids.NodeID, uint32, []ids.ID) + SendGetAcceptedFrontierF func(context.Context, set.Set[ids.NodeID], uint32) + SendAcceptedFrontierF func(context.Context, ids.NodeID, uint32, []ids.ID) + SendGetAcceptedF func(context.Context, set.Set[ids.NodeID], uint32, []ids.ID) + SendAcceptedF func(context.Context, ids.NodeID, uint32, []ids.ID) + SendGetF func(context.Context, ids.NodeID, uint32, ids.ID) + SendGetAncestorsF func(context.Context, ids.NodeID, uint32, ids.ID) + SendPutF func(context.Context, ids.NodeID, uint32, []byte) + SendAncestorsF func(context.Context, ids.NodeID, uint32, [][]byte) + SendPushQueryF func(context.Context, set.Set[ids.NodeID], uint32, []byte) + SendPullQueryF func(context.Context, set.Set[ids.NodeID], uint32, ids.ID) + SendChitsF func(context.Context, ids.NodeID, uint32, []ids.ID, []ids.ID) + SendGossipF func(context.Context, []byte) + SendAppRequestF func(context.Context, set.Set[ids.NodeID], uint32, []byte) error + SendAppResponseF func(context.Context, ids.NodeID, uint32, []byte) error + SendAppGossipF func(context.Context, []byte) error + SendAppGossipSpecificF func(context.Context, set.Set[ids.NodeID], []byte) error + SendCrossChainAppRequestF func(context.Context, ids.ID, uint32, []byte) + SendCrossChainAppResponseF func(context.Context, ids.ID, uint32, []byte) } // Default set the default callable value to [cant] @@ -81,6 +86,8 @@ func (s *SenderTest) Default(cant bool) { s.CantSendAppResponse = cant s.CantSendAppGossip = cant s.CantSendAppGossipSpecific = cant + s.CantSendCrossChainAppRequest = cant + s.CantSendCrossChainAppResponse = cant } // SendGetStateSummaryFrontier calls SendGetStateSummaryFrontierF if it was initialized. 
If it @@ -102,9 +109,9 @@ func (s *SenderTest) Accept(ctx *snow.ConsensusContext, containerID ids.ID, cont // SendGetStateSummaryFrontier calls SendGetStateSummaryFrontierF if it was initialized. If it // wasn't initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendGetStateSummaryFrontier(validatorIDs ids.NodeIDSet, requestID uint32) { +func (s *SenderTest) SendGetStateSummaryFrontier(ctx context.Context, validatorIDs set.Set[ids.NodeID], requestID uint32) { if s.SendGetStateSummaryFrontierF != nil { - s.SendGetStateSummaryFrontierF(validatorIDs, requestID) + s.SendGetStateSummaryFrontierF(ctx, validatorIDs, requestID) } else if s.CantSendGetStateSummaryFrontier && s.T != nil { s.T.Fatalf("Unexpectedly called SendGetStateSummaryFrontier") } @@ -113,9 +120,9 @@ func (s *SenderTest) SendGetStateSummaryFrontier(validatorIDs ids.NodeIDSet, req // SendAcceptedFrontier calls SendAcceptedFrontierF if it was initialized. If it // wasn't initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendStateSummaryFrontier(validatorID ids.NodeID, requestID uint32, summary []byte) { +func (s *SenderTest) SendStateSummaryFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32, summary []byte) { if s.SendStateSummaryFrontierF != nil { - s.SendStateSummaryFrontierF(validatorID, requestID, summary) + s.SendStateSummaryFrontierF(ctx, validatorID, requestID, summary) } else if s.CantSendStateSummaryFrontier && s.T != nil { s.T.Fatalf("Unexpectedly called SendStateSummaryFrontier") } @@ -124,9 +131,9 @@ func (s *SenderTest) SendStateSummaryFrontier(validatorID ids.NodeID, requestID // SendGetAcceptedStateSummary calls SendGetAcceptedStateSummaryF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. 
-func (s *SenderTest) SendGetAcceptedStateSummary(nodeIDs ids.NodeIDSet, requestID uint32, heights []uint64) { +func (s *SenderTest) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, heights []uint64) { if s.SendGetAcceptedStateSummaryF != nil { - s.SendGetAcceptedStateSummaryF(nodeIDs, requestID, heights) + s.SendGetAcceptedStateSummaryF(ctx, nodeIDs, requestID, heights) } else if s.CantSendGetAcceptedStateSummary && s.T != nil { s.T.Fatalf("Unexpectedly called SendGetAcceptedStateSummaryF") } @@ -135,9 +142,9 @@ func (s *SenderTest) SendGetAcceptedStateSummary(nodeIDs ids.NodeIDSet, requestI // SendAcceptedStateSummary calls SendAcceptedStateSummaryF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendAcceptedStateSummary(validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { +func (s *SenderTest) SendAcceptedStateSummary(ctx context.Context, validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { if s.SendAcceptedStateSummaryF != nil { - s.SendAcceptedStateSummaryF(validatorID, requestID, summaryIDs) + s.SendAcceptedStateSummaryF(ctx, validatorID, requestID, summaryIDs) } else if s.CantSendAcceptedStateSummary && s.T != nil { s.T.Fatalf("Unexpectedly called SendAcceptedStateSummary") } @@ -146,9 +153,9 @@ func (s *SenderTest) SendAcceptedStateSummary(validatorID ids.NodeID, requestID // SendGetAcceptedFrontier calls SendGetAcceptedFrontierF if it was initialized. // If it wasn't initialized and this function shouldn't be called and testing // was initialized, then testing will fail. 
-func (s *SenderTest) SendGetAcceptedFrontier(validatorIDs ids.NodeIDSet, requestID uint32) { +func (s *SenderTest) SendGetAcceptedFrontier(ctx context.Context, validatorIDs set.Set[ids.NodeID], requestID uint32) { if s.SendGetAcceptedFrontierF != nil { - s.SendGetAcceptedFrontierF(validatorIDs, requestID) + s.SendGetAcceptedFrontierF(ctx, validatorIDs, requestID) } else if s.CantSendGetAcceptedFrontier && s.T != nil { s.T.Fatalf("Unexpectedly called SendGetAcceptedFrontier") } @@ -157,9 +164,9 @@ func (s *SenderTest) SendGetAcceptedFrontier(validatorIDs ids.NodeIDSet, request // SendAcceptedFrontier calls SendAcceptedFrontierF if it was initialized. If it // wasn't initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendAcceptedFrontier(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) { +func (s *SenderTest) SendAcceptedFrontier(ctx context.Context, validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) { if s.SendAcceptedFrontierF != nil { - s.SendAcceptedFrontierF(validatorID, requestID, containerIDs) + s.SendAcceptedFrontierF(ctx, validatorID, requestID, containerIDs) } else if s.CantSendAcceptedFrontier && s.T != nil { s.T.Fatalf("Unexpectedly called SendAcceptedFrontier") } @@ -168,9 +175,9 @@ func (s *SenderTest) SendAcceptedFrontier(validatorID ids.NodeID, requestID uint // SendGetAccepted calls SendGetAcceptedF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. 
-func (s *SenderTest) SendGetAccepted(nodeIDs ids.NodeIDSet, requestID uint32, containerIDs []ids.ID) { +func (s *SenderTest) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerIDs []ids.ID) { if s.SendGetAcceptedF != nil { - s.SendGetAcceptedF(nodeIDs, requestID, containerIDs) + s.SendGetAcceptedF(ctx, nodeIDs, requestID, containerIDs) } else if s.CantSendGetAccepted && s.T != nil { s.T.Fatalf("Unexpectedly called SendGetAccepted") } @@ -179,9 +186,9 @@ func (s *SenderTest) SendGetAccepted(nodeIDs ids.NodeIDSet, requestID uint32, co // SendAccepted calls SendAcceptedF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendAccepted(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) { +func (s *SenderTest) SendAccepted(ctx context.Context, validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) { if s.SendAcceptedF != nil { - s.SendAcceptedF(validatorID, requestID, containerIDs) + s.SendAcceptedF(ctx, validatorID, requestID, containerIDs) } else if s.CantSendAccepted && s.T != nil { s.T.Fatalf("Unexpectedly called SendAccepted") } @@ -190,9 +197,9 @@ func (s *SenderTest) SendAccepted(validatorID ids.NodeID, requestID uint32, cont // SendGet calls SendGetF if it was initialized. If it wasn't initialized and // this function shouldn't be called and testing was initialized, then testing // will fail. 
-func (s *SenderTest) SendGet(vdr ids.NodeID, requestID uint32, vtxID ids.ID) { +func (s *SenderTest) SendGet(ctx context.Context, vdr ids.NodeID, requestID uint32, vtxID ids.ID) { if s.SendGetF != nil { - s.SendGetF(vdr, requestID, vtxID) + s.SendGetF(ctx, vdr, requestID, vtxID) } else if s.CantSendGet && s.T != nil { s.T.Fatalf("Unexpectedly called SendGet") } @@ -201,9 +208,9 @@ func (s *SenderTest) SendGet(vdr ids.NodeID, requestID uint32, vtxID ids.ID) { // SendGetAncestors calls SendGetAncestorsF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendGetAncestors(validatorID ids.NodeID, requestID uint32, vtxID ids.ID) { +func (s *SenderTest) SendGetAncestors(ctx context.Context, validatorID ids.NodeID, requestID uint32, vtxID ids.ID) { if s.SendGetAncestorsF != nil { - s.SendGetAncestorsF(validatorID, requestID, vtxID) + s.SendGetAncestorsF(ctx, validatorID, requestID, vtxID) } else if s.CantSendGetAncestors && s.T != nil { s.T.Fatalf("Unexpectedly called SendCantSendGetAncestors") } @@ -212,9 +219,9 @@ func (s *SenderTest) SendGetAncestors(validatorID ids.NodeID, requestID uint32, // SendPut calls SendPutF if it was initialized. If it wasn't initialized and // this function shouldn't be called and testing was initialized, then testing // will fail. -func (s *SenderTest) SendPut(vdr ids.NodeID, requestID uint32, vtx []byte) { +func (s *SenderTest) SendPut(ctx context.Context, vdr ids.NodeID, requestID uint32, vtx []byte) { if s.SendPutF != nil { - s.SendPutF(vdr, requestID, vtx) + s.SendPutF(ctx, vdr, requestID, vtx) } else if s.CantSendPut && s.T != nil { s.T.Fatalf("Unexpectedly called SendPut") } @@ -223,9 +230,9 @@ func (s *SenderTest) SendPut(vdr ids.NodeID, requestID uint32, vtx []byte) { // SendAncestors calls SendAncestorsF if it was initialized. 
If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendAncestors(vdr ids.NodeID, requestID uint32, vtxs [][]byte) { +func (s *SenderTest) SendAncestors(ctx context.Context, vdr ids.NodeID, requestID uint32, vtxs [][]byte) { if s.SendAncestorsF != nil { - s.SendAncestorsF(vdr, requestID, vtxs) + s.SendAncestorsF(ctx, vdr, requestID, vtxs) } else if s.CantSendAncestors && s.T != nil { s.T.Fatalf("Unexpectedly called SendAncestors") } @@ -234,9 +241,9 @@ func (s *SenderTest) SendAncestors(vdr ids.NodeID, requestID uint32, vtxs [][]by // SendPushQuery calls SendPushQueryF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendPushQuery(vdrs ids.NodeIDSet, requestID uint32, vtx []byte) { +func (s *SenderTest) SendPushQuery(ctx context.Context, vdrs set.Set[ids.NodeID], requestID uint32, vtx []byte) { if s.SendPushQueryF != nil { - s.SendPushQueryF(vdrs, requestID, vtx) + s.SendPushQueryF(ctx, vdrs, requestID, vtx) } else if s.CantSendPushQuery && s.T != nil { s.T.Fatalf("Unexpectedly called SendPushQuery") } @@ -245,9 +252,9 @@ func (s *SenderTest) SendPushQuery(vdrs ids.NodeIDSet, requestID uint32, vtx []b // SendPullQuery calls SendPullQueryF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. 
-func (s *SenderTest) SendPullQuery(vdrs ids.NodeIDSet, requestID uint32, vtxID ids.ID) { +func (s *SenderTest) SendPullQuery(ctx context.Context, vdrs set.Set[ids.NodeID], requestID uint32, vtxID ids.ID) { if s.SendPullQueryF != nil { - s.SendPullQueryF(vdrs, requestID, vtxID) + s.SendPullQueryF(ctx, vdrs, requestID, vtxID) } else if s.CantSendPullQuery && s.T != nil { s.T.Fatalf("Unexpectedly called SendPullQuery") } @@ -256,9 +263,9 @@ func (s *SenderTest) SendPullQuery(vdrs ids.NodeIDSet, requestID uint32, vtxID i // SendChits calls SendChitsF if it was initialized. If it wasn't initialized // and this function shouldn't be called and testing was initialized, then // testing will fail. -func (s *SenderTest) SendChits(vdr ids.NodeID, requestID uint32, votes []ids.ID) { +func (s *SenderTest) SendChits(ctx context.Context, vdr ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) { if s.SendChitsF != nil { - s.SendChitsF(vdr, requestID, votes) + s.SendChitsF(ctx, vdr, requestID, votes, accepted) } else if s.CantSendChits && s.T != nil { s.T.Fatalf("Unexpectedly called SendChits") } @@ -267,21 +274,39 @@ func (s *SenderTest) SendChits(vdr ids.NodeID, requestID uint32, votes []ids.ID) // SendGossip calls SendGossipF if it was initialized. If it wasn't initialized // and this function shouldn't be called and testing was initialized, then // testing will fail. 
-func (s *SenderTest) SendGossip(container []byte) { +func (s *SenderTest) SendGossip(ctx context.Context, container []byte) { if s.SendGossipF != nil { - s.SendGossipF(container) + s.SendGossipF(ctx, container) } else if s.CantSendGossip && s.T != nil { s.T.Fatalf("Unexpectedly called SendGossip") } } +func (s *SenderTest) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { + if s.SendCrossChainAppRequestF != nil { + s.SendCrossChainAppRequestF(ctx, chainID, requestID, appRequestBytes) + } else if s.CantSendCrossChainAppRequest && s.T != nil { + s.T.Fatal("Unexpectedly called SendCrossChainAppRequest") + } + return nil +} + +func (s *SenderTest) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error { + if s.SendCrossChainAppResponseF != nil { + s.SendCrossChainAppResponseF(ctx, chainID, requestID, appResponseBytes) + } else if s.CantSendCrossChainAppResponse && s.T != nil { + s.T.Fatal("Unexpectedly called SendCrossChainAppResponse") + } + return nil +} + // SendAppRequest calls SendAppRequestF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, appRequestBytes []byte) error { +func (s *SenderTest) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error { switch { case s.SendAppRequestF != nil: - return s.SendAppRequestF(nodeIDs, requestID, appRequestBytes) + return s.SendAppRequestF(ctx, nodeIDs, requestID, appRequestBytes) case s.CantSendAppRequest && s.T != nil: s.T.Fatal(errSendAppRequest) } @@ -291,10 +316,10 @@ func (s *SenderTest) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, app // SendAppResponse calls SendAppResponseF if it was initialized. 
If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendAppResponse(nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { +func (s *SenderTest) SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { switch { case s.SendAppResponseF != nil: - return s.SendAppResponseF(nodeID, requestID, appResponseBytes) + return s.SendAppResponseF(ctx, nodeID, requestID, appResponseBytes) case s.CantSendAppResponse && s.T != nil: s.T.Fatal(errSendAppResponse) } @@ -304,10 +329,10 @@ func (s *SenderTest) SendAppResponse(nodeID ids.NodeID, requestID uint32, appRes // SendAppGossip calls SendAppGossipF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. -func (s *SenderTest) SendAppGossip(appGossipBytes []byte) error { +func (s *SenderTest) SendAppGossip(ctx context.Context, appGossipBytes []byte) error { switch { case s.SendAppGossipF != nil: - return s.SendAppGossipF(appGossipBytes) + return s.SendAppGossipF(ctx, appGossipBytes) case s.CantSendAppGossip && s.T != nil: s.T.Fatal(errSendAppGossip) } @@ -317,10 +342,10 @@ func (s *SenderTest) SendAppGossip(appGossipBytes []byte) error { // SendAppGossipSpecific calls SendAppGossipSpecificF if it was initialized. If it wasn't // initialized and this function shouldn't be called and testing was // initialized, then testing will fail. 
-func (s *SenderTest) SendAppGossipSpecific(nodeIDs ids.NodeIDSet, appGossipBytes []byte) error { +func (s *SenderTest) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { switch { case s.SendAppGossipSpecificF != nil: - return s.SendAppGossipSpecificF(nodeIDs, appGossipBytes) + return s.SendAppGossipSpecificF(ctx, nodeIDs, appGossipBytes) case s.CantSendAppGossipSpecific && s.T != nil: s.T.Fatal(errSendAppGossipSpecific) } diff --git a/avalanchego/snow/engine/common/test_timer.go b/avalanchego/snow/engine/common/test_timer.go index 9d307f5f..a563e65c 100644 --- a/avalanchego/snow/engine/common/test_timer.go +++ b/avalanchego/snow/engine/common/test_timer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common @@ -8,7 +8,7 @@ import ( "time" ) -var _ Timer = &TimerTest{} +var _ Timer = (*TimerTest)(nil) // TimerTest is a test timer type TimerTest struct { diff --git a/avalanchego/snow/engine/common/test_vm.go b/avalanchego/snow/engine/common/test_vm.go index 31469d4e..bbf10d4d 100644 --- a/avalanchego/snow/engine/common/test_vm.go +++ b/avalanchego/snow/engine/common/test_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common import ( + "context" "errors" "testing" "time" @@ -16,21 +17,24 @@ import ( ) var ( - errInitialize = errors.New("unexpectedly called Initialize") - errSetState = errors.New("unexpectedly called SetState") - errShutdown = errors.New("unexpectedly called Shutdown") - errCreateHandlers = errors.New("unexpectedly called CreateHandlers") - errCreateStaticHandlers = errors.New("unexpectedly called CreateStaticHandlers") - errHealthCheck = errors.New("unexpectedly called HealthCheck") - errConnected = errors.New("unexpectedly called Connected") - errDisconnected = errors.New("unexpectedly called Disconnected") - errVersion = errors.New("unexpectedly called Version") - errAppRequest = errors.New("unexpectedly called AppRequest") - errAppResponse = errors.New("unexpectedly called AppResponse") - errAppRequestFailed = errors.New("unexpectedly called AppRequestFailed") - errAppGossip = errors.New("unexpectedly called AppGossip") - - _ VM = &TestVM{} + errInitialize = errors.New("unexpectedly called Initialize") + errSetState = errors.New("unexpectedly called SetState") + errShutdown = errors.New("unexpectedly called Shutdown") + errCreateHandlers = errors.New("unexpectedly called CreateHandlers") + errCreateStaticHandlers = errors.New("unexpectedly called CreateStaticHandlers") + errHealthCheck = errors.New("unexpectedly called HealthCheck") + errConnected = errors.New("unexpectedly called Connected") + errDisconnected = errors.New("unexpectedly called Disconnected") + errVersion = errors.New("unexpectedly called Version") + errAppRequest = errors.New("unexpectedly called AppRequest") + errAppResponse = errors.New("unexpectedly called AppResponse") + errAppRequestFailed = errors.New("unexpectedly called AppRequestFailed") + errAppGossip = errors.New("unexpectedly called AppGossip") + errCrossChainAppRequest = errors.New("unexpectedly called CrossChainAppRequest") + errCrossChainAppResponse = errors.New("unexpectedly called CrossChainAppResponse") + 
errCrossChainAppRequestFailed = errors.New("unexpectedly called CrossChainAppRequestFailed") + + _ VM = (*TestVM)(nil) ) // TestVM is a test vm @@ -40,21 +44,25 @@ type TestVM struct { CantInitialize, CantSetState, CantShutdown, CantCreateHandlers, CantCreateStaticHandlers, CantHealthCheck, CantConnected, CantDisconnected, CantVersion, - CantAppRequest, CantAppResponse, CantAppGossip, CantAppRequestFailed bool - - InitializeF func(*snow.Context, manager.Manager, []byte, []byte, []byte, chan<- Message, []*Fx, AppSender) error - SetStateF func(snow.State) error - ShutdownF func() error - CreateHandlersF func() (map[string]*HTTPHandler, error) - CreateStaticHandlersF func() (map[string]*HTTPHandler, error) - ConnectedF func(nodeID ids.NodeID, nodeVersion *version.Application) error - DisconnectedF func(nodeID ids.NodeID) error - HealthCheckF func() (interface{}, error) - AppRequestF func(nodeID ids.NodeID, requestID uint32, deadline time.Time, msg []byte) error - AppResponseF func(nodeID ids.NodeID, requestID uint32, msg []byte) error - AppGossipF func(nodeID ids.NodeID, msg []byte) error - AppRequestFailedF func(nodeID ids.NodeID, requestID uint32) error - VersionF func() (string, error) + CantAppRequest, CantAppResponse, CantAppGossip, CantAppRequestFailed, + CantCrossChainAppRequest, CantCrossChainAppResponse, CantCrossChainAppRequestFailed bool + + InitializeF func(ctx context.Context, chainCtx *snow.Context, db manager.Manager, genesisBytes []byte, upgradeBytes []byte, configBytes []byte, msgChan chan<- Message, fxs []*Fx, appSender AppSender) error + SetStateF func(ctx context.Context, state snow.State) error + ShutdownF func(context.Context) error + CreateHandlersF func(context.Context) (map[string]*HTTPHandler, error) + CreateStaticHandlersF func(context.Context) (map[string]*HTTPHandler, error) + ConnectedF func(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error + DisconnectedF func(ctx context.Context, nodeID ids.NodeID) error + 
HealthCheckF func(context.Context) (interface{}, error) + AppRequestF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, msg []byte) error + AppResponseF func(ctx context.Context, nodeID ids.NodeID, requestID uint32, msg []byte) error + AppGossipF func(ctx context.Context, nodeID ids.NodeID, msg []byte) error + AppRequestFailedF func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error + VersionF func(context.Context) (string, error) + CrossChainAppRequestF func(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, msg []byte) error + CrossChainAppResponseF func(ctx context.Context, chainID ids.ID, requestID uint32, msg []byte) error + CrossChainAppRequestFailedF func(ctx context.Context, chainID ids.ID, requestID uint32) error } func (vm *TestVM) Default(cant bool) { @@ -71,11 +79,34 @@ func (vm *TestVM) Default(cant bool) { vm.CantVersion = cant vm.CantConnected = cant vm.CantDisconnected = cant + vm.CantCrossChainAppRequest = cant + vm.CantCrossChainAppRequestFailed = cant + vm.CantCrossChainAppResponse = cant } -func (vm *TestVM) Initialize(ctx *snow.Context, db manager.Manager, genesisBytes, upgradeBytes, configBytes []byte, msgChan chan<- Message, fxs []*Fx, appSender AppSender) error { +func (vm *TestVM) Initialize( + ctx context.Context, + chainCtx *snow.Context, + db manager.Manager, + genesisBytes, + upgradeBytes, + configBytes []byte, + msgChan chan<- Message, + fxs []*Fx, + appSender AppSender, +) error { if vm.InitializeF != nil { - return vm.InitializeF(ctx, db, genesisBytes, upgradeBytes, configBytes, msgChan, fxs, appSender) + return vm.InitializeF( + ctx, + chainCtx, + db, + genesisBytes, + upgradeBytes, + configBytes, + msgChan, + fxs, + appSender, + ) } if vm.CantInitialize && vm.T != nil { vm.T.Fatal(errInitialize) @@ -83,9 +114,9 @@ func (vm *TestVM) Initialize(ctx *snow.Context, db manager.Manager, genesisBytes return errInitialize } -func (vm *TestVM) SetState(state snow.State) 
error { +func (vm *TestVM) SetState(ctx context.Context, state snow.State) error { if vm.SetStateF != nil { - return vm.SetStateF(state) + return vm.SetStateF(ctx, state) } if vm.CantSetState { if vm.T != nil { @@ -96,9 +127,9 @@ func (vm *TestVM) SetState(state snow.State) error { return nil } -func (vm *TestVM) Shutdown() error { +func (vm *TestVM) Shutdown(ctx context.Context) error { if vm.ShutdownF != nil { - return vm.ShutdownF() + return vm.ShutdownF(ctx) } if vm.CantShutdown { if vm.T != nil { @@ -109,9 +140,9 @@ func (vm *TestVM) Shutdown() error { return nil } -func (vm *TestVM) CreateHandlers() (map[string]*HTTPHandler, error) { +func (vm *TestVM) CreateHandlers(ctx context.Context) (map[string]*HTTPHandler, error) { if vm.CreateHandlersF != nil { - return vm.CreateHandlersF() + return vm.CreateHandlersF(ctx) } if vm.CantCreateHandlers && vm.T != nil { vm.T.Fatal(errCreateHandlers) @@ -119,9 +150,9 @@ func (vm *TestVM) CreateHandlers() (map[string]*HTTPHandler, error) { return nil, nil } -func (vm *TestVM) CreateStaticHandlers() (map[string]*HTTPHandler, error) { +func (vm *TestVM) CreateStaticHandlers(ctx context.Context) (map[string]*HTTPHandler, error) { if vm.CreateStaticHandlersF != nil { - return vm.CreateStaticHandlersF() + return vm.CreateStaticHandlersF(ctx) } if vm.CantCreateStaticHandlers && vm.T != nil { vm.T.Fatal(errCreateStaticHandlers) @@ -129,9 +160,9 @@ func (vm *TestVM) CreateStaticHandlers() (map[string]*HTTPHandler, error) { return nil, nil } -func (vm *TestVM) HealthCheck() (interface{}, error) { +func (vm *TestVM) HealthCheck(ctx context.Context) (interface{}, error) { if vm.HealthCheckF != nil { - return vm.HealthCheckF() + return vm.HealthCheckF(ctx) } if vm.CantHealthCheck && vm.T != nil { vm.T.Fatal(errHealthCheck) @@ -139,11 +170,11 @@ func (vm *TestVM) HealthCheck() (interface{}, error) { return nil, errHealthCheck } -func (vm *TestVM) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { - if vm.AppRequestFailedF != 
nil { - return vm.AppRequestFailedF(nodeID, requestID) +func (vm *TestVM) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { + if vm.AppRequestF != nil { + return vm.AppRequestF(ctx, nodeID, requestID, deadline, request) } - if !vm.CantAppRequestFailed { + if !vm.CantAppRequest { return nil } if vm.T != nil { @@ -152,22 +183,22 @@ func (vm *TestVM) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { return errAppRequest } -func (vm *TestVM) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - if vm.AppRequestF != nil { - return vm.AppRequestF(nodeID, requestID, deadline, request) +func (vm *TestVM) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + if vm.AppRequestFailedF != nil { + return vm.AppRequestFailedF(ctx, nodeID, requestID) } - if !vm.CantAppRequest { + if !vm.CantAppRequestFailed { return nil } if vm.T != nil { - vm.T.Fatal(errAppRequest) + vm.T.Fatal(errAppRequestFailed) } - return errAppRequest + return errAppRequestFailed } -func (vm *TestVM) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { +func (vm *TestVM) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { if vm.AppResponseF != nil { - return vm.AppResponseF(nodeID, requestID, response) + return vm.AppResponseF(ctx, nodeID, requestID, response) } if !vm.CantAppResponse { return nil @@ -178,9 +209,9 @@ func (vm *TestVM) AppResponse(nodeID ids.NodeID, requestID uint32, response []by return errAppResponse } -func (vm *TestVM) AppGossip(nodeID ids.NodeID, msg []byte) error { +func (vm *TestVM) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { if vm.AppGossipF != nil { - return vm.AppGossipF(nodeID, msg) + return vm.AppGossipF(ctx, nodeID, msg) } if !vm.CantAppGossip { return nil @@ -191,9 +222,48 @@ func (vm *TestVM) AppGossip(nodeID ids.NodeID, msg []byte) error { 
return errAppGossip } -func (vm *TestVM) Connected(id ids.NodeID, nodeVersion *version.Application) error { +func (vm *TestVM) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + if vm.CrossChainAppRequestF != nil { + return vm.CrossChainAppRequestF(ctx, chainID, requestID, deadline, request) + } + if !vm.CantCrossChainAppRequest { + return nil + } + if vm.T != nil { + vm.T.Fatal(errCrossChainAppRequest) + } + return errCrossChainAppRequest +} + +func (vm *TestVM) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { + if vm.CrossChainAppRequestFailedF != nil { + return vm.CrossChainAppRequestFailedF(ctx, chainID, requestID) + } + if !vm.CantCrossChainAppRequestFailed { + return nil + } + if vm.T != nil { + vm.T.Fatal(errCrossChainAppRequestFailed) + } + return errCrossChainAppRequestFailed +} + +func (vm *TestVM) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { + if vm.CrossChainAppResponseF != nil { + return vm.CrossChainAppResponseF(ctx, chainID, requestID, response) + } + if !vm.CantCrossChainAppResponse { + return nil + } + if vm.T != nil { + vm.T.Fatal(errCrossChainAppResponse) + } + return errCrossChainAppResponse +} + +func (vm *TestVM) Connected(ctx context.Context, id ids.NodeID, nodeVersion *version.Application) error { if vm.ConnectedF != nil { - return vm.ConnectedF(id, nodeVersion) + return vm.ConnectedF(ctx, id, nodeVersion) } if vm.CantConnected && vm.T != nil { vm.T.Fatal(errConnected) @@ -201,9 +271,9 @@ func (vm *TestVM) Connected(id ids.NodeID, nodeVersion *version.Application) err return nil } -func (vm *TestVM) Disconnected(id ids.NodeID) error { +func (vm *TestVM) Disconnected(ctx context.Context, id ids.NodeID) error { if vm.DisconnectedF != nil { - return vm.DisconnectedF(id) + return vm.DisconnectedF(ctx, id) } if vm.CantDisconnected && vm.T != nil { vm.T.Fatal(errDisconnected) @@ 
-211,9 +281,9 @@ func (vm *TestVM) Disconnected(id ids.NodeID) error { return nil } -func (vm *TestVM) Version() (string, error) { +func (vm *TestVM) Version(ctx context.Context) (string, error) { if vm.VersionF != nil { - return vm.VersionF() + return vm.VersionF(ctx) } if vm.CantVersion && vm.T != nil { vm.T.Fatal(errVersion) } diff --git a/avalanchego/snow/engine/common/timer.go b/avalanchego/snow/engine/common/timer.go index 980fa551..56d98a05 100644 --- a/avalanchego/snow/engine/common/timer.go +++ b/avalanchego/snow/engine/common/timer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common diff --git a/avalanchego/snow/engine/common/traced_bootstrapable_engine.go b/avalanchego/snow/engine/common/traced_bootstrapable_engine.go new file mode 100644 index 00000000..c2379799 --- /dev/null +++ b/avalanchego/snow/engine/common/traced_bootstrapable_engine.go @@ -0,0 +1,44 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms.
+ +package common + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" +) + +var _ BootstrapableEngine = (*tracedBootstrapableEngine)(nil) + +type tracedBootstrapableEngine struct { + Engine + bootstrapableEngine BootstrapableEngine + tracer trace.Tracer +} + +func TraceBootstrapableEngine(bootstrapableEngine BootstrapableEngine, tracer trace.Tracer) BootstrapableEngine { + return &tracedBootstrapableEngine{ + Engine: TraceEngine(bootstrapableEngine, tracer), + bootstrapableEngine: bootstrapableEngine, + tracer: tracer, + } +} + +func (e *tracedBootstrapableEngine) ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error { + ctx, span := e.tracer.Start(ctx, "tracedBootstrapableEngine.ForceAccepted", oteltrace.WithAttributes( + attribute.Int("numAcceptedContainerIDs", len(acceptedContainerIDs)), + )) + defer span.End() + + return e.bootstrapableEngine.ForceAccepted(ctx, acceptedContainerIDs) +} + +func (e *tracedBootstrapableEngine) Clear() error { + return e.bootstrapableEngine.Clear() +} diff --git a/avalanchego/snow/engine/common/traced_engine.go b/avalanchego/snow/engine/common/traced_engine.go new file mode 100644 index 00000000..f2e723a4 --- /dev/null +++ b/avalanchego/snow/engine/common/traced_engine.go @@ -0,0 +1,420 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms.
+ +package common + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/version" +) + +var _ Engine = (*tracedEngine)(nil) + +type tracedEngine struct { + engine Engine + tracer trace.Tracer +} + +func TraceEngine(engine Engine, tracer trace.Tracer) Engine { + return &tracedEngine{ + engine: engine, + tracer: tracer, + } +} + +func (e *tracedEngine) GetStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetStateSummaryFrontier", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.GetStateSummaryFrontier(ctx, nodeID, requestID) +} + +func (e *tracedEngine) StateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.StateSummaryFrontier", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("summaryLen", len(summary)), + )) + defer span.End() + + return e.engine.StateSummaryFrontier(ctx, nodeID, requestID, summary) +} + +func (e *tracedEngine) GetStateSummaryFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetStateSummaryFrontierFailed", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.GetStateSummaryFrontierFailed(ctx, nodeID, requestID) +} + +func (e *tracedEngine) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights []uint64) error { + ctx, span := 
e.tracer.Start(ctx, "tracedEngine.GetAcceptedStateSummary", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numHeights", len(heights)), + )) + defer span.End() + + return e.engine.GetAcceptedStateSummary(ctx, nodeID, requestID, heights) +} + +func (e *tracedEngine) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.AcceptedStateSummary", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numSummaryIDs", len(summaryIDs)), + )) + defer span.End() + + return e.engine.AcceptedStateSummary(ctx, nodeID, requestID, summaryIDs) +} + +func (e *tracedEngine) GetAcceptedStateSummaryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAcceptedStateSummaryFailed", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.GetAcceptedStateSummaryFailed(ctx, nodeID, requestID) +} + +func (e *tracedEngine) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAcceptedFrontier", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.GetAcceptedFrontier(ctx, nodeID, requestID) +} + +func (e *tracedEngine) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.AcceptedFrontier", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numContainerIDs", len(containerIDs)), + )) + defer 
span.End() + + return e.engine.AcceptedFrontier(ctx, nodeID, requestID, containerIDs) +} + +func (e *tracedEngine) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAcceptedFrontierFailed", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.GetAcceptedFrontierFailed(ctx, nodeID, requestID) +} + +func (e *tracedEngine) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAccepted", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numContainerIDs", len(containerIDs)), + )) + defer span.End() + + return e.engine.GetAccepted(ctx, nodeID, requestID, containerIDs) +} + +func (e *tracedEngine) Accepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Accepted", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numContainerIDs", len(containerIDs)), + )) + defer span.End() + + return e.engine.Accepted(ctx, nodeID, requestID, containerIDs) +} + +func (e *tracedEngine) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAcceptedFailed", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.GetAcceptedFailed(ctx, nodeID, requestID) +} + +func (e *tracedEngine) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAncestors", 
oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Stringer("containerID", containerID), + )) + defer span.End() + + return e.engine.GetAncestors(ctx, nodeID, requestID, containerID) +} + +func (e *tracedEngine) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Ancestors", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numContainers", len(containers)), + )) + defer span.End() + + return e.engine.Ancestors(ctx, nodeID, requestID, containers) +} + +func (e *tracedEngine) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetAncestorsFailed", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.GetAncestorsFailed(ctx, nodeID, requestID) +} + +func (e *tracedEngine) Get(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Get", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Stringer("containerID", containerID), + )) + defer span.End() + + return e.engine.Get(ctx, nodeID, requestID, containerID) +} + +func (e *tracedEngine) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Put", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("containerLen", len(container)), + )) + defer span.End() + + return e.engine.Put(ctx, nodeID, requestID, container) +} + +func (e *tracedEngine) GetFailed(ctx context.Context, 
nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetFailed", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.GetFailed(ctx, nodeID, requestID) +} + +func (e *tracedEngine) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.PullQuery", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Stringer("containerID", containerID), + )) + defer span.End() + + return e.engine.PullQuery(ctx, nodeID, requestID, containerID) +} + +func (e *tracedEngine) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.PushQuery", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("containerLen", len(container)), + )) + defer span.End() + + return e.engine.PushQuery(ctx, nodeID, requestID, container) +} + +func (e *tracedEngine) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID, acceptedIDs []ids.ID) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Chits", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numPreferredIDs", len(preferredIDs)), + attribute.Int("numAcceptedIDs", len(acceptedIDs)), + )) + defer span.End() + + return e.engine.Chits(ctx, nodeID, requestID, preferredIDs, acceptedIDs) +} + +func (e *tracedEngine) QueryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.QueryFailed", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) 
+ defer span.End() + + return e.engine.QueryFailed(ctx, nodeID, requestID) +} + +func (e *tracedEngine) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.AppRequest", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("requestLen", len(request)), + )) + defer span.End() + + return e.engine.AppRequest(ctx, nodeID, requestID, deadline, request) +} + +func (e *tracedEngine) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.AppResponse", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("responseLen", len(response)), + )) + defer span.End() + + return e.engine.AppResponse(ctx, nodeID, requestID, response) +} + +func (e *tracedEngine) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.AppRequestFailed", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.AppRequestFailed(ctx, nodeID, requestID) +} + +func (e *tracedEngine) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.AppGossip", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Int("gossipLen", len(msg)), + )) + defer span.End() + + return e.engine.AppGossip(ctx, nodeID, msg) +} + +func (e *tracedEngine) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.CrossChainAppRequest", oteltrace.WithAttributes( + attribute.Stringer("chainID", chainID), + 
attribute.Int64("requestID", int64(requestID)), + attribute.Int("requestLen", len(request)), + )) + defer span.End() + + return e.engine.CrossChainAppRequest(ctx, chainID, requestID, deadline, request) +} + +func (e *tracedEngine) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.CrossChainAppResponse", oteltrace.WithAttributes( + attribute.Stringer("chainID", chainID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("responseLen", len(response)), + )) + defer span.End() + + return e.engine.CrossChainAppResponse(ctx, chainID, requestID, response) +} + +func (e *tracedEngine) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.CrossChainAppRequestFailed", oteltrace.WithAttributes( + attribute.Stringer("chainID", chainID), + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + return e.engine.CrossChainAppRequestFailed(ctx, chainID, requestID) +} + +func (e *tracedEngine) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Connected", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + attribute.Stringer("version", nodeVersion), + )) + defer span.End() + + return e.engine.Connected(ctx, nodeID, nodeVersion) +} + +func (e *tracedEngine) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Disconnected", oteltrace.WithAttributes( + attribute.Stringer("nodeID", nodeID), + )) + defer span.End() + + return e.engine.Disconnected(ctx, nodeID) +} + +func (e *tracedEngine) Timeout(ctx context.Context) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Timeout") + defer span.End() + + return e.engine.Timeout(ctx) +} + +func (e *tracedEngine) Gossip(ctx context.Context) error { + ctx, 
span := e.tracer.Start(ctx, "tracedEngine.Gossip") + defer span.End() + + return e.engine.Gossip(ctx) +} + +func (e *tracedEngine) Halt(ctx context.Context) { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Halt") + defer span.End() + + e.engine.Halt(ctx) +} + +func (e *tracedEngine) Shutdown(ctx context.Context) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Shutdown") + defer span.End() + + return e.engine.Shutdown(ctx) +} + +func (e *tracedEngine) Notify(ctx context.Context, msg Message) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Notify", oteltrace.WithAttributes( + attribute.Stringer("message", msg), + )) + defer span.End() + + return e.engine.Notify(ctx, msg) +} + +func (e *tracedEngine) Context() *snow.ConsensusContext { + return e.engine.Context() +} + +func (e *tracedEngine) Start(ctx context.Context, startReqID uint32) error { + ctx, span := e.tracer.Start(ctx, "tracedEngine.Start", oteltrace.WithAttributes( + attribute.Int64("requestID", int64(startReqID)), + )) + defer span.End() + + return e.engine.Start(ctx, startReqID) +} + +func (e *tracedEngine) HealthCheck(ctx context.Context) (interface{}, error) { + ctx, span := e.tracer.Start(ctx, "tracedEngine.HealthCheck") + defer span.End() + + return e.engine.HealthCheck(ctx) +} + +func (e *tracedEngine) GetVM() VM { + return e.engine.GetVM() +} diff --git a/avalanchego/snow/engine/common/traced_state_syncer.go b/avalanchego/snow/engine/common/traced_state_syncer.go new file mode 100644 index 00000000..db2569ee --- /dev/null +++ b/avalanchego/snow/engine/common/traced_state_syncer.go @@ -0,0 +1,33 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package common + +import ( + "context" + + "github.com/ava-labs/avalanchego/trace" +) + +var _ StateSyncer = (*tracedStateSyncer)(nil) + +type tracedStateSyncer struct { + Engine + stateSyncer StateSyncer + tracer trace.Tracer +} + +func TraceStateSyncer(stateSyncer StateSyncer, tracer trace.Tracer) StateSyncer { + return &tracedStateSyncer{ + Engine: TraceEngine(stateSyncer, tracer), + stateSyncer: stateSyncer, + tracer: tracer, + } +} + +func (e *tracedStateSyncer) IsEnabled(ctx context.Context) (bool, error) { + ctx, span := e.tracer.Start(ctx, "tracedStateSyncer.IsEnabled") + defer span.End() + + return e.stateSyncer.IsEnabled(ctx) +} diff --git a/avalanchego/snow/engine/common/tracker/accepted.go b/avalanchego/snow/engine/common/tracker/accepted.go new file mode 100644 index 00000000..4b3f0f1a --- /dev/null +++ b/avalanchego/snow/engine/common/tracker/accepted.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tracker + +import ( + "sync" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" +) + +var _ Accepted = (*accepted)(nil) + +type Accepted interface { + validators.SetCallbackListener + + // SetAcceptedFrontier updates the latest frontier for [nodeID] to + // [frontier]. If [nodeID] is not currently a validator, this is a noop. + SetAcceptedFrontier(nodeID ids.NodeID, frontier []ids.ID) + // AcceptedFrontier returns the latest known accepted frontier of [nodeID]. + // If [nodeID]'s last accepted frontier is unknown, an empty slice will be + // returned. 
+ AcceptedFrontier(nodeID ids.NodeID) []ids.ID +} + +type accepted struct { + lock sync.RWMutex + // frontier contains an entry for all current validators + frontier map[ids.NodeID][]ids.ID +} + +func NewAccepted() Accepted { + return &accepted{ + frontier: make(map[ids.NodeID][]ids.ID), + } +} + +func (a *accepted) OnValidatorAdded(nodeID ids.NodeID, _ *bls.PublicKey, _ ids.ID, _ uint64) { + a.lock.Lock() + defer a.lock.Unlock() + + a.frontier[nodeID] = nil +} + +func (a *accepted) OnValidatorRemoved(nodeID ids.NodeID, _ uint64) { + a.lock.Lock() + defer a.lock.Unlock() + + delete(a.frontier, nodeID) +} + +func (*accepted) OnValidatorWeightChanged(_ ids.NodeID, _, _ uint64) {} + +func (a *accepted) SetAcceptedFrontier(nodeID ids.NodeID, frontier []ids.ID) { + a.lock.Lock() + defer a.lock.Unlock() + + if _, ok := a.frontier[nodeID]; ok { + a.frontier[nodeID] = frontier + } +} + +func (a *accepted) AcceptedFrontier(nodeID ids.NodeID) []ids.ID { + a.lock.RLock() + defer a.lock.RUnlock() + + return a.frontier[nodeID] +} diff --git a/avalanchego/snow/engine/common/tracker/accepted_test.go b/avalanchego/snow/engine/common/tracker/accepted_test.go new file mode 100644 index 00000000..dbd5faf8 --- /dev/null +++ b/avalanchego/snow/engine/common/tracker/accepted_test.go @@ -0,0 +1,41 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tracker + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestAccepted(t *testing.T) { + require := require.New(t) + + nodeID := ids.GenerateTestNodeID() + frontier0 := []ids.ID{ids.GenerateTestID()} + frontier1 := []ids.ID{ids.GenerateTestID()} + + a := NewAccepted() + + require.Empty(a.AcceptedFrontier(nodeID)) + + a.SetAcceptedFrontier(nodeID, frontier0) + require.Empty(a.AcceptedFrontier(nodeID)) + + a.OnValidatorAdded(nodeID, nil, ids.GenerateTestID(), 1) + + require.Empty(a.AcceptedFrontier(nodeID)) + + a.SetAcceptedFrontier(nodeID, frontier0) + require.Equal(frontier0, a.AcceptedFrontier(nodeID)) + + a.SetAcceptedFrontier(nodeID, frontier1) + require.Equal(frontier1, a.AcceptedFrontier(nodeID)) + + a.OnValidatorRemoved(nodeID, 1) + + require.Empty(a.AcceptedFrontier(nodeID)) +} diff --git a/avalanchego/snow/engine/common/tracker/peers.go b/avalanchego/snow/engine/common/tracker/peers.go index 07f636c9..47ba2827 100644 --- a/avalanchego/snow/engine/common/tracker/peers.go +++ b/avalanchego/snow/engine/common/tracker/peers.go @@ -1,17 +1,20 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker import ( + "context" "sync" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) -var _ Peers = &peers{} +var _ Peers = (*peers)(nil) type Peers interface { validators.SetCallbackListener @@ -22,7 +25,7 @@ type Peers interface { // PreferredPeers returns the currently connected validators. If there are // no currently connected validators then it will return the currently // connected peers. 
- PreferredPeers() ids.NodeIDSet + PreferredPeers() set.Set[ids.NodeID] } type peers struct { @@ -33,9 +36,9 @@ type peers struct { connectedWeight uint64 // connectedValidators is the set of currently connected peers with a // non-zero stake weight - connectedValidators ids.NodeIDSet + connectedValidators set.Set[ids.NodeID] // connectedPeers is the set of all connected peers - connectedPeers ids.NodeIDSet + connectedPeers set.Set[ids.NodeID] } func NewPeers() Peers { @@ -44,7 +47,7 @@ func NewPeers() Peers { } } -func (p *peers) OnValidatorAdded(nodeID ids.NodeID, weight uint64) { +func (p *peers) OnValidatorAdded(nodeID ids.NodeID, _ *bls.PublicKey, _ ids.ID, weight uint64) { p.lock.Lock() defer p.lock.Unlock() @@ -77,7 +80,7 @@ func (p *peers) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeight } } -func (p *peers) Connected(nodeID ids.NodeID, _ *version.Application) error { +func (p *peers) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { p.lock.Lock() defer p.lock.Unlock() @@ -89,7 +92,7 @@ func (p *peers) Connected(nodeID ids.NodeID, _ *version.Application) error { return nil } -func (p *peers) Disconnected(nodeID ids.NodeID) error { +func (p *peers) Disconnected(_ context.Context, nodeID ids.NodeID) error { p.lock.Lock() defer p.lock.Unlock() @@ -108,17 +111,17 @@ func (p *peers) ConnectedWeight() uint64 { return p.connectedWeight } -func (p *peers) PreferredPeers() ids.NodeIDSet { +func (p *peers) PreferredPeers() set.Set[ids.NodeID] { p.lock.RLock() defer p.lock.RUnlock() if p.connectedValidators.Len() == 0 { - connectedPeers := ids.NewNodeIDSet(p.connectedPeers.Len()) + connectedPeers := set.NewSet[ids.NodeID](p.connectedPeers.Len()) connectedPeers.Union(p.connectedPeers) return connectedPeers } - connectedValidators := ids.NewNodeIDSet(p.connectedValidators.Len()) + connectedValidators := set.NewSet[ids.NodeID](p.connectedValidators.Len()) connectedValidators.Union(p.connectedValidators) return 
connectedValidators } diff --git a/avalanchego/snow/engine/common/tracker/peers_test.go b/avalanchego/snow/engine/common/tracker/peers_test.go index a00005d5..8c8b0821 100644 --- a/avalanchego/snow/engine/common/tracker/peers_test.go +++ b/avalanchego/snow/engine/common/tracker/peers_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -22,11 +23,11 @@ func TestPeers(t *testing.T) { require.Zero(p.ConnectedWeight()) require.Empty(p.PreferredPeers()) - p.OnValidatorAdded(nodeID, 5) + p.OnValidatorAdded(nodeID, nil, ids.Empty, 5) require.Zero(p.ConnectedWeight()) require.Empty(p.PreferredPeers()) - err := p.Connected(nodeID, version.CurrentApp) + err := p.Connected(context.Background(), nodeID, version.CurrentApp) require.NoError(err) require.EqualValues(5, p.ConnectedWeight()) require.Contains(p.PreferredPeers(), nodeID) @@ -39,11 +40,11 @@ func TestPeers(t *testing.T) { require.Zero(p.ConnectedWeight()) require.Contains(p.PreferredPeers(), nodeID) - p.OnValidatorAdded(nodeID, 5) + p.OnValidatorAdded(nodeID, nil, ids.Empty, 5) require.EqualValues(5, p.ConnectedWeight()) require.Contains(p.PreferredPeers(), nodeID) - err = p.Disconnected(nodeID) + err = p.Disconnected(context.Background(), nodeID) require.NoError(err) require.Zero(p.ConnectedWeight()) require.Empty(p.PreferredPeers()) diff --git a/avalanchego/snow/engine/common/tracker/startup.go b/avalanchego/snow/engine/common/tracker/startup.go index 66891219..282d88ce 100644 --- a/avalanchego/snow/engine/common/tracker/startup.go +++ b/avalanchego/snow/engine/common/tracker/startup.go @@ -1,16 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tracker import ( + "context" "sync" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/version" ) -var _ Startup = &startup{} +var _ Startup = (*startup)(nil) type Startup interface { Peers @@ -34,11 +36,11 @@ func NewStartup(peers Peers, startupWeight uint64) Startup { } } -func (s *startup) OnValidatorAdded(nodeID ids.NodeID, weight uint64) { +func (s *startup) OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { s.lock.Lock() defer s.lock.Unlock() - s.Peers.OnValidatorAdded(nodeID, weight) + s.Peers.OnValidatorAdded(nodeID, pk, txID, weight) s.shouldStart = s.shouldStart || s.Peers.ConnectedWeight() >= s.startupWeight } @@ -50,11 +52,11 @@ func (s *startup) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight, newWeig s.shouldStart = s.shouldStart || s.Peers.ConnectedWeight() >= s.startupWeight } -func (s *startup) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { +func (s *startup) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { s.lock.Lock() defer s.lock.Unlock() - if err := s.Peers.Connected(nodeID, nodeVersion); err != nil { + if err := s.Peers.Connected(ctx, nodeID, nodeVersion); err != nil { return err } diff --git a/avalanchego/snow/engine/common/vm.go b/avalanchego/snow/engine/common/vm.go index 4c45ee0c..5fedd50f 100644 --- a/avalanchego/snow/engine/common/vm.go +++ b/avalanchego/snow/engine/common/vm.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common import ( + "context" + "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/snow" @@ -22,14 +24,16 @@ type VM interface { validators.Connector // Initialize this VM. 
- // [ctx]: Metadata about this VM. - // [ctx.networkID]: The ID of the network this VM's chain is running on. - // [ctx.chainID]: The unique ID of the chain this VM is running on. - // [ctx.Log]: Used to log messages - // [ctx.NodeID]: The unique staker ID of this node. - // [ctx.Lock]: A Read/Write lock shared by this VM and the consensus - // engine that manages this VM. The write lock is held - // whenever code in the consensus engine calls the VM. + // [chainCtx]: Metadata about this VM. + // [chainCtx.networkID]: The ID of the network this VM's chain is + // running on. + // [chainCtx.chainID]: The unique ID of the chain this VM is running on. + // [chainCtx.Log]: Used to log messages + // [chainCtx.NodeID]: The unique staker ID of this node. + // [chainCtx.Lock]: A Read/Write lock shared by this VM and the + // consensus engine that manages this VM. The write + // lock is held whenever code in the consensus engine + // calls the VM. // [dbManager]: The manager of the database this VM will persist data to. // [genesisBytes]: The byte-encoding of the genesis information of this // VM. The VM uses it to initialize its state. For @@ -40,7 +44,8 @@ type VM interface { // [toEngine]: The channel used to send messages to the consensus engine. // [fxs]: Feature extensions that attach to this VM. Initialize( - ctx *snow.Context, + ctx context.Context, + chainCtx *snow.Context, dbManager manager.Manager, genesisBytes []byte, upgradeBytes []byte, @@ -51,13 +56,13 @@ type VM interface { ) error // SetState communicates to VM its next state it starts - SetState(state snow.State) error + SetState(ctx context.Context, state snow.State) error // Shutdown is called when the node is shutting down. - Shutdown() error + Shutdown(context.Context) error - // Version returns the version of the VM this node is running. - Version() (string, error) + // Version returns the version of the VM. 
+ Version(context.Context) (string, error) // Creates the HTTP handlers for custom VM network calls. // @@ -71,7 +76,12 @@ type VM interface { // // For example, it might make sense to have an extension for creating // genesis bytes this VM can interpret. - CreateStaticHandlers() (map[string]*HTTPHandler, error) + // + // Note: If this method is called, no other method will be called on this VM. + // Each registered VM will have a single instance created to handle static + // APIs. This instance will be handled separately from instances created to + // service an instance of a chain. + CreateStaticHandlers(context.Context) (map[string]*HTTPHandler, error) // Creates the HTTP handlers for custom chain network calls. // @@ -86,5 +96,5 @@ type VM interface { // For example, if this VM implements an account-based payments system, // it have an extension called `accounts`, where clients could get // information about their accounts. - CreateHandlers() (map[string]*HTTPHandler, error) + CreateHandlers(context.Context) (map[string]*HTTPHandler, error) } diff --git a/avalanchego/snow/engine/snowman/ancestor_tree.go b/avalanchego/snow/engine/snowman/ancestor_tree.go index 92f43e41..7a8f514b 100644 --- a/avalanchego/snow/engine/snowman/ancestor_tree.go +++ b/avalanchego/snow/engine/snowman/ancestor_tree.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) type AncestorTree interface { @@ -18,13 +19,13 @@ type AncestorTree interface { type ancestorTree struct { childToParent map[ids.ID]ids.ID - parentToChildren map[ids.ID]ids.Set + parentToChildren map[ids.ID]set.Set[ids.ID] } func NewAncestorTree() AncestorTree { return &ancestorTree{ childToParent: make(map[ids.ID]ids.ID), - parentToChildren: make(map[ids.ID]ids.Set), + parentToChildren: make(map[ids.ID]set.Set[ids.ID]), } } diff --git a/avalanchego/snow/engine/snowman/ancestor_tree_test.go b/avalanchego/snow/engine/snowman/ancestor_tree_test.go index 790a5d6d..f2419d16 100644 --- a/avalanchego/snow/engine/snowman/ancestor_tree_test.go +++ b/avalanchego/snow/engine/snowman/ancestor_tree_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/avalanchego/snow/engine/snowman/block/README.md b/avalanchego/snow/engine/snowman/block/README.md index 08a6bdfd..4354e8d1 100644 --- a/avalanchego/snow/engine/snowman/block/README.md +++ b/avalanchego/snow/engine/snowman/block/README.md @@ -55,9 +55,9 @@ Of all the valid state summaries, one is selected and passed to the VM by callin The Avalanche engine declares state syncing complete in the following cases: -1. `Summary.Accept()` returns `(false, nil)` signalling that the VM considered the summary valid but skips the whole syncing process. This may happen if the VM estimates that bootstrapping would be faster than state syncing with the provided summary. +1. `Summary.Accept()` returns `(StateSyncSkipped, nil)` signalling that the VM considered the summary valid but skips the whole syncing process. This may happen if the VM estimates that bootstrapping would be faster than state syncing with the provided summary. 2. 
The VM sends `StateSyncDone` via the `Notify` channel. Note that any error returned from `Summary.Accept()` is considered fatal and causes the engine to shutdown. -After state sync is complete, Avalanche will continue bootstrapping the remaining blocks until the node has reached the block frontier. +If `Summary.Accept()` returns `(StateSyncStatic, nil)`, Avalanche will wait until state sync is complete before continuing the bootstrapping process. If `Summary.Accept()` returns `(StateSyncDynamic, nil)`, Avalanche will immediately continue the bootstrapping process. If bootstrapping finishes before the state sync has been completed, `Chits` messages will include the `LastAcceptedID` rather than the `PreferredID`. diff --git a/avalanchego/snow/engine/snowman/block/batched_vm.go b/avalanchego/snow/engine/snowman/block/batched_vm.go index 921bc0e8..5cb63cd2 100644 --- a/avalanchego/snow/engine/snowman/block/batched_vm.go +++ b/avalanchego/snow/engine/snowman/block/batched_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms.
package block import ( + "context" "errors" "time" @@ -20,16 +21,18 @@ var ErrRemoteVMNotImplemented = errors.New("vm does not implement RemoteVM inter // operations since calls over network can be duly batched type BatchedChainVM interface { GetAncestors( + ctx context.Context, blkID ids.ID, // first requested block maxBlocksNum int, // max number of blocks to be retrieved maxBlocksSize int, // max cumulated byte size of retrieved blocks maxBlocksRetrivalTime time.Duration, // max duration of retrival operation ) ([][]byte, error) - BatchedParseBlock(blks [][]byte) ([]snowman.Block, error) + BatchedParseBlock(ctx context.Context, blks [][]byte) ([]snowman.Block, error) } func GetAncestors( + ctx context.Context, vm Getter, // fetch blocks blkID ids.ID, // first requested block maxBlocksNum int, // max number of blocks to be retrieved @@ -39,6 +42,7 @@ func GetAncestors( // Try and batch GetBlock requests if vm, ok := vm.(BatchedChainVM); ok { blocks, err := vm.GetAncestors( + ctx, blkID, maxBlocksNum, maxBlocksSize, @@ -54,7 +58,7 @@ func GetAncestors( // RemoteVM did not work, try local logic startTime := time.Now() - blk, err := vm.GetBlock(blkID) + blk, err := vm.GetBlock(ctx, blkID) if err == database.ErrNotFound { // special case ErrNotFound as an empty response: this signals // the client to avoid contacting this node for further ancestors @@ -70,28 +74,34 @@ func GetAncestors( ancestorsBytesLen := len(blk.Bytes()) + wrappers.IntLen // length, in bytes, of all elements of ancestors for numFetched := 1; numFetched < maxBlocksNum && time.Since(startTime) < maxBlocksRetrivalTime; numFetched++ { - if blk, err = vm.GetBlock(blk.Parent()); err != nil { + blk, err = vm.GetBlock(ctx, blk.Parent()) + if err != nil { break } blkBytes := blk.Bytes() // Ensure response size isn't too large. Include wrappers.IntLen because // the size of the message is included with each container, and the size // is repr. by an int. 
- if newLen := ancestorsBytesLen + len(blkBytes) + wrappers.IntLen; newLen <= maxBlocksSize { - ancestorsBytes = append(ancestorsBytes, blkBytes) - ancestorsBytesLen = newLen - } else { // reached maximum response size + newLen := ancestorsBytesLen + len(blkBytes) + wrappers.IntLen + if newLen > maxBlocksSize { + // reached maximum response size break } + ancestorsBytes = append(ancestorsBytes, blkBytes) + ancestorsBytesLen = newLen } return ancestorsBytes, nil } -func BatchedParseBlock(vm Parser, blks [][]byte) ([]snowman.Block, error) { +func BatchedParseBlock( + ctx context.Context, + vm Parser, + blks [][]byte, +) ([]snowman.Block, error) { // Try and batch ParseBlock requests if vm, ok := vm.(BatchedChainVM); ok { - blocks, err := vm.BatchedParseBlock(blks) + blocks, err := vm.BatchedParseBlock(ctx, blks) if err == nil { return blocks, nil } @@ -104,7 +114,7 @@ func BatchedParseBlock(vm Parser, blks [][]byte) ([]snowman.Block, error) { // time. blocks := make([]snowman.Block, len(blks)) for i, blockBytes := range blks { - block, err := vm.ParseBlock(blockBytes) + block, err := vm.ParseBlock(ctx, blockBytes) if err != nil { return nil, err } diff --git a/avalanchego/snow/engine/snowman/block/batched_vm_test.go b/avalanchego/snow/engine/snowman/block/batched_vm_test.go index 5193f226..be536d03 100644 --- a/avalanchego/snow/engine/snowman/block/batched_vm_test.go +++ b/avalanchego/snow/engine/snowman/block/batched_vm_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block import ( + "context" "errors" "testing" "time" @@ -15,14 +16,16 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" ) +var errTest = errors.New("non-nil error") + func TestGetAncestorsDatabaseNotFound(t *testing.T) { vm := &TestVM{} someID := ids.GenerateTestID() - vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { require.Equal(t, someID, id) return nil, database.ErrNotFound } - containers, err := GetAncestors(vm, someID, 10, 10, 1*time.Second) + containers, err := GetAncestors(context.Background(), vm, someID, 10, 10, 1*time.Second) require.NoError(t, err) require.Len(t, containers, 0) } @@ -32,12 +35,11 @@ func TestGetAncestorsDatabaseNotFound(t *testing.T) { func TestGetAncestorsPropagatesErrors(t *testing.T) { vm := &TestVM{} someID := ids.GenerateTestID() - someError := errors.New("some error that is not ErrNotFound") - vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { require.Equal(t, someID, id) - return nil, someError + return nil, errTest } - containers, err := GetAncestors(vm, someID, 10, 10, 1*time.Second) + containers, err := GetAncestors(context.Background(), vm, someID, 10, 10, 1*time.Second) require.Nil(t, containers) - require.ErrorIs(t, err, someError) + require.ErrorIs(t, err, errTest) } diff --git a/avalanchego/snow/engine/snowman/block/block_context_vm.go b/avalanchego/snow/engine/snowman/block/block_context_vm.go new file mode 100644 index 00000000..4a259571 --- /dev/null +++ b/avalanchego/snow/engine/snowman/block/block_context_vm.go @@ -0,0 +1,68 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package block + +import ( + "context" + + "github.com/ava-labs/avalanchego/snow/consensus/snowman" +) + +// Context defines the block context that will be optionally provided by the +// proposervm to an underlying vm. +type Context struct { + // PChainHeight is the height that this block will use to verify its state. + // In the proposervm, blocks verify the proposer based on the P-chain height + // recorded in the parent block. The P-chain height provided here is also + // the parent's P-chain height, not this block's P-chain height. + // + // Because PreForkBlocks and PostForkOptions do not verify their execution + // against the P-chain's state, this context is undefined for those blocks. + PChainHeight uint64 +} + +// BuildBlockWithContextChainVM defines the interface a ChainVM can optionally +// implement to consider the P-Chain height when building blocks. +type BuildBlockWithContextChainVM interface { + // Attempt to build a new block given that the P-Chain height is + // [blockCtx.PChainHeight]. + // + // This method will be called if and only if the proposervm is activated. + // Otherwise [BuildBlock] will be called. + BuildBlockWithContext(ctx context.Context, blockCtx *Context) (snowman.Block, error) +} + +// WithVerifyContext defines the interface a Block can optionally implement to +// consider the P-Chain height when verifying itself. +// +// As with all Blocks, it is guaranteed for verification to be called in +// topological order. +// +// If the status of the block is Accepted or Rejected, VerifyWithContext will +// never be called. +type WithVerifyContext interface { + // Returns true if [VerifyWithContext] should be called. + // Returns false if [Verify] should be called. + // + // This method will be called if and only if the proposervm is activated. + // Otherwise [Verify] will be called. + ShouldVerifyWithContext(context.Context) (bool, error) + + // Verify that the state transition this block would make if accepted is + // valid.
If the state transition is invalid, a non-nil error should be + // returned. + // + // It is guaranteed that the Parent has been successfully verified. + // + // This method may be called again with a different context. + // + // If nil is returned, it is guaranteed that either Accept or Reject will be + // called on this block, unless the VM is shut down. + // + // Note: During `Accept` the block context is not provided. This implies + // that the block context provided here can not be used to alter any + // potential state transition that assumes network agreement. The block + // context should only be used to determine the validity of the block. + VerifyWithContext(context.Context, *Context) error +} diff --git a/avalanchego/snow/engine/snowman/block/height_indexed_vm.go b/avalanchego/snow/engine/snowman/block/height_indexed_vm.go index 724dc98a..da449c3f 100644 --- a/avalanchego/snow/engine/snowman/block/height_indexed_vm.go +++ b/avalanchego/snow/engine/snowman/block/height_indexed_vm.go @@ -1,8 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( + "context" "errors" "github.com/ava-labs/avalanchego/ids" @@ -21,7 +22,7 @@ type HeightIndexedChainVM interface { // - ErrIndexIncomplete if the height index is not currently available. // - Any other non-standard error that may have occurred when verifying the // index. - VerifyHeightIndex() error + VerifyHeightIndex(context.Context) error // GetBlockIDAtHeight returns: // - The ID of the block that was accepted with [height]. @@ -30,5 +31,5 @@ type HeightIndexedChainVM interface { // Note: A returned value of [database.ErrNotFound] typically means that the // underlying VM was state synced and does not have access to the // blockID at [height]. 
- GetBlockIDAtHeight(height uint64) (ids.ID, error) + GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) } diff --git a/avalanchego/snow/engine/snowman/block/mocks/build_block_with_context_vm.go b/avalanchego/snow/engine/snowman/block/mocks/build_block_with_context_vm.go new file mode 100644 index 00000000..9b01018a --- /dev/null +++ b/avalanchego/snow/engine/snowman/block/mocks/build_block_with_context_vm.go @@ -0,0 +1,55 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: BuildBlockWithContextChainVM) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" + block "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + gomock "github.com/golang/mock/gomock" +) + +// MockBuildBlockWithContextChainVM is a mock of BuildBlockWithContextChainVM interface. +type MockBuildBlockWithContextChainVM struct { + ctrl *gomock.Controller + recorder *MockBuildBlockWithContextChainVMMockRecorder +} + +// MockBuildBlockWithContextChainVMMockRecorder is the mock recorder for MockBuildBlockWithContextChainVM. +type MockBuildBlockWithContextChainVMMockRecorder struct { + mock *MockBuildBlockWithContextChainVM +} + +// NewMockBuildBlockWithContextChainVM creates a new mock instance. +func NewMockBuildBlockWithContextChainVM(ctrl *gomock.Controller) *MockBuildBlockWithContextChainVM { + mock := &MockBuildBlockWithContextChainVM{ctrl: ctrl} + mock.recorder = &MockBuildBlockWithContextChainVMMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockBuildBlockWithContextChainVM) EXPECT() *MockBuildBlockWithContextChainVMMockRecorder { + return m.recorder +} + +// BuildBlockWithContext mocks base method. +func (m *MockBuildBlockWithContextChainVM) BuildBlockWithContext(arg0 context.Context, arg1 *block.Context) (snowman.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BuildBlockWithContext", arg0, arg1) + ret0, _ := ret[0].(snowman.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// BuildBlockWithContext indicates an expected call of BuildBlockWithContext. +func (mr *MockBuildBlockWithContextChainVMMockRecorder) BuildBlockWithContext(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlockWithContext", reflect.TypeOf((*MockBuildBlockWithContextChainVM)(nil).BuildBlockWithContext), arg0, arg1) +} diff --git a/avalanchego/snow/engine/snowman/block/mocks/chain_vm.go b/avalanchego/snow/engine/snowman/block/mocks/chain_vm.go index 61280d1c..73821562 100644 --- a/avalanchego/snow/engine/snowman/block/mocks/chain_vm.go +++ b/avalanchego/snow/engine/snowman/block/mocks/chain_vm.go @@ -1,10 +1,14 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: snow/engine/snowman/block/vm.go +// Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: ChainVM) // Package mocks is a generated GoMock package. package mocks import ( + context "context" reflect "reflect" time "time" @@ -41,337 +45,303 @@ func (m *MockChainVM) EXPECT() *MockChainVMMockRecorder { } // AppGossip mocks base method. 
-func (m *MockChainVM) AppGossip(nodeID ids.NodeID, msg []byte) error { +func (m *MockChainVM) AppGossip(arg0 context.Context, arg1 ids.NodeID, arg2 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppGossip", nodeID, msg) + ret := m.ctrl.Call(m, "AppGossip", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // AppGossip indicates an expected call of AppGossip. -func (mr *MockChainVMMockRecorder) AppGossip(nodeID, msg interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppGossip(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockChainVM)(nil).AppGossip), nodeID, msg) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppGossip", reflect.TypeOf((*MockChainVM)(nil).AppGossip), arg0, arg1, arg2) } // AppRequest mocks base method. -func (m *MockChainVM) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { +func (m *MockChainVM) AppRequest(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 time.Time, arg4 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppRequest", nodeID, requestID, deadline, request) + ret := m.ctrl.Call(m, "AppRequest", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(error) return ret0 } // AppRequest indicates an expected call of AppRequest. -func (mr *MockChainVMMockRecorder) AppRequest(nodeID, requestID, deadline, request interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockChainVM)(nil).AppRequest), nodeID, requestID, deadline, request) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequest", reflect.TypeOf((*MockChainVM)(nil).AppRequest), arg0, arg1, arg2, arg3, arg4) } // AppRequestFailed mocks base method. 
-func (m *MockChainVM) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { +func (m *MockChainVM) AppRequestFailed(arg0 context.Context, arg1 ids.NodeID, arg2 uint32) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppRequestFailed", nodeID, requestID) + ret := m.ctrl.Call(m, "AppRequestFailed", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // AppRequestFailed indicates an expected call of AppRequestFailed. -func (mr *MockChainVMMockRecorder) AppRequestFailed(nodeID, requestID interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).AppRequestFailed), nodeID, requestID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).AppRequestFailed), arg0, arg1, arg2) } // AppResponse mocks base method. -func (m *MockChainVM) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { +func (m *MockChainVM) AppResponse(arg0 context.Context, arg1 ids.NodeID, arg2 uint32, arg3 []byte) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AppResponse", nodeID, requestID, response) + ret := m.ctrl.Call(m, "AppResponse", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // AppResponse indicates an expected call of AppResponse. -func (mr *MockChainVMMockRecorder) AppResponse(nodeID, requestID, response interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) AppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockChainVM)(nil).AppResponse), nodeID, requestID, response) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AppResponse", reflect.TypeOf((*MockChainVM)(nil).AppResponse), arg0, arg1, arg2, arg3) } // BuildBlock mocks base method. 
-func (m *MockChainVM) BuildBlock() (snowman.Block, error) { +func (m *MockChainVM) BuildBlock(arg0 context.Context) (snowman.Block, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "BuildBlock") + ret := m.ctrl.Call(m, "BuildBlock", arg0) ret0, _ := ret[0].(snowman.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // BuildBlock indicates an expected call of BuildBlock. -func (mr *MockChainVMMockRecorder) BuildBlock() *gomock.Call { +func (mr *MockChainVMMockRecorder) BuildBlock(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlock", reflect.TypeOf((*MockChainVM)(nil).BuildBlock)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildBlock", reflect.TypeOf((*MockChainVM)(nil).BuildBlock), arg0) } // Connected mocks base method. -func (m *MockChainVM) Connected(id ids.NodeID, nodeVersion *version.Application) error { +func (m *MockChainVM) Connected(arg0 context.Context, arg1 ids.NodeID, arg2 *version.Application) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Connected", id, nodeVersion) + ret := m.ctrl.Call(m, "Connected", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // Connected indicates an expected call of Connected. -func (mr *MockChainVMMockRecorder) Connected(id, nodeVersion interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockChainVM)(nil).Connected), id, nodeVersion) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockChainVM)(nil).Connected), arg0, arg1, arg2) } // CreateHandlers mocks base method. 
-func (m *MockChainVM) CreateHandlers() (map[string]*common.HTTPHandler, error) { +func (m *MockChainVM) CreateHandlers(arg0 context.Context) (map[string]*common.HTTPHandler, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateHandlers") + ret := m.ctrl.Call(m, "CreateHandlers", arg0) ret0, _ := ret[0].(map[string]*common.HTTPHandler) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateHandlers indicates an expected call of CreateHandlers. -func (mr *MockChainVMMockRecorder) CreateHandlers() *gomock.Call { +func (mr *MockChainVMMockRecorder) CreateHandlers(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHandlers", reflect.TypeOf((*MockChainVM)(nil).CreateHandlers)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateHandlers", reflect.TypeOf((*MockChainVM)(nil).CreateHandlers), arg0) } // CreateStaticHandlers mocks base method. -func (m *MockChainVM) CreateStaticHandlers() (map[string]*common.HTTPHandler, error) { +func (m *MockChainVM) CreateStaticHandlers(arg0 context.Context) (map[string]*common.HTTPHandler, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateStaticHandlers") + ret := m.ctrl.Call(m, "CreateStaticHandlers", arg0) ret0, _ := ret[0].(map[string]*common.HTTPHandler) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateStaticHandlers indicates an expected call of CreateStaticHandlers. -func (mr *MockChainVMMockRecorder) CreateStaticHandlers() *gomock.Call { +func (mr *MockChainVMMockRecorder) CreateStaticHandlers(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateStaticHandlers", reflect.TypeOf((*MockChainVM)(nil).CreateStaticHandlers), arg0) +} + +// CrossChainAppRequest mocks base method. 
+func (m *MockChainVM) CrossChainAppRequest(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 time.Time, arg4 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CrossChainAppRequest", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// CrossChainAppRequest indicates an expected call of CrossChainAppRequest. +func (mr *MockChainVMMockRecorder) CrossChainAppRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequest", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppRequest), arg0, arg1, arg2, arg3, arg4) +} + +// CrossChainAppRequestFailed mocks base method. +func (m *MockChainVM) CrossChainAppRequestFailed(arg0 context.Context, arg1 ids.ID, arg2 uint32) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CrossChainAppRequestFailed", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CrossChainAppRequestFailed indicates an expected call of CrossChainAppRequestFailed. +func (mr *MockChainVMMockRecorder) CrossChainAppRequestFailed(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppRequestFailed", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppRequestFailed), arg0, arg1, arg2) +} + +// CrossChainAppResponse mocks base method. +func (m *MockChainVM) CrossChainAppResponse(arg0 context.Context, arg1 ids.ID, arg2 uint32, arg3 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CrossChainAppResponse", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// CrossChainAppResponse indicates an expected call of CrossChainAppResponse. 
+func (mr *MockChainVMMockRecorder) CrossChainAppResponse(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateStaticHandlers", reflect.TypeOf((*MockChainVM)(nil).CreateStaticHandlers)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossChainAppResponse", reflect.TypeOf((*MockChainVM)(nil).CrossChainAppResponse), arg0, arg1, arg2, arg3) } // Disconnected mocks base method. -func (m *MockChainVM) Disconnected(id ids.NodeID) error { +func (m *MockChainVM) Disconnected(arg0 context.Context, arg1 ids.NodeID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Disconnected", id) + ret := m.ctrl.Call(m, "Disconnected", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // Disconnected indicates an expected call of Disconnected. -func (mr *MockChainVMMockRecorder) Disconnected(id interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Disconnected(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockChainVM)(nil).Disconnected), id) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockChainVM)(nil).Disconnected), arg0, arg1) } // GetBlock mocks base method. -func (m *MockChainVM) GetBlock(arg0 ids.ID) (snowman.Block, error) { +func (m *MockChainVM) GetBlock(arg0 context.Context, arg1 ids.ID) (snowman.Block, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlock", arg0) + ret := m.ctrl.Call(m, "GetBlock", arg0, arg1) ret0, _ := ret[0].(snowman.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // GetBlock indicates an expected call of GetBlock. 
-func (mr *MockChainVMMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) GetBlock(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockChainVM)(nil).GetBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockChainVM)(nil).GetBlock), arg0, arg1) } // HealthCheck mocks base method. -func (m *MockChainVM) HealthCheck() (interface{}, error) { +func (m *MockChainVM) HealthCheck(arg0 context.Context) (interface{}, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HealthCheck") + ret := m.ctrl.Call(m, "HealthCheck", arg0) ret0, _ := ret[0].(interface{}) ret1, _ := ret[1].(error) return ret0, ret1 } // HealthCheck indicates an expected call of HealthCheck. -func (mr *MockChainVMMockRecorder) HealthCheck() *gomock.Call { +func (mr *MockChainVMMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockChainVM)(nil).HealthCheck)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockChainVM)(nil).HealthCheck), arg0) } // Initialize mocks base method. 
-func (m *MockChainVM) Initialize(ctx *snow.Context, dbManager manager.Manager, genesisBytes, upgradeBytes, configBytes []byte, toEngine chan<- common.Message, fxs []*common.Fx, appSender common.AppSender) error { +func (m *MockChainVM) Initialize(arg0 context.Context, arg1 *snow.Context, arg2 manager.Manager, arg3, arg4, arg5 []byte, arg6 chan<- common.Message, arg7 []*common.Fx, arg8 common.AppSender) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Initialize", ctx, dbManager, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) + ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) ret0, _ := ret[0].(error) return ret0 } // Initialize indicates an expected call of Initialize. -func (mr *MockChainVMMockRecorder) Initialize(ctx, dbManager, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockChainVM)(nil).Initialize), ctx, dbManager, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockChainVM)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } // LastAccepted mocks base method. -func (m *MockChainVM) LastAccepted() (ids.ID, error) { +func (m *MockChainVM) LastAccepted(arg0 context.Context) (ids.ID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LastAccepted") + ret := m.ctrl.Call(m, "LastAccepted", arg0) ret0, _ := ret[0].(ids.ID) ret1, _ := ret[1].(error) return ret0, ret1 } // LastAccepted indicates an expected call of LastAccepted. 
-func (mr *MockChainVMMockRecorder) LastAccepted() *gomock.Call { +func (mr *MockChainVMMockRecorder) LastAccepted(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockChainVM)(nil).LastAccepted)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockChainVM)(nil).LastAccepted), arg0) } // ParseBlock mocks base method. -func (m *MockChainVM) ParseBlock(arg0 []byte) (snowman.Block, error) { +func (m *MockChainVM) ParseBlock(arg0 context.Context, arg1 []byte) (snowman.Block, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ParseBlock", arg0) + ret := m.ctrl.Call(m, "ParseBlock", arg0, arg1) ret0, _ := ret[0].(snowman.Block) ret1, _ := ret[1].(error) return ret0, ret1 } // ParseBlock indicates an expected call of ParseBlock. -func (mr *MockChainVMMockRecorder) ParseBlock(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) ParseBlock(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseBlock", reflect.TypeOf((*MockChainVM)(nil).ParseBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseBlock", reflect.TypeOf((*MockChainVM)(nil).ParseBlock), arg0, arg1) } // SetPreference mocks base method. -func (m *MockChainVM) SetPreference(arg0 ids.ID) error { +func (m *MockChainVM) SetPreference(arg0 context.Context, arg1 ids.ID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetPreference", arg0) + ret := m.ctrl.Call(m, "SetPreference", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // SetPreference indicates an expected call of SetPreference. 
-func (mr *MockChainVMMockRecorder) SetPreference(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) SetPreference(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockChainVM)(nil).SetPreference), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockChainVM)(nil).SetPreference), arg0, arg1) } // SetState mocks base method. -func (m *MockChainVM) SetState(state snow.State) error { +func (m *MockChainVM) SetState(arg0 context.Context, arg1 snow.State) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetState", state) + ret := m.ctrl.Call(m, "SetState", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // SetState indicates an expected call of SetState. -func (mr *MockChainVMMockRecorder) SetState(state interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) SetState(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetState", reflect.TypeOf((*MockChainVM)(nil).SetState), state) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetState", reflect.TypeOf((*MockChainVM)(nil).SetState), arg0, arg1) } // Shutdown mocks base method. -func (m *MockChainVM) Shutdown() error { +func (m *MockChainVM) Shutdown(arg0 context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Shutdown") + ret := m.ctrl.Call(m, "Shutdown", arg0) ret0, _ := ret[0].(error) return ret0 } // Shutdown indicates an expected call of Shutdown. 
-func (mr *MockChainVMMockRecorder) Shutdown() *gomock.Call { +func (mr *MockChainVMMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockChainVM)(nil).Shutdown)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockChainVM)(nil).Shutdown), arg0) } // Version mocks base method. -func (m *MockChainVM) Version() (string, error) { +func (m *MockChainVM) Version(arg0 context.Context) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Version") + ret := m.ctrl.Call(m, "Version", arg0) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // Version indicates an expected call of Version. -func (mr *MockChainVMMockRecorder) Version() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockChainVM)(nil).Version)) -} - -// MockGetter is a mock of Getter interface. -type MockGetter struct { - ctrl *gomock.Controller - recorder *MockGetterMockRecorder -} - -// MockGetterMockRecorder is the mock recorder for MockGetter. -type MockGetterMockRecorder struct { - mock *MockGetter -} - -// NewMockGetter creates a new mock instance. -func NewMockGetter(ctrl *gomock.Controller) *MockGetter { - mock := &MockGetter{ctrl: ctrl} - mock.recorder = &MockGetterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockGetter) EXPECT() *MockGetterMockRecorder { - return m.recorder -} - -// GetBlock mocks base method. -func (m *MockGetter) GetBlock(arg0 ids.ID) (snowman.Block, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetBlock", arg0) - ret0, _ := ret[0].(snowman.Block) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetBlock indicates an expected call of GetBlock. 
-func (mr *MockGetterMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockGetter)(nil).GetBlock), arg0) -} - -// MockParser is a mock of Parser interface. -type MockParser struct { - ctrl *gomock.Controller - recorder *MockParserMockRecorder -} - -// MockParserMockRecorder is the mock recorder for MockParser. -type MockParserMockRecorder struct { - mock *MockParser -} - -// NewMockParser creates a new mock instance. -func NewMockParser(ctrl *gomock.Controller) *MockParser { - mock := &MockParser{ctrl: ctrl} - mock.recorder = &MockParserMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockParser) EXPECT() *MockParserMockRecorder { - return m.recorder -} - -// ParseBlock mocks base method. -func (m *MockParser) ParseBlock(arg0 []byte) (snowman.Block, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ParseBlock", arg0) - ret0, _ := ret[0].(snowman.Block) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ParseBlock indicates an expected call of ParseBlock. -func (mr *MockParserMockRecorder) ParseBlock(arg0 interface{}) *gomock.Call { +func (mr *MockChainVMMockRecorder) Version(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseBlock", reflect.TypeOf((*MockParser)(nil).ParseBlock), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockChainVM)(nil).Version), arg0) } diff --git a/avalanchego/snow/engine/snowman/block/mocks/state_syncable_vm.go b/avalanchego/snow/engine/snowman/block/mocks/state_syncable_vm.go index 3eefb490..58af74a5 100644 --- a/avalanchego/snow/engine/snowman/block/mocks/state_syncable_vm.go +++ b/avalanchego/snow/engine/snowman/block/mocks/state_syncable_vm.go @@ -1,10 +1,14 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: snow/engine/snowman/block/state_syncable_vm.go +// Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: StateSyncableVM) // Package mocks is a generated GoMock package. package mocks import ( + context "context" reflect "reflect" block "github.com/ava-labs/avalanchego/snow/engine/snowman/block" @@ -35,76 +39,76 @@ func (m *MockStateSyncableVM) EXPECT() *MockStateSyncableVMMockRecorder { } // GetLastStateSummary mocks base method. -func (m *MockStateSyncableVM) GetLastStateSummary() (block.StateSummary, error) { +func (m *MockStateSyncableVM) GetLastStateSummary(arg0 context.Context) (block.StateSummary, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetLastStateSummary") + ret := m.ctrl.Call(m, "GetLastStateSummary", arg0) ret0, _ := ret[0].(block.StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetLastStateSummary indicates an expected call of GetLastStateSummary. -func (mr *MockStateSyncableVMMockRecorder) GetLastStateSummary() *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) GetLastStateSummary(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetLastStateSummary)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetLastStateSummary), arg0) } // GetOngoingSyncStateSummary mocks base method. 
-func (m *MockStateSyncableVM) GetOngoingSyncStateSummary() (block.StateSummary, error) { +func (m *MockStateSyncableVM) GetOngoingSyncStateSummary(arg0 context.Context) (block.StateSummary, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetOngoingSyncStateSummary") + ret := m.ctrl.Call(m, "GetOngoingSyncStateSummary", arg0) ret0, _ := ret[0].(block.StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetOngoingSyncStateSummary indicates an expected call of GetOngoingSyncStateSummary. -func (mr *MockStateSyncableVMMockRecorder) GetOngoingSyncStateSummary() *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) GetOngoingSyncStateSummary(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOngoingSyncStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetOngoingSyncStateSummary)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOngoingSyncStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetOngoingSyncStateSummary), arg0) } // GetStateSummary mocks base method. -func (m *MockStateSyncableVM) GetStateSummary(summaryHeight uint64) (block.StateSummary, error) { +func (m *MockStateSyncableVM) GetStateSummary(arg0 context.Context, arg1 uint64) (block.StateSummary, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStateSummary", summaryHeight) + ret := m.ctrl.Call(m, "GetStateSummary", arg0, arg1) ret0, _ := ret[0].(block.StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // GetStateSummary indicates an expected call of GetStateSummary. 
-func (mr *MockStateSyncableVMMockRecorder) GetStateSummary(summaryHeight interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) GetStateSummary(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetStateSummary), summaryHeight) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).GetStateSummary), arg0, arg1) } // ParseStateSummary mocks base method. -func (m *MockStateSyncableVM) ParseStateSummary(summaryBytes []byte) (block.StateSummary, error) { +func (m *MockStateSyncableVM) ParseStateSummary(arg0 context.Context, arg1 []byte) (block.StateSummary, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ParseStateSummary", summaryBytes) + ret := m.ctrl.Call(m, "ParseStateSummary", arg0, arg1) ret0, _ := ret[0].(block.StateSummary) ret1, _ := ret[1].(error) return ret0, ret1 } // ParseStateSummary indicates an expected call of ParseStateSummary. -func (mr *MockStateSyncableVMMockRecorder) ParseStateSummary(summaryBytes interface{}) *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) ParseStateSummary(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).ParseStateSummary), summaryBytes) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ParseStateSummary", reflect.TypeOf((*MockStateSyncableVM)(nil).ParseStateSummary), arg0, arg1) } // StateSyncEnabled mocks base method. 
-func (m *MockStateSyncableVM) StateSyncEnabled() (bool, error) { +func (m *MockStateSyncableVM) StateSyncEnabled(arg0 context.Context) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StateSyncEnabled") + ret := m.ctrl.Call(m, "StateSyncEnabled", arg0) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // StateSyncEnabled indicates an expected call of StateSyncEnabled. -func (mr *MockStateSyncableVMMockRecorder) StateSyncEnabled() *gomock.Call { +func (mr *MockStateSyncableVMMockRecorder) StateSyncEnabled(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSyncEnabled", reflect.TypeOf((*MockStateSyncableVM)(nil).StateSyncEnabled)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateSyncEnabled", reflect.TypeOf((*MockStateSyncableVM)(nil).StateSyncEnabled), arg0) } diff --git a/avalanchego/snow/engine/snowman/block/mocks/with_verify_context.go b/avalanchego/snow/engine/snowman/block/mocks/with_verify_context.go new file mode 100644 index 00000000..473ca027 --- /dev/null +++ b/avalanchego/snow/engine/snowman/block/mocks/with_verify_context.go @@ -0,0 +1,68 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/engine/snowman/block (interfaces: WithVerifyContext) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + block "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + gomock "github.com/golang/mock/gomock" +) + +// MockWithVerifyContext is a mock of WithVerifyContext interface. +type MockWithVerifyContext struct { + ctrl *gomock.Controller + recorder *MockWithVerifyContextMockRecorder +} + +// MockWithVerifyContextMockRecorder is the mock recorder for MockWithVerifyContext. 
+type MockWithVerifyContextMockRecorder struct { + mock *MockWithVerifyContext +} + +// NewMockWithVerifyContext creates a new mock instance. +func NewMockWithVerifyContext(ctrl *gomock.Controller) *MockWithVerifyContext { + mock := &MockWithVerifyContext{ctrl: ctrl} + mock.recorder = &MockWithVerifyContextMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWithVerifyContext) EXPECT() *MockWithVerifyContextMockRecorder { + return m.recorder +} + +// ShouldVerifyWithContext mocks base method. +func (m *MockWithVerifyContext) ShouldVerifyWithContext(arg0 context.Context) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShouldVerifyWithContext", arg0) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ShouldVerifyWithContext indicates an expected call of ShouldVerifyWithContext. +func (mr *MockWithVerifyContextMockRecorder) ShouldVerifyWithContext(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldVerifyWithContext", reflect.TypeOf((*MockWithVerifyContext)(nil).ShouldVerifyWithContext), arg0) +} + +// VerifyWithContext mocks base method. +func (m *MockWithVerifyContext) VerifyWithContext(arg0 context.Context, arg1 *block.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyWithContext", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyWithContext indicates an expected call of VerifyWithContext. 
+func (mr *MockWithVerifyContextMockRecorder) VerifyWithContext(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyWithContext", reflect.TypeOf((*MockWithVerifyContext)(nil).VerifyWithContext), arg0, arg1) +} diff --git a/avalanchego/snow/engine/snowman/block/state_summary.go b/avalanchego/snow/engine/snowman/block/state_summary.go index 60fd54b9..337a27d9 100644 --- a/avalanchego/snow/engine/snowman/block/state_summary.go +++ b/avalanchego/snow/engine/snowman/block/state_summary.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( + "context" + "github.com/ava-labs/avalanchego/ids" ) @@ -19,9 +21,8 @@ type StateSummary interface { // Bytes returns a byte slice than can be used to reconstruct this summary. Bytes() []byte - // Accept triggers the VM to start state syncing this summary. + // Accept triggers the VM to start state syncing to this summary. // - // The returned boolean will be [true] if the VM has started state sync or - // [false] if the VM has skipped state sync. - Accept() (bool, error) + // It returns the state sync mode selected by the VM. + Accept(context.Context) (StateSyncMode, error) } diff --git a/avalanchego/snow/engine/snowman/block/state_sync_mode.go b/avalanchego/snow/engine/snowman/block/state_sync_mode.go new file mode 100644 index 00000000..79f5c2e8 --- /dev/null +++ b/avalanchego/snow/engine/snowman/block/state_sync_mode.go @@ -0,0 +1,44 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package block + +// StateSyncMode is returned by the StateSyncableVM when a state summary is +// passed to it. It indicates which type of state sync the VM is performing. 
+type StateSyncMode uint8 + +const ( + // StateSyncSkipped indicates that state sync won't be run by the VM. This + // may happen if the VM decides that the state sync is too recent and it + // would be faster to bootstrap the missing blocks. + StateSyncSkipped StateSyncMode = iota + 1 + + // StateSyncStatic indicates that engine should stop and wait for the VM to + // complete state syncing before moving ahead with bootstrapping. + StateSyncStatic + + // StateSyncDynamic indicates that engine should immediately transition + // into bootstrapping and then normal consensus. State sync will proceed + // asynchronously in the VM. + // + // Invariant: If this is returned it is assumed that the VM should be able + // to handle requests from the engine as if the VM is fully synced. + // Specifically, it is required that the invariants specified by + // LastAccepted, GetBlock, ParseBlock, and Block.Verify are maintained. This + // means that when StateSummary.Accept returns, the block that would become + // the last accepted block must be immediately fetchable by the engine. + StateSyncDynamic +) + +func (s StateSyncMode) String() string { + switch s { + case StateSyncSkipped: + return "Skipped" + case StateSyncStatic: + return "Static" + case StateSyncDynamic: + return "Dynamic" + default: + return "Unknown" + } +} diff --git a/avalanchego/snow/engine/snowman/block/state_syncable_vm.go b/avalanchego/snow/engine/snowman/block/state_syncable_vm.go index 8e4e8b95..5c25f37a 100644 --- a/avalanchego/snow/engine/snowman/block/state_syncable_vm.go +++ b/avalanchego/snow/engine/snowman/block/state_syncable_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( + "context" "errors" ) @@ -15,7 +16,7 @@ type StateSyncableVM interface { // StateSyncEnabled indicates whether the state sync is enabled for this VM. 
// If StateSyncableVM is not implemented, as it may happen with a wrapper // VM, StateSyncEnabled should return false, nil - StateSyncEnabled() (bool, error) + StateSyncEnabled(context.Context) (bool, error) // GetOngoingSyncStateSummary returns an in-progress state summary if it // exists. @@ -25,20 +26,20 @@ type StateSyncableVM interface { // sync or start over. // // Returns database.ErrNotFound if there is no in-progress sync. - GetOngoingSyncStateSummary() (StateSummary, error) + GetOngoingSyncStateSummary(context.Context) (StateSummary, error) // GetLastStateSummary returns the latest state summary. // // Returns database.ErrNotFound if no summary is available. - GetLastStateSummary() (StateSummary, error) + GetLastStateSummary(context.Context) (StateSummary, error) // ParseStateSummary parses a state summary out of [summaryBytes]. - ParseStateSummary(summaryBytes []byte) (StateSummary, error) + ParseStateSummary(ctx context.Context, summaryBytes []byte) (StateSummary, error) // GetStateSummary retrieves the state summary that was generated at height // [summaryHeight]. // // Returns database.ErrNotFound if no summary is available at // [summaryHeight]. - GetStateSummary(summaryHeight uint64) (StateSummary, error) + GetStateSummary(ctx context.Context, summaryHeight uint64) (StateSummary, error) } diff --git a/avalanchego/snow/engine/snowman/block/test_batched_vm.go b/avalanchego/snow/engine/snowman/block/test_batched_vm.go index dca36b37..f5a94bb9 100644 --- a/avalanchego/snow/engine/snowman/block/test_batched_vm.go +++ b/avalanchego/snow/engine/snowman/block/test_batched_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block import ( + "context" "errors" "testing" "time" @@ -16,7 +17,7 @@ var ( errGetAncestor = errors.New("unexpectedly called GetAncestor") errBatchedParseBlock = errors.New("unexpectedly called BatchedParseBlock") - _ BatchedChainVM = &TestBatchedVM{} + _ BatchedChainVM = (*TestBatchedVM)(nil) ) // TestBatchedVM is a BatchedVM that is useful for testing. @@ -27,13 +28,17 @@ type TestBatchedVM struct { CantBatchParseBlock bool GetAncestorsF func( + ctx context.Context, blkID ids.ID, maxBlocksNum int, maxBlocksSize int, maxBlocksRetrivalTime time.Duration, ) ([][]byte, error) - BatchedParseBlockF func(blks [][]byte) ([]snowman.Block, error) + BatchedParseBlockF func( + ctx context.Context, + blks [][]byte, + ) ([]snowman.Block, error) } func (vm *TestBatchedVM) Default(cant bool) { @@ -42,13 +47,20 @@ func (vm *TestBatchedVM) Default(cant bool) { } func (vm *TestBatchedVM) GetAncestors( + ctx context.Context, blkID ids.ID, maxBlocksNum int, maxBlocksSize int, maxBlocksRetrivalTime time.Duration, ) ([][]byte, error) { if vm.GetAncestorsF != nil { - return vm.GetAncestorsF(blkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + return vm.GetAncestorsF( + ctx, + blkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) } if vm.CantGetAncestors && vm.T != nil { vm.T.Fatal(errGetAncestor) @@ -56,9 +68,12 @@ func (vm *TestBatchedVM) GetAncestors( return nil, errGetAncestor } -func (vm *TestBatchedVM) BatchedParseBlock(blks [][]byte) ([]snowman.Block, error) { +func (vm *TestBatchedVM) BatchedParseBlock( + ctx context.Context, + blks [][]byte, +) ([]snowman.Block, error) { if vm.BatchedParseBlockF != nil { - return vm.BatchedParseBlockF(blks) + return vm.BatchedParseBlockF(ctx, blks) } if vm.CantBatchParseBlock && vm.T != nil { vm.T.Fatal(errBatchedParseBlock) diff --git a/avalanchego/snow/engine/snowman/block/test_height_indexed_vm.go b/avalanchego/snow/engine/snowman/block/test_height_indexed_vm.go index f2d8996d..c1587a72 100644 --- 
a/avalanchego/snow/engine/snowman/block/test_height_indexed_vm.go +++ b/avalanchego/snow/engine/snowman/block/test_height_indexed_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( + "context" "errors" "testing" @@ -14,7 +15,7 @@ var ( errVerifyHeightIndex = errors.New("unexpectedly called VerifyHeightIndex") errGetBlockIDAtHeight = errors.New("unexpectedly called GetBlockIDAtHeight") - _ HeightIndexedChainVM = &TestHeightIndexedVM{} + _ HeightIndexedChainVM = (*TestHeightIndexedVM)(nil) ) // TestBatchedVM is a BatchedVM that is useful for testing. @@ -24,13 +25,13 @@ type TestHeightIndexedVM struct { CantVerifyHeightIndex bool CantGetBlockIDAtHeight bool - VerifyHeightIndexF func() error - GetBlockIDAtHeightF func(height uint64) (ids.ID, error) + VerifyHeightIndexF func(context.Context) error + GetBlockIDAtHeightF func(ctx context.Context, height uint64) (ids.ID, error) } -func (vm *TestHeightIndexedVM) VerifyHeightIndex() error { +func (vm *TestHeightIndexedVM) VerifyHeightIndex(ctx context.Context) error { if vm.VerifyHeightIndexF != nil { - return vm.VerifyHeightIndexF() + return vm.VerifyHeightIndexF(ctx) } if vm.CantVerifyHeightIndex && vm.T != nil { vm.T.Fatal(errVerifyHeightIndex) @@ -38,9 +39,9 @@ func (vm *TestHeightIndexedVM) VerifyHeightIndex() error { return errVerifyHeightIndex } -func (vm *TestHeightIndexedVM) GetBlockIDAtHeight(height uint64) (ids.ID, error) { +func (vm *TestHeightIndexedVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { if vm.GetBlockIDAtHeightF != nil { - return vm.GetBlockIDAtHeightF(height) + return vm.GetBlockIDAtHeightF(ctx, height) } if vm.CantGetBlockIDAtHeight && vm.T != nil { vm.T.Fatal(errGetAncestor) diff --git a/avalanchego/snow/engine/snowman/block/test_state_summary.go 
b/avalanchego/snow/engine/snowman/block/test_state_summary.go index 139fb390..26cd9fcc 100644 --- a/avalanchego/snow/engine/snowman/block/test_state_summary.go +++ b/avalanchego/snow/engine/snowman/block/test_state_summary.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( + "context" "errors" "testing" @@ -11,7 +12,7 @@ import ( ) var ( - _ StateSummary = &TestStateSummary{} + _ StateSummary = (*TestStateSummary)(nil) errAccept = errors.New("unexpectedly called Accept") ) @@ -23,19 +24,27 @@ type TestStateSummary struct { T *testing.T CantAccept bool - AcceptF func() (bool, error) + AcceptF func(context.Context) (StateSyncMode, error) } -func (s *TestStateSummary) ID() ids.ID { return s.IDV } -func (s *TestStateSummary) Height() uint64 { return s.HeightV } -func (s *TestStateSummary) Bytes() []byte { return s.BytesV } +func (s *TestStateSummary) ID() ids.ID { + return s.IDV +} + +func (s *TestStateSummary) Height() uint64 { + return s.HeightV +} + +func (s *TestStateSummary) Bytes() []byte { + return s.BytesV +} -func (s *TestStateSummary) Accept() (bool, error) { +func (s *TestStateSummary) Accept(ctx context.Context) (StateSyncMode, error) { if s.AcceptF != nil { - return s.AcceptF() + return s.AcceptF(ctx) } if s.CantAccept && s.T != nil { s.T.Fatal(errAccept) } - return false, errAccept + return StateSyncSkipped, errAccept } diff --git a/avalanchego/snow/engine/snowman/block/test_state_syncable_vm.go b/avalanchego/snow/engine/snowman/block/test_state_syncable_vm.go index bec3ebc8..60e179e5 100644 --- a/avalanchego/snow/engine/snowman/block/test_state_syncable_vm.go +++ b/avalanchego/snow/engine/snowman/block/test_state_syncable_vm.go @@ -1,15 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package block import ( + "context" "errors" "testing" ) var ( - _ StateSyncableVM = &TestStateSyncableVM{} + _ StateSyncableVM = (*TestStateSyncableVM)(nil) errStateSyncEnabled = errors.New("unexpectedly called StateSyncEnabled") errStateSyncGetOngoingSummary = errors.New("unexpectedly called StateSyncGetOngoingSummary") @@ -27,16 +28,16 @@ type TestStateSyncableVM struct { CantParseStateSummary, CantGetStateSummary bool - StateSyncEnabledF func() (bool, error) - GetOngoingSyncStateSummaryF func() (StateSummary, error) - GetLastStateSummaryF func() (StateSummary, error) - ParseStateSummaryF func(summaryBytes []byte) (StateSummary, error) - GetStateSummaryF func(uint64) (StateSummary, error) + StateSyncEnabledF func(context.Context) (bool, error) + GetOngoingSyncStateSummaryF func(context.Context) (StateSummary, error) + GetLastStateSummaryF func(context.Context) (StateSummary, error) + ParseStateSummaryF func(ctx context.Context, summaryBytes []byte) (StateSummary, error) + GetStateSummaryF func(ctx context.Context, summaryHeight uint64) (StateSummary, error) } -func (vm *TestStateSyncableVM) StateSyncEnabled() (bool, error) { +func (vm *TestStateSyncableVM) StateSyncEnabled(ctx context.Context) (bool, error) { if vm.StateSyncEnabledF != nil { - return vm.StateSyncEnabledF() + return vm.StateSyncEnabledF(ctx) } if vm.CantStateSyncEnabled && vm.T != nil { vm.T.Fatal(errStateSyncEnabled) @@ -44,9 +45,9 @@ func (vm *TestStateSyncableVM) StateSyncEnabled() (bool, error) { return false, errStateSyncEnabled } -func (vm *TestStateSyncableVM) GetOngoingSyncStateSummary() (StateSummary, error) { +func (vm *TestStateSyncableVM) GetOngoingSyncStateSummary(ctx context.Context) (StateSummary, error) { if vm.GetOngoingSyncStateSummaryF != nil { - return vm.GetOngoingSyncStateSummaryF() + return vm.GetOngoingSyncStateSummaryF(ctx) } if vm.CantStateSyncGetOngoingSummary && vm.T != nil { vm.T.Fatal(errStateSyncGetOngoingSummary) @@ -54,9 
+55,9 @@ func (vm *TestStateSyncableVM) GetOngoingSyncStateSummary() (StateSummary, error return nil, errStateSyncGetOngoingSummary } -func (vm *TestStateSyncableVM) GetLastStateSummary() (StateSummary, error) { +func (vm *TestStateSyncableVM) GetLastStateSummary(ctx context.Context) (StateSummary, error) { if vm.GetLastStateSummaryF != nil { - return vm.GetLastStateSummaryF() + return vm.GetLastStateSummaryF(ctx) } if vm.CantGetLastStateSummary && vm.T != nil { vm.T.Fatal(errGetLastStateSummary) @@ -64,9 +65,9 @@ func (vm *TestStateSyncableVM) GetLastStateSummary() (StateSummary, error) { return nil, errGetLastStateSummary } -func (vm *TestStateSyncableVM) ParseStateSummary(summaryBytes []byte) (StateSummary, error) { +func (vm *TestStateSyncableVM) ParseStateSummary(ctx context.Context, summaryBytes []byte) (StateSummary, error) { if vm.ParseStateSummaryF != nil { - return vm.ParseStateSummaryF(summaryBytes) + return vm.ParseStateSummaryF(ctx, summaryBytes) } if vm.CantParseStateSummary && vm.T != nil { vm.T.Fatal(errParseStateSummary) @@ -74,9 +75,9 @@ func (vm *TestStateSyncableVM) ParseStateSummary(summaryBytes []byte) (StateSumm return nil, errParseStateSummary } -func (vm *TestStateSyncableVM) GetStateSummary(key uint64) (StateSummary, error) { +func (vm *TestStateSyncableVM) GetStateSummary(ctx context.Context, summaryHeight uint64) (StateSummary, error) { if vm.GetStateSummaryF != nil { - return vm.GetStateSummaryF(key) + return vm.GetStateSummaryF(ctx, summaryHeight) } if vm.CantGetStateSummary && vm.T != nil { vm.T.Fatal(errGetStateSummary) diff --git a/avalanchego/snow/engine/snowman/block/test_vm.go b/avalanchego/snow/engine/snowman/block/test_vm.go index 6e0f3877..b2cce5e2 100644 --- a/avalanchego/snow/engine/snowman/block/test_vm.go +++ b/avalanchego/snow/engine/snowman/block/test_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package block import ( + "context" "errors" "github.com/ava-labs/avalanchego/ids" @@ -17,7 +18,7 @@ var ( errGetBlock = errors.New("unexpectedly called GetBlock") errLastAccepted = errors.New("unexpectedly called LastAccepted") - _ ChainVM = &TestVM{} + _ ChainVM = (*TestVM)(nil) ) // TestVM is a ChainVM that is useful for testing. @@ -30,11 +31,11 @@ type TestVM struct { CantSetPreference, CantLastAccepted bool - BuildBlockF func() (snowman.Block, error) - ParseBlockF func([]byte) (snowman.Block, error) - GetBlockF func(ids.ID) (snowman.Block, error) - SetPreferenceF func(ids.ID) error - LastAcceptedF func() (ids.ID, error) + BuildBlockF func(context.Context) (snowman.Block, error) + ParseBlockF func(context.Context, []byte) (snowman.Block, error) + GetBlockF func(context.Context, ids.ID) (snowman.Block, error) + SetPreferenceF func(context.Context, ids.ID) error + LastAcceptedF func(context.Context) (ids.ID, error) } func (vm *TestVM) Default(cant bool) { @@ -47,9 +48,9 @@ func (vm *TestVM) Default(cant bool) { vm.CantLastAccepted = cant } -func (vm *TestVM) BuildBlock() (snowman.Block, error) { +func (vm *TestVM) BuildBlock(ctx context.Context) (snowman.Block, error) { if vm.BuildBlockF != nil { - return vm.BuildBlockF() + return vm.BuildBlockF(ctx) } if vm.CantBuildBlock && vm.T != nil { vm.T.Fatal(errBuildBlock) @@ -57,9 +58,9 @@ func (vm *TestVM) BuildBlock() (snowman.Block, error) { return nil, errBuildBlock } -func (vm *TestVM) ParseBlock(b []byte) (snowman.Block, error) { +func (vm *TestVM) ParseBlock(ctx context.Context, b []byte) (snowman.Block, error) { if vm.ParseBlockF != nil { - return vm.ParseBlockF(b) + return vm.ParseBlockF(ctx, b) } if vm.CantParseBlock && vm.T != nil { vm.T.Fatal(errParseBlock) @@ -67,9 +68,9 @@ func (vm *TestVM) ParseBlock(b []byte) (snowman.Block, error) { return nil, errParseBlock } -func (vm *TestVM) GetBlock(id ids.ID) (snowman.Block, error) { +func (vm *TestVM) GetBlock(ctx 
context.Context, id ids.ID) (snowman.Block, error) { if vm.GetBlockF != nil { - return vm.GetBlockF(id) + return vm.GetBlockF(ctx, id) } if vm.CantGetBlock && vm.T != nil { vm.T.Fatal(errGetBlock) @@ -77,9 +78,9 @@ func (vm *TestVM) GetBlock(id ids.ID) (snowman.Block, error) { return nil, errGetBlock } -func (vm *TestVM) SetPreference(id ids.ID) error { +func (vm *TestVM) SetPreference(ctx context.Context, id ids.ID) error { if vm.SetPreferenceF != nil { - return vm.SetPreferenceF(id) + return vm.SetPreferenceF(ctx, id) } if vm.CantSetPreference && vm.T != nil { vm.T.Fatalf("Unexpectedly called SetPreference") @@ -87,9 +88,9 @@ func (vm *TestVM) SetPreference(id ids.ID) error { return nil } -func (vm *TestVM) LastAccepted() (ids.ID, error) { +func (vm *TestVM) LastAccepted(ctx context.Context) (ids.ID, error) { if vm.LastAcceptedF != nil { - return vm.LastAcceptedF() + return vm.LastAcceptedF(ctx) } if vm.CantLastAccepted && vm.T != nil { vm.T.Fatal(errLastAccepted) diff --git a/avalanchego/snow/engine/snowman/block/vm.go b/avalanchego/snow/engine/snowman/block/vm.go index 063d9150..c096f9f0 100644 --- a/avalanchego/snow/engine/snowman/block/vm.go +++ b/avalanchego/snow/engine/snowman/block/vm.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -33,19 +35,19 @@ type ChainVM interface { // // If the VM doesn't want to issue a new block, an error should be // returned. - BuildBlock() (snowman.Block, error) + BuildBlock(context.Context) (snowman.Block, error) // Notify the VM of the currently preferred block. // // This should always be a block that has no children known to consensus. 
- SetPreference(ids.ID) error + SetPreference(ctx context.Context, blkID ids.ID) error // LastAccepted returns the ID of the last accepted block. // // If no blocks have been accepted by consensus yet, it is assumed there is // a definitionally accepted block, the Genesis block, that will be // returned. - LastAccepted() (ids.ID, error) + LastAccepted(context.Context) (ids.ID, error) } // Getter defines the functionality for fetching a block by its ID. @@ -59,7 +61,7 @@ type Getter interface { // accepted by the consensus engine should be able to be fetched. It is not // required for blocks that have been rejected by the consensus engine to be // able to be fetched. - GetBlock(ids.ID) (snowman.Block, error) + GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) } // Parser defines the functionality for fetching a block by its bytes. @@ -70,5 +72,5 @@ type Parser interface { // bytes. // // It is expected for all historical blocks to be parseable. - ParseBlock([]byte) (snowman.Block, error) + ParseBlock(ctx context.Context, blockBytes []byte) (snowman.Block, error) } diff --git a/avalanchego/snow/engine/snowman/bootstrap/block_job.go b/avalanchego/snow/engine/snowman/bootstrap/block_job.go index 6e557edb..f782a804 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/block_job.go +++ b/avalanchego/snow/engine/snowman/bootstrap/block_job.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap import ( + "context" "errors" "fmt" @@ -17,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common/queue" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" ) var errMissingDependenciesOnAccept = errors.New("attempting to accept a block with missing dependencies") @@ -27,8 +29,8 @@ type parser struct { vm block.ChainVM } -func (p *parser) Parse(blkBytes []byte) (queue.Job, error) { - blk, err := p.vm.ParseBlock(blkBytes) +func (p *parser) Parse(ctx context.Context, blkBytes []byte) (queue.Job, error) { + blk, err := p.vm.ParseBlock(ctx, blkBytes) if err != nil { return nil, err } @@ -50,26 +52,29 @@ type blockJob struct { vm block.Getter } -func (b *blockJob) ID() ids.ID { return b.blk.ID() } -func (b *blockJob) MissingDependencies() (ids.Set, error) { - missing := ids.Set{} +func (b *blockJob) ID() ids.ID { + return b.blk.ID() +} + +func (b *blockJob) MissingDependencies(ctx context.Context) (set.Set[ids.ID], error) { + missing := set.Set[ids.ID]{} parentID := b.blk.Parent() - if parent, err := b.vm.GetBlock(parentID); err != nil || parent.Status() != choices.Accepted { + if parent, err := b.vm.GetBlock(ctx, parentID); err != nil || parent.Status() != choices.Accepted { missing.Add(parentID) } return missing, nil } -func (b *blockJob) HasMissingDependencies() (bool, error) { +func (b *blockJob) HasMissingDependencies(ctx context.Context) (bool, error) { parentID := b.blk.Parent() - if parent, err := b.vm.GetBlock(parentID); err != nil || parent.Status() != choices.Accepted { + if parent, err := b.vm.GetBlock(ctx, parentID); err != nil || parent.Status() != choices.Accepted { return true, nil } return false, nil } -func (b *blockJob) Execute() error { - hasMissingDeps, err := b.HasMissingDependencies() +func (b *blockJob) Execute(ctx context.Context) error { + hasMissingDeps, err := b.HasMissingDependencies(ctx) if err != nil { 
return err } @@ -84,7 +89,7 @@ func (b *blockJob) Execute() error { return fmt.Errorf("attempting to execute block with status %s", status) case choices.Processing: blkID := b.blk.ID() - if err := b.blk.Verify(); err != nil { + if err := b.blk.Verify(ctx); err != nil { b.log.Error("block failed verification during bootstrapping", zap.Stringer("blkID", blkID), zap.Error(err), @@ -97,7 +102,7 @@ func (b *blockJob) Execute() error { zap.Stringer("blkID", blkID), zap.Uint64("blkHeight", b.blk.Height()), ) - if err := b.blk.Accept(); err != nil { + if err := b.blk.Accept(ctx); err != nil { b.log.Debug("failed to accept block during bootstrapping", zap.Stringer("blkID", blkID), zap.Error(err), @@ -107,4 +112,7 @@ func (b *blockJob) Execute() error { } return nil } -func (b *blockJob) Bytes() []byte { return b.blk.Bytes() } + +func (b *blockJob) Bytes() []byte { + return b.blk.Bytes() +} diff --git a/avalanchego/snow/engine/snowman/bootstrap/bootstrapper.go b/avalanchego/snow/engine/snowman/bootstrap/bootstrapper.go index a6617914..134ce8ca 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/avalanchego/snow/engine/snowman/bootstrap/bootstrapper.go @@ -1,22 +1,26 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bootstrap import ( + "context" "errors" "fmt" "math" + "sync" "time" "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" ) @@ -25,7 +29,7 @@ import ( const bootstrappingDelay = 10 * time.Second var ( - _ common.BootstrapableEngine = &bootstrapper{} + _ common.BootstrapableEngine = (*bootstrapper)(nil) errUnexpectedTimeout = errors.New("unexpected timeout fired") ) @@ -70,10 +74,14 @@ type bootstrapper struct { // nodeID will be added back to [fetchFrom] unless the Ancestors message is // empty. This is to attempt to prevent requesting containers from that peer // again. - fetchFrom ids.NodeIDSet + fetchFrom set.Set[ids.NodeID] + + // bootstrappedOnce ensures that the [Bootstrapped] callback is only invoked + // once, even if bootstrapping is retried. 
+ bootstrappedOnce sync.Once } -func New(config Config, onFinished func(lastReqID uint32) error) (common.BootstrapableEngine, error) { +func New(ctx context.Context, config Config, onFinished func(ctx context.Context, lastReqID uint32) error) (common.BootstrapableEngine, error) { metrics, err := newMetrics("bs", config.Ctx.Registerer) if err != nil { return nil, err @@ -87,7 +95,7 @@ func New(config Config, onFinished func(lastReqID uint32) error) (common.Bootstr PutHandler: common.NewNoOpPutHandler(config.Ctx.Log), QueryHandler: common.NewNoOpQueryHandler(config.Ctx.Log), ChitsHandler: common.NewNoOpChitsHandler(config.Ctx.Log), - AppHandler: common.NewNoOpAppHandler(config.Ctx.Log), + AppHandler: config.VM, Fetcher: common.Fetcher{ OnFinished: onFinished, @@ -101,7 +109,7 @@ func New(config Config, onFinished func(lastReqID uint32) error) (common.Bootstr numDropped: b.numDropped, vm: b.VM, } - if err := b.Blocked.SetParser(b.parser); err != nil { + if err := b.Blocked.SetParser(ctx, b.parser); err != nil { return nil, err } @@ -111,21 +119,24 @@ func New(config Config, onFinished func(lastReqID uint32) error) (common.Bootstr return b, nil } -func (b *bootstrapper) Start(startReqID uint32) error { +func (b *bootstrapper) Start(ctx context.Context, startReqID uint32) error { b.Ctx.Log.Info("starting bootstrapper") - b.Ctx.SetState(snow.Bootstrapping) - if err := b.VM.SetState(snow.Bootstrapping); err != nil { + b.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, + }) + if err := b.VM.SetState(ctx, snow.Bootstrapping); err != nil { return fmt.Errorf("failed to notify VM that bootstrapping has started: %w", err) } // Set the starting height - lastAcceptedID, err := b.VM.LastAccepted() + lastAcceptedID, err := b.VM.LastAccepted(ctx) if err != nil { return fmt.Errorf("couldn't get last accepted ID: %w", err) } - lastAccepted, err := b.VM.GetBlock(lastAcceptedID) + lastAccepted, err := b.VM.GetBlock(ctx, 
lastAcceptedID) if err != nil { return fmt.Errorf("couldn't get last accepted block: %w", err) } @@ -137,12 +148,12 @@ func (b *bootstrapper) Start(startReqID uint32) error { } b.started = true - return b.Startup() + return b.Startup(ctx) } // Ancestors handles the receipt of multiple containers. Should be received in // response to a GetAncestors message to [nodeID] with request ID [requestID] -func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, blks [][]byte) error { +func (b *bootstrapper) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, blks [][]byte) error { // Make sure this is in response to a request we made wantedBlkID, ok := b.OutstandingRequests.Remove(nodeID, requestID) if !ok { // this message isn't in response to a request we made @@ -163,7 +174,7 @@ func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, blks [][]b b.markUnavailable(nodeID) // Send another request for this - return b.fetch(wantedBlkID) + return b.fetch(ctx, wantedBlkID) } // This node has responded - so add it back into the set @@ -178,14 +189,14 @@ func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, blks [][]b ) } - blocks, err := block.BatchedParseBlock(b.VM, blks) + blocks, err := block.BatchedParseBlock(ctx, b.VM, blks) if err != nil { // the provided blocks couldn't be parsed b.Ctx.Log.Debug("failed to parse blocks in Ancestors", zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), zap.Error(err), ) - return b.fetch(wantedBlkID) + return b.fetch(ctx, wantedBlkID) } if len(blocks) == 0 { @@ -193,7 +204,7 @@ func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, blks [][]b zap.Stringer("nodeID", nodeID), zap.Uint32("requestID", requestID), ) - return b.fetch(wantedBlkID) + return b.fetch(ctx, wantedBlkID) } requestedBlock := blocks[0] @@ -202,17 +213,17 @@ func (b *bootstrapper) Ancestors(nodeID ids.NodeID, requestID uint32, blks [][]b zap.Stringer("expectedBlkID", wantedBlkID), 
zap.Stringer("blkID", actualID), ) - return b.fetch(wantedBlkID) + return b.fetch(ctx, wantedBlkID) } blockSet := make(map[ids.ID]snowman.Block, len(blocks)) for _, block := range blocks[1:] { blockSet[block.ID()] = block } - return b.process(requestedBlock, blockSet) + return b.process(ctx, requestedBlock, blockSet) } -func (b *bootstrapper) GetAncestorsFailed(nodeID ids.NodeID, requestID uint32) error { +func (b *bootstrapper) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { blkID, ok := b.OutstandingRequests.Remove(nodeID, requestID) if !ok { b.Ctx.Log.Debug("unexpectedly called GetAncestorsFailed", @@ -226,15 +237,15 @@ func (b *bootstrapper) GetAncestorsFailed(nodeID ids.NodeID, requestID uint32) e b.fetchFrom.Add(nodeID) // Send another request for this - return b.fetch(blkID) + return b.fetch(ctx, blkID) } -func (b *bootstrapper) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { - if err := b.VM.Connected(nodeID, nodeVersion); err != nil { +func (b *bootstrapper) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + if err := b.VM.Connected(ctx, nodeID, nodeVersion); err != nil { return err } - if err := b.StartupTracker.Connected(nodeID, nodeVersion); err != nil { + if err := b.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { return err } // Ensure fetchFrom reflects proper validator list @@ -247,15 +258,15 @@ func (b *bootstrapper) Connected(nodeID ids.NodeID, nodeVersion *version.Applica } b.started = true - return b.Startup() + return b.Startup(ctx) } -func (b *bootstrapper) Disconnected(nodeID ids.NodeID) error { - if err := b.VM.Disconnected(nodeID); err != nil { +func (b *bootstrapper) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + if err := b.VM.Disconnected(ctx, nodeID); err != nil { return err } - if err := b.StartupTracker.Disconnected(nodeID); err != nil { + if err := b.StartupTracker.Disconnected(ctx, nodeID); err != 
nil { return err } @@ -263,30 +274,42 @@ func (b *bootstrapper) Disconnected(nodeID ids.NodeID) error { return nil } -func (b *bootstrapper) Timeout() error { +func (b *bootstrapper) Timeout(ctx context.Context) error { if !b.awaitingTimeout { return errUnexpectedTimeout } b.awaitingTimeout = false - if !b.Config.Subnet.IsBootstrapped() { - return b.Restart(true) + if !b.Config.BootstrapTracker.IsBootstrapped() { + return b.Restart(ctx, true) } b.fetchETA.Set(0) - return b.OnFinished(b.Config.SharedCfg.RequestID) + return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) } -func (b *bootstrapper) Gossip() error { return nil } +func (*bootstrapper) Gossip(context.Context) error { + return nil +} -func (b *bootstrapper) Shutdown() error { +func (b *bootstrapper) Shutdown(ctx context.Context) error { b.Ctx.Log.Info("shutting down bootstrapper") - return b.VM.Shutdown() + return b.VM.Shutdown(ctx) } -func (b *bootstrapper) Notify(common.Message) error { return nil } +func (b *bootstrapper) Notify(_ context.Context, msg common.Message) error { + if msg != common.StateSyncDone { + b.Ctx.Log.Warn("received an unexpected message from the VM", + zap.Stringer("msg", msg), + ) + return nil + } -func (b *bootstrapper) HealthCheck() (interface{}, error) { - vmIntf, vmErr := b.VM.HealthCheck() + b.Ctx.StateSyncing.Set(false) + return nil +} + +func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { + vmIntf, vmErr := b.VM.HealthCheck(ctx) intf := map[string]interface{}{ "consensus": struct{}{}, "vm": vmIntf, @@ -294,9 +317,11 @@ func (b *bootstrapper) HealthCheck() (interface{}, error) { return intf, vmErr } -func (b *bootstrapper) GetVM() common.VM { return b.VM } +func (b *bootstrapper) GetVM() common.VM { + return b.VM +} -func (b *bootstrapper) ForceAccepted(acceptedContainerIDs []ids.ID) error { +func (b *bootstrapper) ForceAccepted(ctx context.Context, acceptedContainerIDs []ids.ID) error { pendingContainerIDs := b.Blocked.MissingIDs() // Initialize 
the fetch from set to the currently preferred peers @@ -315,9 +340,9 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs []ids.ID) error { // TODO: if `GetBlock` returns an error other than // `database.ErrNotFound`, then the error should be propagated. - blk, err := b.VM.GetBlock(blkID) + blk, err := b.VM.GetBlock(ctx, blkID) if err != nil { - if err := b.fetch(blkID); err != nil { + if err := b.fetch(ctx, blkID); err != nil { return err } continue @@ -330,24 +355,24 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs []ids.ID) error { // Process received blocks for _, blk := range toProcess { - if err := b.process(blk, nil); err != nil { + if err := b.process(ctx, blk, nil); err != nil { return err } } - return b.checkFinish() + return b.checkFinish(ctx) } // Get block [blkID] and its ancestors from a validator -func (b *bootstrapper) fetch(blkID ids.ID) error { +func (b *bootstrapper) fetch(ctx context.Context, blkID ids.ID) error { // Make sure we haven't already requested this block if b.OutstandingRequests.Contains(blkID) { return nil } // Make sure we don't already have this block - if _, err := b.VM.GetBlock(blkID); err == nil { - return b.checkFinish() + if _, err := b.VM.GetBlock(ctx, blkID); err == nil { + return b.checkFinish(ctx) } validatorID, ok := b.fetchFrom.Peek() @@ -361,7 +386,7 @@ func (b *bootstrapper) fetch(blkID ids.ID) error { b.Config.SharedCfg.RequestID++ b.OutstandingRequests.Add(validatorID, b.Config.SharedCfg.RequestID, blkID) - b.Config.Sender.SendGetAncestors(validatorID, b.Config.SharedCfg.RequestID, blkID) // request block and ancestors + b.Config.Sender.SendGetAncestors(ctx, validatorID, b.Config.SharedCfg.RequestID, blkID) // request block and ancestors return nil } @@ -395,7 +420,7 @@ func (b *bootstrapper) Clear() error { // // If [blk]'s height is <= the last accepted height, then it will be removed // from the missingIDs set. 
-func (b *bootstrapper) process(blk snowman.Block, processingBlocks map[ids.ID]snowman.Block) error { +func (b *bootstrapper) process(ctx context.Context, blk snowman.Block, processingBlocks map[ids.ID]snowman.Block) error { for { blkID := blk.ID() if b.Halted() { @@ -421,7 +446,7 @@ func (b *bootstrapper) process(blk snowman.Block, processingBlocks map[ids.ID]sn if err := b.Blocked.Commit(); err != nil { return err } - return b.checkFinish() + return b.checkFinish(ctx) } // If this block is going to be accepted, make sure to update the @@ -430,7 +455,7 @@ func (b *bootstrapper) process(blk snowman.Block, processingBlocks map[ids.ID]sn b.tipHeight = blkHeight } - pushed, err := b.Blocked.Push(&blockJob{ + pushed, err := b.Blocked.Push(ctx, &blockJob{ parser: b.parser, log: b.Ctx.Log, numAccepted: b.numAccepted, @@ -448,7 +473,7 @@ func (b *bootstrapper) process(blk snowman.Block, processingBlocks map[ids.ID]sn if err := b.Blocked.Commit(); err != nil { return err } - return b.checkFinish() + return b.checkFinish(ctx) } // We added a new block to the queue, so track that it was fetched @@ -492,7 +517,7 @@ func (b *bootstrapper) process(blk snowman.Block, processingBlocks map[ids.ID]sn // If the parent is not available in processing blocks, attempt to get // the block from the vm - parent, err = b.VM.GetBlock(parentID) + parent, err = b.VM.GetBlock(ctx, parentID) if err == nil { blk = parent continue @@ -502,20 +527,20 @@ func (b *bootstrapper) process(blk snowman.Block, processingBlocks map[ids.ID]sn // If the block wasn't able to be acquired immediately, attempt to fetch // it b.Blocked.AddMissingID(parentID) - if err := b.fetch(parentID); err != nil { + if err := b.fetch(ctx, parentID); err != nil { return err } if err := b.Blocked.Commit(); err != nil { return err } - return b.checkFinish() + return b.checkFinish(ctx) } } // checkFinish repeatedly executes pending transactions and requests new frontier vertices until there aren't any new ones // after which it 
finishes the bootstrap process -func (b *bootstrapper) checkFinish() error { +func (b *bootstrapper) checkFinish(ctx context.Context) error { if numPending := b.Blocked.NumMissingIDs(); numPending != 0 { return nil } @@ -535,11 +560,11 @@ func (b *bootstrapper) checkFinish() error { } executedBlocks, err := b.Blocked.ExecuteAll( + ctx, b.Config.Ctx, b, b.Config.SharedCfg.Restarted, - b.Ctx.ConsensusAcceptor, - b.Ctx.DecisionAcceptor, + b.Ctx.BlockAcceptor, ) if err != nil || b.Halted() { return err @@ -552,21 +577,23 @@ func (b *bootstrapper) checkFinish() error { // so that the bootstrapping process will terminate even as new blocks are // being issued. if b.Config.RetryBootstrap && executedBlocks > 0 && executedBlocks < previouslyExecuted/2 { - return b.Restart(true) + return b.Restart(ctx, true) } // If there is an additional callback, notify them that this chain has been // synced. if b.Bootstrapped != nil { - b.Bootstrapped() + b.bootstrappedOnce.Do(func() { + b.Bootstrapped() + }) } // Notify the subnet that this chain is synced - b.Config.Subnet.Bootstrapped(b.Ctx.ChainID) + b.Config.BootstrapTracker.Bootstrapped(b.Ctx.ChainID) // If the subnet hasn't finished bootstrapping, this chain should remain // syncing. 
- if !b.Config.Subnet.IsBootstrapped() { + if !b.Config.BootstrapTracker.IsBootstrapped() { if !b.Config.SharedCfg.Restarted { b.Ctx.Log.Info("waiting for the remaining chains in this subnet to finish syncing") } else { @@ -579,5 +606,5 @@ func (b *bootstrapper) checkFinish() error { return nil } b.fetchETA.Set(0) - return b.OnFinished(b.Config.SharedCfg.RequestID) + return b.OnFinished(ctx, b.Config.SharedCfg.RequestID) } diff --git a/avalanchego/snow/engine/snowman/bootstrap/bootstrapper_test.go b/avalanchego/snow/engine/snowman/bootstrap/bootstrapper_test.go index 2a70c123..2fc50adb 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/avalanchego/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap import ( "bytes" + "context" "errors" "testing" @@ -15,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" @@ -25,6 +27,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) @@ -45,16 +48,20 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.Tes vm.Default(true) isBootstrapped := false - subnet := &common.SubnetTest{ - T: t, - IsBootstrappedF: func() bool { return isBootstrapped }, - BootstrappedF: func(ids.ID) { isBootstrapped = true }, + bootstrapTracker := &common.BootstrapTrackerTest{ + T: t, + IsBootstrappedF: func() 
bool { + return isBootstrapped + }, + BootstrappedF: func(ids.ID) { + isBootstrapped = true + }, } sender.CantSendGetAcceptedFrontier = false peer := ids.GenerateTestNodeID() - if err := peers.AddWeight(peer, 1); err != nil { + if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -62,19 +69,18 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.Tes startupTracker := tracker.NewStartup(peerTracker, peers.Weight()/2+1) peers.RegisterCallbackListener(startupTracker) - if err := startupTracker.Connected(peer, version.CurrentApp); err != nil { + if err := startupTracker.Connected(context.Background(), peer, version.CurrentApp); err != nil { t.Fatal(err) } commonConfig := common.Config{ Ctx: ctx, - Validators: peers, Beacons: peers, SampleK: peers.Len(), Alpha: peers.Weight()/2 + 1, StartupTracker: startupTracker, Sender: sender, - Subnet: subnet, + BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, @@ -118,13 +124,12 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { commonCfg := common.Config{ Ctx: snow.DefaultConsensusContextTest(), - Validators: peers, Beacons: peers, SampleK: sampleK, Alpha: alpha, StartupTracker: startupTracker, Sender: sender, - Subnet: &common.SubnetTest{}, + BootstrapTracker: &common.BootstrapTrackerTest{}, Timer: &common.TimerTest{}, AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, @@ -152,43 +157,53 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { BytesV: blkBytes0, } vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blk0.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { require.Equal(blk0.ID(), blkID) return blk0, nil 
} // create bootstrapper - dummyCallback := func(lastReqID uint32) error { cfg.Ctx.SetState(snow.NormalOp); return nil } - bs, err := New(cfg, dummyCallback) + dummyCallback := func(context.Context, uint32) error { + cfg.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + } + bs, err := New(context.Background(), cfg, dummyCallback) require.NoError(err) vm.CantSetState = false vm.CantConnected = true - vm.ConnectedF = func(ids.NodeID, *version.Application) error { return nil } + vm.ConnectedF = func(context.Context, ids.NodeID, *version.Application) error { + return nil + } frontierRequested := false sender.CantSendGetAcceptedFrontier = false - sender.SendGetAcceptedFrontierF = func(ss ids.NodeIDSet, u uint32) { + sender.SendGetAcceptedFrontierF = func(context.Context, set.Set[ids.NodeID], uint32) { frontierRequested = true } // attempt starting bootstrapper with no stake connected. Bootstrapper should stall. - require.NoError(bs.Start(0)) + require.NoError(bs.Start(context.Background(), 0)) require.False(frontierRequested) // attempt starting bootstrapper with not enough stake connected. Bootstrapper should stall. vdr0 := ids.GenerateTestNodeID() - require.NoError(peers.AddWeight(vdr0, startupAlpha/2)) - require.NoError(bs.Connected(vdr0, version.CurrentApp)) + require.NoError(peers.Add(vdr0, nil, ids.Empty, startupAlpha/2)) + require.NoError(bs.Connected(context.Background(), vdr0, version.CurrentApp)) - require.NoError(bs.Start(0)) + require.NoError(bs.Start(context.Background(), 0)) require.False(frontierRequested) // finally attempt starting bootstrapper with enough stake connected. Frontiers should be requested. 
vdr := ids.GenerateTestNodeID() - require.NoError(peers.AddWeight(vdr, startupAlpha)) - require.NoError(bs.Connected(vdr, version.CurrentApp)) + require.NoError(peers.Add(vdr, nil, ids.Empty, startupAlpha)) + require.NoError(bs.Connected(context.Background(), vdr, version.CurrentApp)) require.True(frontierRequested) } @@ -221,28 +236,37 @@ func TestBootstrapperSingleFrontier(t *testing.T) { } vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blk0.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { require.Equal(t, blk0.ID(), blkID) return blk0, nil } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } acceptedIDs := []ids.ID{blkID1} - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID1: return blk1, nil @@ -253,7 +277,7 @@ func TestBootstrapperSingleFrontier(t *testing.T) { panic(database.ErrNotFound) } } - vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { switch { case bytes.Equal(blkBytes, blkBytes1): return blk1, nil @@ -264,11 +288,11 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return nil, errUnknownBlock } - err = bs.ForceAccepted(acceptedIDs) + err = bs.ForceAccepted(context.Background(), acceptedIDs) switch { 
case err != nil: // should finish t.Fatal(err) - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case blk1.Status() != choices.Accepted: t.Fatalf("Block should be accepted") @@ -319,28 +343,37 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { vm.CantSetState = false vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blk0.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { require.Equal(t, blk0.ID(), blkID) return blk0, nil } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) if err != nil { t.Fatal(err) } - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } acceptedIDs := []ids.ID{blkID2} parsedBlk1 := false - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: return blk0, nil @@ -356,7 +389,7 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { panic(database.ErrNotFound) } } - vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { switch { case bytes.Equal(blkBytes, blkBytes0): return blk0, nil @@ -372,7 +405,7 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { } requestID := new(uint32) - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = 
func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) } @@ -385,34 +418,34 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { } vm.CantSetState = false - if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk1 + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request blk1 t.Fatal(err) } oldReqID := *requestID - if err := bs.Ancestors(peerID, *requestID+1, [][]byte{blkBytes1}); err != nil { // respond with wrong request ID + if err := bs.Ancestors(context.Background(), peerID, *requestID+1, [][]byte{blkBytes1}); err != nil { // respond with wrong request ID t.Fatal(err) } else if oldReqID != *requestID { t.Fatal("should not have sent new request") } - if err := bs.Ancestors(ids.NodeID{1, 2, 3}, *requestID, [][]byte{blkBytes1}); err != nil { // respond from wrong peer + if err := bs.Ancestors(context.Background(), ids.NodeID{1, 2, 3}, *requestID, [][]byte{blkBytes1}); err != nil { // respond from wrong peer t.Fatal(err) } else if oldReqID != *requestID { t.Fatal("should not have sent new request") } - if err := bs.Ancestors(peerID, *requestID, [][]byte{blkBytes0}); err != nil { // respond with wrong block + if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes0}); err != nil { // respond with wrong block t.Fatal(err) } else if oldReqID == *requestID { t.Fatal("should have sent new request") } - err = bs.Ancestors(peerID, *requestID, [][]byte{blkBytes1}) + err = bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1}) switch { case err != nil: // respond with right block t.Fatal(err) - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case blk0.Status() != choices.Accepted: t.Fatalf("Block should be accepted") @@ -474,22 +507,31 @@ func 
TestBootstrapperPartialFetch(t *testing.T) { } vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blk0.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { require.Equal(t, blk0.ID(), blkID) return blk0, nil } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -497,7 +539,7 @@ func TestBootstrapperPartialFetch(t *testing.T) { parsedBlk1 := false parsedBlk2 := false - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: return blk0, nil @@ -518,7 +560,7 @@ func TestBootstrapperPartialFetch(t *testing.T) { panic(database.ErrNotFound) } } - vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { switch { case bytes.Equal(blkBytes, blkBytes0): return blk0, nil @@ -539,7 +581,7 @@ func TestBootstrapperPartialFetch(t *testing.T) { requestID := new(uint32) requested := ids.Empty - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) } @@ -552,24 +594,24 @@ func TestBootstrapperPartialFetch(t *testing.T) { requested = 
vtxID } - if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk2 + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request blk2 t.Fatal(err) } - if err := bs.Ancestors(peerID, *requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2 + if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2 t.Fatal(err) } else if requested != blkID1 { t.Fatal("should have requested blk1") } - if err := bs.Ancestors(peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1 + if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1 t.Fatal(err) } else if requested != blkID1 { t.Fatal("should not have requested another block") } switch { - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case blk0.Status() != choices.Accepted: t.Fatalf("Block should be accepted") @@ -632,22 +674,31 @@ func TestBootstrapperEmptyResponse(t *testing.T) { } vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blk0.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { require.Equal(t, blk0.ID(), blkID) return blk0, nil } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } 
@@ -655,7 +706,7 @@ func TestBootstrapperEmptyResponse(t *testing.T) { parsedBlk1 := false parsedBlk2 := false - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: return blk0, nil @@ -676,7 +727,7 @@ func TestBootstrapperEmptyResponse(t *testing.T) { panic(database.ErrNotFound) } } - vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { switch { case bytes.Equal(blkBytes, blkBytes0): return blk0, nil @@ -698,14 +749,14 @@ func TestBootstrapperEmptyResponse(t *testing.T) { requestedVdr := ids.EmptyNodeID requestID := uint32(0) requestedBlock := ids.Empty - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, blkID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { requestedVdr = vdr requestID = reqID requestedBlock = blkID } // should request blk2 - err = bs.ForceAccepted(acceptedIDs) + err = bs.ForceAccepted(context.Background(), acceptedIDs) switch { case err != nil: t.Fatal(err) @@ -722,7 +773,7 @@ func TestBootstrapperEmptyResponse(t *testing.T) { newPeerID = ids.GenerateTestNodeID() bs.(*bootstrapper).fetchFrom.Add(newPeerID) - if err := bs.Ancestors(peerID, requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2 + if err := bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2 t.Fatal(err) } else if requestedBlock != blkID1 { t.Fatal("should have requested blk1") @@ -731,7 +782,7 @@ func TestBootstrapperEmptyResponse(t *testing.T) { peerToBlacklist := requestedVdr // respond with empty - err = bs.Ancestors(peerToBlacklist, requestID, nil) + err = bs.Ancestors(context.Background(), peerToBlacklist, requestID, nil) switch { case err != nil: t.Fatal(err) @@ -741,12 +792,12 @@ func TestBootstrapperEmptyResponse(t *testing.T) 
{ t.Fatal("should have requested blk1") } - if err := bs.Ancestors(requestedVdr, requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1 + if err := bs.Ancestors(context.Background(), requestedVdr, requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1 t.Fatal(err) } switch { - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case blk0.Status() != choices.Accepted: t.Fatalf("Block should be accepted") @@ -812,21 +863,30 @@ func TestBootstrapperAncestors(t *testing.T) { vm.CantSetState = false vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blk0.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { require.Equal(t, blk0.ID(), blkID) return blk0, nil } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) if err != nil { t.Fatal(err) } - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -834,7 +894,7 @@ func TestBootstrapperAncestors(t *testing.T) { parsedBlk1 := false parsedBlk2 := false - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: return blk0, nil @@ -855,7 +915,7 @@ func TestBootstrapperAncestors(t *testing.T) { panic(database.ErrNotFound) } } - vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { switch 
{ case bytes.Equal(blkBytes, blkBytes0): return blk0, nil @@ -876,7 +936,7 @@ func TestBootstrapperAncestors(t *testing.T) { requestID := new(uint32) requested := ids.Empty - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) } @@ -889,18 +949,18 @@ func TestBootstrapperAncestors(t *testing.T) { requested = vtxID } - if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk2 + if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request blk2 t.Fatal(err) } - if err := bs.Ancestors(peerID, *requestID, [][]byte{blkBytes2, blkBytes1}); err != nil { // respond with blk2 and blk1 + if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2, blkBytes1}); err != nil { // respond with blk2 and blk1 t.Fatal(err) } else if requested != blkID2 { t.Fatal("should not have requested another block") } switch { - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case blk0.Status() != choices.Accepted: t.Fatalf("Block should be accepted") @@ -950,27 +1010,36 @@ func TestBootstrapperFinalized(t *testing.T) { } vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blk0.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { require.Equal(t, blk0.ID(), blkID) return blk0, nil } bs, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ 
+ Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) if err != nil { t.Fatal(err) } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } parsedBlk1 := false parsedBlk2 := false - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: return blk0, nil @@ -989,7 +1058,7 @@ func TestBootstrapperFinalized(t *testing.T) { panic(database.ErrNotFound) } } - vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { switch { case bytes.Equal(blkBytes, blkBytes0): return blk0, nil @@ -1007,14 +1076,14 @@ func TestBootstrapperFinalized(t *testing.T) { } requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) } requestIDs[vtxID] = reqID } - if err := bs.ForceAccepted([]ids.ID{blkID1, blkID2}); err != nil { // should request blk2 and blk1 + if err := bs.ForceAccepted(context.Background(), []ids.ID{blkID1, blkID2}); err != nil { // should request blk2 and blk1 t.Fatal(err) } @@ -1023,12 +1092,12 @@ func TestBootstrapperFinalized(t *testing.T) { t.Fatalf("should have requested blk2") } - if err := bs.Ancestors(peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1}); err != nil { t.Fatal(err) } switch { - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case blk0.Status() != choices.Accepted: t.Fatalf("Block 
should be accepted") @@ -1100,12 +1169,14 @@ func TestRestartBootstrapping(t *testing.T) { } vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blk0.ID(), nil } + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } parsedBlk1 := false parsedBlk2 := false parsedBlk3 := false parsedBlk4 := false - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: return blk0, nil @@ -1134,7 +1205,7 @@ func TestRestartBootstrapping(t *testing.T) { panic(database.ErrNotFound) } } - vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { switch { case bytes.Equal(blkBytes, blkBytes0): return blk0, nil @@ -1160,8 +1231,15 @@ func TestRestartBootstrapping(t *testing.T) { } bsIntf, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) if err != nil { t.Fatal(err) @@ -1172,12 +1250,12 @@ func TestRestartBootstrapping(t *testing.T) { } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) } @@ -1185,7 +1263,7 @@ func TestRestartBootstrapping(t *testing.T) { } // Force Accept blk3 - if err := bs.ForceAccepted([]ids.ID{blkID3}); err != nil { // should request blk3 + if err := 
bs.ForceAccepted(context.Background(), []ids.ID{blkID3}); err != nil { // should request blk3 t.Fatal(err) } @@ -1194,7 +1272,7 @@ func TestRestartBootstrapping(t *testing.T) { t.Fatalf("should have requested blk3") } - if err := bs.Ancestors(peerID, reqID, [][]byte{blkBytes3, blkBytes2}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blkBytes3, blkBytes2}); err != nil { t.Fatal(err) } @@ -1208,7 +1286,7 @@ func TestRestartBootstrapping(t *testing.T) { } requestIDs = map[ids.ID]uint32{} - if err := bs.ForceAccepted([]ids.ID{blkID4}); err != nil { + if err := bs.ForceAccepted(context.Background(), []ids.ID{blkID4}); err != nil { t.Fatal(err) } @@ -1221,20 +1299,20 @@ func TestRestartBootstrapping(t *testing.T) { t.Fatal("should have requested blk4 as new accepted frontier") } - if err := bs.Ancestors(peerID, blk1RequestID, [][]byte{blkBytes1}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, blk1RequestID, [][]byte{blkBytes1}); err != nil { t.Fatal(err) } - if config.Ctx.GetState() == snow.NormalOp { + if config.Ctx.State.Get().State == snow.NormalOp { t.Fatal("Bootstrapping should not have finished with outstanding request for blk4") } - if err := bs.Ancestors(peerID, blk4RequestID, [][]byte{blkBytes4}); err != nil { + if err := bs.Ancestors(context.Background(), peerID, blk4RequestID, [][]byte{blkBytes4}); err != nil { t.Fatal(err) } switch { - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case blk0.Status() != choices.Accepted: t.Fatalf("Block should be accepted") @@ -1270,8 +1348,10 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { BytesV: utils.RandomBytes(32), } - vm.LastAcceptedF = func() (ids.ID, error) { return blk1.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk1.ID(), nil + } + vm.GetBlockF = 
func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blk0.ID(): return nil, database.ErrNotFound @@ -1282,7 +1362,7 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { panic(database.ErrNotFound) } } - vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { switch { case bytes.Equal(blkBytes, blk0.Bytes()): return blk0, nil @@ -1294,8 +1374,15 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { } bsIntf, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) if err != nil { t.Fatal(err) @@ -1306,12 +1393,12 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } requestIDs := map[ids.ID]uint32{} - sender.SendGetAncestorsF = func(vdr ids.NodeID, reqID uint32, vtxID ids.ID) { + sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { if vdr != peerID { t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) } @@ -1319,7 +1406,7 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { } // Force Accept, the already transitively accepted, blk0 - if err := bs.ForceAccepted([]ids.ID{blk0.ID()}); err != nil { // should request blk0 + if err := bs.ForceAccepted(context.Background(), []ids.ID{blk0.ID()}); err != nil { // should request blk0 t.Fatal(err) } @@ -1328,12 +1415,12 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { t.Fatalf("should have requested blk0") } - if err := bs.Ancestors(peerID, reqID, [][]byte{blk0.Bytes()}); err != nil { + if err := 
bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blk0.Bytes()}); err != nil { t.Fatal(err) } switch { - case config.Ctx.GetState() != snow.NormalOp: + case config.Ctx.State.Get().State != snow.NormalOp: t.Fatalf("Bootstrapping should have finished") case blk0.Status() != choices.Processing: t.Fatalf("Block should be processing") @@ -1372,11 +1459,20 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { BytesV: utils.RandomBytes(32), } - vm.LastAcceptedF = func() (ids.ID, error) { return blk0.ID(), nil } + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } bsIntf, err := New( + context.Background(), config, - func(lastReqID uint32) error { config.Ctx.SetState(snow.NormalOp); return nil }, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, ) if err != nil { t.Fatal(err) @@ -1386,12 +1482,12 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { t.Fatal("unexpected bootstrapper type") } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blk0.ID(): return blk0, nil case blk1.ID(): - bs.Halt() + bs.Halt(context.Background()) return blk1, nil case blk2.ID(): return blk2, nil @@ -1402,11 +1498,11 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { } vm.CantSetState = false - if err := bs.Start(0); err != nil { + if err := bs.Start(context.Background(), 0); err != nil { t.Fatal(err) } - if err := bs.ForceAccepted([]ids.ID{blk2.ID()}); err != nil { + if err := bs.ForceAccepted(context.Background(), []ids.ID{blk2.ID()}); err != nil { t.Fatal(err) } diff --git a/avalanchego/snow/engine/snowman/bootstrap/config.go b/avalanchego/snow/engine/snowman/bootstrap/config.go index c32b87e1..0c05feb7 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/config.go +++ 
b/avalanchego/snow/engine/snowman/bootstrap/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap diff --git a/avalanchego/snow/engine/snowman/bootstrap/metrics.go b/avalanchego/snow/engine/snowman/bootstrap/metrics.go index 6de5d062..91260df3 100644 --- a/avalanchego/snow/engine/snowman/bootstrap/metrics.go +++ b/avalanchego/snow/engine/snowman/bootstrap/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bootstrap diff --git a/avalanchego/snow/engine/snowman/config.go b/avalanchego/snow/engine/snowman/config.go index 53eca5d6..32f92380 100644 --- a/avalanchego/snow/engine/snowman/config.go +++ b/avalanchego/snow/engine/snowman/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/avalanchego/snow/engine/snowman/config_test.go b/avalanchego/snow/engine/snowman/config_test.go index a7f1d40c..c01731cd 100644 --- a/avalanchego/snow/engine/snowman/config_test.go +++ b/avalanchego/snow/engine/snowman/config_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/validators" ) func DefaultConfigs() Config { @@ -15,7 +16,7 @@ func DefaultConfigs() Config { return Config{ Ctx: commonCfg.Ctx, Sender: commonCfg.Sender, - Validators: commonCfg.Validators, + Validators: validators.NewSet(), VM: &block.TestVM{}, Params: snowball.Parameters{ K: 1, diff --git a/avalanchego/snow/engine/snowman/engine.go b/avalanchego/snow/engine/snowman/engine.go index 0c34cb4c..37985f5b 100644 --- a/avalanchego/snow/engine/snowman/engine.go +++ b/avalanchego/snow/engine/snowman/engine.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/avalanchego/snow/engine/snowman/getter/getter.go b/avalanchego/snow/engine/snowman/getter/getter.go index c35ddae0..429826bf 100644 --- a/avalanchego/snow/engine/snowman/getter/getter.go +++ b/avalanchego/snow/engine/snowman/getter/getter.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package getter import ( + "context" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" @@ -16,7 +18,7 @@ import ( ) // Get requests are always served, regardless node state (bootstrapping or normal operations). 
-var _ common.AllGetsServer = &getter{} +var _ common.AllGetsServer = (*getter)(nil) func New( vm block.ChainVM, @@ -51,7 +53,7 @@ type getter struct { getAncestorsBlks metric.Averager } -func (gh *getter) GetStateSummaryFrontier(nodeID ids.NodeID, requestID uint32) error { +func (gh *getter) GetStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { // Note: we do not check if gh.ssVM.StateSyncEnabled since we want all // nodes, including those disabling state sync to serve state summaries if // these are available @@ -64,7 +66,7 @@ func (gh *getter) GetStateSummaryFrontier(nodeID ids.NodeID, requestID uint32) e return nil } - summary, err := gh.ssVM.GetLastStateSummary() + summary, err := gh.ssVM.GetLastStateSummary(ctx) if err != nil { gh.log.Debug("dropping GetStateSummaryFrontier message", zap.String("reason", "couldn't get state summary frontier"), @@ -75,15 +77,15 @@ func (gh *getter) GetStateSummaryFrontier(nodeID ids.NodeID, requestID uint32) e return nil } - gh.sender.SendStateSummaryFrontier(nodeID, requestID, summary.Bytes()) + gh.sender.SendStateSummaryFrontier(ctx, nodeID, requestID, summary.Bytes()) return nil } -func (gh *getter) GetAcceptedStateSummary(nodeID ids.NodeID, requestID uint32, heights []uint64) error { +func (gh *getter) GetAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, heights []uint64) error { // If there are no requested heights, then we can return the result // immediately, regardless of if the underlying VM implements state sync. 
if len(heights) == 0 { - gh.sender.SendAcceptedStateSummary(nodeID, requestID, nil) + gh.sender.SendAcceptedStateSummary(ctx, nodeID, requestID, nil) return nil } @@ -101,7 +103,7 @@ func (gh *getter) GetAcceptedStateSummary(nodeID ids.NodeID, requestID uint32, h summaryIDs := make([]ids.ID, 0, len(heights)) for _, height := range heights { - summary, err := gh.ssVM.GetStateSummary(height) + summary, err := gh.ssVM.GetStateSummary(ctx, height) if err == block.ErrStateSyncableVMNotImplemented { gh.log.Debug("dropping GetAcceptedStateSummary message", zap.String("reason", "state sync not supported"), @@ -120,32 +122,34 @@ func (gh *getter) GetAcceptedStateSummary(nodeID ids.NodeID, requestID uint32, h summaryIDs = append(summaryIDs, summary.ID()) } - gh.sender.SendAcceptedStateSummary(nodeID, requestID, summaryIDs) + gh.sender.SendAcceptedStateSummary(ctx, nodeID, requestID, summaryIDs) return nil } -func (gh *getter) GetAcceptedFrontier(nodeID ids.NodeID, requestID uint32) error { - lastAccepted, err := gh.vm.LastAccepted() +func (gh *getter) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + lastAccepted, err := gh.vm.LastAccepted(ctx) if err != nil { return err } - gh.sender.SendAcceptedFrontier(nodeID, requestID, []ids.ID{lastAccepted}) + gh.sender.SendAcceptedFrontier(ctx, nodeID, requestID, []ids.ID{lastAccepted}) return nil } -func (gh *getter) GetAccepted(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { +func (gh *getter) GetAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { acceptedIDs := make([]ids.ID, 0, len(containerIDs)) for _, blkID := range containerIDs { - if blk, err := gh.vm.GetBlock(blkID); err == nil && blk.Status() == choices.Accepted { + blk, err := gh.vm.GetBlock(ctx, blkID) + if err == nil && blk.Status() == choices.Accepted { acceptedIDs = append(acceptedIDs, blkID) } } - gh.sender.SendAccepted(nodeID, requestID, acceptedIDs) + 
gh.sender.SendAccepted(ctx, nodeID, requestID, acceptedIDs) return nil } -func (gh *getter) GetAncestors(nodeID ids.NodeID, requestID uint32, blkID ids.ID) error { +func (gh *getter) GetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) error { ancestorsBytes, err := block.GetAncestors( + ctx, gh.vm, blkID, gh.cfg.AncestorsMaxContainersSent, @@ -164,12 +168,12 @@ func (gh *getter) GetAncestors(nodeID ids.NodeID, requestID uint32, blkID ids.ID } gh.getAncestorsBlks.Observe(float64(len(ancestorsBytes))) - gh.sender.SendAncestors(nodeID, requestID, ancestorsBytes) + gh.sender.SendAncestors(ctx, nodeID, requestID, ancestorsBytes) return nil } -func (gh *getter) Get(nodeID ids.NodeID, requestID uint32, blkID ids.ID) error { - blk, err := gh.vm.GetBlock(blkID) +func (gh *getter) Get(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) error { + blk, err := gh.vm.GetBlock(ctx, blkID) if err != nil { // If we failed to get the block, that means either an unexpected error // has occurred, [vdr] is not following the protocol, or the @@ -184,6 +188,6 @@ func (gh *getter) Get(nodeID ids.NodeID, requestID uint32, blkID ids.ID) error { } // Respond to the validator with the fetched block and the same requestID. - gh.sender.SendPut(nodeID, requestID, blk.Bytes()) + gh.sender.SendPut(ctx, nodeID, requestID, blk.Bytes()) return nil } diff --git a/avalanchego/snow/engine/snowman/getter/getter_test.go b/avalanchego/snow/engine/snowman/getter/getter_test.go index dcf5d349..40e6fc03 100644 --- a/avalanchego/snow/engine/snowman/getter/getter_test.go +++ b/avalanchego/snow/engine/snowman/getter/getter_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package getter import ( + "context" "errors" "testing" @@ -19,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/set" ) var errUnknownBlock = errors.New("unknown block") @@ -46,27 +48,30 @@ func testSetup( sender.Default(true) isBootstrapped := false - subnet := &common.SubnetTest{ - T: t, - IsBootstrappedF: func() bool { return isBootstrapped }, - BootstrappedF: func(ids.ID) { isBootstrapped = true }, + bootstrapTracker := &common.BootstrapTrackerTest{ + T: t, + IsBootstrappedF: func() bool { + return isBootstrapped + }, + BootstrappedF: func(ids.ID) { + isBootstrapped = true + }, } sender.CantSendGetAcceptedFrontier = false peer := ids.GenerateTestNodeID() - if err := peers.AddWeight(peer, 1); err != nil { + if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { t.Fatal(err) } commonConfig := common.Config{ Ctx: ctx, - Validators: peers, Beacons: peers, SampleK: peers.Len(), Alpha: peers.Weight()/2 + 1, Sender: sender, - Subnet: subnet, + BootstrapTracker: bootstrapTracker, Timer: &common.TimerTest{}, AncestorsMaxContainersSent: 2000, AncestorsMaxContainersReceived: 2000, @@ -93,8 +98,10 @@ func TestAcceptedFrontier(t *testing.T) { BytesV: []byte{1, 2, 3}, } vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blkID, nil } - vm.GetBlockF = func(bID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blkID, nil + } + vm.GetBlockF = func(_ context.Context, bID ids.ID) (snowman.Block, error) { require.Equal(t, blkID, bID) return dummyBlk, nil } @@ -109,11 +116,11 @@ func TestAcceptedFrontier(t *testing.T) { } var accepted []ids.ID - sender.SendAcceptedFrontierF = func(_ ids.NodeID, _ uint32, frontier []ids.ID) { + sender.SendAcceptedFrontierF = func(_ context.Context, _ ids.NodeID, _ uint32, frontier 
[]ids.ID) { accepted = frontier } - if err := bs.GetAcceptedFrontier(ids.EmptyNodeID, 0); err != nil { + if err := bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0); err != nil { t.Fatal(err) } @@ -145,8 +152,10 @@ func TestFilterAccepted(t *testing.T) { }} vm.CantLastAccepted = false - vm.LastAcceptedF = func() (ids.ID, error) { return blk1.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk1.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { require.Equal(t, blk1.ID(), blkID) return blk1, nil } @@ -161,7 +170,7 @@ func TestFilterAccepted(t *testing.T) { } blkIDs := []ids.ID{blkID0, blkID1, blkID2} - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blkID0: return blk0, nil @@ -175,15 +184,15 @@ func TestFilterAccepted(t *testing.T) { } var accepted []ids.ID - sender.SendAcceptedF = func(_ ids.NodeID, _ uint32, frontier []ids.ID) { + sender.SendAcceptedF = func(_ context.Context, _ ids.NodeID, _ uint32, frontier []ids.ID) { accepted = frontier } - if err := bs.GetAccepted(ids.EmptyNodeID, 0, blkIDs); err != nil { + if err := bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, blkIDs); err != nil { t.Fatal(err) } - acceptedSet := ids.Set{} + acceptedSet := set.Set[ids.ID]{} acceptedSet.Add(accepted...) if acceptedSet.Len() != 2 { diff --git a/avalanchego/snow/engine/snowman/issuer.go b/avalanchego/snow/engine/snowman/issuer.go index e0ea2204..f7446167 100644 --- a/avalanchego/snow/engine/snowman/issuer.go +++ b/avalanchego/snow/engine/snowman/issuer.go @@ -1,11 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/set" ) // issuer issues [blk] into to consensus after its dependencies are met. @@ -13,24 +16,26 @@ type issuer struct { t *Transitive blk snowman.Block abandoned bool - deps ids.Set + deps set.Set[ids.ID] } -func (i *issuer) Dependencies() ids.Set { return i.deps } +func (i *issuer) Dependencies() set.Set[ids.ID] { + return i.deps +} // Mark that a dependency has been met -func (i *issuer) Fulfill(id ids.ID) { +func (i *issuer) Fulfill(ctx context.Context, id ids.ID) { i.deps.Remove(id) - i.Update() + i.Update(ctx) } // Abandon the attempt to issue [i.block] -func (i *issuer) Abandon(ids.ID) { +func (i *issuer) Abandon(ctx context.Context, _ ids.ID) { if !i.abandoned { blkID := i.blk.ID() i.t.removeFromPending(i.blk) i.t.addToNonVerifieds(i.blk) - i.t.blocked.Abandon(blkID) + i.t.blocked.Abandon(ctx, blkID) // Tracks performance statistics i.t.metrics.numRequests.Set(float64(i.t.blkReqs.Len())) @@ -40,10 +45,10 @@ func (i *issuer) Abandon(ids.ID) { i.abandoned = true } -func (i *issuer) Update() { +func (i *issuer) Update(ctx context.Context) { if i.abandoned || i.deps.Len() != 0 || i.t.errs.Errored() { return } // Issue the block into consensus - i.t.errs.Add(i.t.deliver(i.blk)) + i.t.errs.Add(i.t.deliver(ctx, i.blk)) } diff --git a/avalanchego/snow/engine/snowman/memory_block.go b/avalanchego/snow/engine/snowman/memory_block.go index 2ab28809..957b22d1 100644 --- a/avalanchego/snow/engine/snowman/memory_block.go +++ b/avalanchego/snow/engine/snowman/memory_block.go @@ -1,12 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( + "context" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" ) +var _ snowman.Block = (*memoryBlock)(nil) + // memoryBlock wraps a snowman Block to manage non-verified blocks type memoryBlock struct { snowman.Block @@ -16,15 +20,15 @@ type memoryBlock struct { } // Accept accepts the underlying block & removes sibling subtrees -func (mb *memoryBlock) Accept() error { +func (mb *memoryBlock) Accept(ctx context.Context) error { mb.tree.RemoveSubtree(mb.Parent()) mb.metrics.numNonVerifieds.Set(float64(mb.tree.Len())) - return mb.Block.Accept() + return mb.Block.Accept(ctx) } // Reject rejects the underlying block & removes child subtrees -func (mb *memoryBlock) Reject() error { +func (mb *memoryBlock) Reject(ctx context.Context) error { mb.tree.RemoveSubtree(mb.ID()) mb.metrics.numNonVerifieds.Set(float64(mb.tree.Len())) - return mb.Block.Reject() + return mb.Block.Reject(ctx) } diff --git a/avalanchego/snow/engine/snowman/metrics.go b/avalanchego/snow/engine/snowman/metrics.go index 279b05c3..19fb4229 100644 --- a/avalanchego/snow/engine/snowman/metrics.go +++ b/avalanchego/snow/engine/snowman/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman diff --git a/avalanchego/snow/engine/snowman/mocks/engine.go b/avalanchego/snow/engine/snowman/mocks/engine.go deleted file mode 100644 index d7f4916c..00000000 --- a/avalanchego/snow/engine/snowman/mocks/engine.go +++ /dev/null @@ -1,580 +0,0 @@ -// Code generated by mockery v2.12.1. DO NOT EDIT. 
- -package mocks - -import ( - consensussnowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" - common "github.com/ava-labs/avalanchego/snow/engine/common" - - ids "github.com/ava-labs/avalanchego/ids" - - mock "github.com/stretchr/testify/mock" - - snow "github.com/ava-labs/avalanchego/snow" - - testing "testing" - - time "time" - - version "github.com/ava-labs/avalanchego/version" -) - -// Engine is an autogenerated mock type for the Engine type -type Engine struct { - mock.Mock -} - -// Accepted provides a mock function with given fields: validatorID, requestID, containerIDs -func (_m *Engine) Accepted(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - ret := _m.Called(validatorID, requestID, containerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AcceptedFrontier provides a mock function with given fields: validatorID, requestID, containerIDs -func (_m *Engine) AcceptedFrontier(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - ret := _m.Called(validatorID, requestID, containerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AcceptedStateSummary provides a mock function with given fields: validatorID, requestID, summaryIDs -func (_m *Engine) AcceptedStateSummary(validatorID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { - ret := _m.Called(validatorID, requestID, summaryIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, summaryIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Ancestors provides a mock function with given fields: validatorID, requestID, containers -func (_m *Engine) 
Ancestors(validatorID ids.NodeID, requestID uint32, containers [][]byte) error { - ret := _m.Called(validatorID, requestID, containers) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, [][]byte) error); ok { - r0 = rf(validatorID, requestID, containers) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppGossip provides a mock function with given fields: nodeID, msg -func (_m *Engine) AppGossip(nodeID ids.NodeID, msg []byte) error { - ret := _m.Called(nodeID, msg) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, []byte) error); ok { - r0 = rf(nodeID, msg) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppRequest provides a mock function with given fields: nodeID, requestID, deadline, request -func (_m *Engine) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - ret := _m.Called(nodeID, requestID, deadline, request) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, time.Time, []byte) error); ok { - r0 = rf(nodeID, requestID, deadline, request) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppRequestFailed provides a mock function with given fields: nodeID, requestID -func (_m *Engine) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { - ret := _m.Called(nodeID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(nodeID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// AppResponse provides a mock function with given fields: nodeID, requestID, response -func (_m *Engine) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { - ret := _m.Called(nodeID, requestID, response) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []byte) error); ok { - r0 = rf(nodeID, requestID, response) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Chits provides a mock function with given fields: validatorID, requestID, containerIDs -func 
(_m *Engine) Chits(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - ret := _m.Called(validatorID, requestID, containerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Connected provides a mock function with given fields: id, nodeVersion -func (_m *Engine) Connected(id ids.NodeID, nodeVersion *version.Application) error { - ret := _m.Called(id, nodeVersion) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, *version.Application) error); ok { - r0 = rf(id, nodeVersion) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Context provides a mock function with given fields: -func (_m *Engine) Context() *snow.ConsensusContext { - ret := _m.Called() - - var r0 *snow.ConsensusContext - if rf, ok := ret.Get(0).(func() *snow.ConsensusContext); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*snow.ConsensusContext) - } - } - - return r0 -} - -// Disconnected provides a mock function with given fields: id -func (_m *Engine) Disconnected(id ids.NodeID) error { - ret := _m.Called(id) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID) error); ok { - r0 = rf(id) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Get provides a mock function with given fields: validatorID, requestID, containerID -func (_m *Engine) Get(validatorID ids.NodeID, requestID uint32, containerID ids.ID) error { - ret := _m.Called(validatorID, requestID, containerID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAccepted provides a mock function with given fields: validatorID, requestID, containerIDs -func (_m *Engine) GetAccepted(validatorID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { - ret := 
_m.Called(validatorID, requestID, containerIDs) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerIDs) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetAcceptedFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedFrontier provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetAcceptedFrontier(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedFrontierFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetAcceptedFrontierFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedStateSummary provides a mock function with given fields: validatorID, requestID, keys -func (_m *Engine) GetAcceptedStateSummary(validatorID ids.NodeID, requestID uint32, keys []uint64) error { - ret := _m.Called(validatorID, requestID, keys) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []uint64) error); ok { - r0 = rf(validatorID, requestID, keys) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAcceptedStateSummaryFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) 
GetAcceptedStateSummaryFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAncestors provides a mock function with given fields: validatorID, requestID, containerID -func (_m *Engine) GetAncestors(validatorID ids.NodeID, requestID uint32, containerID ids.ID) error { - ret := _m.Called(validatorID, requestID, containerID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetAncestorsFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetAncestorsFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetBlock provides a mock function with given fields: _a0 -func (_m *Engine) GetBlock(_a0 ids.ID) (consensussnowman.Block, error) { - ret := _m.Called(_a0) - - var r0 consensussnowman.Block - if rf, ok := ret.Get(0).(func(ids.ID) consensussnowman.Block); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(consensussnowman.Block) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(ids.ID) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GetFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - 
return r0 -} - -// GetStateSummaryFrontier provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetStateSummaryFrontier(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetStateSummaryFrontierFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) GetStateSummaryFrontierFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// GetVM provides a mock function with given fields: -func (_m *Engine) GetVM() common.VM { - ret := _m.Called() - - var r0 common.VM - if rf, ok := ret.Get(0).(func() common.VM); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(common.VM) - } - } - - return r0 -} - -// Gossip provides a mock function with given fields: -func (_m *Engine) Gossip() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Halt provides a mock function with given fields: -func (_m *Engine) Halt() { - _m.Called() -} - -// HealthCheck provides a mock function with given fields: -func (_m *Engine) HealthCheck() (interface{}, error) { - ret := _m.Called() - - var r0 interface{} - if rf, ok := ret.Get(0).(func() interface{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Notify provides a mock function with given fields: _a0 -func (_m *Engine) Notify(_a0 
common.Message) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(common.Message) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// PullQuery provides a mock function with given fields: validatorID, requestID, containerID -func (_m *Engine) PullQuery(validatorID ids.NodeID, requestID uint32, containerID ids.ID) error { - ret := _m.Called(validatorID, requestID, containerID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, ids.ID) error); ok { - r0 = rf(validatorID, requestID, containerID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// PushQuery provides a mock function with given fields: validatorID, requestID, container -func (_m *Engine) PushQuery(validatorID ids.NodeID, requestID uint32, container []byte) error { - ret := _m.Called(validatorID, requestID, container) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []byte) error); ok { - r0 = rf(validatorID, requestID, container) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Put provides a mock function with given fields: validatorID, requestID, container -func (_m *Engine) Put(validatorID ids.NodeID, requestID uint32, container []byte) error { - ret := _m.Called(validatorID, requestID, container) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []byte) error); ok { - r0 = rf(validatorID, requestID, container) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// QueryFailed provides a mock function with given fields: validatorID, requestID -func (_m *Engine) QueryFailed(validatorID ids.NodeID, requestID uint32) error { - ret := _m.Called(validatorID, requestID) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32) error); ok { - r0 = rf(validatorID, requestID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Shutdown provides a mock function with given fields: -func (_m *Engine) Shutdown() error { - ret := _m.Called() - - var r0 error - 
if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Start provides a mock function with given fields: startReqID -func (_m *Engine) Start(startReqID uint32) error { - ret := _m.Called(startReqID) - - var r0 error - if rf, ok := ret.Get(0).(func(uint32) error); ok { - r0 = rf(startReqID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// StateSummaryFrontier provides a mock function with given fields: validatorID, requestID, summary -func (_m *Engine) StateSummaryFrontier(validatorID ids.NodeID, requestID uint32, summary []byte) error { - ret := _m.Called(validatorID, requestID, summary) - - var r0 error - if rf, ok := ret.Get(0).(func(ids.NodeID, uint32, []byte) error); ok { - r0 = rf(validatorID, requestID, summary) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Timeout provides a mock function with given fields: -func (_m *Engine) Timeout() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// NewEngine creates a new instance of Engine. It also registers the testing.TB interface on the mock and a cleanup function to assert the mocks expectations. -func NewEngine(t testing.TB) *Engine { - mock := &Engine{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/avalanchego/snow/engine/snowman/syncer/config.go b/avalanchego/snow/engine/snowman/syncer/config.go index 0348b5e8..7b2d59f5 100644 --- a/avalanchego/snow/engine/snowman/syncer/config.go +++ b/avalanchego/snow/engine/snowman/syncer/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package syncer @@ -49,7 +49,8 @@ func NewConfig( if len(stateSyncerIDs) != 0 { stateSyncBeacons = validators.NewSet() for _, peerID := range stateSyncerIDs { - if err := stateSyncBeacons.AddWeight(peerID, 1); err != nil { + // Invariant: We never use the TxID or BLS keys populated here. + if err := stateSyncBeacons.Add(peerID, nil, ids.Empty, 1); err != nil { return Config{}, err } } diff --git a/avalanchego/snow/engine/snowman/syncer/state_syncer.go b/avalanchego/snow/engine/snowman/syncer/state_syncer.go index 11e890ae..fe75d137 100644 --- a/avalanchego/snow/engine/snowman/syncer/state_syncer.go +++ b/avalanchego/snow/engine/snowman/syncer/state_syncer.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package syncer import ( + "context" "fmt" - "time" stdmath "math" @@ -13,15 +13,17 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" ) -var _ common.StateSyncer = &stateSyncer{} +var _ common.StateSyncer = (*stateSyncer)(nil) // summary content as received from network, along with accumulated weight. 
type weightedSummary struct { @@ -47,7 +49,7 @@ type stateSyncer struct { requestID uint32 stateSyncVM block.StateSyncableVM - onDoneStateSyncing func(lastReqID uint32) error + onDoneStateSyncing func(ctx context.Context, lastReqID uint32) error // we track the (possibly nil) local summary to help engine // choosing among multiple validated summaries @@ -59,27 +61,27 @@ type stateSyncer struct { frontierSeeders validators.Set // IDs of validators we should request state summary frontier from. // Will be consumed seeders are reached out for frontier. - targetSeeders ids.NodeIDSet + targetSeeders set.Set[ids.NodeID] // IDs of validators we requested a state summary frontier from // but haven't received a reply yet. ID is cleared if/when reply arrives. - pendingSeeders ids.NodeIDSet + pendingSeeders set.Set[ids.NodeID] // IDs of validators that failed to respond with their state summary frontier - failedSeeders ids.NodeIDSet + failedSeeders set.Set[ids.NodeID] // IDs of validators we should request filtering the accepted state summaries from - targetVoters ids.NodeIDSet + targetVoters set.Set[ids.NodeID] // IDs of validators we requested filtering the accepted state summaries from // but haven't received a reply yet. ID is cleared if/when reply arrives. 
- pendingVoters ids.NodeIDSet + pendingVoters set.Set[ids.NodeID] // IDs of validators that failed to respond with their filtered accepted state summaries - failedVoters ids.NodeIDSet + failedVoters set.Set[ids.NodeID] // summaryID --> (summary, weight) weightedSummaries map[ids.ID]*weightedSummary // summaries received may be different even if referring to the same height - // we keep a list of deduplcated height ready for voting - summariesHeights map[uint64]struct{} + // we keep a list of deduplicated height ready for voting + summariesHeights set.Set[uint64] uniqueSummariesHeights []uint64 // number of times the state sync has been attempted @@ -88,7 +90,7 @@ type stateSyncer struct { func New( cfg Config, - onDoneStateSyncing func(lastReqID uint32) error, + onDoneStateSyncing func(ctx context.Context, lastReqID uint32) error, ) common.StateSyncer { ssVM, _ := cfg.VM.(block.StateSyncableVM) return &stateSyncer{ @@ -99,13 +101,13 @@ func New( PutHandler: common.NewNoOpPutHandler(cfg.Ctx.Log), QueryHandler: common.NewNoOpQueryHandler(cfg.Ctx.Log), ChitsHandler: common.NewNoOpChitsHandler(cfg.Ctx.Log), - AppHandler: common.NewNoOpAppHandler(cfg.Ctx.Log), + AppHandler: cfg.VM, stateSyncVM: ssVM, onDoneStateSyncing: onDoneStateSyncing, } } -func (ss *stateSyncer) StateSummaryFrontier(nodeID ids.NodeID, requestID uint32, summaryBytes []byte) error { +func (ss *stateSyncer) StateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryBytes []byte) error { // ignores any late responses if requestID != ss.requestID { ss.Ctx.Log.Debug("received out-of-sync StateSummaryFrontier message", @@ -129,14 +131,14 @@ func (ss *stateSyncer) StateSummaryFrontier(nodeID ids.NodeID, requestID uint32, // retrieve summary ID and register frontier; // make sure next beacons are reached out // even in case invalid summaries are received - if summary, err := ss.stateSyncVM.ParseStateSummary(summaryBytes); err == nil { + if summary, err := 
ss.stateSyncVM.ParseStateSummary(ctx, summaryBytes); err == nil { ss.weightedSummaries[summary.ID()] = &weightedSummary{ summary: summary, } height := summary.Height() - if _, exists := ss.summariesHeights[height]; !exists { - ss.summariesHeights[height] = struct{}{} + if !ss.summariesHeights.Contains(height) { + ss.summariesHeights.Add(height) ss.uniqueSummariesHeights = append(ss.uniqueSummariesHeights, height) } } else { @@ -149,10 +151,10 @@ func (ss *stateSyncer) StateSummaryFrontier(nodeID ids.NodeID, requestID uint32, ) } - return ss.receivedStateSummaryFrontier() + return ss.receivedStateSummaryFrontier(ctx) } -func (ss *stateSyncer) GetStateSummaryFrontierFailed(nodeID ids.NodeID, requestID uint32) error { +func (ss *stateSyncer) GetStateSummaryFrontierFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { // ignores any late responses if requestID != ss.requestID { ss.Ctx.Log.Debug("received out-of-sync GetStateSummaryFrontierFailed message", @@ -167,11 +169,11 @@ func (ss *stateSyncer) GetStateSummaryFrontierFailed(nodeID ids.NodeID, requestI ss.failedSeeders.Add(nodeID) ss.pendingSeeders.Remove(nodeID) - return ss.receivedStateSummaryFrontier() + return ss.receivedStateSummaryFrontier(ctx) } -func (ss *stateSyncer) receivedStateSummaryFrontier() error { - ss.sendGetStateSummaryFrontiers() +func (ss *stateSyncer) receivedStateSummaryFrontier(ctx context.Context) error { + ss.sendGetStateSummaryFrontiers(ctx) // still waiting on requests if ss.pendingSeeders.Len() != 0 { @@ -186,10 +188,7 @@ func (ss *stateSyncer) receivedStateSummaryFrontier() error { // problems will go away and we can collect a qualified frontier. 
// We assume the frontier is qualified after an alpha proportion of frontier seeders have responded frontierAlpha := float64(ss.frontierSeeders.Weight()*ss.Alpha) / float64(ss.StateSyncBeacons.Weight()) - failedBeaconWeight, err := ss.StateSyncBeacons.SubsetWeight(ss.failedSeeders) - if err != nil { - return err - } + failedBeaconWeight := ss.StateSyncBeacons.SubsetWeight(ss.failedSeeders) frontierStake := ss.frontierSeeders.Weight() - failedBeaconWeight if float64(frontierStake) < frontierAlpha { @@ -200,16 +199,16 @@ func (ss *stateSyncer) receivedStateSummaryFrontier() error { if ss.Config.RetryBootstrap { ss.Ctx.Log.Debug("restarting state sync") - return ss.restart() + return ss.restart(ctx) } } ss.requestID++ - ss.sendGetAcceptedStateSummaries() + ss.sendGetAcceptedStateSummaries(ctx) return nil } -func (ss *stateSyncer) AcceptedStateSummary(nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { +func (ss *stateSyncer) AcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) error { // ignores any late responses if requestID != ss.requestID { ss.Ctx.Log.Debug("received out-of-sync AcceptedStateSummary message", @@ -230,7 +229,7 @@ func (ss *stateSyncer) AcceptedStateSummary(nodeID ids.NodeID, requestID uint32, // Mark that we received a response from [nodeID] ss.pendingVoters.Remove(nodeID) - weight, _ := ss.StateSyncBeacons.GetWeight(nodeID) + weight := ss.StateSyncBeacons.GetWeight(nodeID) for _, summaryID := range summaryIDs { ws, ok := ss.weightedSummaries[summaryID] if !ok { @@ -254,7 +253,7 @@ func (ss *stateSyncer) AcceptedStateSummary(nodeID ids.NodeID, requestID uint32, ws.weight = newWeight } - ss.sendGetAcceptedStateSummaries() + ss.sendGetAcceptedStateSummaries(ctx) // wait on pending responses if ss.pendingVoters.Len() != 0 { @@ -278,10 +277,7 @@ func (ss *stateSyncer) AcceptedStateSummary(nodeID ids.NodeID, requestID uint32, size := len(ss.weightedSummaries) if size == 0 { // retry the state 
sync if the weight is not enough to state sync - failedBeaconWeight, err := ss.StateSyncBeacons.SubsetWeight(ss.failedVoters) - if err != nil { - return err - } + failedBeaconWeight := ss.StateSyncBeacons.SubsetWeight(ss.failedVoters) // if we had too many timeouts when asking for validator votes, we should restart // state sync hoping for the network problems to go away; otherwise, we received @@ -296,7 +292,7 @@ func (ss *stateSyncer) AcceptedStateSummary(nodeID ids.NodeID, requestID uint32, zap.Int("numFailedSyncers", ss.failedVoters.Len()), zap.Int("numAttempts", ss.attempts), ) - return ss.restart() + return ss.restart(ctx) } ss.Ctx.Log.Info("skipping state sync", @@ -304,27 +300,42 @@ func (ss *stateSyncer) AcceptedStateSummary(nodeID ids.NodeID, requestID uint32, ) // if we do not restart state sync, move on to bootstrapping. - return ss.onDoneStateSyncing(ss.requestID) + return ss.onDoneStateSyncing(ctx, ss.requestID) } preferredStateSummary := ss.selectSyncableStateSummary() - ss.Ctx.Log.Info("selected summary start state sync", + syncMode, err := preferredStateSummary.Accept(ctx) + if err != nil { + return err + } + + ss.Ctx.Log.Info("accepted state summary", zap.Stringer("summaryID", preferredStateSummary.ID()), + zap.Stringer("syncMode", syncMode), zap.Int("numTotalSummaries", size), ) - startedSyncing, err := preferredStateSummary.Accept() - if err != nil { - return err - } - if startedSyncing { - // summary was accepted and VM is state syncing. + switch syncMode { + case block.StateSyncSkipped: + // VM did not accept the summary, move on to bootstrapping. + return ss.onDoneStateSyncing(ctx, ss.requestID) + case block.StateSyncStatic: + // Summary was accepted and VM is state syncing. // Engine will wait for notification of state sync done. + ss.Ctx.StateSyncing.Set(true) return nil + case block.StateSyncDynamic: + // Summary was accepted and VM is state syncing. + // Engine will continue into bootstrapping and the VM will sync in the + // background. 
+ ss.Ctx.StateSyncing.Set(true) + return ss.onDoneStateSyncing(ctx, ss.requestID) + default: + ss.Ctx.Log.Warn("unhandled state summary mode, proceeding to bootstrap", + zap.Stringer("syncMode", syncMode), + ) + return ss.onDoneStateSyncing(ctx, ss.requestID) } - - // VM did not accept the summary, move on to bootstrapping. - return ss.onDoneStateSyncing(ss.requestID) } // selectSyncableStateSummary chooses a state summary from all @@ -351,7 +362,7 @@ func (ss *stateSyncer) selectSyncableStateSummary() block.StateSummary { return preferredStateSummary } -func (ss *stateSyncer) GetAcceptedStateSummaryFailed(nodeID ids.NodeID, requestID uint32) error { +func (ss *stateSyncer) GetAcceptedStateSummaryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { // ignores any late responses if requestID != ss.requestID { ss.Ctx.Log.Debug("received out-of-sync GetAcceptedStateSummaryFailed message", @@ -367,14 +378,17 @@ func (ss *stateSyncer) GetAcceptedStateSummaryFailed(nodeID ids.NodeID, requestI // accepted ss.failedVoters.Add(nodeID) - return ss.AcceptedStateSummary(nodeID, requestID, nil) + return ss.AcceptedStateSummary(ctx, nodeID, requestID, nil) } -func (ss *stateSyncer) Start(startReqID uint32) error { +func (ss *stateSyncer) Start(ctx context.Context, startReqID uint32) error { ss.Ctx.Log.Info("starting state sync") - ss.Ctx.SetState(snow.StateSyncing) - if err := ss.VM.SetState(snow.StateSyncing); err != nil { + ss.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.StateSyncing, + }) + if err := ss.VM.SetState(ctx, snow.StateSyncing); err != nil { return fmt.Errorf("failed to notify VM that state syncing has started: %w", err) } @@ -385,20 +399,20 @@ func (ss *stateSyncer) Start(startReqID uint32) error { } ss.started = true - return ss.startup() + return ss.startup(ctx) } // startup do start the whole state sync process by // sampling frontier seeders, listing state syncers to request votes to -// and 
reaching out frontier seeders if any. Othewise it move immediately +// and reaching out frontier seeders if any. Otherwise, it moves immediately // to bootstrapping. Unlike Start, startup does not check // whether sufficient stake amount is connected. -func (ss *stateSyncer) startup() error { +func (ss *stateSyncer) startup(ctx context.Context) error { ss.Config.Ctx.Log.Info("starting state sync") // clear up messages trackers ss.weightedSummaries = make(map[ids.ID]*weightedSummary) - ss.summariesHeights = make(map[uint64]struct{}) + ss.summariesHeights.Clear() ss.uniqueSummariesHeights = nil ss.targetSeeders.Clear() @@ -409,31 +423,34 @@ func (ss *stateSyncer) startup() error { ss.failedVoters.Clear() // sample K beacons to retrieve frontier from - beacons, err := ss.StateSyncBeacons.Sample(ss.Config.SampleK) + beaconIDs, err := ss.StateSyncBeacons.Sample(ss.Config.SampleK) if err != nil { return err } ss.frontierSeeders = validators.NewSet() - if err = ss.frontierSeeders.Set(beacons); err != nil { - return err - } - - for _, vdr := range beacons { - vdrID := vdr.ID() - ss.targetSeeders.Add(vdrID) + for _, nodeID := range beaconIDs { + if !ss.frontierSeeders.Contains(nodeID) { + // Invariant: We never use the TxID or BLS keys populated here. 
+ err = ss.frontierSeeders.Add(nodeID, nil, ids.Empty, 1) + } else { + err = ss.frontierSeeders.AddWeight(nodeID, 1) + } + if err != nil { + return err + } + ss.targetSeeders.Add(nodeID) } // list all beacons, to reach them for voting on frontier for _, vdr := range ss.StateSyncBeacons.List() { - vdrID := vdr.ID() - ss.targetVoters.Add(vdrID) + ss.targetVoters.Add(vdr.NodeID) } // check if there is an ongoing state sync; if so add its state summary // to the frontier to request votes on // Note: database.ErrNotFound means there is no ongoing summary - localSummary, err := ss.stateSyncVM.GetOngoingSyncStateSummary() + localSummary, err := ss.stateSyncVM.GetOngoingSyncStateSummary(ctx) switch err { case database.ErrNotFound: // no action needed @@ -444,7 +461,7 @@ func (ss *stateSyncer) startup() error { } height := localSummary.Height() - ss.summariesHeights[height] = struct{}{} + ss.summariesHeights.Add(height) ss.uniqueSummariesHeights = append(ss.uniqueSummariesHeights, height) default: return err @@ -454,29 +471,29 @@ func (ss *stateSyncer) startup() error { ss.attempts++ if ss.targetSeeders.Len() == 0 { ss.Ctx.Log.Info("State syncing skipped due to no provided syncers") - return ss.onDoneStateSyncing(ss.requestID) + return ss.onDoneStateSyncing(ctx, ss.requestID) } ss.requestID++ - ss.sendGetStateSummaryFrontiers() + ss.sendGetStateSummaryFrontiers(ctx) return nil } -func (ss *stateSyncer) restart() error { +func (ss *stateSyncer) restart(ctx context.Context) error { if ss.attempts > 0 && ss.attempts%ss.RetryBootstrapWarnFrequency == 0 { ss.Ctx.Log.Debug("check internet connection", zap.Int("numSyncAttempts", ss.attempts), ) } - return ss.startup() + return ss.startup(ctx) } // Ask up to [common.MaxOutstandingBroadcastRequests] state sync validators at a time // to send their accepted state summary. 
It is called again until there are // no more seeders to be reached in the pending set -func (ss *stateSyncer) sendGetStateSummaryFrontiers() { - vdrs := ids.NewNodeIDSet(1) +func (ss *stateSyncer) sendGetStateSummaryFrontiers(ctx context.Context) { + vdrs := set.NewSet[ids.NodeID](1) for ss.targetSeeders.Len() > 0 && ss.pendingSeeders.Len() < common.MaxOutstandingBroadcastRequests { vdr, _ := ss.targetSeeders.Pop() vdrs.Add(vdr) @@ -484,15 +501,15 @@ func (ss *stateSyncer) sendGetStateSummaryFrontiers() { } if vdrs.Len() > 0 { - ss.Sender.SendGetStateSummaryFrontier(vdrs, ss.requestID) + ss.Sender.SendGetStateSummaryFrontier(ctx, vdrs, ss.requestID) } } // Ask up to [common.MaxOutstandingStateSyncRequests] syncers validators to send // their filtered accepted frontier. It is called again until there are // no more voters to be reached in the pending set. -func (ss *stateSyncer) sendGetAcceptedStateSummaries() { - vdrs := ids.NewNodeIDSet(1) +func (ss *stateSyncer) sendGetAcceptedStateSummaries(ctx context.Context) { + vdrs := set.NewSet[ids.NodeID](1) for ss.targetVoters.Len() > 0 && ss.pendingVoters.Len() < common.MaxOutstandingBroadcastRequests { vdr, _ := ss.targetVoters.Pop() vdrs.Add(vdr) @@ -500,7 +517,7 @@ func (ss *stateSyncer) sendGetAcceptedStateSummaries() { } if len(vdrs) > 0 { - ss.Sender.SendGetAcceptedStateSummary(vdrs, ss.requestID, ss.uniqueSummariesHeights) + ss.Sender.SendGetAcceptedStateSummary(ctx, vdrs, ss.requestID, ss.uniqueSummariesHeights) ss.Ctx.Log.Debug("sent GetAcceptedStateSummary messages", zap.Int("numSent", vdrs.Len()), zap.Int("numPending", ss.targetVoters.Len()), @@ -508,34 +525,24 @@ func (ss *stateSyncer) sendGetAcceptedStateSummaries() { } } -func (ss *stateSyncer) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - return ss.VM.AppRequest(nodeID, requestID, deadline, request) -} - -func (ss *stateSyncer) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { - 
return ss.VM.AppResponse(nodeID, requestID, response) -} - -func (ss *stateSyncer) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { - return ss.VM.AppRequestFailed(nodeID, requestID) -} - -func (ss *stateSyncer) Notify(msg common.Message) error { +func (ss *stateSyncer) Notify(ctx context.Context, msg common.Message) error { if msg != common.StateSyncDone { ss.Ctx.Log.Warn("received an unexpected message from the VM", zap.Stringer("msg", msg), ) return nil } - return ss.onDoneStateSyncing(ss.requestID) + + ss.Ctx.StateSyncing.Set(false) + return ss.onDoneStateSyncing(ctx, ss.requestID) } -func (ss *stateSyncer) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { - if err := ss.VM.Connected(nodeID, nodeVersion); err != nil { +func (ss *stateSyncer) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + if err := ss.VM.Connected(ctx, nodeID, nodeVersion); err != nil { return err } - if err := ss.StartupTracker.Connected(nodeID, nodeVersion); err != nil { + if err := ss.StartupTracker.Connected(ctx, nodeID, nodeVersion); err != nil { return err } @@ -544,30 +551,34 @@ func (ss *stateSyncer) Connected(nodeID ids.NodeID, nodeVersion *version.Applica } ss.started = true - return ss.startup() + return ss.startup(ctx) } -func (ss *stateSyncer) Disconnected(nodeID ids.NodeID) error { - if err := ss.VM.Disconnected(nodeID); err != nil { +func (ss *stateSyncer) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + if err := ss.VM.Disconnected(ctx, nodeID); err != nil { return err } - return ss.StartupTracker.Disconnected(nodeID) + return ss.StartupTracker.Disconnected(ctx, nodeID) } -func (ss *stateSyncer) Gossip() error { return nil } +func (*stateSyncer) Gossip(context.Context) error { + return nil +} -func (ss *stateSyncer) Shutdown() error { +func (ss *stateSyncer) Shutdown(ctx context.Context) error { ss.Config.Ctx.Log.Info("shutting down state syncer") - return ss.VM.Shutdown() + return 
ss.VM.Shutdown(ctx) } -func (ss *stateSyncer) Halt() {} +func (*stateSyncer) Halt(context.Context) {} -func (ss *stateSyncer) Timeout() error { return nil } +func (*stateSyncer) Timeout(context.Context) error { + return nil +} -func (ss *stateSyncer) HealthCheck() (interface{}, error) { - vmIntf, vmErr := ss.VM.HealthCheck() +func (ss *stateSyncer) HealthCheck(ctx context.Context) (interface{}, error) { + vmIntf, vmErr := ss.VM.HealthCheck(ctx) intf := map[string]interface{}{ "consensus": struct{}{}, "vm": vmIntf, @@ -575,13 +586,15 @@ func (ss *stateSyncer) HealthCheck() (interface{}, error) { return intf, vmErr } -func (ss *stateSyncer) GetVM() common.VM { return ss.VM } +func (ss *stateSyncer) GetVM() common.VM { + return ss.VM +} -func (ss *stateSyncer) IsEnabled() (bool, error) { +func (ss *stateSyncer) IsEnabled(ctx context.Context) (bool, error) { if ss.stateSyncVM == nil { // state sync is not implemented return false, nil } - return ss.stateSyncVM.StateSyncEnabled() + return ss.stateSyncVM.StateSyncEnabled(ctx) } diff --git a/avalanchego/snow/engine/snowman/syncer/state_syncer_test.go b/avalanchego/snow/engine/snowman/syncer/state_syncer_test.go index 32bc12b2..0d5b0155 100644 --- a/avalanchego/snow/engine/snowman/syncer/state_syncer_test.go +++ b/avalanchego/snow/engine/snowman/syncer/state_syncer_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package syncer import ( "bytes" + "context" "errors" "math" "testing" @@ -18,11 +19,18 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" safeMath "github.com/ava-labs/avalanchego/utils/math" ) +var ( + errInvalidSummary = errors.New("invalid summary") + errEmptySummary = errors.New("empty summary") + errUnknownSummary = errors.New("unknown summary") +) + func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { require := require.New(t) @@ -42,9 +50,11 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { cfg, err := NewConfig(*commonCfg, nil, dummyGetter, nonStateSyncableVM) require.NoError(err) - syncer := New(cfg, func(lastReqID uint32) error { return nil }) + syncer := New(cfg, func(context.Context, uint32) error { + return nil + }) - enabled, err := syncer.IsEnabled() + enabled, err := syncer.IsEnabled(context.Background()) require.NoError(err) require.False(enabled) @@ -64,17 +74,23 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { cfg, err = NewConfig(*commonCfg, nil, dummyGetter, fullVM) require.NoError(err) - syncer = New(cfg, func(lastReqID uint32) error { return nil }) + syncer = New(cfg, func(context.Context, uint32) error { + return nil + }) // test: VM does not support state syncing - fullVM.StateSyncEnabledF = func() (bool, error) { return false, nil } - enabled, err = syncer.IsEnabled() + fullVM.StateSyncEnabledF = func(context.Context) (bool, error) { + return false, nil + } + enabled, err = syncer.IsEnabled(context.Background()) require.NoError(err) require.False(enabled) // test: VM does support state syncing - fullVM.StateSyncEnabledF = func() (bool, error) { return true, nil } - enabled, err = syncer.IsEnabled() + fullVM.StateSyncEnabledF = func(context.Context) 
(bool, error) { + return true, nil + } + enabled, err = syncer.IsEnabled(context.Background()) require.NoError(err) require.True(enabled) } @@ -92,7 +108,6 @@ func TestStateSyncingStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { commonCfg := common.Config{ Ctx: snow.DefaultConsensusContextTest(), - Validators: vdrs, Beacons: vdrs, SampleK: vdrs.Len(), Alpha: alpha, @@ -101,30 +116,30 @@ func TestStateSyncingStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { syncer, _, sender := buildTestsObjects(t, &commonCfg) sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, u uint32) {} + sender.SendGetStateSummaryFrontierF = func(context.Context, set.Set[ids.NodeID], uint32) {} startReqID := uint32(0) // attempt starting bootstrapper with no stake connected. Bootstrapper should stall. require.False(commonCfg.StartupTracker.ShouldStart()) - require.NoError(syncer.Start(startReqID)) + require.NoError(syncer.Start(context.Background(), startReqID)) require.False(syncer.started) // attempt starting bootstrapper with not enough stake connected. Bootstrapper should stall. vdr0 := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr0, startupAlpha/2)) - require.NoError(syncer.Connected(vdr0, version.CurrentApp)) + require.NoError(vdrs.Add(vdr0, nil, ids.Empty, startupAlpha/2)) + require.NoError(syncer.Connected(context.Background(), vdr0, version.CurrentApp)) require.False(commonCfg.StartupTracker.ShouldStart()) - require.NoError(syncer.Start(startReqID)) + require.NoError(syncer.Start(context.Background(), startReqID)) require.False(syncer.started) // finally attempt starting bootstrapper with enough stake connected. Frontiers should be requested. 
vdr := ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr, startupAlpha)) - require.NoError(syncer.Connected(vdr, version.CurrentApp)) + require.NoError(vdrs.Add(vdr, nil, ids.Empty, startupAlpha)) + require.NoError(syncer.Connected(context.Background(), vdr, version.CurrentApp)) require.True(commonCfg.StartupTracker.ShouldStart()) - require.NoError(syncer.Start(startReqID)) + require.NoError(syncer.Start(context.Background(), startReqID)) require.True(syncer.started) } @@ -154,13 +169,13 @@ func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) BytesV: summaryBytes, } fullVM.CantStateSyncGetOngoingSummary = true - fullVM.GetOngoingSyncStateSummaryF = func() (block.StateSummary, error) { + fullVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return localSummary, nil } // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } require.True(syncer.locallyAvailableSummary == localSummary) @@ -191,13 +206,13 @@ func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T // mock VM to simulate a no summary returned fullVM.CantStateSyncGetOngoingSummary = true - fullVM.GetOngoingSyncStateSummaryF = func() (block.StateSummary, error) { + fullVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return nil, database.ErrNotFound } // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } require.Nil(syncer.locallyAvailableSummary) @@ -224,15 +239,15 @@ func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { syncer, _, sender := buildTestsObjects(t, &commonCfg) // set sender to track nodes reached out - 
contactedFrontiersProviders := ids.NewNodeIDSet(3) + contactedFrontiersProviders := set.NewSet[ids.NodeID](3) sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, u uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], _ uint32) { contactedFrontiersProviders.Union(ss) } // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } // check that vdrs are reached out for frontiers @@ -268,7 +283,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -276,7 +291,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) @@ -285,7 +300,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(summaryBytes []byte) (block.StateSummary, error) { + fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { return &block.TestStateSummary{ HeightV: key, IDV: summaryID, @@ -299,6 +314,7 @@ 
func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // check a response with wrong request ID is dropped require.NoError(syncer.StateSummaryFrontier( + context.Background(), responsiveBeaconID, math.MaxInt32, summaryBytes, @@ -309,6 +325,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // check a response from unsolicited node is dropped unsolicitedNodeID := ids.GenerateTestNodeID() require.NoError(syncer.StateSummaryFrontier( + context.Background(), unsolicitedNodeID, responsiveBeaconReqID, summaryBytes, @@ -317,6 +334,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // check a valid response is duly recorded require.NoError(syncer.StateSummaryFrontier( + context.Background(), responsiveBeaconID, responsiveBeaconReqID, summaryBytes, @@ -358,7 +376,7 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -366,7 +384,7 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) @@ -377,9 +395,9 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { summary := []byte{'s', 'u', 'm', 'm', 'a', 'r', 'y'} isSummaryDecoded := false fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(summaryBytes []byte) (block.StateSummary, error) { + 
fullVM.ParseStateSummaryF = func(context.Context, []byte) (block.StateSummary, error) { isSummaryDecoded = true - return nil, errors.New("invalid state summary") + return nil, errInvalidSummary } // pick one of the vdrs that have been reached out @@ -388,6 +406,7 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { // response is valid, but invalid summary is not recorded require.NoError(syncer.StateSummaryFrontier( + context.Background(), responsiveBeaconID, responsiveBeaconReqID, summary, @@ -429,7 +448,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -437,7 +456,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) @@ -449,13 +468,14 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { unresponsiveBeaconReqID := contactedFrontiersProviders[unresponsiveBeaconID] fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(summaryBytes []byte) (block.StateSummary, error) { - require.True(len(summaryBytes) == 0) - return nil, errors.New("empty summary") + fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { + require.Empty(summaryBytes) + return nil, errEmptySummary } // assume timeout is 
reached and vdrs is marked as unresponsive require.NoError(syncer.GetStateSummaryFrontierFailed( + context.Background(), unresponsiveBeaconID, unresponsiveBeaconReqID, )) @@ -472,7 +492,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { // mock VM to simulate a valid but late summary is returned fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(summaryBytes []byte) (block.StateSummary, error) { + fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { return &block.TestStateSummary{ HeightV: key, IDV: summaryID, @@ -482,6 +502,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { // check a valid but late response is not recorded require.NoError(syncer.StateSummaryFrontier( + context.Background(), unresponsiveBeaconID, unresponsiveBeaconReqID, summaryBytes, @@ -515,7 +536,7 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -523,7 +544,7 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(b []byte) (block.StateSummary, error) { + fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) { switch { case bytes.Equal(b, summaryBytes): return &block.TestStateSummary{ @@ -532,15 +553,15 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { BytesV: summaryBytes, }, nil case bytes.Equal(b, nil): 
- return nil, errors.New("Empty Summary") + return nil, errEmptySummary default: - return nil, errors.New("unexpected Summary") + return nil, errUnknownSummary } } contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetAcceptedStateSummary = true - sender.SendGetAcceptedStateSummaryF = func(ss ids.NodeIDSet, reqID uint32, sl []uint64) { + sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) { for nodeID := range ss { contactedVoters[nodeID] = reqID } @@ -548,7 +569,7 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } require.True(syncer.pendingSeeders.Len() != 0) @@ -562,12 +583,14 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { if maxResponses > 0 { require.NoError(syncer.StateSummaryFrontier( + context.Background(), beaconID, reqID, summaryBytes, )) } else { require.NoError(syncer.GetStateSummaryFrontierFailed( + context.Background(), beaconID, reqID, )) @@ -605,7 +628,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -613,7 +636,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(b []byte) 
(block.StateSummary, error) { + fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) { require.True(bytes.Equal(b, summaryBytes)) return &block.TestStateSummary{ HeightV: key, @@ -624,7 +647,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetAcceptedStateSummary = true - sender.SendGetAcceptedStateSummaryF = func(ss ids.NodeIDSet, reqID uint32, sl []uint64) { + sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) { for nodeID := range ss { contactedVoters[nodeID] = reqID } @@ -632,7 +655,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } require.True(syncer.pendingSeeders.Len() != 0) @@ -643,6 +666,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { reqID := contactedFrontiersProviders[beaconID] require.NoError(syncer.StateSummaryFrontier( + context.Background(), beaconID, reqID, summaryBytes, @@ -678,7 +702,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -686,7 +710,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = 
func(summaryBytes []byte) (block.StateSummary, error) { + fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { return &block.TestStateSummary{ HeightV: key, IDV: summaryID, @@ -696,7 +720,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetAcceptedStateSummary = true - sender.SendGetAcceptedStateSummaryF = func(ss ids.NodeIDSet, reqID uint32, sl []uint64) { + sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) { for nodeID := range ss { contactedVoters[nodeID] = reqID } @@ -704,7 +728,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } require.True(syncer.pendingSeeders.Len() != 0) @@ -715,6 +739,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { reqID := contactedFrontiersProviders[beaconID] require.NoError(syncer.StateSummaryFrontier( + context.Background(), beaconID, reqID, summaryBytes, @@ -736,6 +761,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // check a response with wrong request ID is dropped require.NoError(syncer.AcceptedStateSummary( + context.Background(), responsiveVoterID, math.MaxInt32, []ids.ID{summaryID}, @@ -748,6 +774,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // check a response from unsolicited node is dropped unsolicitedVoterID := ids.GenerateTestNodeID() require.NoError(syncer.AcceptedStateSummary( + context.Background(), unsolicitedVoterID, responsiveVoterReqID, []ids.ID{summaryID}, @@ -756,6 +783,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // check a valid response is duly recorded require.NoError(syncer.AcceptedStateSummary( + context.Background(), 
responsiveVoterID, responsiveVoterReqID, []ids.ID{summaryID}, @@ -763,9 +791,8 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // responsiveBeacon not pending anymore require.False(syncer.pendingSeeders.Contains(responsiveVoterID)) - voterWeight, found := vdrs.GetWeight(responsiveVoterID) - require.True(found) - require.True(syncer.weightedSummaries[summaryID].weight == voterWeight) + voterWeight := vdrs.GetWeight(responsiveVoterID) + require.Equal(voterWeight, syncer.weightedSummaries[summaryID].weight) // other listed voters are reached out require.True( @@ -795,7 +822,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -803,7 +830,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(summaryBytes []byte) (block.StateSummary, error) { + fullVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { return &block.TestStateSummary{ HeightV: key, IDV: summaryID, @@ -813,7 +840,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetAcceptedStateSummary = true - sender.SendGetAcceptedStateSummaryF = func(ss ids.NodeIDSet, reqID uint32, sl []uint64) { + sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) { for nodeID := range ss { contactedVoters[nodeID] = reqID } @@ -821,7 +848,7 @@ func 
TestVotesForUnknownSummariesAreDropped(t *testing.T) { // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } require.True(syncer.pendingSeeders.Len() != 0) @@ -832,6 +859,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { reqID := contactedFrontiersProviders[beaconID] require.NoError(syncer.StateSummaryFrontier( + context.Background(), beaconID, reqID, summaryBytes, @@ -853,6 +881,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // check a response for unRequested summary is dropped require.NoError(syncer.AcceptedStateSummary( + context.Background(), responsiveVoterID, responsiveVoterReqID, []ids.ID{unknownSummaryID}, @@ -863,6 +892,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // check that responsiveVoter cannot cast another vote require.False(syncer.pendingSeeders.Contains(responsiveVoterID)) require.NoError(syncer.AcceptedStateSummary( + context.Background(), responsiveVoterID, responsiveVoterReqID, []ids.ID{summaryID}, @@ -898,7 +928,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -919,20 +949,20 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { } fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(b []byte) (block.StateSummary, error) { + fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) { 
switch { case bytes.Equal(b, summaryBytes): return summary, nil case bytes.Equal(b, minoritySummaryBytes): return minoritySummary, nil default: - return nil, errors.New("unknown state summary") + return nil, errUnknownSummary } } contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetAcceptedStateSummary = true - sender.SendGetAcceptedStateSummaryF = func(ss ids.NodeIDSet, reqID uint32, sl []uint64) { + sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) { for nodeID := range ss { contactedVoters[nodeID] = reqID } @@ -940,7 +970,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } require.True(syncer.pendingSeeders.Len() != 0) @@ -956,12 +986,14 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { if reachedSeeders%2 == 0 { require.NoError(syncer.StateSummaryFrontier( + context.Background(), beaconID, reqID, summaryBytes, )) } else { require.NoError(syncer.StateSummaryFrontier( + context.Background(), beaconID, reqID, minoritySummaryBytes, @@ -972,13 +1004,13 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { majoritySummaryCalled := false minoritySummaryCalled := false - summary.AcceptF = func() (bool, error) { + summary.AcceptF = func(context.Context) (block.StateSyncMode, error) { majoritySummaryCalled = true - return true, nil + return block.StateSyncStatic, nil } - minoritySummary.AcceptF = func() (bool, error) { + minoritySummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { minoritySummaryCalled = true - return true, nil + return block.StateSyncStatic, nil } // let a majority of voters return summaryID, and a minority return 
minoritySummaryID. The rest timeout. @@ -991,24 +1023,25 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { switch { case cumulatedWeight < commonCfg.Alpha/2: require.NoError(syncer.AcceptedStateSummary( + context.Background(), voterID, reqID, []ids.ID{summaryID, minoritySummaryID}, )) - bw, _ := vdrs.GetWeight(voterID) - cumulatedWeight += bw + cumulatedWeight += vdrs.GetWeight(voterID) case cumulatedWeight < commonCfg.Alpha: require.NoError(syncer.AcceptedStateSummary( + context.Background(), voterID, reqID, []ids.ID{summaryID}, )) - bw, _ := vdrs.GetWeight(voterID) - cumulatedWeight += bw + cumulatedWeight += vdrs.GetWeight(voterID) default: require.NoError(syncer.GetAcceptedStateSummaryFailed( + context.Background(), voterID, reqID, )) @@ -1044,7 +1077,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { // set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -1058,13 +1091,13 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { T: t, } fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(summaryBytes []byte) (block.StateSummary, error) { + fullVM.ParseStateSummaryF = func(context.Context, []byte) (block.StateSummary, error) { return minoritySummary, nil } contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetAcceptedStateSummary = true - sender.SendGetAcceptedStateSummaryF = func(ss ids.NodeIDSet, reqID uint32, sl []uint64) { + sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) { for nodeID := 
range ss { contactedVoters[nodeID] = reqID } @@ -1072,7 +1105,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } require.True(syncer.pendingSeeders.Len() != 0) @@ -1083,6 +1116,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { reqID := contactedFrontiersProviders[beaconID] require.NoError(syncer.StateSummaryFrontier( + context.Background(), beaconID, reqID, summaryBytes, @@ -1091,9 +1125,9 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { require.False(syncer.pendingSeeders.Len() != 0) minoritySummaryCalled := false - minoritySummary.AcceptF = func() (bool, error) { + minoritySummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { minoritySummaryCalled = true - return true, nil + return block.StateSyncStatic, nil } // Let a majority of voters timeout. @@ -1106,13 +1140,14 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { // vdr carries the largest weight by far. Make sure it fails if timedOutWeight <= commonCfg.Alpha { require.NoError(syncer.GetAcceptedStateSummaryFailed( + context.Background(), voterID, reqID, )) - bw, _ := vdrs.GetWeight(voterID) - timedOutWeight += bw + timedOutWeight += vdrs.GetWeight(voterID) } else { require.NoError(syncer.AcceptedStateSummary( + context.Background(), voterID, reqID, []ids.ID{summaryID}, @@ -1150,7 +1185,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. 
// set sender to track nodes reached out contactedFrontiersProviders := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetStateSummaryFrontier = true - sender.SendGetStateSummaryFrontierF = func(ss ids.NodeIDSet, reqID uint32) { + sender.SendGetStateSummaryFrontierF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32) { for nodeID := range ss { contactedFrontiersProviders[nodeID] = reqID } @@ -1171,20 +1206,20 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. } fullVM.CantParseStateSummary = true - fullVM.ParseStateSummaryF = func(b []byte) (block.StateSummary, error) { + fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) { switch { case bytes.Equal(b, summaryBytes): return minoritySummary1, nil case bytes.Equal(b, minoritySummaryBytes): return minoritySummary2, nil default: - return nil, errors.New("unknown state summary") + return nil, errUnknownSummary } } contactedVoters := make(map[ids.NodeID]uint32) // nodeID -> reqID map sender.CantSendGetAcceptedStateSummary = true - sender.SendGetAcceptedStateSummaryF = func(ss ids.NodeIDSet, reqID uint32, sl []uint64) { + sender.SendGetAcceptedStateSummaryF = func(_ context.Context, ss set.Set[ids.NodeID], reqID uint32, _ []uint64) { for nodeID := range ss { contactedVoters[nodeID] = reqID } @@ -1192,7 +1227,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. // Connect enough stake to start syncer for _, vdr := range vdrs.List() { - require.NoError(syncer.Connected(vdr.ID(), version.CurrentApp)) + require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } require.True(syncer.pendingSeeders.Len() != 0) @@ -1208,12 +1243,14 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. 
if reachedSeeders%2 == 0 { require.NoError(syncer.StateSummaryFrontier( + context.Background(), beaconID, reqID, summaryBytes, )) } else { require.NoError(syncer.StateSummaryFrontier( + context.Background(), beaconID, reqID, minoritySummaryBytes, @@ -1224,17 +1261,17 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. majoritySummaryCalled := false minoritySummaryCalled := false - minoritySummary1.AcceptF = func() (bool, error) { + minoritySummary1.AcceptF = func(context.Context) (block.StateSyncMode, error) { majoritySummaryCalled = true - return true, nil + return block.StateSyncStatic, nil } - minoritySummary2.AcceptF = func() (bool, error) { + minoritySummary2.AcceptF = func(context.Context) (block.StateSyncMode, error) { minoritySummaryCalled = true - return true, nil + return block.StateSyncStatic, nil } stateSyncFullyDone := false - syncer.onDoneStateSyncing = func(lastReqID uint32) error { + syncer.onDoneStateSyncing = func(context.Context, uint32) error { stateSyncFullyDone = true return nil } @@ -1250,21 +1287,21 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. 
switch { case votingWeightStake < commonCfg.Alpha/2: require.NoError(syncer.AcceptedStateSummary( + context.Background(), voterID, reqID, []ids.ID{minoritySummary1.ID(), minoritySummary2.ID()}, )) - bw, _ := vdrs.GetWeight(voterID) - votingWeightStake += bw + votingWeightStake += vdrs.GetWeight(voterID) default: require.NoError(syncer.AcceptedStateSummary( + context.Background(), voterID, reqID, []ids.ID{{'u', 'n', 'k', 'n', 'o', 'w', 'n', 'I', 'D'}}, )) - bw, _ := vdrs.GetWeight(voterID) - votingWeightStake += bw + votingWeightStake += vdrs.GetWeight(voterID) } } @@ -1297,12 +1334,12 @@ func TestStateSyncIsDoneOnceVMNotifies(t *testing.T) { _ = fullVM stateSyncFullyDone := false - syncer.onDoneStateSyncing = func(lastReqID uint32) error { + syncer.onDoneStateSyncing = func(context.Context, uint32) error { stateSyncFullyDone = true return nil } // Any Put response before StateSyncDone is received from VM is dropped - require.NoError(syncer.Notify(common.StateSyncDone)) + require.NoError(syncer.Notify(context.Background(), common.StateSyncDone)) require.True(stateSyncFullyDone) } diff --git a/avalanchego/snow/engine/snowman/syncer/utils_test.go b/avalanchego/snow/engine/snowman/syncer/utils_test.go index 57923d1a..01303743 100644 --- a/avalanchego/snow/engine/snowman/syncer/utils_test.go +++ b/avalanchego/snow/engine/snowman/syncer/utils_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package syncer import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -59,7 +60,7 @@ func buildTestPeers(t *testing.T) validators.Set { vdrs := validators.NewSet() for idx := 0; idx < 2*common.MaxOutstandingBroadcastRequests; idx++ { beaconID := ids.GenerateTestNodeID() - require.NoError(t, vdrs.AddWeight(beaconID, uint64(1))) + require.NoError(t, vdrs.Add(beaconID, nil, ids.Empty, 1)) } return vdrs } @@ -85,12 +86,14 @@ func buildTestsObjects(t *testing.T, commonCfg *common.Config) ( cfg, err := NewConfig(*commonCfg, nil, dummyGetter, fullVM) require.NoError(t, err) - commonSyncer := New(cfg, func(lastReqID uint32) error { return nil }) + commonSyncer := New(cfg, func(context.Context, uint32) error { + return nil + }) syncer, ok := commonSyncer.(*stateSyncer) require.True(t, ok) require.True(t, syncer.stateSyncVM != nil) - fullVM.GetOngoingSyncStateSummaryF = func() (block.StateSummary, error) { + fullVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return nil, database.ErrNotFound } diff --git a/avalanchego/snow/engine/snowman/test_snowman_engine.go b/avalanchego/snow/engine/snowman/test_engine.go similarity index 68% rename from avalanchego/snow/engine/snowman/test_snowman_engine.go rename to avalanchego/snow/engine/snowman/test_engine.go index d90e7ee7..c5c897b4 100644 --- a/avalanchego/snow/engine/snowman/test_snowman_engine.go +++ b/avalanchego/snow/engine/snowman/test_engine.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( + "context" "errors" "github.com/ava-labs/avalanchego/ids" @@ -12,7 +13,7 @@ import ( ) var ( - _ Engine = &EngineTest{} + _ Engine = (*EngineTest)(nil) errGetBlock = errors.New("unexpectedly called GetBlock") ) @@ -22,7 +23,7 @@ type EngineTest struct { common.EngineTest CantGetBlock bool - GetBlockF func(ids.ID) (snowman.Block, error) + GetBlockF func(context.Context, ids.ID) (snowman.Block, error) } func (e *EngineTest) Default(cant bool) { @@ -30,9 +31,9 @@ func (e *EngineTest) Default(cant bool) { e.CantGetBlock = false } -func (e *EngineTest) GetBlock(blkID ids.ID) (snowman.Block, error) { +func (e *EngineTest) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { if e.GetBlockF != nil { - return e.GetBlockF(blkID) + return e.GetBlockF(ctx, blkID) } if e.CantGetBlock && e.T != nil { e.T.Fatalf("Unexpectedly called GetBlock") diff --git a/avalanchego/snow/engine/snowman/traced_engine.go b/avalanchego/snow/engine/snowman/traced_engine.go new file mode 100644 index 00000000..56b46de4 --- /dev/null +++ b/avalanchego/snow/engine/snowman/traced_engine.go @@ -0,0 +1,42 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/trace" +) + +var _ Engine = (*tracedEngine)(nil) + +type tracedEngine struct { + common.Engine + engine Engine + tracer trace.Tracer +} + +func TraceEngine(engine Engine, tracer trace.Tracer) Engine { + return &tracedEngine{ + Engine: common.TraceEngine(engine, tracer), + engine: engine, + tracer: tracer, + } +} + +func (e *tracedEngine) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { + ctx, span := e.tracer.Start(ctx, "tracedEngine.GetBlock", oteltrace.WithAttributes( + attribute.Stringer("blkID", blkID), + )) + defer span.End() + + return e.engine.GetBlock(ctx, blkID) +} diff --git a/avalanchego/snow/engine/snowman/transitive.go b/avalanchego/snow/engine/snowman/transitive.go index 27e1d77e..bf3f8908 100644 --- a/avalanchego/snow/engine/snowman/transitive.go +++ b/avalanchego/snow/engine/snowman/transitive.go @@ -1,30 +1,34 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package snowman import ( + "context" "fmt" - "time" "go.uber.org/zap" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/consensus/snowman/poll" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/events" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/version" ) const nonVerifiedCacheSize = 128 -var _ Engine = &Transitive{} +var _ Engine = (*Transitive)(nil) func New(config Config) (Engine, error) { return newTransitive(config) @@ -42,6 +46,8 @@ type Transitive struct { common.AcceptedFrontierHandler common.AcceptedHandler common.AncestorsHandler + common.AppHandler + validators.Connector RequestID uint32 @@ -62,7 +68,10 @@ type Transitive struct { // A block is put into this cache if it was not able to be issued. A block // fails to be issued if verification on the block or one of its ancestors // occurs. - nonVerifiedCache cache.Cacher + nonVerifiedCache cache.Cacher[ids.ID, snowman.Block] + + // acceptedFrontiers of the other validators of this chain + acceptedFrontiers tracker.Accepted // operations that are blocked on a block being issued. 
This could be // issuing another block, responding to a query, or applying votes to consensus @@ -79,14 +88,18 @@ type Transitive struct { func newTransitive(config Config) (*Transitive, error) { config.Ctx.Log.Info("initializing consensus engine") - nonVerifiedCache, err := metercacher.New( + nonVerifiedCache, err := metercacher.New[ids.ID, snowman.Block]( "non_verified_cache", config.Ctx.Registerer, - &cache.LRU{Size: nonVerifiedCacheSize}, + &cache.LRU[ids.ID, snowman.Block]{Size: nonVerifiedCacheSize}, ) if err != nil { return nil, err } + + acceptedFrontiers := tracker.NewAccepted() + config.Validators.RegisterCallbackListener(acceptedFrontiers) + factory := poll.NewEarlyTermNoTraversalFactory(config.Params.Alpha) t := &Transitive{ Config: config, @@ -95,9 +108,12 @@ func newTransitive(config Config) (*Transitive, error) { AcceptedFrontierHandler: common.NewNoOpAcceptedFrontierHandler(config.Ctx.Log), AcceptedHandler: common.NewNoOpAcceptedHandler(config.Ctx.Log), AncestorsHandler: common.NewNoOpAncestorsHandler(config.Ctx.Log), + AppHandler: config.VM, + Connector: config.VM, pending: make(map[ids.ID]snowman.Block), nonVerifieds: NewAncestorTree(), nonVerifiedCache: nonVerifiedCache, + acceptedFrontiers: acceptedFrontiers, polls: poll.NewSet(factory, config.Ctx.Log, "", @@ -108,8 +124,8 @@ func newTransitive(config Config) (*Transitive, error) { return t, t.metrics.Initialize("", config.Ctx.Registerer) } -func (t *Transitive) Put(nodeID ids.NodeID, requestID uint32, blkBytes []byte) error { - blk, err := t.VM.ParseBlock(blkBytes) +func (t *Transitive) Put(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkBytes []byte) error { + blk, err := t.VM.ParseBlock(ctx, blkBytes) if err != nil { t.Ctx.Log.Debug("failed to parse block", zap.Stringer("nodeID", nodeID), @@ -125,7 +141,7 @@ func (t *Transitive) Put(nodeID ids.NodeID, requestID uint32, blkBytes []byte) e // because GetFailed doesn't utilize the assumption that we actually // sent a Get message, 
we can safely call GetFailed here to potentially // abandon the request. - return t.GetFailed(nodeID, requestID) + return t.GetFailed(ctx, nodeID, requestID) } actualBlkID := blk.ID() @@ -141,7 +157,7 @@ func (t *Transitive) Put(nodeID ids.NodeID, requestID uint32, blkBytes []byte) e ) // We assume that [blk] is useless because it doesn't match what we // expected. - return t.GetFailed(nodeID, requestID) + return t.GetFailed(ctx, nodeID, requestID) } if t.wasIssued(blk) { @@ -153,13 +169,13 @@ func (t *Transitive) Put(nodeID ids.NodeID, requestID uint32, blkBytes []byte) e // receive requests to fill the ancestry. dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. - if _, err := t.issueFrom(nodeID, blk); err != nil { + if _, err := t.issueFrom(ctx, nodeID, blk); err != nil { return err } - return t.buildBlocks() + return t.buildBlocks(ctx) } -func (t *Transitive) GetFailed(nodeID ids.NodeID, requestID uint32) error { +func (t *Transitive) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { // We don't assume that this function is called after a failed Get message. // Check to see if we have an outstanding request and also get what the request was for if it exists. blkID, ok := t.blkReqs.Remove(nodeID, requestID) @@ -172,28 +188,28 @@ func (t *Transitive) GetFailed(nodeID ids.NodeID, requestID uint32) error { } // Because the get request was dropped, we no longer expect blkID to be issued. 
- t.blocked.Abandon(blkID) + t.blocked.Abandon(ctx, blkID) t.metrics.numRequests.Set(float64(t.blkReqs.Len())) t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.buildBlocks() + return t.buildBlocks(ctx) } -func (t *Transitive) PullQuery(nodeID ids.NodeID, requestID uint32, blkID ids.ID) error { - t.Sender.SendChits(nodeID, requestID, []ids.ID{t.Consensus.Preference()}) +func (t *Transitive) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkID ids.ID) error { + t.sendChits(ctx, nodeID, requestID) // Try to issue [blkID] to consensus. // If we're missing an ancestor, request it from [vdr] - if _, err := t.issueFromByID(nodeID, blkID); err != nil { + if _, err := t.issueFromByID(ctx, nodeID, blkID); err != nil { return err } - return t.buildBlocks() + return t.buildBlocks(ctx) } -func (t *Transitive) PushQuery(nodeID ids.NodeID, requestID uint32, blkBytes []byte) error { - t.Sender.SendChits(nodeID, requestID, []ids.ID{t.Consensus.Preference()}) +func (t *Transitive) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID uint32, blkBytes []byte) error { + t.sendChits(ctx, nodeID, requestID) - blk, err := t.VM.ParseBlock(blkBytes) + blk, err := t.VM.ParseBlock(ctx, blkBytes) // If parsing fails, we just drop the request, as we didn't ask for it if err != nil { t.Ctx.Log.Debug("failed to parse block", @@ -219,14 +235,16 @@ func (t *Transitive) PushQuery(nodeID ids.NodeID, requestID uint32, blkBytes []b // receive requests to fill the ancestry. dependencies that have already // been fetched, but with missing dependencies themselves won't be requested // from the vdr. 
- if _, err := t.issueFrom(nodeID, blk); err != nil { + if _, err := t.issueFrom(ctx, nodeID, blk); err != nil { return err } - return t.buildBlocks() + return t.buildBlocks(ctx) } -func (t *Transitive) Chits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) error { +func (t *Transitive) Chits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) error { + t.acceptedFrontiers.SetAcceptedFrontier(nodeID, accepted) + // Since this is a linear chain, there should only be one ID in the vote set if len(votes) != 1 { t.Ctx.Log.Debug("failing Chits", @@ -238,7 +256,7 @@ func (t *Transitive) Chits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) // because QueryFailed doesn't utilize the assumption that we actually // sent a Query message, we can safely call QueryFailed here to // potentially abandon the request. - return t.QueryFailed(nodeID, requestID) + return t.QueryFailed(ctx, nodeID, requestID) } blkID := votes[0] @@ -255,7 +273,7 @@ func (t *Transitive) Chits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) response: blkID, } - added, err := t.issueFromByID(nodeID, blkID) + added, err := t.issueFromByID(ctx, nodeID, blkID) if err != nil { return err } @@ -264,57 +282,42 @@ func (t *Transitive) Chits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) v.deps.Add(blkID) } - t.blocked.Register(v) + t.blocked.Register(ctx, v) t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.buildBlocks() + return t.buildBlocks(ctx) } -func (t *Transitive) QueryFailed(vdr ids.NodeID, requestID uint32) error { - t.blocked.Register(&voter{ - t: t, - vdr: vdr, - requestID: requestID, - }) - t.metrics.numBlockers.Set(float64(t.blocked.Len())) - return t.buildBlocks() -} - -func (t *Transitive) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - // Notify the VM of this request - return t.VM.AppRequest(nodeID, requestID, deadline, request) -} - -func (t *Transitive) 
AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { - // Notify the VM that a request it made failed - return t.VM.AppRequestFailed(nodeID, requestID) -} - -func (t *Transitive) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { - // Notify the VM of a response to its request - return t.VM.AppResponse(nodeID, requestID, response) -} - -func (t *Transitive) AppGossip(nodeID ids.NodeID, msg []byte) error { - // Notify the VM of this message which has been gossiped to it - return t.VM.AppGossip(nodeID, msg) -} +func (t *Transitive) QueryFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + lastAccepted := t.acceptedFrontiers.AcceptedFrontier(nodeID) + if len(lastAccepted) == 1 { + // Chits calls QueryFailed if [votes] doesn't have length 1, so this + // check is required to avoid infinite mutual recursion. + return t.Chits(ctx, nodeID, requestID, lastAccepted, lastAccepted) + } -func (t *Transitive) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { - return t.VM.Connected(nodeID, nodeVersion) + t.blocked.Register( + ctx, + &voter{ + t: t, + vdr: nodeID, + requestID: requestID, + }, + ) + t.metrics.numBlockers.Set(float64(t.blocked.Len())) + return t.buildBlocks(ctx) } -func (t *Transitive) Disconnected(nodeID ids.NodeID) error { - return t.VM.Disconnected(nodeID) +func (*Transitive) Timeout(context.Context) error { + return nil } -func (t *Transitive) Timeout() error { return nil } - -func (t *Transitive) Gossip() error { - blkID, err := t.VM.LastAccepted() +func (t *Transitive) Gossip(ctx context.Context) error { + blkID, err := t.VM.LastAccepted(ctx) if err != nil { return err } - blk, err := t.GetBlock(blkID) + + blk, err := t.GetBlock(ctx, blkID) if err != nil { t.Ctx.Log.Warn("dropping gossip request", zap.String("reason", "block couldn't be loaded"), @@ -326,41 +329,46 @@ func (t *Transitive) Gossip() error { t.Ctx.Log.Verbo("gossiping accepted block to the network", zap.Stringer("blkID", 
blkID), ) - t.Sender.SendGossip(blk.Bytes()) + t.Sender.SendGossip(ctx, blk.Bytes()) return nil } -func (t *Transitive) Halt() {} +func (*Transitive) Halt(context.Context) {} -func (t *Transitive) Shutdown() error { +func (t *Transitive) Shutdown(ctx context.Context) error { t.Ctx.Log.Info("shutting down consensus engine") - return t.VM.Shutdown() + return t.VM.Shutdown(ctx) } -func (t *Transitive) Notify(msg common.Message) error { - if msg != common.PendingTxs { +func (t *Transitive) Notify(ctx context.Context, msg common.Message) error { + switch msg { + case common.PendingTxs: + // the pending txs message means we should attempt to build a block. + t.pendingBuildBlocks++ + return t.buildBlocks(ctx) + case common.StateSyncDone: + t.Ctx.StateSyncing.Set(false) + return nil + default: t.Ctx.Log.Warn("received an unexpected message from the VM", zap.Stringer("messageString", msg), ) return nil } - - // the pending txs message means we should attempt to build a block. - t.pendingBuildBlocks++ - return t.buildBlocks() } func (t *Transitive) Context() *snow.ConsensusContext { return t.Ctx } -func (t *Transitive) Start(startReqID uint32) error { +func (t *Transitive) Start(ctx context.Context, startReqID uint32) error { t.RequestID = startReqID - lastAcceptedID, err := t.VM.LastAccepted() + lastAcceptedID, err := t.VM.LastAccepted(ctx) if err != nil { return err } - lastAccepted, err := t.GetBlock(lastAcceptedID) + + lastAccepted, err := t.GetBlock(ctx, lastAcceptedID) if err != nil { t.Ctx.Log.Error("failed to get last accepted block", zap.Error(err), @@ -369,19 +377,19 @@ func (t *Transitive) Start(startReqID uint32) error { } // initialize consensus to the last accepted blockID - if err := t.Consensus.Initialize(t.Ctx, t.Params, lastAcceptedID, lastAccepted.Height()); err != nil { + if err := t.Consensus.Initialize(t.Ctx, t.Params, lastAcceptedID, lastAccepted.Height(), lastAccepted.Timestamp()); err != nil { return err } // to maintain the invariant that oracle 
blocks are issued in the correct // preferences, we need to handle the case that we are bootstrapping into an oracle block if oracleBlk, ok := lastAccepted.(snowman.OracleBlock); ok { - options, err := oracleBlk.Options() + options, err := oracleBlk.Options(ctx) switch { case err == snowman.ErrNotOracle: // if there aren't blocks we need to deliver on startup, we need to set // the preference to the last accepted block - if err := t.VM.SetPreference(lastAcceptedID); err != nil { + if err := t.VM.SetPreference(ctx, lastAcceptedID); err != nil { return err } case err != nil: @@ -389,12 +397,12 @@ func (t *Transitive) Start(startReqID uint32) error { default: for _, blk := range options { // note that deliver will set the VM's preference - if err := t.deliver(blk); err != nil { + if err := t.deliver(ctx, blk); err != nil { return err } } } - } else if err := t.VM.SetPreference(lastAcceptedID); err != nil { + } else if err := t.VM.SetPreference(ctx, lastAcceptedID); err != nil { return err } @@ -403,17 +411,20 @@ func (t *Transitive) Start(startReqID uint32) error { ) t.metrics.bootstrapFinished.Set(1) - t.Ctx.SetState(snow.NormalOp) - if err := t.VM.SetState(snow.NormalOp); err != nil { + t.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + if err := t.VM.SetState(ctx, snow.NormalOp); err != nil { return fmt.Errorf("failed to notify VM that consensus is starting: %w", err) } return nil } -func (t *Transitive) HealthCheck() (interface{}, error) { - consensusIntf, consensusErr := t.Consensus.HealthCheck() - vmIntf, vmErr := t.VM.HealthCheck() +func (t *Transitive) HealthCheck(ctx context.Context) (interface{}, error) { + consensusIntf, consensusErr := t.Consensus.HealthCheck(ctx) + vmIntf, vmErr := t.VM.HealthCheck(ctx) intf := map[string]interface{}{ "consensus": consensusIntf, "vm": vmIntf, @@ -424,33 +435,43 @@ func (t *Transitive) HealthCheck() (interface{}, error) { if vmErr == nil { return intf, consensusErr 
} - return intf, fmt.Errorf("vm: %s ; consensus: %s", vmErr, consensusErr) + return intf, fmt.Errorf("vm: %w ; consensus: %v", vmErr, consensusErr) } func (t *Transitive) GetVM() common.VM { return t.VM } -func (t *Transitive) GetBlock(blkID ids.ID) (snowman.Block, error) { +func (t *Transitive) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { if blk, ok := t.pending[blkID]; ok { return blk, nil } if blk, ok := t.nonVerifiedCache.Get(blkID); ok { - return blk.(snowman.Block), nil + return blk, nil + } + + return t.VM.GetBlock(ctx, blkID) +} + +func (t *Transitive) sendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32) { + lastAccepted := t.Consensus.LastAccepted() + if t.Ctx.StateSyncing.Get() { + t.Sender.SendChits(ctx, nodeID, requestID, []ids.ID{lastAccepted}, []ids.ID{lastAccepted}) + } else { + t.Sender.SendChits(ctx, nodeID, requestID, []ids.ID{t.Consensus.Preference()}, []ids.ID{lastAccepted}) } - return t.VM.GetBlock(blkID) } // Build blocks if they have been requested and the number of processing blocks // is less than optimal. -func (t *Transitive) buildBlocks() error { +func (t *Transitive) buildBlocks(ctx context.Context) error { if err := t.errs.Err; err != nil { return err } for t.pendingBuildBlocks > 0 && t.Consensus.NumProcessing() < t.Params.OptimalProcessing { t.pendingBuildBlocks-- - blk, err := t.VM.BuildBlock() + blk, err := t.VM.BuildBlock(ctx) if err != nil { t.Ctx.Log.Debug("failed building block", zap.Error(err), @@ -479,7 +500,7 @@ func (t *Transitive) buildBlocks() error { ) } - added, err := t.issueWithAncestors(blk) + added, err := t.issueWithAncestors(ctx, blk) if err != nil { return err } @@ -496,46 +517,46 @@ func (t *Transitive) buildBlocks() error { // Issue another poll to the network, asking what it prefers given the block we prefer. // Helps move consensus along. 
-func (t *Transitive) repoll() { +func (t *Transitive) repoll(ctx context.Context) { // if we are issuing a repoll, we should gossip our current preferences to // propagate the most likely branch as quickly as possible prefID := t.Consensus.Preference() for i := t.polls.Len(); i < t.Params.ConcurrentRepolls; i++ { - t.pullQuery(prefID) + t.pullQuery(ctx, prefID) } } // issueFromByID attempts to issue the branch ending with a block [blkID] into consensus. // If we do not have [blkID], request it. // Returns true if the block is processing in consensus or is decided. -func (t *Transitive) issueFromByID(nodeID ids.NodeID, blkID ids.ID) (bool, error) { - blk, err := t.GetBlock(blkID) +func (t *Transitive) issueFromByID(ctx context.Context, nodeID ids.NodeID, blkID ids.ID) (bool, error) { + blk, err := t.GetBlock(ctx, blkID) if err != nil { - t.sendRequest(nodeID, blkID) + t.sendRequest(ctx, nodeID, blkID) return false, nil } - return t.issueFrom(nodeID, blk) + return t.issueFrom(ctx, nodeID, blk) } // issueFrom attempts to issue the branch ending with block [blkID] to consensus. // Returns true if the block is processing in consensus or is decided. // If a dependency is missing, request it from [vdr]. -func (t *Transitive) issueFrom(nodeID ids.NodeID, blk snowman.Block) (bool, error) { - blkID := blk.ID() +func (t *Transitive) issueFrom(ctx context.Context, nodeID ids.NodeID, blk snowman.Block) (bool, error) { // issue [blk] and its ancestors to consensus. 
+ blkID := blk.ID() for !t.wasIssued(blk) { - if err := t.issue(blk); err != nil { + if err := t.issue(ctx, blk); err != nil { return false, err } blkID = blk.Parent() var err error - blk, err = t.GetBlock(blkID) + blk, err = t.GetBlock(ctx, blkID) // If we don't have this ancestor, request it from [vdr] if err != nil || !blk.Status().Fetched() { - t.sendRequest(nodeID, blkID) + t.sendRequest(ctx, nodeID, blkID) return false, nil } } @@ -548,7 +569,7 @@ func (t *Transitive) issueFrom(nodeID ids.NodeID, blk snowman.Block) (bool, erro // A dependency should never be waiting on a decided or processing // block. However, if the block was marked as rejected by the VM, the // dependencies may still be waiting. Therefore, they should abandoned. - t.blocked.Abandon(blkID) + t.blocked.Abandon(ctx, blkID) } // Tracks performance statistics @@ -560,17 +581,18 @@ func (t *Transitive) issueFrom(nodeID ids.NodeID, blk snowman.Block) (bool, erro // issueWithAncestors attempts to issue the branch ending with [blk] to consensus. // Returns true if the block is processing in consensus or is decided. // If a dependency is missing and the dependency hasn't been requested, the issuance will be abandoned. -func (t *Transitive) issueWithAncestors(blk snowman.Block) (bool, error) { +func (t *Transitive) issueWithAncestors(ctx context.Context, blk snowman.Block) (bool, error) { blkID := blk.ID() // issue [blk] and its ancestors into consensus status := blk.Status() for status.Fetched() && !t.wasIssued(blk) { - if err := t.issue(blk); err != nil { + err := t.issue(ctx, blk) + if err != nil { return false, err } blkID = blk.Parent() - var err error - if blk, err = t.GetBlock(blkID); err != nil { + blk, err = t.GetBlock(ctx, blkID) + if err != nil { status = choices.Unknown break } @@ -590,7 +612,7 @@ func (t *Transitive) issueWithAncestors(blk snowman.Block) (bool, error) { // We don't have this block and have no reason to expect that we will get it. 
// Abandon the block to avoid a memory leak. - t.blocked.Abandon(blkID) + t.blocked.Abandon(ctx, blkID) t.metrics.numBlockers.Set(float64(t.blocked.Len())) return false, t.errs.Err } @@ -604,7 +626,7 @@ func (t *Transitive) wasIssued(blk snowman.Block) bool { } // Issue [blk] to consensus once its ancestors have been issued. -func (t *Transitive) issue(blk snowman.Block) error { +func (t *Transitive) issue(ctx context.Context, blk snowman.Block) error { blkID := blk.ID() // mark that the block is queued to be added to consensus once its ancestors have been @@ -621,7 +643,7 @@ func (t *Transitive) issue(blk snowman.Block) error { // block on the parent if needed parentID := blk.Parent() - if parent, err := t.GetBlock(parentID); err != nil || !(t.Consensus.Decided(parent) || t.Consensus.Processing(parentID)) { + if parent, err := t.GetBlock(ctx, parentID); err != nil || !(t.Consensus.Decided(parent) || t.Consensus.Processing(parentID)) { t.Ctx.Log.Verbo("block waiting for parent to be issued", zap.Stringer("blkID", blkID), zap.Stringer("parentID", parentID), @@ -629,7 +651,7 @@ func (t *Transitive) issue(blk snowman.Block) error { i.deps.Add(parentID) } - t.blocked.Register(i) + t.blocked.Register(ctx, i) // Tracks performance statistics t.metrics.numRequests.Set(float64(t.blkReqs.Len())) @@ -639,7 +661,7 @@ func (t *Transitive) issue(blk snowman.Block) error { } // Request that [vdr] send us block [blkID] -func (t *Transitive) sendRequest(nodeID ids.NodeID, blkID ids.ID) { +func (t *Transitive) sendRequest(ctx context.Context, nodeID ids.NodeID, blkID ids.ID) { // There is already an outstanding request for this block if t.blkReqs.Contains(blkID) { return @@ -652,19 +674,19 @@ func (t *Transitive) sendRequest(nodeID ids.NodeID, blkID ids.ID) { zap.Uint32("requestID", t.RequestID), zap.Stringer("blkID", blkID), ) - t.Sender.SendGet(nodeID, t.RequestID, blkID) + t.Sender.SendGet(ctx, nodeID, t.RequestID, blkID) // Tracks performance statistics 
t.metrics.numRequests.Set(float64(t.blkReqs.Len())) } // send a pull query for this block ID -func (t *Transitive) pullQuery(blkID ids.ID) { +func (t *Transitive) pullQuery(ctx context.Context, blkID ids.ID) { t.Ctx.Log.Verbo("sampling from validators", zap.Stringer("validators", t.Validators), ) // The validators we will query - vdrs, err := t.Validators.Sample(t.Params.K) + vdrIDs, err := t.Validators.Sample(t.Params.K) if err != nil { t.Ctx.Log.Error("dropped query for block", zap.String("reason", "insufficient number of validators"), @@ -673,39 +695,37 @@ func (t *Transitive) pullQuery(blkID ids.ID) { return } - vdrBag := ids.NodeIDBag{} - for _, vdr := range vdrs { - vdrBag.Add(vdr.ID()) - } + vdrBag := bag.Bag[ids.NodeID]{} + vdrBag.Add(vdrIDs...) t.RequestID++ if t.polls.Add(t.RequestID, vdrBag) { vdrList := vdrBag.List() - vdrSet := ids.NewNodeIDSet(len(vdrList)) + vdrSet := set.NewSet[ids.NodeID](len(vdrList)) vdrSet.Add(vdrList...) - t.Sender.SendPullQuery(vdrSet, t.RequestID, blkID) + t.Sender.SendPullQuery(ctx, vdrSet, t.RequestID, blkID) } } // Send a query for this block. Some validators will be sent // a Push Query and some will be sent a Pull Query. -func (t *Transitive) sendMixedQuery(blk snowman.Block) { +func (t *Transitive) sendMixedQuery(ctx context.Context, blk snowman.Block) { t.Ctx.Log.Verbo("sampling from validators", zap.Stringer("validators", t.Validators), ) - vdrs, err := t.Validators.Sample(t.Params.K) + + blkID := blk.ID() + vdrIDs, err := t.Validators.Sample(t.Params.K) if err != nil { t.Ctx.Log.Error("dropped query for block", zap.String("reason", "insufficient number of validators"), - zap.Stringer("blkID", blk.ID()), + zap.Stringer("blkID", blkID), ) return } - vdrBag := ids.NodeIDBag{} - for _, vdr := range vdrs { - vdrBag.Add(vdr.ID()) - } + vdrBag := bag.Bag[ids.NodeID]{} + vdrBag.Add(vdrIDs...) 
t.RequestID++ if t.polls.Add(t.RequestID, vdrBag) { @@ -715,18 +735,19 @@ func (t *Transitive) sendMixedQuery(blk snowman.Block) { numPushTo = t.Params.MixedQueryNumPushNonVdr } common.SendMixedQuery( + ctx, t.Sender, vdrBag.List(), // Note that this doesn't contain duplicates; length may be < k numPushTo, t.RequestID, - blk.ID(), + blkID, blk.Bytes(), ) } } // issue [blk] to consensus -func (t *Transitive) deliver(blk snowman.Block) error { +func (t *Transitive) deliver(ctx context.Context, blk snowman.Block) error { blkID := blk.ID() if t.Consensus.Decided(blk) || t.Consensus.Processing(blkID) { return nil @@ -736,14 +757,14 @@ func (t *Transitive) deliver(blk snowman.Block) error { // longer pending t.removeFromPending(blk) parentID := blk.Parent() - parent, err := t.GetBlock(parentID) + parent, err := t.GetBlock(ctx, parentID) // Because the dependency must have been fulfilled by the time this function // is called - we don't expect [err] to be non-nil. But it is handled for // completness and future proofing. if err != nil || !(parent.Status() == choices.Accepted || t.Consensus.Processing(parentID)) { // if the parent isn't processing or the last accepted block, then this // block is effectively rejected - t.blocked.Abandon(blkID) + t.blocked.Abandon(ctx, blkID) t.metrics.numBlocked.Set(float64(len(t.pending))) // Tracks performance statistics t.metrics.numBlockers.Set(float64(t.blocked.Len())) return t.errs.Err @@ -752,12 +773,12 @@ func (t *Transitive) deliver(blk snowman.Block) error { // By ensuring that the parent is either processing or accepted, it is // guaranteed that the parent was successfully verified. This means that // calling Verify on this block is allowed. 
- blkAdded, err := t.addUnverifiedBlockToConsensus(blk) + blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, blk) if err != nil { return err } if !blkAdded { - t.blocked.Abandon(blkID) + t.blocked.Abandon(ctx, blkID) t.metrics.numBlocked.Set(float64(len(t.pending))) // Tracks performance statistics t.metrics.numBlockers.Set(float64(t.blocked.Len())) return t.errs.Err @@ -769,14 +790,14 @@ func (t *Transitive) deliver(blk snowman.Block) error { added := []snowman.Block{} dropped := []snowman.Block{} if blk, ok := blk.(snowman.OracleBlock); ok { - options, err := blk.Options() + options, err := blk.Options(ctx) if err != snowman.ErrNotOracle { if err != nil { return err } for _, blk := range options { - blkAdded, err := t.addUnverifiedBlockToConsensus(blk) + blkAdded, err := t.addUnverifiedBlockToConsensus(ctx, blk) if err != nil { return err } @@ -789,36 +810,36 @@ func (t *Transitive) deliver(blk snowman.Block) error { } } - if err := t.VM.SetPreference(t.Consensus.Preference()); err != nil { + if err := t.VM.SetPreference(ctx, t.Consensus.Preference()); err != nil { return err } // If the block is now preferred, query the network for its preferences // with this new block. 
if t.Consensus.IsPreferred(blk) { - t.sendMixedQuery(blk) + t.sendMixedQuery(ctx, blk) } - t.blocked.Fulfill(blkID) + t.blocked.Fulfill(ctx, blkID) for _, blk := range added { if t.Consensus.IsPreferred(blk) { - t.sendMixedQuery(blk) + t.sendMixedQuery(ctx, blk) } blkID := blk.ID() t.removeFromPending(blk) - t.blocked.Fulfill(blkID) + t.blocked.Fulfill(ctx, blkID) t.blkReqs.RemoveAny(blkID) } for _, blk := range dropped { blkID := blk.ID() t.removeFromPending(blk) - t.blocked.Abandon(blkID) + t.blocked.Abandon(ctx, blkID) t.blkReqs.RemoveAny(blkID) } // If we should issue multiple queries at the same time, we need to repoll - t.repoll() + t.repoll(ctx) // Tracks performance statistics t.metrics.numRequests.Set(float64(t.blkReqs.Len())) @@ -856,9 +877,9 @@ func (t *Transitive) addToNonVerifieds(blk snowman.Block) { // addUnverifiedBlockToConsensus returns whether the block was added and an // error if one occurred while adding it to consensus. -func (t *Transitive) addUnverifiedBlockToConsensus(blk snowman.Block) (bool, error) { +func (t *Transitive) addUnverifiedBlockToConsensus(ctx context.Context, blk snowman.Block) (bool, error) { // make sure this block is valid - if err := blk.Verify(); err != nil { + if err := blk.Verify(ctx); err != nil { t.Ctx.Log.Debug("block verification failed", zap.Error(err), ) @@ -875,7 +896,7 @@ func (t *Transitive) addUnverifiedBlockToConsensus(blk snowman.Block) (bool, err t.Ctx.Log.Verbo("adding block to consensus", zap.Stringer("blkID", blkID), ) - return true, t.Consensus.Add(&memoryBlock{ + return true, t.Consensus.Add(ctx, &memoryBlock{ Block: blk, metrics: &t.metrics, tree: t.nonVerifieds, diff --git a/avalanchego/snow/engine/snowman/transitive_test.go b/avalanchego/snow/engine/snowman/transitive_test.go index 57b22374..30a23baf 100644 --- a/avalanchego/snow/engine/snowman/transitive_test.go +++ b/avalanchego/snow/engine/snowman/transitive_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman import ( "bytes" + "context" "errors" "fmt" "testing" @@ -20,13 +21,17 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/snowman/getter" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( - errUnknownBlock = errors.New("unknown block") - errUnknownBytes = errors.New("unknown bytes") - Genesis = ids.GenerateTestID() + errUnknownBlock = errors.New("unknown block") + errUnknownBytes = errors.New("unknown bytes") + errInvalid = errors.New("invalid") + errUnexpectedCall = errors.New("unexpected call") + errTest = errors.New("non-nil test") + Genesis = ids.GenerateTestID() ) func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { @@ -34,7 +39,7 @@ func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, va engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -62,9 +67,11 @@ func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, va StatusV: choices.Accepted, }} - vm.LastAcceptedF = func() (ids.ID, error) { return gBlk.ID(), nil } + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -78,7 +85,7 @@ func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, va t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); 
err != nil { t.Fatal(err) } @@ -96,9 +103,12 @@ func setupDefaultConfig(t *testing.T) (ids.NodeID, validators.Set, *common.Sende func TestEngineShutdown(t *testing.T) { _, _, _, vm, transitive, _ := setupDefaultConfig(t) vmShutdownCalled := false - vm.ShutdownF = func() error { vmShutdownCalled = true; return nil } + vm.ShutdownF = func(context.Context) error { + vmShutdownCalled = true + return nil + } vm.CantShutdown = false - if err := transitive.Shutdown(); err != nil { + if err := transitive.Shutdown(context.Background()); err != nil { t.Fatal(err) } if !vmShutdownCalled { @@ -129,7 +139,7 @@ func TestEngineAdd(t *testing.T) { asked := new(bool) reqID := new(uint32) - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID if *asked { t.Fatalf("Asked multiple times") @@ -143,14 +153,14 @@ func TestEngineAdd(t *testing.T) { } } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if !bytes.Equal(b, blk.Bytes()) { t.Fatalf("Wrong bytes") } return blk, nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -161,7 +171,7 @@ func TestEngineAdd(t *testing.T) { } } - if err := te.Put(vdr, 0, blk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, blk.Bytes()); err != nil { t.Fatal(err) } @@ -175,9 +185,11 @@ func TestEngineAdd(t *testing.T) { t.Fatalf("Should have been blocking on request") } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { return nil, errUnknownBytes } + vm.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { + return nil, errUnknownBytes + } - if err := te.Put(vdr, *reqID, nil); err != nil { + if err := te.Put(context.Background(), vdr, *reqID, 
nil); err != nil { t.Fatal(err) } @@ -202,7 +214,7 @@ func TestEngineQuery(t *testing.T) { } chitted := new(bool) - sender.SendChitsF = func(inVdr ids.NodeID, requestID uint32, prefSet []ids.ID) { + sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, prefSet []ids.ID, accepted []ids.ID) { if *chitted { t.Fatalf("Sent multiple chits") } @@ -216,10 +228,16 @@ func TestEngineQuery(t *testing.T) { if gBlk.ID() != prefSet[0] { t.Fatalf("Wrong chits block") } + if len(accepted) != 1 { + t.Fatal("accepted should only have one element") + } + if gBlk.ID() != accepted[0] { + t.Fatalf("Wrong accepted frontier") + } } blocked := new(bool) - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { *blocked = true switch blkID { case gBlk.ID(): @@ -231,7 +249,7 @@ func TestEngineQuery(t *testing.T) { asked := new(bool) getRequestID := new(uint32) - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { if *asked { t.Fatalf("Asked multiple times") } @@ -245,7 +263,7 @@ func TestEngineQuery(t *testing.T) { } } - if err := te.PullQuery(vdr, 15, blk.ID()); err != nil { + if err := te.PullQuery(context.Background(), vdr, 15, blk.ID()); err != nil { t.Fatal(err) } if !*chitted { @@ -260,13 +278,13 @@ func TestEngineQuery(t *testing.T) { queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -276,13 +294,13 @@ func 
TestEngineQuery(t *testing.T) { } } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if !bytes.Equal(b, blk.Bytes()) { t.Fatalf("Wrong bytes") } return blk, nil } - if err := te.Put(vdr, *getRequestID, blk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, *getRequestID, blk.Bytes()); err != nil { t.Fatal(err) } vm.ParseBlockF = nil @@ -301,7 +319,7 @@ func TestEngineQuery(t *testing.T) { BytesV: []byte{5, 4, 3, 2, 1, 9}, } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blk.ID(): return nil, errUnknownBlock @@ -313,7 +331,7 @@ func TestEngineQuery(t *testing.T) { } *asked = false - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { if *asked { t.Fatalf("Asked multiple times") } @@ -326,18 +344,18 @@ func TestEngineQuery(t *testing.T) { t.Fatalf("Asking for wrong block") } } - if err := te.Chits(vdr, *queryRequestID, []ids.ID{blk1.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk1.ID()}, nil); err != nil { t.Fatal(err) } *queried = false - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -347,12 +365,12 @@ func TestEngineQuery(t *testing.T) { } } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if 
!bytes.Equal(b, blk1.Bytes()) { t.Fatalf("Wrong bytes") } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case blk.ID(): return blk, nil @@ -365,7 +383,7 @@ func TestEngineQuery(t *testing.T) { return blk1, nil } - if err := te.Put(vdr, *getRequestID, blk1.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, *getRequestID, blk1.Bytes()); err != nil { t.Fatal(err) } vm.ParseBlockF = nil @@ -379,7 +397,7 @@ func TestEngineQuery(t *testing.T) { _ = te.polls.String() // Shouldn't panic - if err := te.QueryFailed(vdr, *queryRequestID); err != nil { + if err := te.QueryFailed(context.Background(), vdr, *queryRequestID); err != nil { t.Fatal(err) } if len(te.blocked) != 0 { @@ -410,9 +428,9 @@ func TestEngineMultipleQuery(t *testing.T) { errs := wrappers.Errs{} errs.Add( - vals.AddWeight(vdr0, 1), - vals.AddWeight(vdr1, 1), - vals.AddWeight(vdr2, 1), + vals.Add(vdr0, nil, ids.Empty, 1), + vals.Add(vdr1, nil, ids.Empty, 1), + vals.Add(vdr2, nil, ids.Empty, 1), ) if errs.Errored() { t.Fatal(errs.Err) @@ -435,8 +453,10 @@ func TestEngineMultipleQuery(t *testing.T) { StatusV: choices.Accepted, }} - vm.LastAcceptedF = func() (ids.ID, error) { return gBlk.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { if blkID != gBlk.ID() { t.Fatalf("Wrong block requested") } @@ -448,7 +468,7 @@ func TestEngineMultipleQuery(t *testing.T) { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -467,13 +487,13 @@ func TestEngineMultipleQuery(t *testing.T) { queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + 
sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -483,7 +503,7 @@ func TestEngineMultipleQuery(t *testing.T) { } } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -492,7 +512,7 @@ func TestEngineMultipleQuery(t *testing.T) { } } - if err := te.issue(blk0); err != nil { + if err := te.issue(context.Background(), blk0); err != nil { t.Fatal(err) } @@ -506,7 +526,7 @@ func TestEngineMultipleQuery(t *testing.T) { BytesV: []byte{2}, } - vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { case gBlk.ID(): return gBlk, nil @@ -521,7 +541,7 @@ func TestEngineMultipleQuery(t *testing.T) { asked := new(bool) getRequestID := new(uint32) - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { if *asked { t.Fatalf("Asked multiple times") } @@ -535,15 +555,15 @@ func TestEngineMultipleQuery(t *testing.T) { } } blkSet := []ids.ID{blk1.ID()} - if err := te.Chits(vdr0, *queryRequestID, blkSet); err != nil { + if err := te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil); err != nil { t.Fatal(err) } - if err := te.Chits(vdr1, *queryRequestID, blkSet); err != nil { + if err := te.Chits(context.Background(), vdr1, *queryRequestID, blkSet, nil); err != nil { t.Fatal(err) } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + 
vm.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == blk0.ID(): return blk0, nil @@ -559,13 +579,13 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = false secondQueryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *secondQueryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -574,13 +594,13 @@ func TestEngineMultipleQuery(t *testing.T) { t.Fatalf("Asking for wrong block") } } - if err := te.Put(vdr0, *getRequestID, blk1.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr0, *getRequestID, blk1.Bytes()); err != nil { t.Fatal(err) } // Should be dropped because the query was already filled blkSet = []ids.ID{blk0.ID()} - if err := te.Chits(vdr2, *queryRequestID, blkSet); err != nil { + if err := te.Chits(context.Background(), vdr2, *queryRequestID, blkSet, nil); err != nil { t.Fatal(err) } @@ -616,8 +636,8 @@ func TestEngineBlockedIssue(t *testing.T) { BytesV: []byte{2}, } - sender.SendGetF = func(ids.NodeID, uint32, ids.ID) {} - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + sender.SendGetF = func(context.Context, ids.NodeID, uint32, ids.ID) {} + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -628,12 +648,12 @@ func TestEngineBlockedIssue(t *testing.T) { } } - if err := te.issue(blk1); err != nil { + if err := te.issue(context.Background(), blk1); err != nil { t.Fatal(err) } blk0.StatusV = choices.Processing - if err := te.issue(blk0); err != 
nil { + if err := te.issue(context.Background(), blk0); err != nil { t.Fatal(err) } @@ -657,7 +677,7 @@ func TestEngineAbandonResponse(t *testing.T) { BytesV: []byte{1}, } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == gBlk.ID(): return gBlk, nil @@ -668,10 +688,10 @@ func TestEngineAbandonResponse(t *testing.T) { return nil, errUnknownBlock } - if err := te.issue(blk); err != nil { + if err := te.issue(context.Background(), blk); err != nil { t.Fatal(err) } - if err := te.QueryFailed(vdr, 1); err != nil { + if err := te.QueryFailed(context.Background(), vdr, 1); err != nil { t.Fatal(err) } @@ -685,7 +705,7 @@ func TestEngineFetchBlock(t *testing.T) { sender.Default(false) - vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { if id == gBlk.ID() { return gBlk, nil } @@ -694,7 +714,7 @@ func TestEngineFetchBlock(t *testing.T) { } added := new(bool) - sender.SendPutF = func(inVdr ids.NodeID, requestID uint32, blk []byte) { + sender.SendPutF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blk []byte) { if vdr != inVdr { t.Fatalf("Wrong validator") } @@ -707,7 +727,7 @@ func TestEngineFetchBlock(t *testing.T) { *added = true } - if err := te.Get(vdr, 123, gBlk.ID()); err != nil { + if err := te.Get(context.Background(), vdr, 123, gBlk.ID()); err != nil { t.Fatal(err) } @@ -731,14 +751,14 @@ func TestEnginePushQuery(t *testing.T) { BytesV: []byte{1}, } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if bytes.Equal(b, blk.Bytes()) { return blk, nil } return nil, errUnknownBytes } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -750,7 
+770,7 @@ func TestEnginePushQuery(t *testing.T) { } chitted := new(bool) - sender.SendChitsF = func(inVdr ids.NodeID, requestID uint32, votes []ids.ID) { + sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) { if *chitted { t.Fatalf("Sent chit multiple times") } @@ -767,15 +787,21 @@ func TestEnginePushQuery(t *testing.T) { if gBlk.ID() != votes[0] { t.Fatalf("Asking for wrong block") } + if len(accepted) != 1 { + t.Fatal("accepted should only have one element") + } + if gBlk.ID() != accepted[0] { + t.Fatalf("Wrong accepted frontier") + } } queried := new(bool) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, _ uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -785,7 +811,7 @@ func TestEnginePushQuery(t *testing.T) { } } - if err := te.PushQuery(vdr, 20, blk.Bytes()); err != nil { + if err := te.PushQuery(context.Background(), vdr, 20, blk.Bytes()); err != nil { t.Fatal(err) } @@ -812,7 +838,7 @@ func TestEngineBuildBlock(t *testing.T) { BytesV: []byte{1}, } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -822,20 +848,22 @@ func TestEngineBuildBlock(t *testing.T) { } queried := new(bool) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, _ uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { 
t.Fatalf("Asking wrong validator for preference") } } - vm.BuildBlockF = func() (snowman.Block, error) { return blk, nil } - if err := te.Notify(common.PendingTxs); err != nil { + vm.BuildBlockF = func(context.Context) (snowman.Block, error) { + return blk, nil + } + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } @@ -850,19 +878,19 @@ func TestEngineRepoll(t *testing.T) { sender.Default(true) queried := new(bool) - sender.SendPullQueryF = func(inVdrs ids.NodeIDSet, _ uint32, blkID ids.ID) { + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkID ids.ID) { if *queried { t.Fatalf("Asked multiple times") } *queried = true - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } } - te.repoll() + te.repoll(context.Background()) if !*queried { t.Fatalf("Should have sent a query to the peer") @@ -892,9 +920,9 @@ func TestVoteCanceling(t *testing.T) { errs := wrappers.Errs{} errs.Add( - vals.AddWeight(vdr0, 1), - vals.AddWeight(vdr1, 1), - vals.AddWeight(vdr2, 1), + vals.Add(vdr0, nil, ids.Empty, 1), + vals.Add(vdr1, nil, ids.Empty, 1), + vals.Add(vdr2, nil, ids.Empty, 1), ) if errs.Errored() { t.Fatal(errs.Err) @@ -917,8 +945,10 @@ func TestVoteCanceling(t *testing.T) { StatusV: choices.Accepted, }} - vm.LastAcceptedF = func() (ids.ID, error) { return gBlk.ID(), nil } - vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { case gBlk.ID(): return gBlk, nil @@ -933,7 +963,7 @@ func TestVoteCanceling(t *testing.T) { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -951,13 +981,13 @@ func TestVoteCanceling(t *testing.T) { queried := 
new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr0, vdr1, vdr2) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -967,7 +997,7 @@ func TestVoteCanceling(t *testing.T) { } } - if err := te.issue(blk); err != nil { + if err := te.issue(context.Background(), blk); err != nil { t.Fatal(err) } @@ -975,7 +1005,7 @@ func TestVoteCanceling(t *testing.T) { t.Fatalf("Shouldn't have finished blocking issue") } - if err := te.QueryFailed(vdr0, *queryRequestID); err != nil { + if err := te.QueryFailed(context.Background(), vdr0, *queryRequestID); err != nil { t.Fatal(err) } @@ -984,10 +1014,10 @@ func TestVoteCanceling(t *testing.T) { } repolled := new(bool) - sender.SendPullQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkID ids.ID) { + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID) { *repolled = true } - if err := te.QueryFailed(vdr1, *queryRequestID); err != nil { + if err := te.QueryFailed(context.Background(), vdr1, *queryRequestID); err != nil { t.Fatal(err) } @@ -1010,9 +1040,11 @@ func TestEngineNoQuery(t *testing.T) { vm := &block.TestVM{} vm.T = t - vm.LastAcceptedF = func() (ids.ID, error) { return gBlk.ID(), nil } + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { if blkID == gBlk.ID() { return gBlk, nil } @@ -1026,7 +1058,7 @@ func TestEngineNoQuery(t *testing.T) { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := 
te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -1040,7 +1072,7 @@ func TestEngineNoQuery(t *testing.T) { BytesV: []byte{1}, } - if err := te.issue(blk); err != nil { + if err := te.issue(context.Background(), blk); err != nil { t.Fatal(err) } } @@ -1059,9 +1091,11 @@ func TestEngineNoRepollQuery(t *testing.T) { vm := &block.TestVM{} vm.T = t - vm.LastAcceptedF = func() (ids.ID, error) { return gBlk.ID(), nil } + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { if blkID == gBlk.ID() { return gBlk, nil } @@ -1075,11 +1109,11 @@ func TestEngineNoRepollQuery(t *testing.T) { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } - te.repoll() + te.repoll(context.Background()) } func TestEngineAbandonQuery(t *testing.T) { @@ -1089,7 +1123,7 @@ func TestEngineAbandonQuery(t *testing.T) { blkID := ids.GenerateTestID() - vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { case blkID: return nil, errUnknownBlock @@ -1100,13 +1134,13 @@ func TestEngineAbandonQuery(t *testing.T) { } reqID := new(uint32) - sender.SendGetF = func(_ ids.NodeID, requestID uint32, _ ids.ID) { + sender.SendGetF = func(_ context.Context, _ ids.NodeID, requestID uint32, _ ids.ID) { *reqID = requestID } sender.CantSendChits = false - if err := te.PullQuery(vdr, 0, blkID); err != nil { + if err := te.PullQuery(context.Background(), vdr, 0, blkID); err != nil { t.Fatal(err) } @@ -1114,7 +1148,7 @@ func TestEngineAbandonQuery(t *testing.T) { t.Fatalf("Should have issued request") } - if err := te.GetFailed(vdr, *reqID); err != nil { + if err := te.GetFailed(context.Background(), vdr, *reqID); err != nil { t.Fatal(err) } @@ -1124,6 +1158,8 @@ func 
TestEngineAbandonQuery(t *testing.T) { } func TestEngineAbandonChit(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1138,7 +1174,7 @@ func TestEngineAbandonChit(t *testing.T) { BytesV: []byte{1}, } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -1149,43 +1185,100 @@ func TestEngineAbandonChit(t *testing.T) { return nil, errUnknownBlock } - sender.CantSendPushQuery = false - - if err := te.issue(blk); err != nil { - t.Fatal(err) + var reqID uint32 + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { + reqID = requestID } + err := te.issue(context.Background(), blk) + require.NoError(err) + fakeBlkID := ids.GenerateTestID() - vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { - switch id { - case fakeBlkID: + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { + require.Equal(fakeBlkID, id) + return nil, errUnknownBlock + } + + sender.SendGetF = func(_ context.Context, _ ids.NodeID, requestID uint32, _ ids.ID) { + reqID = requestID + } + + // Register a voter dependency on an unknown block. 
+ err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeBlkID}, nil) + require.NoError(err) + require.Len(te.blocked, 1) + + sender.CantSendPullQuery = false + + err = te.GetFailed(context.Background(), vdr, reqID) + require.NoError(err) + require.Empty(te.blocked) +} + +func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { + require := require.New(t) + + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) + + sender.Default(true) + + blk := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentV: gBlk.ID(), + HeightV: 1, + BytesV: []byte{1}, + } + + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch blkID { + case gBlk.ID(): + return gBlk, nil + case blk.ID(): return nil, errUnknownBlock - default: - t.Fatalf("Loaded unknown block") - panic("Should have failed") } + t.Fatalf("Wrong block requested") + return nil, errUnknownBlock } - reqID := new(uint32) - sender.SendGetF = func(_ ids.NodeID, requestID uint32, _ ids.ID) { - *reqID = requestID + var reqID uint32 + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { + reqID = requestID } - if err := te.Chits(vdr, 0, []ids.ID{fakeBlkID}); err != nil { - t.Fatal(err) - } + err := te.issue(context.Background(), blk) + require.NoError(err) - if len(te.blocked) != 1 { - t.Fatalf("Should have blocked on request") + fakeBlkID := ids.GenerateTestID() + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { + require.Equal(fakeBlkID, id) + return nil, errUnknownBlock } - if err := te.GetFailed(vdr, *reqID); err != nil { - t.Fatal(err) + sender.SendGetF = func(_ context.Context, _ ids.NodeID, requestID uint32, _ ids.ID) { + reqID = requestID } - if len(te.blocked) != 0 { - t.Fatalf("Should have removed request") + // Register a voter dependency on an unknown block. 
+ err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeBlkID}, nil) + require.NoError(err) + require.Len(te.blocked, 1) + + sender.CantSendPullQuery = false + + gBlkBytes := gBlk.Bytes() + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { + require.Equal(gBlkBytes, b) + return gBlk, nil } + + // Respond with an unexpected block and verify that the request is correctly + // cleared. + err = te.Put(context.Background(), vdr, reqID, gBlkBytes) + require.NoError(err) + require.Empty(te.blocked) } func TestEngineBlockingChitRequest(t *testing.T) { @@ -1221,7 +1314,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { BytesV: []byte{3}, } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -1232,9 +1325,9 @@ func TestEngineBlockingChitRequest(t *testing.T) { } } - sender.SendGetF = func(ids.NodeID, uint32, ids.ID) {} + sender.SendGetF = func(context.Context, ids.NodeID, uint32, ids.ID) {} - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, blockingBlk.Bytes()): return blockingBlk, nil @@ -1244,13 +1337,13 @@ func TestEngineBlockingChitRequest(t *testing.T) { } } - if err := te.issue(parentBlk); err != nil { + if err := te.issue(context.Background(), parentBlk); err != nil { t.Fatal(err) } sender.CantSendChits = false - if err := te.PushQuery(vdr, 0, blockingBlk.Bytes()); err != nil { + if err := te.PushQuery(context.Background(), vdr, 0, blockingBlk.Bytes()); err != nil { t.Fatal(err) } @@ -1261,7 +1354,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { sender.CantSendPushQuery = false missingBlk.StatusV = choices.Processing - if err := te.issue(missingBlk); err != nil { + if err := te.issue(context.Background(), missingBlk); err != nil { t.Fatal(err) } @@ -1303,7 +1396,7 @@ func 
TestEngineBlockingChitResponse(t *testing.T) { BytesV: []byte{3}, } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -1314,14 +1407,14 @@ func TestEngineBlockingChitResponse(t *testing.T) { } } - if err := te.issue(blockingBlk); err != nil { + if err := te.issue(context.Background(), blockingBlk); err != nil { t.Fatal(err) } queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -1331,14 +1424,14 @@ func TestEngineBlockingChitResponse(t *testing.T) { } } - if err := te.issue(issuedBlk); err != nil { + if err := te.issue(context.Background(), issuedBlk); err != nil { t.Fatal(err) } sender.SendPushQueryF = nil sender.CantSendPushQuery = false - if err := te.Chits(vdr, *queryRequestID, []ids.ID{blockingBlk.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blockingBlk.ID()}, nil); err != nil { t.Fatal(err) } @@ -1346,7 +1439,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { sender.CantSendPullQuery = false missingBlk.StatusV = choices.Processing - if err := te.issue(missingBlk); err != nil { + if err := te.issue(context.Background(), missingBlk); err != nil { t.Fatal(err) } } @@ -1369,30 +1462,30 @@ func TestEngineRetryFetch(t *testing.T) { vm.CantGetBlock = false reqID := new(uint32) - sender.SendGetF = func(_ ids.NodeID, requestID uint32, _ ids.ID) { + sender.SendGetF = func(_ context.Context, _ ids.NodeID, requestID uint32, _ ids.ID) { *reqID = requestID } sender.CantSendChits = false - if err := te.PullQuery(vdr, 
0, missingBlk.ID()); err != nil { + if err := te.PullQuery(context.Background(), vdr, 0, missingBlk.ID()); err != nil { t.Fatal(err) } vm.CantGetBlock = true sender.SendGetF = nil - if err := te.GetFailed(vdr, *reqID); err != nil { + if err := te.GetFailed(context.Background(), vdr, *reqID); err != nil { t.Fatal(err) } vm.CantGetBlock = false called := new(bool) - sender.SendGetF = func(ids.NodeID, uint32, ids.ID) { + sender.SendGetF = func(context.Context, ids.NodeID, uint32, ids.ID) { *called = true } - if err := te.PullQuery(vdr, 0, missingBlk.ID()); err != nil { + if err := te.PullQuery(context.Background(), vdr, 0, missingBlk.ID()); err != nil { t.Fatal(err) } @@ -1425,19 +1518,19 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { }, ParentV: validBlk.IDV, HeightV: 2, - VerifyV: errors.New(""), + VerifyV: errTest, BytesV: []byte{2}, } invalidBlkID := invalidBlk.ID() reqID := new(uint32) - sender.SendPushQueryF = func(_ ids.NodeIDSet, requestID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ []byte) { *reqID = requestID } - sender.SendPullQueryF = func(_ ids.NodeIDSet, requestID uint32, _ ids.ID) {} + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, _ ids.ID) {} - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -1449,15 +1542,15 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { return nil, errUnknownBlock } } - if err := te.issue(validBlk); err != nil { + if err := te.issue(context.Background(), validBlk); err != nil { t.Fatal(err) } sender.SendPushQueryF = nil - if err := te.issue(invalidBlk); err != nil { + if err := te.issue(context.Background(), invalidBlk); err != nil { t.Fatal(err) } - if err := te.Chits(vdr, *reqID, []ids.ID{invalidBlkID}); err != nil { + if err := 
te.Chits(context.Background(), vdr, *reqID, []ids.ID{invalidBlkID}, nil); err != nil { t.Fatal(err) } @@ -1470,8 +1563,10 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { func TestEngineGossip(t *testing.T) { _, _, sender, vm, te, gBlk := setupDefaultConfig(t) - vm.LastAcceptedF = func() (ids.ID, error) { return gBlk.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { if blkID == gBlk.ID() { return gBlk, nil } @@ -1480,14 +1575,14 @@ func TestEngineGossip(t *testing.T) { } called := new(bool) - sender.SendGossipF = func(blkBytes []byte) { + sender.SendGossipF = func(_ context.Context, blkBytes []byte) { *called = true if !bytes.Equal(blkBytes, gBlk.Bytes()) { t.Fatal(errUnknownBytes) } } - if err := te.Gossip(); err != nil { + if err := te.Gossip(context.Background()); err != nil { t.Fatal(err) } @@ -1500,7 +1595,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { vdr, vdrs, sender, vm, te, gBlk := setupDefaultConfig(t) secondVdr := ids.GenerateTestNodeID() - if err := vdrs.AddWeight(secondVdr, 1); err != nil { + if err := vdrs.Add(secondVdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1526,7 +1621,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } parsed := new(bool) - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if bytes.Equal(b, pendingBlk.Bytes()) { *parsed = true return pendingBlk, nil @@ -1534,7 +1629,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { return nil, errUnknownBlock } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -1549,7 +1644,7 @@ func 
TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } reqID := new(uint32) - sender.SendGetF = func(reqVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID if reqVdr != vdr { t.Fatalf("Wrong validator requested") @@ -1560,23 +1655,23 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } sender.CantSendChits = false - if err := te.PushQuery(vdr, 0, pendingBlk.Bytes()); err != nil { + if err := te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { t.Fatal(err) } - if err := te.Put(secondVdr, *reqID, []byte{3}); err != nil { + if err := te.Put(context.Background(), secondVdr, *reqID, []byte{3}); err != nil { t.Fatal(err) } *parsed = false - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if bytes.Equal(b, missingBlk.Bytes()) { *parsed = true return missingBlk, nil } return nil, errUnknownBlock } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -1593,7 +1688,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { missingBlk.StatusV = choices.Processing - if err := te.Put(vdr, *reqID, missingBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes()); err != nil { t.Fatal(err) } @@ -1628,7 +1723,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } parsed := new(bool) - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if bytes.Equal(b, pendingBlk.Bytes()) { *parsed = true return pendingBlk, nil @@ -1636,7 +1731,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { return nil, errUnknownBlock } - vm.GetBlockF = 
func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -1651,7 +1746,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } reqID := new(uint32) - sender.SendGetF = func(reqVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID if reqVdr != vdr { t.Fatalf("Wrong validator requested") @@ -1662,26 +1757,26 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } sender.CantSendChits = false - if err := te.PushQuery(vdr, 0, pendingBlk.Bytes()); err != nil { + if err := te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { t.Fatal(err) } sender.SendGetF = nil sender.CantSendGet = false - if err := te.PushQuery(vdr, *reqID, []byte{3}); err != nil { + if err := te.PushQuery(context.Background(), vdr, *reqID, []byte{3}); err != nil { t.Fatal(err) } *parsed = false - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if bytes.Equal(b, missingBlk.Bytes()) { *parsed = true return missingBlk, nil } return nil, errUnknownBlock } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -1696,7 +1791,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } sender.CantSendPushQuery = false - if err := te.Put(vdr, *reqID, missingBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes()); err != nil { t.Fatal(err) } @@ -1714,7 +1809,7 @@ func TestEngineAggressivePolling(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { 
t.Fatal(err) } @@ -1735,8 +1830,10 @@ func TestEngineAggressivePolling(t *testing.T) { StatusV: choices.Accepted, }} - vm.LastAcceptedF = func() (ids.ID, error) { return gBlk.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { if blkID != gBlk.ID() { t.Fatalf("Wrong block requested") } @@ -1748,7 +1845,7 @@ func TestEngineAggressivePolling(t *testing.T) { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -1766,7 +1863,7 @@ func TestEngineAggressivePolling(t *testing.T) { } parsed := new(bool) - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if bytes.Equal(b, pendingBlk.Bytes()) { *parsed = true return pendingBlk, nil @@ -1774,7 +1871,7 @@ func TestEngineAggressivePolling(t *testing.T) { return nil, errUnknownBlock } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -1789,12 +1886,16 @@ func TestEngineAggressivePolling(t *testing.T) { } numPushed := new(int) - sender.SendPushQueryF = func(ids.NodeIDSet, uint32, []byte) { *numPushed++ } + sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { + *numPushed++ + } numPulled := new(int) - sender.SendPullQueryF = func(ids.NodeIDSet, uint32, ids.ID) { *numPulled++ } + sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID) { + *numPulled++ + } - if err := te.Put(vdr, 0, pendingBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { t.Fatal(err) } @@ -1827,10 +1928,10 @@ func TestEngineDoubleChit(t *testing.T) { vdr0 := 
ids.GenerateTestNodeID() vdr1 := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr0, 1); err != nil { + if err := vals.Add(vdr0, nil, ids.Empty, 1); err != nil { t.Fatal(err) } - if err := vals.AddWeight(vdr1, 1); err != nil { + if err := vals.Add(vdr1, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1852,8 +1953,10 @@ func TestEngineDoubleChit(t *testing.T) { StatusV: choices.Accepted, }} - vm.LastAcceptedF = func() (ids.ID, error) { return gBlk.ID(), nil } - vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { if id == gBlk.ID() { return gBlk, nil } @@ -1866,7 +1969,7 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -1884,13 +1987,13 @@ func TestEngineDoubleChit(t *testing.T) { queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr0, vdr1) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -1900,11 +2003,11 @@ func TestEngineDoubleChit(t *testing.T) { } } - if err := te.issue(blk); err != nil { + if err := te.issue(context.Background(), blk); err != nil { t.Fatal(err) } - vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { case gBlk.ID(): return gBlk, nil @@ -1921,7 +2024,7 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong status: %s ; expected: %s", status, 
choices.Processing) } - if err := te.Chits(vdr0, *queryRequestID, blkSet); err != nil { + if err := te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil); err != nil { t.Fatal(err) } @@ -1929,7 +2032,7 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - if err := te.Chits(vdr0, *queryRequestID, blkSet); err != nil { + if err := te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil); err != nil { t.Fatal(err) } @@ -1937,7 +2040,7 @@ func TestEngineDoubleChit(t *testing.T) { t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) } - if err := te.Chits(vdr1, *queryRequestID, blkSet); err != nil { + if err := te.Chits(context.Background(), vdr1, *queryRequestID, blkSet, nil); err != nil { t.Fatal(err) } @@ -1956,7 +2059,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.AddWeight(vdr, 1); err != nil { + if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } @@ -1977,8 +2080,10 @@ func TestEngineBuildBlockLimit(t *testing.T) { StatusV: choices.Accepted, }} - vm.LastAcceptedF = func() (ids.ID, error) { return gBlk.ID(), nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { if blkID != gBlk.ID() { t.Fatalf("Wrong block requested") } @@ -1990,7 +2095,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { t.Fatal(err) } - if err := te.Start(0); err != nil { + if err := te.Start(context.Background(), 0); err != nil { t.Fatal(err) } @@ -2021,20 +2126,20 @@ func TestEngineBuildBlockLimit(t *testing.T) { queried bool reqID uint32 ) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, rID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], rID uint32, _ 
[]byte) { reqID = rID if queried { t.Fatalf("Asked multiple times") } queried = true - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") } } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2044,7 +2149,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { } blkToReturn := 0 - vm.BuildBlockF = func() (snowman.Block, error) { + vm.BuildBlockF = func(context.Context) (snowman.Block, error) { if blkToReturn >= len(blks) { t.Fatalf("Built too many blocks") } @@ -2052,7 +2157,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { blkToReturn++ return blk, nil } - if err := te.Notify(common.PendingTxs); err != nil { + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } @@ -2061,7 +2166,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { } queried = false - if err := te.Notify(common.PendingTxs); err != nil { + if err := te.Notify(context.Background(), common.PendingTxs); err != nil { t.Fatal(err) } @@ -2069,7 +2174,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { t.Fatalf("Shouldn't have sent a query to the peer") } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2080,7 +2185,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { } } - if err := te.Chits(vdr, reqID, []ids.ID{blk0.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{blk0.ID()}, nil); err != nil { t.Fatal(err) } @@ -2120,7 +2225,7 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { BytesV: []byte{3}, } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case 
bytes.Equal(b, acceptedBlk.Bytes()): return acceptedBlk, nil @@ -2134,7 +2239,7 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { } } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2149,12 +2254,12 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { asked bool reqID uint32 ) - sender.SendPushQueryF = func(_ ids.NodeIDSet, rID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, blkBytes []byte) { asked = true reqID = rID } - if err := te.Put(vdr, 0, acceptedBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2162,19 +2267,19 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { t.Fatalf("Didn't query for the new block") } - if err := te.Chits(vdr, reqID, []ids.ID{acceptedBlk.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { t.Fatal(err) } sender.SendPushQueryF = nil asked = false - sender.SendGetF = func(_ ids.NodeID, rID uint32, _ ids.ID) { + sender.SendGetF = func(_ context.Context, _ ids.NodeID, rID uint32, _ ids.ID) { asked = true reqID = rID } - if err := te.Put(vdr, 0, pendingBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2184,7 +2289,7 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { rejectedBlk.StatusV = choices.Rejected - if err := te.Put(vdr, reqID, rejectedBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2224,7 +2329,7 @@ func TestEngineRejectionAmplification(t *testing.T) { BytesV: []byte{3}, } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) 
(snowman.Block, error) { switch { case bytes.Equal(b, acceptedBlk.Bytes()): return acceptedBlk, nil @@ -2238,7 +2343,7 @@ func TestEngineRejectionAmplification(t *testing.T) { } } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2253,12 +2358,12 @@ func TestEngineRejectionAmplification(t *testing.T) { queried bool reqID uint32 ) - sender.SendPushQueryF = func(_ ids.NodeIDSet, rID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ []byte) { queried = true reqID = rID } - if err := te.Put(vdr, 0, acceptedBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2266,7 +2371,7 @@ func TestEngineRejectionAmplification(t *testing.T) { t.Fatalf("Didn't query for the new block") } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2277,7 +2382,7 @@ func TestEngineRejectionAmplification(t *testing.T) { } } - if err := te.Chits(vdr, reqID, []ids.ID{acceptedBlk.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { t.Fatal(err) } @@ -2287,10 +2392,10 @@ func TestEngineRejectionAmplification(t *testing.T) { queried = false var asked bool - sender.SendPushQueryF = func(ids.NodeIDSet, uint32, []byte) { + sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { queried = true } - sender.SendGetF = func(_ ids.NodeID, rID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, _ ids.NodeID, rID uint32, blkID ids.ID) { asked = true reqID = rID @@ -2299,7 +2404,7 @@ func TestEngineRejectionAmplification(t *testing.T) { } } - if err := te.Put(vdr, 0, pendingBlk.Bytes()); 
err != nil { + if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2311,7 +2416,7 @@ func TestEngineRejectionAmplification(t *testing.T) { } rejectedBlk.StatusV = choices.Processing - if err := te.Put(vdr, reqID, rejectedBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2346,7 +2451,7 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) pendingBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), - RejectV: errors.New("shouldn't have issued to consensus"), + RejectV: errUnexpectedCall, StatusV: choices.Processing, }, ParentV: rejectedBlk.IDV, @@ -2354,7 +2459,7 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) BytesV: []byte{3}, } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, acceptedBlk.Bytes()): return acceptedBlk, nil @@ -2368,7 +2473,7 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) } } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2385,12 +2490,12 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) queried bool reqID uint32 ) - sender.SendPushQueryF = func(_ ids.NodeIDSet, rID uint32, _ []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, _ []byte) { queried = true reqID = rID } - if err := te.Put(vdr, 0, acceptedBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2398,7 +2503,7 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) t.Fatalf("Didn't query 
for the new block") } - if err := te.Chits(vdr, reqID, []ids.ID{acceptedBlk.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { t.Fatal(err) } @@ -2406,7 +2511,7 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) t.Fatalf("Should have finalized the consensus instance") } - if err := te.Put(vdr, 0, pendingBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2440,13 +2545,13 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) }, ParentV: gBlk.ID(), HeightV: 1, - VerifyV: errors.New("invalid"), + VerifyV: errUnexpectedCall, BytesV: []byte{2}, } pendingBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), - RejectV: errors.New("shouldn't have issued to consensus"), + RejectV: errUnexpectedCall, StatusV: choices.Processing, }, ParentV: rejectedBlk.IDV, @@ -2454,7 +2559,7 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) BytesV: []byte{3}, } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, acceptedBlk.Bytes()): return acceptedBlk, nil @@ -2468,7 +2573,7 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) } } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2481,12 +2586,12 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) queried bool reqID uint32 ) - sender.SendPushQueryF = func(_ ids.NodeIDSet, rID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], rID uint32, blkBytes []byte) { queried = true reqID = rID } - if err := 
te.Put(vdr, 0, acceptedBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2494,7 +2599,7 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) t.Fatalf("Didn't query for the new block") } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2507,7 +2612,7 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) } } - if err := te.Chits(vdr, reqID, []ids.ID{acceptedBlk.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { t.Fatal(err) } @@ -2515,7 +2620,7 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) t.Fatalf("Should have finalized the consensus instance") } - if err := te.Put(vdr, 0, pendingBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { t.Fatal(err) } @@ -2551,7 +2656,7 @@ func TestEngineNonPreferredAmplification(t *testing.T) { BytesV: []byte{2}, } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, preferredBlk.Bytes()): return preferredBlk, nil @@ -2563,7 +2668,7 @@ func TestEngineNonPreferredAmplification(t *testing.T) { } } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2572,22 +2677,22 @@ func TestEngineNonPreferredAmplification(t *testing.T) { } } - sender.SendPushQueryF = func(_ ids.NodeIDSet, _ uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkBytes []byte) { if bytes.Equal(nonPreferredBlk.Bytes(), 
blkBytes) { t.Fatalf("gossiped non-preferred block") } } - sender.SendPullQueryF = func(_ ids.NodeIDSet, _ uint32, blkID ids.ID) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID) { if blkID == nonPreferredBlk.ID() { t.Fatalf("gossiped non-preferred block") } } - if err := te.Put(vdr, 0, preferredBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, preferredBlk.Bytes()); err != nil { t.Fatal(err) } - if err := te.Put(vdr, 0, nonPreferredBlk.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, 0, nonPreferredBlk.Bytes()); err != nil { t.Fatal(err) } } @@ -2597,11 +2702,12 @@ func TestEngineNonPreferredAmplification(t *testing.T) { // to ensure that the consensus engine correctly handles the case that votes can // be bubbled correctly through a block that cannot pass verification until one // of its ancestors has been marked as accepted. -// G -// | -// A -// | -// B +// +// G +// | +// A +// | +// B func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) @@ -2625,11 +2731,11 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { ParentV: blk1.ID(), HeightV: 2, BytesV: []byte{2}, - VerifyV: errors.New("blk2 does not pass verification until after blk1 is accepted"), + VerifyV: errInvalid, } // The VM should be able to parse [blk1] and [blk2] - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, blk1.Bytes()): return blk1, nil @@ -2643,7 +2749,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // The VM should only be able to retrieve [gBlk] from storage // TODO GetBlockF should be updated after blocks are verified/accepted - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case 
gBlk.ID(): return gBlk, nil @@ -2654,7 +2760,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { asked := new(bool) reqID := new(uint32) - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID if *asked { t.Fatalf("Asked multiple times") @@ -2669,7 +2775,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } // Receive Gossip message for [blk2] first and expect the sender to issue a Get request for // its ancestor: [blk1]. - if err := te.Put(vdr, constants.GossipMsgRequestID, blk2.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes()); err != nil { t.Fatal(err) } @@ -2681,13 +2787,13 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // [blk2] since it currently fails verification. queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -2699,12 +2805,12 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // Answer the request, this should allow [blk1] to be issued and cause [blk2] to // fail verification. 
- if err := te.Put(vdr, *reqID, blk1.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, *reqID, blk1.Bytes()); err != nil { t.Fatal(err) } // now blk1 is verified, vm can return it - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2723,7 +2829,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { reqVdr := new(ids.NodeID) // Update GetF to produce a more detailed error message in the case that receiving a Chits // message causes us to send another Get request. - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { switch blkID { case blk1.ID(): t.Fatal("Unexpectedly sent a Get request for blk1") @@ -2736,7 +2842,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } } - sender.SendPullQueryF = func(_ ids.NodeIDSet, _ uint32, blkID ids.ID) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID) { switch blkID { case blk1.ID(): t.Fatal("Unexpectedly sent a PullQuery request for blk1") @@ -2749,11 +2855,11 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // Now we are expecting a Chits message, and we receive it for blk2 instead of blk1 // The votes should be bubbled through blk2 despite the fact that it is failing verification. 
- if err := te.Chits(vdr, *queryRequestID, []ids.ID{blk2.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk2.ID()}, nil); err != nil { t.Fatal(err) } - if err := te.Put(*reqVdr, *sendReqID, blk2.Bytes()); err != nil { + if err := te.Put(context.Background(), *reqVdr, *sendReqID, blk2.Bytes()); err != nil { t.Fatal(err) } @@ -2764,7 +2870,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // Now that [blk1] has been marked as Accepted, [blk2] can pass verification. blk2.VerifyV = nil - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2778,13 +2884,13 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } *queried = false // Prepare to PushQuery [blk2] after receiving a Gossip message with [blk2]. - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -2794,7 +2900,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } } // Expect that the Engine will send a PushQuery after receiving this Gossip message for [blk2]. - if err := te.Put(vdr, constants.GossipMsgRequestID, blk2.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes()); err != nil { t.Fatal(err) } @@ -2803,7 +2909,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } // After a single vote for [blk2], it should be marked as accepted. 
- if err := te.Chits(vdr, *queryRequestID, []ids.ID{blk2.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk2.ID()}, nil); err != nil { t.Fatal(err) } @@ -2817,13 +2923,14 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // regression test to ensure that the consensus engine correctly handles the // case that votes can be bubbled correctly through a chain that cannot pass // verification until one of its ancestors has been marked as accepted. -// G -// | -// A -// | -// B -// | -// C +// +// G +// | +// A +// | +// B +// | +// C func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) @@ -2847,7 +2954,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { ParentV: blk1.ID(), HeightV: 2, BytesV: []byte{2}, - VerifyV: errors.New("blk2 does not pass verification until after blk1 is accepted"), + VerifyV: errInvalid, } // [blk3] is a child of [blk2] and will not attempt to be issued until // [blk2] has successfully been verified. 
@@ -2862,7 +2969,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { } // The VM should be able to parse [blk1], [blk2], and [blk3] - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, blk1.Bytes()): return blk1, nil @@ -2877,7 +2984,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { } // The VM should be able to retrieve [gBlk] and [blk1] from storage - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -2890,7 +2997,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { asked := new(bool) reqID := new(uint32) - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID if *asked { t.Fatalf("Asked multiple times") @@ -2905,7 +3012,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { } // Receive Gossip message for [blk3] first and expect the sender to issue a // Get request for its ancestor: [blk2]. - if err := te.Put(vdr, constants.GossipMsgRequestID, blk3.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk3.Bytes()); err != nil { t.Fatal(err) } @@ -2918,13 +3025,13 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { // We should not PushQuery [blk3] because [blk2] wasn't issued. 
queried := new(bool) queryRequestID := new(uint32) - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { if *queried { t.Fatalf("Asked multiple times") } *queried = true *queryRequestID = requestID - vdrSet := ids.NodeIDSet{} + vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) if !inVdrs.Equals(vdrSet) { t.Fatalf("Asking wrong validator for preference") @@ -2935,7 +3042,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { } // Answer the request, this should result in [blk1] being issued as well. - if err := te.Put(vdr, *reqID, blk2.Bytes()); err != nil { + if err := te.Put(context.Background(), vdr, *reqID, blk2.Bytes()); err != nil { t.Fatal(err) } @@ -2947,7 +3054,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { reqVdr := new(ids.NodeID) // Update GetF to produce a more detailed error message in the case that receiving a Chits // message causes us to send another Get request. - sender.SendGetF = func(inVdr ids.NodeID, requestID uint32, blkID ids.ID) { + sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { switch blkID { case blk1.ID(): t.Fatal("Unexpectedly sent a Get request for blk1") @@ -2966,7 +3073,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { } } - sender.SendPullQueryF = func(_ ids.NodeIDSet, _ uint32, blkID ids.ID) { + sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID) { switch blkID { case blk1.ID(): t.Fatal("Unexpectedly sent a PullQuery request for blk1") @@ -2981,14 +3088,14 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { // Now we are expecting a Chits message, and we receive it for [blk3] // instead of blk1. This will cause the node to again request [blk3]. 
- if err := te.Chits(vdr, *queryRequestID, []ids.ID{blk3.ID()}); err != nil { + if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk3.ID()}, nil); err != nil { t.Fatal(err) } // Drop the re-request for blk3 to cause the poll to termindate. The votes // should be bubbled through blk3 despite the fact that it hasn't been // issued. - if err := te.GetFailed(*reqVdr, *sendReqID); err != nil { + if err := te.GetFailed(context.Background(), *reqVdr, *sendReqID); err != nil { t.Fatal(err) } @@ -3038,21 +3145,24 @@ func TestSendMixedQuery(t *testing.T) { engConfig.Params.MixedQueryNumPushNonVdr = 12 engConfig.Params.MixedQueryNumPushVdr = 14 engConfig.Params.K = 20 - _, vdrSet, sender, vm, te, gBlk := setup(t, commonCfg, engConfig) - - vdrsList := []validators.Validator{} - vdrs := ids.NodeIDSet{} - for i := 0; i < te.Config.Params.K; i++ { - vdr := ids.GenerateTestNodeID() - vdrs.Add(vdr) - vdrsList = append(vdrsList, validators.NewValidator(vdr, 1)) + _, _, sender, vm, te, gBlk := setup(t, commonCfg, engConfig) + + vdrs := set.Set[ids.NodeID]{} + te.Validators = validators.NewSet() + for i := 0; i < te.Params.K; i++ { + vdrID := ids.GenerateTestNodeID() + vdrs.Add(vdrID) + err := te.Validators.Add(vdrID, nil, ids.Empty, 1) + if err != nil { + t.Fatal(err) + } } if tt.isVdr { vdrs.Add(te.Ctx.NodeID) - vdrsList = append(vdrsList, validators.NewValidator(te.Ctx.NodeID, 1)) - } - if err := vdrSet.Set(vdrsList); err != nil { - t.Fatal(err) + err := te.Validators.Add(te.Ctx.NodeID, nil, ids.Empty, 1) + if err != nil { + t.Fatal(err) + } } // [blk1] is a child of [gBlk] and passes verification @@ -3067,7 +3177,7 @@ func TestSendMixedQuery(t *testing.T) { } // The VM should be able to parse [blk1] - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, blk1.Bytes()): return blk1, nil @@ -3078,7 +3188,7 @@ func TestSendMixedQuery(t *testing.T) { } 
// The VM should only be able to retrieve [gBlk] from storage - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -3089,8 +3199,8 @@ func TestSendMixedQuery(t *testing.T) { pullQuerySent := new(bool) pullQueryReqID := new(uint32) - pullQueriedVdrs := ids.NodeIDSet{} - sender.SendPullQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkID ids.ID) { + pullQueriedVdrs := set.Set[ids.NodeID]{} + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID) { switch { case *pullQuerySent: t.Fatalf("Asked multiple times") @@ -3104,8 +3214,8 @@ func TestSendMixedQuery(t *testing.T) { pushQuerySent := new(bool) pushQueryReqID := new(uint32) - pushQueriedVdrs := ids.NodeIDSet{} - sender.SendPushQueryF = func(inVdrs ids.NodeIDSet, requestID uint32, blkBytes []byte) { + pushQueriedVdrs := set.Set[ids.NodeID]{} + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { switch { case *pushQuerySent: t.Fatal("Asked multiple times") @@ -3119,7 +3229,7 @@ func TestSendMixedQuery(t *testing.T) { // Give the engine blk1. It should insert it into consensus and send a mixed query // consisting of 12 push queries and 8 pull queries. 
- if err := te.Put(vdrSet.List()[0].ID(), constants.GossipMsgRequestID, blk1.Bytes()); err != nil { + if err := te.Put(context.Background(), te.Validators.List()[0].NodeID, constants.GossipMsgRequestID, blk1.Bytes()); err != nil { t.Fatal(err) } @@ -3171,7 +3281,7 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { }, ParentV: grandParentBlk.ID(), HeightV: 2, - VerifyV: errors.New(""), // Reports as invalid + VerifyV: errTest, // Reports as invalid BytesV: []byte{2}, } @@ -3198,12 +3308,12 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { BytesV: []byte{3}, } - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(grandParentBlk.BytesV, b) return grandParentBlk, nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -3215,16 +3325,16 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { } queryRequestGPID := new(uint32) - sender.SendPushQueryF = func(_ ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { require.Equal(grandParentBlk.Bytes(), blkBytes) *queryRequestGPID = requestID } // Give the engine the grandparent - err := te.Put(vdr, 0, grandParentBlk.BytesV) + err := te.Put(context.Background(), vdr, 0, grandParentBlk.BytesV) require.NoError(err) - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(parentBlkA.BytesV, b) return parentBlkA, nil } @@ -3232,15 +3342,15 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { // Give the node [parentBlkA]/[parentBlkB]. // When it's parsed we get [parentBlkA] (not [parentBlkB]). 
// [parentBlkA] fails verification and gets put into [te.nonVerifiedCache]. - err = te.Put(vdr, 0, parentBlkA.BytesV) + err = te.Put(context.Background(), vdr, 0, parentBlkA.BytesV) require.NoError(err) - vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(parentBlkB.BytesV, b) return parentBlkB, nil } - vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlk.ID(): return gBlk, nil @@ -3254,7 +3364,7 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { } queryRequestAID := new(uint32) - sender.SendPushQueryF = func(_ ids.NodeIDSet, requestID uint32, blkBytes []byte) { + sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { require.Equal(parentBlkA.Bytes(), blkBytes) *queryRequestAID = requestID } @@ -3265,14 +3375,14 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { // When we fetch it using [GetBlockF] we get [parentBlkB]. // Note that [parentBlkB] doesn't fail verification and is issued into consensus. // This evicts [parentBlkA] from [te.nonVerifiedCache]. - err = te.Put(vdr, 0, parentBlkA.BytesV) + err = te.Put(context.Background(), vdr, 0, parentBlkA.BytesV) require.NoError(err) // Give 2 chits for [parentBlkA]/[parentBlkB] - err = te.Chits(vdr, *queryRequestAID, []ids.ID{parentBlkB.IDV}) + err = te.Chits(context.Background(), vdr, *queryRequestAID, []ids.ID{parentBlkB.IDV}, nil) require.NoError(err) - err = te.Chits(vdr, *queryRequestGPID, []ids.ID{parentBlkB.IDV}) + err = te.Chits(context.Background(), vdr, *queryRequestGPID, []ids.ID{parentBlkB.IDV}, nil) require.NoError(err) // Assert that the blocks' statuses are correct. 
@@ -3280,17 +3390,119 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { require.Equal(choices.Processing, parentBlkA.Status()) require.Equal(choices.Accepted, parentBlkB.Status()) - vm.BuildBlockF = func() (snowman.Block, error) { + vm.BuildBlockF = func(context.Context) (snowman.Block, error) { return childBlk, nil } sentQuery := new(bool) - sender.SendPushQueryF = func(ids.NodeIDSet, uint32, []byte) { + sender.SendPushQueryF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) { *sentQuery = true } // Should issue a new block and send a query for it. - err = te.Notify(common.PendingTxs) + err = te.Notify(context.Background(), common.PendingTxs) require.NoError(err) require.True(*sentQuery) } + +func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { + require := require.New(t) + + engCfg := DefaultConfigs() + engCfg.Params = snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 2, + BetaRogue: 2, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + MixedQueryNumPushNonVdr: 1, + } + + vals := validators.NewSet() + engCfg.Validators = vals + + vdr := ids.GenerateTestNodeID() + require.NoError(vals.Add(vdr, nil, ids.Empty, 1)) + + sender := &common.SenderTest{T: t} + engCfg.Sender = sender + + sender.Default(true) + + vm := &block.TestVM{} + vm.T = t + engCfg.VM = vm + + vm.Default(true) + vm.CantSetState = false + vm.CantSetPreference = false + + gBlk := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Accepted, + }} + + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return gBlk.ID(), nil + } + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { + require.Equal(gBlk.ID(), id) + return gBlk, nil + } + + te, err := newTransitive(engCfg) + require.NoError(err) + require.NoError(te.Start(context.Background(), 0)) + + vm.LastAcceptedF = nil + + blk := &snowman.TestBlock{ + 
TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + ParentV: gBlk.IDV, + HeightV: 1, + BytesV: []byte{1}, + } + + queryRequestID := new(uint32) + sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { + require.Contains(inVdrs, vdr) + require.Equal(blk.Bytes(), blkBytes) + *queryRequestID = requestID + } + + require.NoError(te.issue(context.Background(), blk)) + + vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { + switch id { + case gBlk.ID(): + return gBlk, nil + case blk.ID(): + return blk, nil + } + t.Fatalf("unknown block") + panic("Should have errored") + } + + require.Equal(choices.Processing, blk.Status()) + + sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID) { + require.Contains(inVdrs, vdr) + require.Equal(blk.ID(), blkID) + *queryRequestID = requestID + } + + blkIDs := []ids.ID{blk.ID()} + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blkIDs, blkIDs)) + + require.Equal(choices.Processing, blk.Status()) + + require.NoError(te.QueryFailed(context.Background(), vdr, *queryRequestID)) + + require.Equal(choices.Accepted, blk.Status()) +} diff --git a/avalanchego/snow/engine/snowman/voter.go b/avalanchego/snow/engine/snowman/voter.go index 33e35dfe..e2813851 100644 --- a/avalanchego/snow/engine/snowman/voter.go +++ b/avalanchego/snow/engine/snowman/voter.go @@ -1,12 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snowman import ( + "context" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/bag" + "github.com/ava-labs/avalanchego/utils/set" ) // Voter records chits received from [vdr] once its dependencies are met. 
@@ -15,26 +19,30 @@ type voter struct { vdr ids.NodeID requestID uint32 response ids.ID - deps ids.Set + deps set.Set[ids.ID] } -func (v *voter) Dependencies() ids.Set { return v.deps } +func (v *voter) Dependencies() set.Set[ids.ID] { + return v.deps +} // Mark that a dependency has been met. -func (v *voter) Fulfill(id ids.ID) { +func (v *voter) Fulfill(ctx context.Context, id ids.ID) { v.deps.Remove(id) - v.Update() + v.Update(ctx) } // Abandon this attempt to record chits. -func (v *voter) Abandon(id ids.ID) { v.Fulfill(id) } +func (v *voter) Abandon(ctx context.Context, id ids.ID) { + v.Fulfill(ctx, id) +} -func (v *voter) Update() { +func (v *voter) Update(ctx context.Context) { if v.deps.Len() != 0 || v.t.errs.Errored() { return } - var results []ids.Bag + var results []bag.Bag[ids.ID] if v.response == ids.Empty { results = v.t.polls.Drop(v.requestID, v.vdr) } else { @@ -45,19 +53,19 @@ func (v *voter) Update() { return } - // To prevent any potential deadlocks with un-disclosed dependencies, votes - // must be bubbled to the nearest valid block - for i, result := range results { - results[i] = v.bubbleVotes(result) - } - for _, result := range results { result := result + v.t.Ctx.Log.Debug("filtering poll results", + zap.Stringer("result", &result), + ) + // To prevent any potential deadlocks with un-disclosed dependencies, + // votes must be bubbled to the nearest valid block + result = v.bubbleVotes(ctx, result) v.t.Ctx.Log.Debug("finishing poll", zap.Stringer("result", &result), ) - if err := v.t.Consensus.RecordPoll(result); err != nil { + if err := v.t.Consensus.RecordPoll(ctx, result); err != nil { v.t.errs.Add(err) } } @@ -66,7 +74,7 @@ func (v *voter) Update() { return } - if err := v.t.VM.SetPreference(v.t.Consensus.Preference()); err != nil { + if err := v.t.VM.SetPreference(ctx, v.t.Consensus.Preference()); err != nil { v.t.errs.Add(err) return } @@ -77,7 +85,7 @@ func (v *voter) Update() { } v.t.Ctx.Log.Debug("Snowman engine can't quiesce") - 
v.t.repoll() + v.t.repoll(ctx) } // bubbleVotes bubbles the [votes] a set of the number of votes for specific @@ -87,8 +95,8 @@ func (v *voter) Update() { // Note: bubbleVotes does not bubbleVotes to all of the ancestors in consensus, // just the most recent one. bubbling to the rest of the ancestors, which may // also be in consensus is handled in RecordPoll. -func (v *voter) bubbleVotes(votes ids.Bag) ids.Bag { - bubbledVotes := ids.Bag{} +func (v *voter) bubbleVotes(ctx context.Context, votes bag.Bag[ids.ID]) bag.Bag[ids.ID] { + bubbledVotes := bag.Bag[ids.ID]{} votesLoop: for _, vote := range votes.List() { @@ -101,7 +109,7 @@ votesLoop: zap.Stringer("parentID", rootID), ) - blk, err := v.t.GetBlock(rootID) + blk, err := v.t.GetBlock(ctx, rootID) // If we cannot retrieve the block, drop [vote] if err != nil { v.t.Ctx.Log.Debug("dropping vote(s)", @@ -136,7 +144,7 @@ votesLoop: ) blkID = parentID - blk, err = v.t.GetBlock(blkID) + blk, err = v.t.GetBlock(ctx, blkID) // If we cannot retrieve the block, drop [vote] if err != nil { v.t.Ctx.Log.Debug("dropping vote(s)", diff --git a/avalanchego/snow/events/blockable.go b/avalanchego/snow/events/blockable.go index 1698c6d3..233dae80 100644 --- a/avalanchego/snow/events/blockable.go +++ b/avalanchego/snow/events/blockable.go @@ -1,21 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package events import ( + "context" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) // Blockable defines what an object must implement to be able to block on // dependent events being completed. 
type Blockable interface { // IDs that this object is blocking on - Dependencies() ids.Set + Dependencies() set.Set[ids.ID] // Notify this object that an event has been fulfilled - Fulfill(ids.ID) + Fulfill(context.Context, ids.ID) // Notify this object that an event has been abandoned - Abandon(ids.ID) + Abandon(context.Context, ids.ID) // Update the state of this object without changing the status of any events - Update() + Update(context.Context) } diff --git a/avalanchego/snow/events/blocker.go b/avalanchego/snow/events/blocker.go index 7e3ad2ca..f72f6729 100644 --- a/avalanchego/snow/events/blocker.go +++ b/avalanchego/snow/events/blocker.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package events import ( + "context" "fmt" "strings" @@ -34,39 +35,39 @@ func (b *Blocker) Len() int { // Fulfill notifies all objects blocking on the event whose ID is that // the event has happened -func (b *Blocker) Fulfill(id ids.ID) { +func (b *Blocker) Fulfill(ctx context.Context, id ids.ID) { b.init() blocking := (*b)[id] delete(*b, id) for _, pending := range blocking { - pending.Fulfill(id) + pending.Fulfill(ctx, id) } } // Abandon notifies all objects blocking on the event whose ID is that // the event has been abandoned -func (b *Blocker) Abandon(id ids.ID) { +func (b *Blocker) Abandon(ctx context.Context, id ids.ID) { b.init() blocking := (*b)[id] delete(*b, id) for _, pending := range blocking { - pending.Abandon(id) + pending.Abandon(ctx, id) } } // Register a new Blockable and its dependencies -func (b *Blocker) Register(pending Blockable) { +func (b *Blocker) Register(ctx context.Context, pending Blockable) { b.init() for pendingID := range pending.Dependencies() { (*b)[pendingID] = append((*b)[pendingID], pending) } - pending.Update() + pending.Update(ctx) } // PrefixedString returns the same value as the String 
function, with all the @@ -74,18 +75,18 @@ func (b *Blocker) Register(pending Blockable) { func (b *Blocker) PrefixedString(prefix string) string { b.init() - s := strings.Builder{} - - s.WriteString(fmt.Sprintf("Blocking on %d IDs:", len(*b))) - + sb := strings.Builder{} + sb.WriteString(fmt.Sprintf("Blocking on %d IDs:", len(*b))) for key, value := range *b { - s.WriteString(fmt.Sprintf("\n%sID[%s]: %d", + sb.WriteString(fmt.Sprintf("\n%sID[%s]: %d", prefix, key, - len(value))) + len(value), + )) } - - return strings.TrimSuffix(s.String(), "\n") + return strings.TrimSuffix(sb.String(), "\n") } -func (b *Blocker) String() string { return b.PrefixedString("") } +func (b *Blocker) String() string { + return b.PrefixedString("") +} diff --git a/avalanchego/snow/events/blocker_test.go b/avalanchego/snow/events/blocker_test.go index cad6f03a..d3710be9 100644 --- a/avalanchego/snow/events/blocker_test.go +++ b/avalanchego/snow/events/blocker_test.go @@ -1,12 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package events import ( + "context" "testing" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) func TestBlocker(t *testing.T) { @@ -19,56 +21,56 @@ func TestBlocker(t *testing.T) { id2 := ids.GenerateTestID() calledDep := new(bool) - a.dependencies = func() ids.Set { + a.dependencies = func() set.Set[ids.ID] { *calledDep = true - s := ids.Set{} + s := set.Set[ids.ID]{} s.Add(id0, id1) return s } calledFill := new(bool) - a.fulfill = func(ids.ID) { + a.fulfill = func(context.Context, ids.ID) { *calledFill = true } calledAbandon := new(bool) - a.abandon = func(ids.ID) { + a.abandon = func(context.Context, ids.ID) { *calledAbandon = true } calledUpdate := new(bool) - a.update = func() { + a.update = func(context.Context) { *calledUpdate = true } - b.Register(a) + b.Register(context.Background(), a) switch { case !*calledDep, *calledFill, *calledAbandon, !*calledUpdate: t.Fatalf("Called wrong function") } - b.Fulfill(id2) - b.Abandon(id2) + b.Fulfill(context.Background(), id2) + b.Abandon(context.Background(), id2) switch { case !*calledDep, *calledFill, *calledAbandon, !*calledUpdate: t.Fatalf("Called wrong function") } - b.Fulfill(id0) + b.Fulfill(context.Background(), id0) switch { case !*calledDep, !*calledFill, *calledAbandon, !*calledUpdate: t.Fatalf("Called wrong function") } - b.Abandon(id0) + b.Abandon(context.Background(), id0) switch { case !*calledDep, !*calledFill, *calledAbandon, !*calledUpdate: t.Fatalf("Called wrong function") } - b.Abandon(id1) + b.Abandon(context.Background(), id1) switch { case !*calledDep, !*calledFill, !*calledAbandon, !*calledUpdate: @@ -77,22 +79,35 @@ func TestBlocker(t *testing.T) { } type testBlockable struct { - dependencies func() ids.Set - fulfill func(ids.ID) - abandon func(ids.ID) - update func() + dependencies func() set.Set[ids.ID] + fulfill func(context.Context, ids.ID) + abandon func(context.Context, ids.ID) + update func(context.Context) } func newTestBlockable() *testBlockable { 
return &testBlockable{ - dependencies: func() ids.Set { return ids.Set{} }, - fulfill: func(ids.ID) {}, - abandon: func(ids.ID) {}, - update: func() {}, + dependencies: func() set.Set[ids.ID] { + return set.Set[ids.ID]{} + }, + fulfill: func(context.Context, ids.ID) {}, + abandon: func(context.Context, ids.ID) {}, + update: func(context.Context) {}, } } -func (b *testBlockable) Dependencies() ids.Set { return b.dependencies() } -func (b *testBlockable) Fulfill(id ids.ID) { b.fulfill(id) } -func (b *testBlockable) Abandon(id ids.ID) { b.abandon(id) } -func (b *testBlockable) Update() { b.update() } +func (b *testBlockable) Dependencies() set.Set[ids.ID] { + return b.dependencies() +} + +func (b *testBlockable) Fulfill(ctx context.Context, id ids.ID) { + b.fulfill(ctx, id) +} + +func (b *testBlockable) Abandon(ctx context.Context, id ids.ID) { + b.abandon(ctx, id) +} + +func (b *testBlockable) Update(ctx context.Context) { + b.update(ctx) +} diff --git a/avalanchego/snow/networking/benchlist/benchable.go b/avalanchego/snow/networking/benchlist/benchable.go index 843f1f76..845eeeb7 100644 --- a/avalanchego/snow/networking/benchlist/benchable.go +++ b/avalanchego/snow/networking/benchlist/benchable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist diff --git a/avalanchego/snow/networking/benchlist/benchlist.go b/avalanchego/snow/networking/benchlist/benchlist.go index 6e5197e6..4f7cb20a 100644 --- a/avalanchego/snow/networking/benchlist/benchlist.go +++ b/avalanchego/snow/networking/benchlist/benchlist.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package benchlist @@ -17,13 +17,14 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/timer/mockable" safemath "github.com/ava-labs/avalanchego/utils/math" ) -var _ heap.Interface = &benchedQueue{} +var _ heap.Interface = (*benchedQueue)(nil) // If a peer consistently does not respond to queries, it will // increase latencies on the network whenever that peer is polled. @@ -54,8 +55,14 @@ type benchData struct { // Each element is a benched validator type benchedQueue []*benchData -func (bq benchedQueue) Len() int { return len(bq) } -func (bq benchedQueue) Less(i, j int) bool { return bq[i].benchedUntil.Before(bq[j].benchedUntil) } +func (bq benchedQueue) Len() int { + return len(bq) +} + +func (bq benchedQueue) Less(i, j int) bool { + return bq[i].benchedUntil.Before(bq[j].benchedUntil) +} + func (bq benchedQueue) Swap(i, j int) { bq[i], bq[j] = bq[j], bq[i] bq[i].index = i @@ -111,7 +118,7 @@ type benchlist struct { failureStreaks map[ids.NodeID]failureStreak // IDs of validators that are currently benched - benchlistSet ids.NodeIDSet + benchlistSet set.Set[ids.NodeID] // Min heap containing benched validators and their endtimes // Pop() returns the next validator to leave @@ -150,7 +157,7 @@ func NewBenchlist( chainID: chainID, log: log, failureStreaks: make(map[ids.NodeID]failureStreak), - benchlistSet: ids.NodeIDSet{}, + benchlistSet: set.Set[ids.NodeID]{}, benchable: benchable, vdrs: validators, threshold: threshold, @@ -196,14 +203,7 @@ func (b *benchlist) remove(node *benchData) { // Update metrics b.metrics.numBenched.Set(float64(b.benchedQueue.Len())) - benchedStake, err := b.vdrs.SubsetWeight(b.benchlistSet) - if err != nil { - // This should never happen - b.log.Error("couldn't get benched stake", - zap.Error(err), - ) - return - 
} + benchedStake := b.vdrs.SubsetWeight(b.benchlistSet) b.metrics.weightBenched.Set(float64(benchedStake)) } @@ -290,22 +290,14 @@ func (b *benchlist) RegisterFailure(nodeID ids.NodeID) { // Assumes [b.lock] is held // Assumes [nodeID] is not already benched func (b *benchlist) bench(nodeID ids.NodeID) { - benchedStake, err := b.vdrs.SubsetWeight(b.benchlistSet) - if err != nil { - // This should never happen - b.log.Error("couldn't get benched stake, resetting benchlist", - zap.Error(err), - ) - return - } - - validatorStake, isVdr := b.vdrs.GetWeight(nodeID) - if !isVdr { + validatorStake := b.vdrs.GetWeight(nodeID) + if validatorStake == 0 { // We might want to bench a non-validator because they don't respond to // my Get requests, but we choose to only bench validators. return } + benchedStake := b.vdrs.SubsetWeight(b.benchlistSet) newBenchedStake, err := safemath.Add64(benchedStake, validatorStake) if err != nil { // This should never happen diff --git a/avalanchego/snow/networking/benchlist/benchlist_test.go b/avalanchego/snow/networking/benchlist/benchlist_test.go index 60e82b44..f3f36d73 100644 --- a/avalanchego/snow/networking/benchlist/benchlist_test.go +++ b/avalanchego/snow/networking/benchlist/benchlist_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package benchlist @@ -22,19 +22,19 @@ var minimumFailingDuration = 5 * time.Minute // Test that validators are properly added to the bench func TestBenchlistAdd(t *testing.T) { vdrs := validators.NewSet() - vdr0 := validators.GenerateRandomValidator(50) - vdr1 := validators.GenerateRandomValidator(50) - vdr2 := validators.GenerateRandomValidator(50) - vdr3 := validators.GenerateRandomValidator(50) - vdr4 := validators.GenerateRandomValidator(50) + vdrID0 := ids.GenerateTestNodeID() + vdrID1 := ids.GenerateTestNodeID() + vdrID2 := ids.GenerateTestNodeID() + vdrID3 := ids.GenerateTestNodeID() + vdrID4 := ids.GenerateTestNodeID() errs := wrappers.Errs{} errs.Add( - vdrs.AddWeight(vdr0.ID(), vdr0.Weight()), - vdrs.AddWeight(vdr1.ID(), vdr1.Weight()), - vdrs.AddWeight(vdr2.ID(), vdr2.Weight()), - vdrs.AddWeight(vdr3.ID(), vdr3.Weight()), - vdrs.AddWeight(vdr4.ID(), vdr4.Weight()), + vdrs.Add(vdrID0, nil, ids.Empty, 50), + vdrs.Add(vdrID1, nil, ids.Empty, 50), + vdrs.Add(vdrID2, nil, ids.Empty, 50), + vdrs.Add(vdrID3, nil, ids.Empty, 50), + vdrs.Add(vdrID4, nil, ids.Empty, 50), ) if errs.Errored() { t.Fatal(errs.Err) @@ -67,11 +67,11 @@ func TestBenchlistAdd(t *testing.T) { // Nobody should be benched at the start b.lock.Lock() - require.False(t, b.isBenched(vdr0.ID())) - require.False(t, b.isBenched(vdr1.ID())) - require.False(t, b.isBenched(vdr2.ID())) - require.False(t, b.isBenched(vdr3.ID())) - require.False(t, b.isBenched(vdr4.ID())) + require.False(t, b.isBenched(vdrID0)) + require.False(t, b.isBenched(vdrID1)) + require.False(t, b.isBenched(vdrID2)) + require.False(t, b.isBenched(vdrID3)) + require.False(t, b.isBenched(vdrID4)) require.Len(t, b.failureStreaks, 0) require.Equal(t, b.benchedQueue.Len(), 0) require.Equal(t, b.benchlistSet.Len(), 0) @@ -79,25 +79,25 @@ func TestBenchlistAdd(t *testing.T) { // Register [threshold - 1] failures in a row for vdr0 for i := 0; i < threshold-1; i++ { - b.RegisterFailure(vdr0.ID()) + b.RegisterFailure(vdrID0) } // Still 
shouldn't be benched due to not enough consecutive failure - require.False(t, b.isBenched(vdr0.ID())) + require.False(t, b.isBenched(vdrID0)) require.Equal(t, b.benchedQueue.Len(), 0) require.Equal(t, b.benchlistSet.Len(), 0) require.Len(t, b.failureStreaks, 1) - fs := b.failureStreaks[vdr0.ID()] + fs := b.failureStreaks[vdrID0] require.Equal(t, threshold-1, fs.consecutive) require.True(t, fs.firstFailure.Equal(now)) // Register another failure - b.RegisterFailure(vdr0.ID()) + b.RegisterFailure(vdrID0) // Still shouldn't be benched because not enough time (any in this case) // has passed since the first failure b.lock.Lock() - require.False(t, b.isBenched(vdr0.ID())) + require.False(t, b.isBenched(vdrID0)) require.Equal(t, b.benchedQueue.Len(), 0) require.Equal(t, b.benchlistSet.Len(), 0) b.lock.Unlock() @@ -114,16 +114,16 @@ func TestBenchlistAdd(t *testing.T) { b.lock.Unlock() // Register another failure - b.RegisterFailure(vdr0.ID()) + b.RegisterFailure(vdrID0) // Now this validator should be benched b.lock.Lock() - require.True(t, b.isBenched(vdr0.ID())) + require.True(t, b.isBenched(vdrID0)) require.Equal(t, b.benchedQueue.Len(), 1) require.Equal(t, b.benchlistSet.Len(), 1) next := b.benchedQueue[0] - require.Equal(t, vdr0.ID(), next.nodeID) + require.Equal(t, vdrID0, next.nodeID) require.True(t, !next.benchedUntil.After(now.Add(duration))) require.True(t, !next.benchedUntil.Before(now.Add(duration/2))) require.Len(t, b.failureStreaks, 0) @@ -133,24 +133,24 @@ func TestBenchlistAdd(t *testing.T) { // Give another validator [threshold-1] failures for i := 0; i < threshold-1; i++ { - b.RegisterFailure(vdr1.ID()) + b.RegisterFailure(vdrID1) } // Register another failure - b.RegisterResponse(vdr1.ID()) + b.RegisterResponse(vdrID1) // vdr1 shouldn't be benched // The response should have cleared its consecutive failures b.lock.Lock() - require.True(t, b.isBenched(vdr0.ID())) - require.False(t, b.isBenched(vdr1.ID())) + require.True(t, b.isBenched(vdrID0)) + 
require.False(t, b.isBenched(vdrID1)) require.Equal(t, b.benchedQueue.Len(), 1) require.Equal(t, b.benchlistSet.Len(), 1) require.Len(t, b.failureStreaks, 0) b.lock.Unlock() // Register another failure for vdr0, who is benched - b.RegisterFailure(vdr0.ID()) + b.RegisterFailure(vdrID0) // A failure for an already benched validator should not count against it b.lock.Lock() @@ -161,20 +161,20 @@ func TestBenchlistAdd(t *testing.T) { // Test that the benchlist won't bench more than the maximum portion of stake func TestBenchlistMaxStake(t *testing.T) { vdrs := validators.NewSet() - vdr0 := validators.GenerateRandomValidator(1000) - vdr1 := validators.GenerateRandomValidator(1000) - vdr2 := validators.GenerateRandomValidator(1000) - vdr3 := validators.GenerateRandomValidator(2000) - vdr4 := validators.GenerateRandomValidator(100) - // Total weight is 5100 + vdrID0 := ids.GenerateTestNodeID() + vdrID1 := ids.GenerateTestNodeID() + vdrID2 := ids.GenerateTestNodeID() + vdrID3 := ids.GenerateTestNodeID() + vdrID4 := ids.GenerateTestNodeID() + // Total weight is 5100 errs := wrappers.Errs{} errs.Add( - vdrs.AddWeight(vdr0.ID(), vdr0.Weight()), - vdrs.AddWeight(vdr1.ID(), vdr1.Weight()), - vdrs.AddWeight(vdr2.ID(), vdr2.Weight()), - vdrs.AddWeight(vdr3.ID(), vdr3.Weight()), - vdrs.AddWeight(vdr4.ID(), vdr4.Weight()), + vdrs.Add(vdrID0, nil, ids.Empty, 1000), + vdrs.Add(vdrID1, nil, ids.Empty, 1000), + vdrs.Add(vdrID2, nil, ids.Empty, 1000), + vdrs.Add(vdrID3, nil, ids.Empty, 2000), + vdrs.Add(vdrID4, nil, ids.Empty, 100), ) if errs.Errored() { t.Fatal(errs.Err) @@ -204,9 +204,9 @@ func TestBenchlistMaxStake(t *testing.T) { b.clock.Set(now) // Register [threshold-1] failures for 3 validators - for _, vdr := range []validators.Validator{vdr0, vdr1, vdr2} { + for _, vdrID := range []ids.NodeID{vdrID0, vdrID1, vdrID2} { for i := 0; i < threshold-1; i++ { - b.RegisterFailure(vdr.ID()) + b.RegisterFailure(vdrID) } } @@ -217,28 +217,28 @@ func TestBenchlistMaxStake(t *testing.T) { 
b.lock.Unlock() // Register another failure for all three - for _, vdr := range []validators.Validator{vdr0, vdr1, vdr2} { - b.RegisterFailure(vdr.ID()) + for _, vdrID := range []ids.NodeID{vdrID0, vdrID1, vdrID2} { + b.RegisterFailure(vdrID) } // Only vdr0 and vdr1 should be benched (total weight 2000) // Benching vdr2 (weight 1000) would cause the amount benched // to exceed the maximum b.lock.Lock() - require.True(t, b.isBenched(vdr0.ID())) - require.True(t, b.isBenched(vdr1.ID())) - require.False(t, b.isBenched(vdr2.ID())) + require.True(t, b.isBenched(vdrID0)) + require.True(t, b.isBenched(vdrID1)) + require.False(t, b.isBenched(vdrID2)) require.Equal(t, b.benchedQueue.Len(), 2) require.Equal(t, b.benchlistSet.Len(), 2) require.Len(t, b.failureStreaks, 1) - fs := b.failureStreaks[vdr2.ID()] + fs := b.failureStreaks[vdrID2] fs.consecutive = threshold fs.firstFailure = now b.lock.Unlock() // Register threshold - 1 failures for vdr4 for i := 0; i < threshold-1; i++ { - b.RegisterFailure(vdr4.ID()) + b.RegisterFailure(vdrID4) } // Advance the time past min failing duration @@ -248,40 +248,40 @@ func TestBenchlistMaxStake(t *testing.T) { b.lock.Unlock() // Register another failure for vdr4 - b.RegisterFailure(vdr4.ID()) + b.RegisterFailure(vdrID4) // vdr4 should be benched now b.lock.Lock() - require.True(t, b.isBenched(vdr0.ID())) - require.True(t, b.isBenched(vdr1.ID())) - require.True(t, b.isBenched(vdr4.ID())) + require.True(t, b.isBenched(vdrID0)) + require.True(t, b.isBenched(vdrID1)) + require.True(t, b.isBenched(vdrID4)) require.Equal(t, 3, b.benchedQueue.Len()) require.Equal(t, 3, b.benchlistSet.Len()) - require.Contains(t, b.benchlistSet, vdr0.ID()) - require.Contains(t, b.benchlistSet, vdr1.ID()) - require.Contains(t, b.benchlistSet, vdr4.ID()) + require.Contains(t, b.benchlistSet, vdrID0) + require.Contains(t, b.benchlistSet, vdrID1) + require.Contains(t, b.benchlistSet, vdrID4) require.Len(t, b.failureStreaks, 1) // for vdr2 b.lock.Unlock() // More 
failures for vdr2 shouldn't add it to the bench // because the max bench amount would be exceeded for i := 0; i < threshold-1; i++ { - b.RegisterFailure(vdr2.ID()) + b.RegisterFailure(vdrID2) } b.lock.Lock() - require.True(t, b.isBenched(vdr0.ID())) - require.True(t, b.isBenched(vdr1.ID())) - require.True(t, b.isBenched(vdr4.ID())) - require.False(t, b.isBenched(vdr2.ID())) + require.True(t, b.isBenched(vdrID0)) + require.True(t, b.isBenched(vdrID1)) + require.True(t, b.isBenched(vdrID4)) + require.False(t, b.isBenched(vdrID2)) require.Equal(t, 3, b.benchedQueue.Len()) require.Equal(t, 3, b.benchlistSet.Len()) require.Len(t, b.failureStreaks, 1) - require.Contains(t, b.failureStreaks, vdr2.ID()) + require.Contains(t, b.failureStreaks, vdrID2) // Ensure the benched queue root has the min end time minEndTime := b.benchedQueue[0].benchedUntil - benchedIDs := []ids.NodeID{vdr0.ID(), vdr1.ID(), vdr4.ID()} + benchedIDs := []ids.NodeID{vdrID0, vdrID1, vdrID4} for _, benchedVdr := range b.benchedQueue { require.Contains(t, benchedIDs, benchedVdr.nodeID) require.True(t, !benchedVdr.benchedUntil.Before(minEndTime)) @@ -293,20 +293,20 @@ func TestBenchlistMaxStake(t *testing.T) { // Test validators are removed from the bench correctly func TestBenchlistRemove(t *testing.T) { vdrs := validators.NewSet() - vdr0 := validators.GenerateRandomValidator(1000) - vdr1 := validators.GenerateRandomValidator(1000) - vdr2 := validators.GenerateRandomValidator(1000) - vdr3 := validators.GenerateRandomValidator(1000) - vdr4 := validators.GenerateRandomValidator(1000) - // Total weight is 5100 + vdrID0 := ids.GenerateTestNodeID() + vdrID1 := ids.GenerateTestNodeID() + vdrID2 := ids.GenerateTestNodeID() + vdrID3 := ids.GenerateTestNodeID() + vdrID4 := ids.GenerateTestNodeID() + // Total weight is 5000 errs := wrappers.Errs{} errs.Add( - vdrs.AddWeight(vdr0.ID(), vdr0.Weight()), - vdrs.AddWeight(vdr1.ID(), vdr1.Weight()), - vdrs.AddWeight(vdr2.ID(), vdr2.Weight()), - vdrs.AddWeight(vdr3.ID(), 
vdr3.Weight()), - vdrs.AddWeight(vdr4.ID(), vdr4.Weight()), + vdrs.Add(vdrID0, nil, ids.Empty, 1000), + vdrs.Add(vdrID1, nil, ids.Empty, 1000), + vdrs.Add(vdrID2, nil, ids.Empty, 1000), + vdrs.Add(vdrID3, nil, ids.Empty, 1000), + vdrs.Add(vdrID4, nil, ids.Empty, 1000), ) if errs.Errored() { t.Fatal(errs.Err) @@ -346,9 +346,9 @@ func TestBenchlistRemove(t *testing.T) { b.lock.Unlock() // Register [threshold-1] failures for 3 validators - for _, vdr := range []validators.Validator{vdr0, vdr1, vdr2} { + for _, vdrID := range []ids.NodeID{vdrID0, vdrID1, vdrID2} { for i := 0; i < threshold-1; i++ { - b.RegisterFailure(vdr.ID()) + b.RegisterFailure(vdrID) } } @@ -358,22 +358,22 @@ func TestBenchlistRemove(t *testing.T) { b.lock.Lock() b.clock.Set(now) b.lock.Unlock() - for _, vdr := range []validators.Validator{vdr0, vdr1, vdr2} { - b.RegisterFailure(vdr.ID()) + for _, vdrID := range []ids.NodeID{vdrID0, vdrID1, vdrID2} { + b.RegisterFailure(vdrID) } // All 3 should be benched b.lock.Lock() - require.True(t, b.isBenched(vdr0.ID())) - require.True(t, b.isBenched(vdr1.ID())) - require.True(t, b.isBenched(vdr2.ID())) + require.True(t, b.isBenched(vdrID0)) + require.True(t, b.isBenched(vdrID1)) + require.True(t, b.isBenched(vdrID2)) require.Equal(t, 3, b.benchedQueue.Len()) require.Equal(t, 3, b.benchlistSet.Len()) require.Len(t, b.failureStreaks, 0) // Ensure the benched queue root has the min end time minEndTime := b.benchedQueue[0].benchedUntil - benchedIDs := []ids.NodeID{vdr0.ID(), vdr1.ID(), vdr2.ID()} + benchedIDs := []ids.NodeID{vdrID0, vdrID1, vdrID2} for _, benchedVdr := range b.benchedQueue { require.Contains(t, benchedIDs, benchedVdr.nodeID) require.True(t, !benchedVdr.benchedUntil.Before(minEndTime)) @@ -388,7 +388,7 @@ func TestBenchlistRemove(t *testing.T) { require.Eventually( t, func() bool { - return !b.IsBenched(vdr0.ID()) + return !b.IsBenched(vdrID0) }, duration+time.Second, // extra time.Second as grace period 100*time.Millisecond, @@ -397,7 +397,7 @@ 
func TestBenchlistRemove(t *testing.T) { require.Eventually( t, func() bool { - return !b.IsBenched(vdr1.ID()) + return !b.IsBenched(vdrID1) }, duration+time.Second, 100*time.Millisecond, @@ -406,7 +406,7 @@ func TestBenchlistRemove(t *testing.T) { require.Eventually( t, func() bool { - return !b.IsBenched(vdr2.ID()) + return !b.IsBenched(vdrID2) }, duration+time.Second, 100*time.Millisecond, diff --git a/avalanchego/snow/networking/benchlist/manager.go b/avalanchego/snow/networking/benchlist/manager.go index 614c7794..fb0daf33 100644 --- a/avalanchego/snow/networking/benchlist/manager.go +++ b/avalanchego/snow/networking/benchlist/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist @@ -17,7 +17,7 @@ import ( var ( errUnknownValidators = errors.New("unknown validator set for provided chain") - _ Manager = &manager{} + _ Manager = (*manager)(nil) ) // Manager provides an interface for a benchlist to register whether @@ -120,10 +120,10 @@ func (m *manager) RegisterChain(ctx *snow.ConsensusContext) error { ok bool ) if m.config.StakingEnabled { - vdrs, ok = m.config.Validators.GetValidators(ctx.SubnetID) + vdrs, ok = m.config.Validators.Get(ctx.SubnetID) } else { // If staking is disabled, everyone validates every chain - vdrs, ok = m.config.Validators.GetValidators(constants.PrimaryNetworkID) + vdrs, ok = m.config.Validators.Get(constants.PrimaryNetworkID) } if !ok { return errUnknownValidators @@ -173,10 +173,22 @@ func (m *manager) RegisterFailure(chainID ids.ID, nodeID ids.NodeID) { type noBenchlist struct{} // NewNoBenchlist returns an empty benchlist that will never stop any queries -func NewNoBenchlist() Manager { return &noBenchlist{} } +func NewNoBenchlist() Manager { + return &noBenchlist{} +} + +func (noBenchlist) RegisterChain(*snow.ConsensusContext) error { + return nil +} + +func 
(noBenchlist) RegisterResponse(ids.ID, ids.NodeID) {} -func (noBenchlist) RegisterChain(*snow.ConsensusContext) error { return nil } -func (noBenchlist) RegisterResponse(ids.ID, ids.NodeID) {} -func (noBenchlist) RegisterFailure(ids.ID, ids.NodeID) {} -func (noBenchlist) IsBenched(ids.NodeID, ids.ID) bool { return false } -func (noBenchlist) GetBenched(ids.NodeID) []ids.ID { return []ids.ID{} } +func (noBenchlist) RegisterFailure(ids.ID, ids.NodeID) {} + +func (noBenchlist) IsBenched(ids.NodeID, ids.ID) bool { + return false +} + +func (noBenchlist) GetBenched(ids.NodeID) []ids.ID { + return []ids.ID{} +} diff --git a/avalanchego/snow/networking/benchlist/metrics.go b/avalanchego/snow/networking/benchlist/metrics.go index 7b132a08..12da52d3 100644 --- a/avalanchego/snow/networking/benchlist/metrics.go +++ b/avalanchego/snow/networking/benchlist/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist diff --git a/avalanchego/snow/networking/benchlist/test_benchable.go b/avalanchego/snow/networking/benchlist/test_benchable.go index 1e525dd8..1655d808 100644 --- a/avalanchego/snow/networking/benchlist/test_benchable.go +++ b/avalanchego/snow/networking/benchlist/test_benchable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package benchlist diff --git a/avalanchego/snow/networking/handler/engine.go b/avalanchego/snow/networking/handler/engine.go new file mode 100644 index 00000000..94ae54ff --- /dev/null +++ b/avalanchego/snow/networking/handler/engine.go @@ -0,0 +1,55 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package handler + +import ( + "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" +) + +// Engine is a wrapper around a consensus engine's components. +type Engine struct { + StateSyncer common.StateSyncer + Bootstrapper common.BootstrapableEngine + Consensus common.Engine +} + +// Get returns the engine corresponding to the provided state, +// and whether its corresponding engine is initialized (not nil). +func (e *Engine) Get(state snow.State) (common.Engine, bool) { + if e == nil { + return nil, false + } + switch state { + case snow.StateSyncing: + return e.StateSyncer, e.StateSyncer != nil + case snow.Bootstrapping: + return e.Bootstrapper, e.Bootstrapper != nil + case snow.NormalOp: + return e.Consensus, e.Consensus != nil + default: + return nil, false + } +} + +// EngineManager resolves the engine that should be used given the current +// execution context of the chain. +type EngineManager struct { + Avalanche *Engine + Snowman *Engine +} + +// Get returns the engine corresponding to the provided type if possible. +// If an engine type is not specified, the initial engine type is returned. +func (e *EngineManager) Get(engineType p2p.EngineType) *Engine { + switch engineType { + case p2p.EngineType_ENGINE_TYPE_AVALANCHE: + return e.Avalanche + case p2p.EngineType_ENGINE_TYPE_SNOWMAN: + return e.Snowman + default: + return nil + } +} diff --git a/avalanchego/snow/networking/handler/engine_test.go b/avalanchego/snow/networking/handler/engine_test.go new file mode 100644 index 00000000..9eb6752c --- /dev/null +++ b/avalanchego/snow/networking/handler/engine_test.go @@ -0,0 +1,71 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package handler + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/proto/pb/p2p" +) + +func TestEngineManager_Get(t *testing.T) { + type args struct { + engineType p2p.EngineType + } + + avalanche := &Engine{} + snowman := &Engine{} + + type expected struct { + engine *Engine + } + + tests := []struct { + name string + args args + expected expected + }{ + { + name: "request unspecified engine", + args: args{ + engineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + }, + expected: expected{ + engine: nil, + }, + }, + { + name: "request avalanche engine", + args: args{ + engineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + }, + expected: expected{ + engine: avalanche, + }, + }, + { + name: "request snowman engine", + args: args{ + engineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + }, + expected: expected{ + engine: snowman, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + r := require.New(t) + + e := EngineManager{ + Avalanche: avalanche, + Snowman: snowman, + } + + r.Equal(test.expected.engine, e.Get(test.args.engineType)) + }) + } +} diff --git a/avalanchego/snow/networking/handler/handler.go b/avalanchego/snow/networking/handler/handler.go index 9b75f040..81b8a9c5 100644 --- a/avalanchego/snow/networking/handler/handler.go +++ b/avalanchego/snow/networking/handler/handler.go @@ -1,56 +1,69 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package handler import ( + "context" + "errors" "fmt" "sync" "time" "github.com/prometheus/client_golang/prometheus" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap" "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/networking/worker" "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/version" ) const ( - threadPoolSize = 2 numDispatchersToClose = 3 + // If a consensus message takes longer than this to process, the handler + // will log a warning. + syncProcessingTimeWarnLimit = 30 * time.Second ) -var _ Handler = &handler{} +var ( + _ Handler = (*handler)(nil) + + errMissingEngine = errors.New("missing engine") + errNoStartingGear = errors.New("failed to select starting gear") +) type Handler interface { common.Timer health.Checker Context() *snow.ConsensusContext - IsValidator(nodeID ids.NodeID) bool + // ShouldHandle returns true if the node with the given ID is allowed to send + // messages to this chain. If the node is not allowed to send messages to + // this chain, the message should be dropped. 
+ ShouldHandle(nodeID ids.NodeID) bool - SetStateSyncer(engine common.StateSyncer) - StateSyncer() common.StateSyncer - SetBootstrapper(engine common.BootstrapableEngine) - Bootstrapper() common.BootstrapableEngine - SetConsensus(engine common.Engine) - Consensus() common.Engine + SetEngineManager(engineManager *EngineManager) + GetEngineManager() *EngineManager SetOnStopped(onStopped func()) - Start(recoverPanic bool) - Push(msg message.InboundMessage) - Stop() - StopWithError(err error) + Start(ctx context.Context, recoverPanic bool) + Push(ctx context.Context, msg Message) + Len() int + Stop(ctx context.Context) + StopWithError(ctx context.Context, err error) Stopped() chan struct{} } @@ -63,7 +76,6 @@ type handler struct { clock mockable.Clock ctx *snow.ConsensusContext - mc message.InternalMsgBuilder // The validator set that validates this chain validators validators.Set // Receives messages from the VM @@ -71,9 +83,8 @@ type handler struct { preemptTimeouts chan struct{} gossipFrequency time.Duration - stateSyncer common.StateSyncer - bootstrapper common.BootstrapableEngine - engine common.Engine + engineManager *EngineManager + // onStopped is called in a goroutine when this handler finishes shutting // down. If it is nil then it is skipped. 
onStopped func() @@ -96,31 +107,37 @@ type handler struct { numDispatchersClosed int // Closed when this handler and [engine] are done shutting down closed chan struct{} + + subnetConnector validators.SubnetConnector + + subnetAllower subnets.Allower } // Initialize this consensus handler // [engine] must be initialized before initializing this handler func New( - mc message.InternalMsgBuilder, ctx *snow.ConsensusContext, validators validators.Set, msgFromVMChan <-chan common.Message, - preemptTimeouts chan struct{}, gossipFrequency time.Duration, + threadPoolSize int, resourceTracker tracker.ResourceTracker, + subnetConnector validators.SubnetConnector, + subnet subnets.Subnet, ) (Handler, error) { h := &handler{ ctx: ctx, - mc: mc, validators: validators, msgFromVMChan: msgFromVMChan, - preemptTimeouts: preemptTimeouts, + preemptTimeouts: subnet.OnBootstrapCompleted(), gossipFrequency: gossipFrequency, asyncMessagePool: worker.NewPool(threadPoolSize), timeouts: make(chan struct{}, 1), closingChan: make(chan struct{}), closed: make(chan struct{}), resourceTracker: resourceTracker, + subnetConnector: subnetConnector, + subnetAllower: subnet, } var err error @@ -141,103 +158,129 @@ func New( return h, nil } -func (h *handler) Context() *snow.ConsensusContext { return h.ctx } - -func (h *handler) IsValidator(nodeID ids.NodeID) bool { - return !h.ctx.IsValidatorOnly() || - nodeID == h.ctx.NodeID || - h.validators.Contains(nodeID) +func (h *handler) Context() *snow.ConsensusContext { + return h.ctx } -func (h *handler) SetStateSyncer(engine common.StateSyncer) { h.stateSyncer = engine } -func (h *handler) StateSyncer() common.StateSyncer { return h.stateSyncer } +func (h *handler) ShouldHandle(nodeID ids.NodeID) bool { + return h.subnetAllower.IsAllowed(nodeID, h.validators.Contains(nodeID)) +} -func (h *handler) SetBootstrapper(engine common.BootstrapableEngine) { h.bootstrapper = engine } -func (h *handler) Bootstrapper() common.BootstrapableEngine { return 
h.bootstrapper } +func (h *handler) SetEngineManager(engineManager *EngineManager) { + h.engineManager = engineManager +} -func (h *handler) SetConsensus(engine common.Engine) { h.engine = engine } -func (h *handler) Consensus() common.Engine { return h.engine } +func (h *handler) GetEngineManager() *EngineManager { + return h.engineManager +} -func (h *handler) SetOnStopped(onStopped func()) { h.onStopped = onStopped } +func (h *handler) SetOnStopped(onStopped func()) { + h.onStopped = onStopped +} -func (h *handler) selectStartingGear() (common.Engine, error) { - if h.stateSyncer == nil { - return h.bootstrapper, nil +func (h *handler) selectStartingGear(ctx context.Context) (common.Engine, error) { + state := h.ctx.State.Get() + engines := h.engineManager.Get(state.Type) + if engines == nil { + return nil, errNoStartingGear + } + if engines.StateSyncer == nil { + return engines.Bootstrapper, nil } - stateSyncEnabled, err := h.stateSyncer.IsEnabled() + stateSyncEnabled, err := engines.StateSyncer.IsEnabled(ctx) if err != nil { return nil, err } if !stateSyncEnabled { - return h.bootstrapper, nil + return engines.Bootstrapper, nil } - // drop bootstrap state from previous runs - // before starting state sync - return h.stateSyncer, h.bootstrapper.Clear() + // drop bootstrap state from previous runs before starting state sync + return engines.StateSyncer, engines.Bootstrapper.Clear() } -func (h *handler) Start(recoverPanic bool) { +func (h *handler) Start(ctx context.Context, recoverPanic bool) { h.ctx.Lock.Lock() defer h.ctx.Lock.Unlock() - gear, err := h.selectStartingGear() + gear, err := h.selectStartingGear(ctx) if err != nil { h.ctx.Log.Error("chain failed to select starting gear", zap.Error(err), ) - h.shutdown() + h.shutdown(ctx) return } - if err := gear.Start(0); err != nil { + if err := gear.Start(ctx, 0); err != nil { h.ctx.Log.Error("chain failed to start", zap.Error(err), ) - h.shutdown() + h.shutdown(ctx) return } + detachedCtx := utils.Detach(ctx) + 
dispatchSync := func() { + h.dispatchSync(detachedCtx) + } + dispatchAsync := func() { + h.dispatchAsync(detachedCtx) + } + dispatchChans := func() { + h.dispatchChans(detachedCtx) + } if recoverPanic { - go h.ctx.Log.RecoverAndExit(h.dispatchSync, func() { + go h.ctx.Log.RecoverAndExit(dispatchSync, func() { h.ctx.Log.Error("chain was shutdown due to a panic in the sync dispatcher") }) - go h.ctx.Log.RecoverAndExit(h.dispatchAsync, func() { + go h.ctx.Log.RecoverAndExit(dispatchAsync, func() { h.ctx.Log.Error("chain was shutdown due to a panic in the async dispatcher") }) - go h.ctx.Log.RecoverAndExit(h.dispatchChans, func() { + go h.ctx.Log.RecoverAndExit(dispatchChans, func() { h.ctx.Log.Error("chain was shutdown due to a panic in the chan dispatcher") }) } else { - go h.ctx.Log.RecoverAndPanic(h.dispatchSync) - go h.ctx.Log.RecoverAndPanic(h.dispatchAsync) - go h.ctx.Log.RecoverAndPanic(h.dispatchChans) + go h.ctx.Log.RecoverAndPanic(dispatchSync) + go h.ctx.Log.RecoverAndPanic(dispatchAsync) + go h.ctx.Log.RecoverAndPanic(dispatchChans) } } -func (h *handler) HealthCheck() (interface{}, error) { +func (h *handler) HealthCheck(ctx context.Context) (interface{}, error) { h.ctx.Lock.Lock() defer h.ctx.Lock.Unlock() - engine, err := h.getEngine() - if err != nil { - return nil, err + state := h.ctx.State.Get() + engine, ok := h.engineManager.Get(state.Type).Get(state.State) + if !ok { + return nil, fmt.Errorf( + "%w %s running %s", + errMissingEngine, + state.State, + state.Type, + ) } - return engine.HealthCheck() + return engine.HealthCheck(ctx) } // Push the message onto the handler's queue -func (h *handler) Push(msg message.InboundMessage) { +func (h *handler) Push(ctx context.Context, msg Message) { switch msg.Op() { - case message.AppRequest, message.AppGossip, message.AppRequestFailed, message.AppResponse: - h.asyncMessageQueue.Push(msg) + case message.AppRequestOp, message.AppRequestFailedOp, message.AppResponseOp, message.AppGossipOp, + 
message.CrossChainAppRequestOp, message.CrossChainAppRequestFailedOp, message.CrossChainAppResponseOp: + h.asyncMessageQueue.Push(ctx, msg) default: - h.syncMessageQueue.Push(msg) + h.syncMessageQueue.Push(ctx, msg) } } +func (h *handler) Len() int { + return h.syncMessageQueue.Len() + h.asyncMessageQueue.Len() +} + func (h *handler) RegisterTimeout(d time.Duration) { go func() { timer := time.NewTimer(d) @@ -258,7 +301,8 @@ func (h *handler) RegisterTimeout(d time.Duration) { }() } -func (h *handler) Stop() { +// Note: It is possible for Stop to be called before/concurrently with Start. +func (h *handler) Stop(ctx context.Context) { h.closeOnce.Do(func() { // Must hold the locks here to ensure there's no race condition in where // we check the value of [h.closing] after the call to [Signal]. @@ -275,35 +319,45 @@ func (h *handler) Stop() { // [h.ctx.Lock] until the engine finished executing state transitions, // which may take a long time. As a result, the router would time out on // shutting down this chain. - h.bootstrapper.Halt() + state := h.ctx.State.Get() + bootstrapper, ok := h.engineManager.Get(state.Type).Get(snow.Bootstrapping) + if !ok { + h.ctx.Log.Error("bootstrapping engine doesn't exists", + zap.Stringer("type", state.Type), + ) + return + } + bootstrapper.Halt(ctx) }) } -func (h *handler) StopWithError(err error) { +func (h *handler) StopWithError(ctx context.Context, err error) { h.ctx.Log.Fatal("shutting down chain", zap.String("reason", "received an unexpected error"), zap.Error(err), ) - h.Stop() + h.Stop(ctx) } -func (h *handler) Stopped() chan struct{} { return h.closed } +func (h *handler) Stopped() chan struct{} { + return h.closed +} -func (h *handler) dispatchSync() { - defer h.closeDispatcher() +func (h *handler) dispatchSync(ctx context.Context) { + defer h.closeDispatcher(ctx) // Handle sync messages from the router for { // Get the next message we should process. If the handler is shutting // down, we may fail to pop a message. 
- msg, ok := h.popUnexpiredMsg(h.syncMessageQueue, h.metrics.expired) + ctx, msg, ok := h.popUnexpiredMsg(h.syncMessageQueue, h.metrics.expired) if !ok { return } // If there is an error handling the message, shut down the chain - if err := h.handleSyncMsg(msg); err != nil { - h.StopWithError(fmt.Errorf( + if err := h.handleSyncMsg(ctx, msg); err != nil { + h.StopWithError(ctx, fmt.Errorf( "%w while processing sync message: %s", err, msg, @@ -313,30 +367,30 @@ func (h *handler) dispatchSync() { } } -func (h *handler) dispatchAsync() { +func (h *handler) dispatchAsync(ctx context.Context) { defer func() { h.asyncMessagePool.Shutdown() - h.closeDispatcher() + h.closeDispatcher(ctx) }() // Handle async messages from the router for { // Get the next message we should process. If the handler is shutting // down, we may fail to pop a message. - msg, ok := h.popUnexpiredMsg(h.asyncMessageQueue, h.metrics.asyncExpired) + ctx, msg, ok := h.popUnexpiredMsg(h.asyncMessageQueue, h.metrics.asyncExpired) if !ok { return } - h.handleAsyncMsg(msg) + h.handleAsyncMsg(ctx, msg) } } -func (h *handler) dispatchChans() { +func (h *handler) dispatchChans(ctx context.Context) { gossiper := time.NewTicker(h.gossipFrequency) defer func() { gossiper.Stop() - h.closeDispatcher() + h.closeDispatcher(ctx) }() // Handle messages generated by the handler and the VM @@ -347,17 +401,17 @@ func (h *handler) dispatchChans() { return case vmMSG := <-h.msgFromVMChan: - msg = h.mc.InternalVMMessage(h.ctx.NodeID, uint32(vmMSG)) + msg = message.InternalVMMessage(h.ctx.NodeID, uint32(vmMSG)) case <-gossiper.C: - msg = h.mc.InternalGossipRequest(h.ctx.NodeID) + msg = message.InternalGossipRequest(h.ctx.NodeID) case <-h.timeouts: - msg = h.mc.InternalTimeout(h.ctx.NodeID) + msg = message.InternalTimeout(h.ctx.NodeID) } if err := h.handleChanMsg(msg); err != nil { - h.StopWithError(fmt.Errorf( + h.StopWithError(ctx, fmt.Errorf( "%w while processing async message: %s", err, msg, @@ -368,463 +422,311 @@ func 
(h *handler) dispatchChans() { } // Any returned error is treated as fatal -func (h *handler) handleSyncMsg(msg message.InboundMessage) error { - h.ctx.Log.Debug("forwarding sync message to consensus", - zap.Stringer("messageString", msg), - ) - +func (h *handler) handleSyncMsg(ctx context.Context, msg Message) error { var ( nodeID = msg.NodeID() op = msg.Op() + body = msg.Message() startTime = h.clock.Time() + // Check if the chain is in normal operation at the start of message + // execution (may change during execution) + isNormalOp = h.ctx.State.Get().State == snow.NormalOp + ) + h.ctx.Log.Debug("forwarding sync message to consensus", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + ) + h.ctx.Log.Verbo("forwarding sync message to consensus", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + zap.Any("message", body), ) h.resourceTracker.StartProcessing(nodeID, startTime) h.ctx.Lock.Lock() + lockAcquiredTime := h.clock.Time() defer func() { h.ctx.Lock.Unlock() var ( - endTime = h.clock.Time() - histogram = h.metrics.messages[op] + endTime = h.clock.Time() + messageHistograms = h.metrics.messages[op] + msgHandlingTime = lockAcquiredTime.Sub(startTime) + processingTime = endTime.Sub(startTime) ) h.resourceTracker.StopProcessing(nodeID, endTime) - histogram.Observe(float64(endTime.Sub(startTime))) + messageHistograms.msgHandlingTime.Observe(float64(msgHandlingTime)) + messageHistograms.processingTime.Observe(float64(processingTime)) msg.OnFinishedHandling() h.ctx.Log.Debug("finished handling sync message", zap.Stringer("messageOp", op), ) + if processingTime > syncProcessingTimeWarnLimit && isNormalOp { + h.ctx.Log.Warn("handling sync message took longer than expected", + zap.Duration("processingTime", processingTime), + zap.Duration("msgHandlingTime", msgHandlingTime), + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + zap.Any("message", body), + ) + } }() - engine, err := h.getEngine() - if err != nil { - 
return err + // We will attempt to pass the message to the requested type for the state + // we are currently in. + currentState := h.ctx.State.Get() + if msg.EngineType == p2p.EngineType_ENGINE_TYPE_SNOWMAN && + currentState.Type == p2p.EngineType_ENGINE_TYPE_AVALANCHE { + // The peer is requesting an engine type that hasn't been initialized + // yet. This means we know that this isn't a response, so we can safely + // drop the message. + h.ctx.Log.Debug("dropping sync message", + zap.String("reason", "uninitialized engine type"), + zap.Stringer("messageOp", op), + zap.Stringer("currentEngineType", currentState.Type), + zap.Stringer("requestedEngineType", msg.EngineType), + ) + return nil + } + + var engineType p2p.EngineType + switch msg.EngineType { + case p2p.EngineType_ENGINE_TYPE_AVALANCHE, p2p.EngineType_ENGINE_TYPE_SNOWMAN: + // The peer is requesting an engine type that has been initialized, so + // we should attempt to honor the request. + engineType = msg.EngineType + default: + // Note: [msg.EngineType] may have been provided by the peer as an + // invalid option. I.E. not one of AVALANCHE, SNOWMAN, or UNSPECIFIED. + // In this case, we treat the value the same way as UNSPECIFIED. + // + // If the peer didn't request a specific engine type, we default to the + // current engine. + engineType = currentState.Type + } + + engine, ok := h.engineManager.Get(engineType).Get(currentState.State) + if !ok { + // This should only happen if the peer is not following the protocol. + // This can happen if the chain only has a Snowman engine and the peer + // requested an Avalanche engine handle the message. 
+ h.ctx.Log.Debug("dropping sync message", + zap.String("reason", "uninitialized engine state"), + zap.Stringer("messageOp", op), + zap.Stringer("currentEngineType", currentState.Type), + zap.Stringer("requestedEngineType", msg.EngineType), + zap.Stringer("engineState", currentState.State), + ) + return nil } - // Invariant: msg.Get(message.RequestID) must never error. The [ChainRouter] - // should have already successfully called this function. // Invariant: Response messages can never be dropped here. This is because // the timeout has already been cleared. This means the engine // should be invoked with a failure message if parsing of the // response fails. - switch op { - case message.GetStateSummaryFrontier: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + switch msg := body.(type) { + // State messages should always be sent to the snowman engine + case *p2p.GetStateSummaryFrontier: + return engine.GetStateSummaryFrontier(ctx, nodeID, msg.RequestId) - return engine.GetStateSummaryFrontier(nodeID, requestID) + case *p2p.StateSummaryFrontier: + return engine.StateSummaryFrontier(ctx, nodeID, msg.RequestId, msg.Summary) - case message.StateSummaryFrontier: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + case *message.GetStateSummaryFrontierFailed: + return engine.GetStateSummaryFrontierFailed(ctx, nodeID, msg.RequestID) - summaryIntf, err := msg.Get(message.SummaryBytes) - if err != nil { + case *p2p.GetAcceptedStateSummary: + // TODO: Enforce that the numbers are sorted to make this verification + // more efficient. 
+ if !utils.IsUnique(msg.Heights) { h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.SummaryBytes), - zap.Error(err), - ) - return engine.GetStateSummaryFrontierFailed(nodeID, requestID) - } - summary := summaryIntf.([]byte) - - return engine.StateSummaryFrontier(nodeID, requestID, summary) - - case message.GetStateSummaryFrontierFailed: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) - - return engine.GetStateSummaryFrontierFailed(nodeID, requestID) - - case message.GetAcceptedStateSummary: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) - - summaryHeights, err := getSummaryHeights(msg) - if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.SummaryHeights), - zap.Error(err), + zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), + zap.Uint32("requestID", msg.RequestId), + zap.String("field", "Heights"), ) - return nil + return engine.GetAcceptedStateSummaryFailed(ctx, nodeID, msg.RequestId) } - return engine.GetAcceptedStateSummary(nodeID, requestID, summaryHeights) - - case message.AcceptedStateSummary: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + return engine.GetAcceptedStateSummary( + ctx, + nodeID, + msg.RequestId, + msg.Heights, + ) - summaryIDs, err := getIDs(message.SummaryIDs, msg) + case *p2p.AcceptedStateSummary: + summaryIDs, err := getIDs(msg.SummaryIds) if err != nil { h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - 
zap.Stringer("field", message.SummaryIDs), + zap.Stringer("messageOp", message.AcceptedStateSummaryOp), + zap.Uint32("requestID", msg.RequestId), + zap.String("field", "SummaryIDs"), zap.Error(err), ) - return engine.GetAcceptedStateSummaryFailed(nodeID, requestID) - } - - return engine.AcceptedStateSummary(nodeID, requestID, summaryIDs) - - case message.GetAcceptedStateSummaryFailed: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err + return engine.GetAcceptedStateSummaryFailed(ctx, nodeID, msg.RequestId) } - requestID := requestIDIntf.(uint32) - return engine.GetAcceptedStateSummaryFailed(nodeID, requestID) + return engine.AcceptedStateSummary(ctx, nodeID, msg.RequestId, summaryIDs) - case message.GetAcceptedFrontier: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + case *message.GetAcceptedStateSummaryFailed: + return engine.GetAcceptedStateSummaryFailed(ctx, nodeID, msg.RequestID) - return engine.GetAcceptedFrontier(nodeID, requestID) - - case message.AcceptedFrontier: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + // Bootstrapping messages may be forwarded to either avalanche or snowman + // engines, depending on the EngineType field + case *p2p.GetAcceptedFrontier: + return engine.GetAcceptedFrontier(ctx, nodeID, msg.RequestId) - containerIDs, err := getIDs(message.ContainerIDs, msg) + case *p2p.AcceptedFrontier: + containerIDs, err := getIDs(msg.ContainerIds) if err != nil { h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerIDs), + zap.Stringer("messageOp", message.AcceptedFrontierOp), + zap.Uint32("requestID", msg.RequestId), + zap.String("field", "ContainerIDs"), zap.Error(err), ) - return engine.GetAcceptedFrontierFailed(nodeID, 
requestID) - } - - return engine.AcceptedFrontier(nodeID, requestID, containerIDs) - - case message.GetAcceptedFrontierFailed: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err + return engine.GetAcceptedFrontierFailed(ctx, nodeID, msg.RequestId) } - requestID := requestIDIntf.(uint32) - return engine.GetAcceptedFrontierFailed(nodeID, requestID) + return engine.AcceptedFrontier(ctx, nodeID, msg.RequestId, containerIDs) - case message.GetAccepted: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + case *message.GetAcceptedFrontierFailed: + return engine.GetAcceptedFrontierFailed(ctx, nodeID, msg.RequestID) - containerIDs, err := getIDs(message.ContainerIDs, msg) + case *p2p.GetAccepted: + containerIDs, err := getIDs(msg.ContainerIds) if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", + h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerIDs), + zap.Stringer("messageOp", message.GetAcceptedOp), + zap.Uint32("requestID", msg.RequestId), + zap.String("field", "ContainerIDs"), zap.Error(err), ) return nil } - return engine.GetAccepted(nodeID, requestID, containerIDs) + return engine.GetAccepted(ctx, nodeID, msg.RequestId, containerIDs) - case message.Accepted: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) - - containerIDs, err := getIDs(message.ContainerIDs, msg) + case *p2p.Accepted: + containerIDs, err := getIDs(msg.ContainerIds) if err != nil { h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerIDs), + zap.Stringer("messageOp", message.AcceptedOp), + zap.Uint32("requestID", msg.RequestId), + 
zap.String("field", "ContainerIDs"), zap.Error(err), ) - return engine.GetAcceptedFailed(nodeID, requestID) - } - - return engine.Accepted(nodeID, requestID, containerIDs) - - case message.GetAcceptedFailed: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err + return engine.GetAcceptedFailed(ctx, nodeID, msg.RequestId) } - requestID := requestIDIntf.(uint32) - return engine.GetAcceptedFailed(nodeID, requestID) + return engine.Accepted(ctx, nodeID, msg.RequestId, containerIDs) - case message.GetAncestors: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + case *message.GetAcceptedFailed: + return engine.GetAcceptedFailed(ctx, nodeID, msg.RequestID) - containerIDIntf, err := msg.Get(message.ContainerID) + case *p2p.GetAncestors: + containerID, err := ids.ToID(msg.ContainerId) if err != nil { h.ctx.Log.Debug("dropping message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerID), - zap.Error(err), - ) - return nil - } - containerIDBytes := containerIDIntf.([]byte) - containerID, err := ids.ToID(containerIDBytes) - if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerID), + zap.Stringer("messageOp", message.GetAncestorsOp), + zap.Uint32("requestID", msg.RequestId), + zap.String("field", "ContainerID"), zap.Error(err), ) return nil } - return engine.GetAncestors(nodeID, requestID, containerID) - - case message.GetAncestorsFailed: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + return engine.GetAncestors(ctx, nodeID, msg.RequestId, containerID) - return engine.GetAncestorsFailed(nodeID, requestID) + case 
*message.GetAncestorsFailed: + return engine.GetAncestorsFailed(ctx, nodeID, msg.RequestID) - case message.Ancestors: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + case *p2p.Ancestors: + return engine.Ancestors(ctx, nodeID, msg.RequestId, msg.Containers) - containersIntf, err := msg.Get(message.MultiContainerBytes) - if err != nil { - h.ctx.Log.Debug("message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.MultiContainerBytes), - zap.Error(err), - ) - return engine.GetAncestorsFailed(nodeID, requestID) - } - containers := containersIntf.([][]byte) - - return engine.Ancestors(nodeID, requestID, containers) - - case message.Get: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) - - containerIDIntf, err := msg.Get(message.ContainerID) - if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerID), - zap.Error(err), - ) - return nil - } - containerIDBytes := containerIDIntf.([]byte) - containerID, err := ids.ToID(containerIDBytes) + case *p2p.Get: + containerID, err := ids.ToID(msg.ContainerId) if err != nil { h.ctx.Log.Debug("dropping message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerID), + zap.Stringer("messageOp", message.GetOp), + zap.Uint32("requestID", msg.RequestId), + zap.String("field", "ContainerID"), zap.Error(err), ) return nil } - return engine.Get(nodeID, requestID, containerID) - - case message.GetFailed: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := 
requestIDIntf.(uint32) - - return engine.GetFailed(nodeID, requestID) - - case message.Put: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) - - containerIntf, err := msg.Get(message.ContainerBytes) - if err != nil { - // TODO: [requestID] can overflow, which means a timeout on the - // request before the overflow may not be handled properly. - if requestID == constants.GossipMsgRequestID { - h.ctx.Log.Debug("dropping message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerBytes), - zap.Error(err), - ) - return nil - } + return engine.Get(ctx, nodeID, msg.RequestId, containerID) - h.ctx.Log.Debug("message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerBytes), - zap.Error(err), - ) - return engine.GetFailed(nodeID, requestID) - } - container := containerIntf.([]byte) + case *message.GetFailed: + return engine.GetFailed(ctx, nodeID, msg.RequestID) - return engine.Put(nodeID, requestID, container) + case *p2p.Put: + return engine.Put(ctx, nodeID, msg.RequestId, msg.Container) - case message.PushQuery: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + case *p2p.PushQuery: + return engine.PushQuery(ctx, nodeID, msg.RequestId, msg.Container) - containerIntf, err := msg.Get(message.ContainerBytes) + case *p2p.PullQuery: + containerID, err := ids.ToID(msg.ContainerId) if err != nil { h.ctx.Log.Debug("dropping message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerBytes), + zap.Stringer("messageOp", message.PullQueryOp), + zap.Uint32("requestID", msg.RequestId), + 
zap.String("field", "ContainerID"), zap.Error(err), ) return nil } - container := containerIntf.([]byte) - return engine.PushQuery(nodeID, requestID, container) + return engine.PullQuery(ctx, nodeID, msg.RequestId, containerID) - case message.PullQuery: - requestIDIntf, err := msg.Get(message.RequestID) + case *p2p.Chits: + votes, err := getIDs(msg.PreferredContainerIds) if err != nil { - return err - } - requestID := requestIDIntf.(uint32) - - containerIDIntf, err := msg.Get(message.ContainerID) - if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerID), - zap.Error(err), - ) - return nil - } - containerIDBytes := containerIDIntf.([]byte) - containerID, err := ids.ToID(containerIDBytes) - if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", + h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerID), + zap.Stringer("messageOp", message.ChitsOp), + zap.Uint32("requestID", msg.RequestId), + zap.String("field", "PreferredContainerIDs"), zap.Error(err), ) - return nil + return engine.QueryFailed(ctx, nodeID, msg.RequestId) } - return engine.PullQuery(nodeID, requestID, containerID) - - case message.Chits: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) - - votes, err := getIDs(message.ContainerIDs, msg) + accepted, err := getIDs(msg.AcceptedContainerIds) if err != nil { h.ctx.Log.Debug("message with invalid field", zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.ContainerIDs), + zap.Stringer("messageOp", message.ChitsOp), + zap.Uint32("requestID", msg.RequestId), + zap.String("field", 
"AcceptedContainerIDs"), zap.Error(err), ) - return engine.QueryFailed(nodeID, requestID) + return engine.QueryFailed(ctx, nodeID, msg.RequestId) } - return engine.Chits(nodeID, requestID, votes) + return engine.Chits(ctx, nodeID, msg.RequestId, votes, accepted) - case message.QueryFailed: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + case *message.QueryFailed: + return engine.QueryFailed(ctx, nodeID, msg.RequestID) - return engine.QueryFailed(nodeID, requestID) + // Connection messages can be sent to the currently executing engine + case *message.Connected: + return engine.Connected(ctx, nodeID, msg.NodeVersion) - case message.Connected: - peerVersionIntf, err := msg.Get(message.VersionStruct) - if err != nil { - return err - } - peerVersion := peerVersionIntf.(*version.Application) + case *message.ConnectedSubnet: + return h.subnetConnector.ConnectedSubnet(ctx, nodeID, msg.SubnetID) - return engine.Connected(nodeID, peerVersion) - - case message.Disconnected: - return engine.Disconnected(nodeID) + case *message.Disconnected: + return engine.Disconnected(ctx, nodeID) default: return fmt.Errorf( @@ -834,10 +736,10 @@ func (h *handler) handleSyncMsg(msg message.InboundMessage) error { } } -func (h *handler) handleAsyncMsg(msg message.InboundMessage) { +func (h *handler) handleAsyncMsg(ctx context.Context, msg Message) { h.asyncMessagePool.Send(func() { - if err := h.executeAsyncMsg(msg); err != nil { - h.StopWithError(fmt.Errorf( + if err := h.executeAsyncMsg(ctx, msg); err != nil { + h.StopWithError(ctx, fmt.Errorf( "%w while processing async message: %s", err, msg, @@ -847,103 +749,92 @@ func (h *handler) handleAsyncMsg(msg message.InboundMessage) { } // Any returned error is treated as fatal -func (h *handler) executeAsyncMsg(msg message.InboundMessage) error { - h.ctx.Log.Debug("forwarding async message to consensus", - zap.Stringer("messageString", msg), - ) - +func (h *handler) 
executeAsyncMsg(ctx context.Context, msg Message) error { var ( nodeID = msg.NodeID() op = msg.Op() + body = msg.Message() startTime = h.clock.Time() ) + h.ctx.Log.Debug("forwarding async message to consensus", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + ) + h.ctx.Log.Verbo("forwarding async message to consensus", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + zap.Any("message", body), + ) h.resourceTracker.StartProcessing(nodeID, startTime) defer func() { var ( - endTime = h.clock.Time() - histogram = h.metrics.messages[op] + endTime = h.clock.Time() + messageHistograms = h.metrics.messages[op] + processingTime = endTime.Sub(startTime) ) h.resourceTracker.StopProcessing(nodeID, endTime) - histogram.Observe(float64(endTime.Sub(startTime))) + // There is no lock grabbed here, so both metrics are identical + messageHistograms.processingTime.Observe(float64(processingTime)) + messageHistograms.msgHandlingTime.Observe(float64(processingTime)) msg.OnFinishedHandling() h.ctx.Log.Debug("finished handling async message", zap.Stringer("messageOp", op), ) }() - engine, err := h.getEngine() - if err != nil { - return err + state := h.ctx.State.Get() + engine, ok := h.engineManager.Get(state.Type).Get(state.State) + if !ok { + return fmt.Errorf( + "%w %s running %s", + errMissingEngine, + state.State, + state.Type, + ) } - switch op { - case message.AppRequest: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) - - appBytesIntf, err := msg.Get(message.AppBytes) - if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.AppBytes), - zap.Error(err), - ) - return nil - } - appBytes := appBytesIntf.([]byte) - - return engine.AppRequest(nodeID, requestID, msg.ExpirationTime(), appBytes) - - case 
message.AppResponse: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + switch m := body.(type) { + case *p2p.AppRequest: + return engine.AppRequest( + ctx, + nodeID, + m.RequestId, + msg.Expiration(), + m.AppBytes, + ) - appBytesIntf, err := msg.Get(message.AppBytes) - if err != nil { - h.ctx.Log.Debug("message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Uint32("requestID", requestID), - zap.Stringer("field", message.AppBytes), - zap.Error(err), - ) - return engine.AppRequestFailed(nodeID, requestID) - } - appBytes := appBytesIntf.([]byte) + case *p2p.AppResponse: + return engine.AppResponse(ctx, nodeID, m.RequestId, m.AppBytes) - return engine.AppResponse(nodeID, requestID, appBytes) + case *message.AppRequestFailed: + return engine.AppRequestFailed(ctx, nodeID, m.RequestID) - case message.AppRequestFailed: - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - return err - } - requestID := requestIDIntf.(uint32) + case *p2p.AppGossip: + return engine.AppGossip(ctx, nodeID, m.AppBytes) - return engine.AppRequestFailed(nodeID, requestID) + case *message.CrossChainAppRequest: + return engine.CrossChainAppRequest( + ctx, + m.SourceChainID, + m.RequestID, + msg.Expiration(), + m.Message, + ) - case message.AppGossip: - appBytesIntf, err := msg.Get(message.AppBytes) - if err != nil { - h.ctx.Log.Debug("dropping message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Stringer("field", message.AppBytes), - zap.Error(err), - ) - return nil - } - appBytes := appBytesIntf.([]byte) + case *message.CrossChainAppResponse: + return engine.CrossChainAppResponse( + ctx, + m.SourceChainID, + m.RequestID, + m.Message, + ) - return engine.AppGossip(nodeID, appBytes) + case *message.CrossChainAppRequestFailed: + return engine.CrossChainAppRequestFailed( + ctx, + m.SourceChainID, + m.RequestID, + ) 
default: return fmt.Errorf( @@ -955,49 +846,81 @@ func (h *handler) executeAsyncMsg(msg message.InboundMessage) error { // Any returned error is treated as fatal func (h *handler) handleChanMsg(msg message.InboundMessage) error { - h.ctx.Log.Debug("forwarding chan message to consensus", - zap.Stringer("messageString", msg), - ) - var ( op = msg.Op() + body = msg.Message() startTime = h.clock.Time() + // Check if the chain is in normal operation at the start of message + // execution (may change during execution) + isNormalOp = h.ctx.State.Get().State == snow.NormalOp + ) + h.ctx.Log.Debug("forwarding chan message to consensus", + zap.Stringer("messageOp", op), + ) + h.ctx.Log.Verbo("forwarding chan message to consensus", + zap.Stringer("messageOp", op), + zap.Any("message", body), ) h.ctx.Lock.Lock() + lockAcquiredTime := h.clock.Time() defer func() { h.ctx.Lock.Unlock() var ( - endTime = h.clock.Time() - histogram = h.metrics.messages[op] + endTime = h.clock.Time() + messageHistograms = h.metrics.messages[op] + msgHandlingTime = lockAcquiredTime.Sub(startTime) + processingTime = endTime.Sub(startTime) ) - histogram.Observe(float64(endTime.Sub(startTime))) + messageHistograms.msgHandlingTime.Observe(float64(msgHandlingTime)) + messageHistograms.processingTime.Observe(float64(processingTime)) msg.OnFinishedHandling() h.ctx.Log.Debug("finished handling chan message", zap.Stringer("messageOp", op), ) + if processingTime > syncProcessingTimeWarnLimit && isNormalOp { + h.ctx.Log.Warn("handling chan message took longer than expected", + zap.Duration("processingTime", processingTime), + zap.Duration("msgHandlingTime", msgHandlingTime), + zap.Stringer("messageOp", op), + zap.Any("message", body), + ) + } }() - engine, err := h.getEngine() - if err != nil { - return err + state := h.ctx.State.Get() + engine, ok := h.engineManager.Get(state.Type).Get(state.State) + if !ok { + return fmt.Errorf( + "%w %s running %s", + errMissingEngine, + state.State, + state.Type, + ) } - 
switch op := msg.Op(); op { - case message.Notify: - vmMsgIntf, err := msg.Get(message.VMMessage) - if err != nil { - return err + switch msg := body.(type) { + case *message.VMMessage: + return engine.Notify(context.TODO(), common.Message(msg.Notification)) + + case *message.GossipRequest: + // TODO: After Cortina is activated, this can be removed as everyone + // will have accepted the StopVertex. + if state.Type == p2p.EngineType_ENGINE_TYPE_SNOWMAN { + avalancheEngine, ok := h.engineManager.Get(p2p.EngineType_ENGINE_TYPE_AVALANCHE).Get(state.State) + if ok { + // This chain was linearized, so we should gossip the Avalanche + // accepted frontier to make sure everyone eventually linearizes + // the chain. + if err := avalancheEngine.Gossip(context.TODO()); err != nil { + return err + } + } } - vmMsg := vmMsgIntf.(uint32) - - return engine.Notify(common.Message(vmMsg)) - - case message.GossipRequest: - return engine.Gossip() + return engine.Gossip(context.TODO()) - case message.Timeout: - return engine.Timeout() + case *message.Timeout: + return engine.Timeout(context.TODO()) default: return fmt.Errorf( @@ -1007,46 +930,39 @@ func (h *handler) handleChanMsg(msg message.InboundMessage) error { } } -func (h *handler) getEngine() (common.Engine, error) { - state := h.ctx.GetState() - switch state { - case snow.StateSyncing: - return h.stateSyncer, nil - case snow.Bootstrapping: - return h.bootstrapper, nil - case snow.NormalOp: - return h.engine, nil - default: - return nil, fmt.Errorf("unknown handler for state %s", state) - } -} - -func (h *handler) popUnexpiredMsg(queue MessageQueue, expired prometheus.Counter) (message.InboundMessage, bool) { +func (h *handler) popUnexpiredMsg( + queue MessageQueue, + expired prometheus.Counter, +) (context.Context, Message, bool) { for { // Get the next message we should process. If the handler is shutting // down, we may fail to pop a message. 
- msg, ok := queue.Pop() + ctx, msg, ok := queue.Pop() if !ok { - return nil, false + return nil, Message{}, false } // If this message's deadline has passed, don't process it. - if expirationTime := msg.ExpirationTime(); !expirationTime.IsZero() && h.clock.Time().After(expirationTime) { - h.ctx.Log.Verbo("dropping message", + if expiration := msg.Expiration(); h.clock.Time().After(expiration) { + h.ctx.Log.Debug("dropping message", zap.String("reason", "timeout"), zap.Stringer("nodeID", msg.NodeID()), - zap.Stringer("messageString", msg), + zap.Stringer("messageOp", msg.Op()), ) + span := trace.SpanFromContext(ctx) + span.AddEvent("dropping message", trace.WithAttributes( + attribute.String("reason", "timeout"), + )) expired.Inc() msg.OnFinishedHandling() continue } - return msg, true + return ctx, msg, true } } -func (h *handler) closeDispatcher() { +func (h *handler) closeDispatcher(ctx context.Context) { h.ctx.Lock.Lock() defer h.ctx.Lock.Unlock() @@ -1055,10 +971,11 @@ func (h *handler) closeDispatcher() { return } - h.shutdown() + h.shutdown(ctx) } -func (h *handler) shutdown() { +// Note: shutdown is only called after all message dispatchers have exited. 
+func (h *handler) shutdown(ctx context.Context) { defer func() { if h.onStopped != nil { go h.onStopped() @@ -1066,15 +983,17 @@ func (h *handler) shutdown() { close(h.closed) }() - currentEngine, err := h.getEngine() - if err != nil { + state := h.ctx.State.Get() + engine, ok := h.engineManager.Get(state.Type).Get(state.State) + if !ok { h.ctx.Log.Error("failed fetching current engine during shutdown", - zap.Error(err), + zap.Stringer("type", state.Type), + zap.Stringer("state", state.State), ) return } - if err := currentEngine.Shutdown(); err != nil { + if err := engine.Shutdown(ctx); err != nil { h.ctx.Log.Error("failed while shutting down the chain", zap.Error(err), ) diff --git a/avalanchego/snow/networking/handler/handler_test.go b/avalanchego/snow/networking/handler/handler_test.go index 14754d68..b9e757b7 100644 --- a/avalanchego/snow/networking/handler/handler_test.go +++ b/avalanchego/snow/networking/handler/handler_test.go @@ -1,51 +1,63 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package handler import ( + "context" "errors" + "sync" "testing" "time" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" ) +const testThreadPoolSize = 2 + +var errFatal = errors.New("error should cause handler to close") + func TestHandlerDropsTimedOutMessages(t *testing.T) { called := make(chan struct{}) - metrics := prometheus.NewRegistry() - mc, err := message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) - require.NoError(t, err) - ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() vdr0 := ids.GenerateTestNodeID() - err = vdrs.AddWeight(vdr0, 1) + err := vdrs.Add(vdr0, nil, ids.Empty, 1) require.NoError(t, err) - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) require.NoError(t, err) handlerIntf, err := New( - mc, ctx, vdrs, nil, - nil, time.Second, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) require.NoError(t, err) handler := handlerIntf.(*handler) @@ -59,40 +71,54 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { }, } bootstrapper.Default(false) - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.GetAcceptedFrontierF = func(nodeID 
ids.NodeID, requestID uint32) error { + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + bootstrapper.GetAcceptedFrontierF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { t.Fatalf("GetAcceptedFrontier message should have timed out") return nil } - bootstrapper.GetAcceptedF = func(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { + bootstrapper.GetAcceptedF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { called <- struct{}{} return nil } - handler.SetBootstrapper(bootstrapper) - ctx.SetState(snow.Bootstrapping) // assumed bootstrapping is ongoing + handler.SetEngineManager(&EngineManager{ + Snowman: &Engine{ + Bootstrapper: bootstrapper, + }, + }) + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, // assumed bootstrap is ongoing + }) pastTime := time.Now() - mc.SetTime(pastTime) handler.clock.Set(pastTime) nodeID := ids.EmptyNodeID reqID := uint32(1) - deadline := time.Nanosecond chainID := ids.ID{} - msg := mc.InboundGetAcceptedFrontier(chainID, reqID, deadline, nodeID) - handler.Push(msg) + msg := Message{ + InboundMessage: message.InboundGetAcceptedFrontier(chainID, reqID, 0*time.Second, nodeID, p2p.EngineType_ENGINE_TYPE_SNOWMAN), + EngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + } + handler.Push(context.Background(), msg) currentTime := time.Now().Add(time.Second) - mc.SetTime(currentTime) handler.clock.Set(currentTime) reqID++ - msg = mc.InboundGetAccepted(chainID, reqID, deadline, nil, nodeID) - handler.Push(msg) + msg = Message{ + InboundMessage: message.InboundGetAccepted(chainID, reqID, 1*time.Second, nil, nodeID, p2p.EngineType_ENGINE_TYPE_SNOWMAN), + EngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + } + handler.Push(context.Background(), msg) - bootstrapper.StartF = func(startReqID uint32) error { return nil } + bootstrapper.StartF = func(context.Context, uint32) error { + return 
nil + } - handler.Start(false) + handler.Start(context.Background(), false) ticker := time.NewTicker(time.Second) defer ticker.Stop() @@ -108,22 +134,25 @@ func TestHandlerClosesOnError(t *testing.T) { ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err := vdrs.AddWeight(ids.GenerateTestNodeID(), 1) - require.NoError(t, err) - metrics := prometheus.NewRegistry() - mc, err := message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) + err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) require.NoError(t, err) - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) require.NoError(t, err) handlerIntf, err := New( - mc, ctx, vdrs, nil, - nil, time.Second, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) require.NoError(t, err) handler := handlerIntf.(*handler) @@ -142,30 +171,47 @@ func TestHandlerClosesOnError(t *testing.T) { }, } bootstrapper.Default(false) - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.GetAcceptedFrontierF = func(nodeID ids.NodeID, requestID uint32) error { - return errors.New("Engine error should cause handler to close") + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + bootstrapper.GetAcceptedFrontierF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + return errFatal } - handler.SetBootstrapper(bootstrapper) engine := &common.EngineTest{T: t} engine.Default(false) - engine.ContextF = func() *snow.ConsensusContext { return ctx } - handler.SetConsensus(engine) + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + + handler.SetEngineManager(&EngineManager{ + Snowman: &Engine{ + Bootstrapper: 
bootstrapper, + Consensus: engine, + }, + }) // assume bootstrapping is ongoing so that InboundGetAcceptedFrontier // should normally be handled - ctx.SetState(snow.Bootstrapping) + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, + }) - bootstrapper.StartF = func(startReqID uint32) error { return nil } + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } - handler.Start(false) + handler.Start(context.Background(), false) nodeID := ids.EmptyNodeID reqID := uint32(1) deadline := time.Nanosecond - msg := mc.InboundGetAcceptedFrontier(ids.ID{}, reqID, deadline, nodeID) - handler.Push(msg) + msg := Message{ + InboundMessage: message.InboundGetAcceptedFrontier(ids.ID{}, reqID, deadline, nodeID, 0), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + } + handler.Push(context.Background(), msg) ticker := time.NewTicker(time.Second) select { @@ -179,20 +225,25 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { closed := make(chan struct{}, 1) ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err := vdrs.AddWeight(ids.GenerateTestNodeID(), 1) + err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) require.NoError(t, err) - mc := message.NewInternalBuilder() - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) require.NoError(t, err) handlerIntf, err := New( - mc, ctx, vdrs, nil, - nil, 1, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) require.NoError(t, err) handler := handlerIntf.(*handler) @@ -208,23 +259,37 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { }, } bootstrapper.Default(false) - bootstrapper.ContextF = func() 
*snow.ConsensusContext { return ctx } - bootstrapper.GetFailedF = func(nodeID ids.NodeID, requestID uint32) error { + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + bootstrapper.GetFailedF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { closed <- struct{}{} return nil } - handler.SetBootstrapper(bootstrapper) - ctx.SetState(snow.Bootstrapping) // assumed bootstrapping is ongoing + handler.SetEngineManager(&EngineManager{ + Snowman: &Engine{ + Bootstrapper: bootstrapper, + }, + }) + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, // assumed bootstrap is ongoing + }) - bootstrapper.StartF = func(startReqID uint32) error { return nil } + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } - handler.Start(false) + handler.Start(context.Background(), false) nodeID := ids.EmptyNodeID chainID := ids.Empty reqID := uint32(1) - inMsg := mc.InternalFailedRequest(message.GetFailed, nodeID, chainID, reqID) - handler.Push(inMsg) + inInboundMessage := Message{ + InboundMessage: message.InternalGetFailed(nodeID, chainID, reqID, p2p.EngineType_ENGINE_TYPE_SNOWMAN), + EngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + } + handler.Push(context.Background(), inInboundMessage) ticker := time.NewTicker(time.Second) select { @@ -240,20 +305,25 @@ func TestHandlerDispatchInternal(t *testing.T) { ctx := snow.DefaultConsensusContextTest() msgFromVMChan := make(chan common.Message) vdrs := validators.NewSet() - err := vdrs.AddWeight(ids.GenerateTestNodeID(), 1) + err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) require.NoError(t, err) - mc := message.NewInternalBuilder() - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + 
time.Second, + ) require.NoError(t, err) handler, err := New( - mc, ctx, vdrs, msgFromVMChan, - nil, time.Second, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) require.NoError(t, err) @@ -266,21 +336,34 @@ func TestHandlerDispatchInternal(t *testing.T) { }, } bootstrapper.Default(false) - handler.SetBootstrapper(bootstrapper) engine := &common.EngineTest{T: t} engine.Default(false) - engine.ContextF = func() *snow.ConsensusContext { return ctx } - engine.NotifyF = func(common.Message) error { + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + engine.NotifyF = func(context.Context, common.Message) error { calledNotify <- struct{}{} return nil } - handler.SetConsensus(engine) - ctx.SetState(snow.NormalOp) // assumed bootstrapping is done - bootstrapper.StartF = func(startReqID uint32) error { return nil } + handler.SetEngineManager(&EngineManager{ + Snowman: &Engine{ + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) - handler.Start(false) + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, // assumed bootstrap is done + }) + + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + + handler.Start(context.Background(), false) msgFromVMChan <- 0 select { @@ -289,3 +372,253 @@ func TestHandlerDispatchInternal(t *testing.T) { case <-calledNotify: } } + +func TestHandlerSubnetConnector(t *testing.T) { + ctx := snow.DefaultConsensusContextTest() + vdrs := validators.NewSet() + err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) + require.NoError(t, err) + + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + connector := validators.NewMockSubnetConnector(ctrl) + + nodeID := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() 
+ + require.NoError(t, err) + handler, err := New( + ctx, + vdrs, + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + connector, + subnets.New(ctx.NodeID, subnets.Config{}), + ) + require.NoError(t, err) + + bootstrapper := &common.BootstrapperTest{ + BootstrapableTest: common.BootstrapableTest{ + T: t, + }, + EngineTest: common.EngineTest{ + T: t, + }, + } + bootstrapper.Default(false) + + engine := &common.EngineTest{T: t} + engine.Default(false) + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + + handler.SetEngineManager(&EngineManager{ + Snowman: &Engine{ + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, // assumed bootstrap is done + }) + + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + + handler.Start(context.Background(), false) + + // Handler should call subnet connector when ConnectedSubnet message is received + var wg sync.WaitGroup + connector.EXPECT().ConnectedSubnet(gomock.Any(), nodeID, subnetID).Do( + func(context.Context, ids.NodeID, ids.ID) { + wg.Done() + }) + + wg.Add(1) + defer wg.Wait() + + subnetInboundMessage := Message{ + InboundMessage: message.InternalConnectedSubnet(nodeID, subnetID), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + } + handler.Push(context.Background(), subnetInboundMessage) +} + +// Tests that messages are routed to the correct engine type +func TestDynamicEngineTypeDispatch(t *testing.T) { + tests := []struct { + name string + currentEngineType p2p.EngineType + requestedEngineType p2p.EngineType + setup func( + h Handler, + b common.BootstrapableEngine, + e common.Engine, + ) + }{ + { + name: "current - avalanche, requested - unspecified", + currentEngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + requestedEngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { + 
h.SetEngineManager(&EngineManager{ + Avalanche: &Engine{ + StateSyncer: nil, + Bootstrapper: b, + Consensus: e, + }, + Snowman: nil, + }) + }, + }, + { + name: "current - avalanche, requested - avalanche", + currentEngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + requestedEngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { + h.SetEngineManager(&EngineManager{ + Avalanche: &Engine{ + StateSyncer: nil, + Bootstrapper: b, + Consensus: e, + }, + Snowman: nil, + }) + }, + }, + { + name: "current - snowman, requested - unspecified", + currentEngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + requestedEngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { + h.SetEngineManager(&EngineManager{ + Avalanche: nil, + Snowman: &Engine{ + StateSyncer: nil, + Bootstrapper: b, + Consensus: e, + }, + }) + }, + }, + { + name: "current - snowman, requested - avalanche", + currentEngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + requestedEngineType: p2p.EngineType_ENGINE_TYPE_AVALANCHE, + setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { + h.SetEngineManager(&EngineManager{ + Avalanche: &Engine{ + StateSyncer: nil, + Bootstrapper: nil, + Consensus: e, + }, + Snowman: &Engine{ + StateSyncer: nil, + Bootstrapper: b, + Consensus: nil, + }, + }) + }, + }, + { + name: "current - snowman, requested - snowman", + currentEngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + requestedEngineType: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + setup: func(h Handler, b common.BootstrapableEngine, e common.Engine) { + h.SetEngineManager(&EngineManager{ + Avalanche: nil, + Snowman: &Engine{ + StateSyncer: nil, + Bootstrapper: b, + Consensus: e, + }, + }) + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + messageReceived := make(chan struct{}) + ctx := snow.DefaultConsensusContextTest() + vdrs := 
validators.NewSet() + err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) + require.NoError(t, err) + + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(t, err) + handler, err := New( + ctx, + vdrs, + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ids.EmptyNodeID, subnets.Config{}), + ) + require.NoError(t, err) + + bootstrapper := &common.BootstrapperTest{ + BootstrapableTest: common.BootstrapableTest{ + T: t, + }, + EngineTest: common.EngineTest{ + T: t, + }, + } + bootstrapper.Default(false) + + engine := &common.EngineTest{T: t} + engine.Default(false) + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + engine.ChitsF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, preferredIDs []ids.ID, acceptedIDs []ids.ID) error { + close(messageReceived) + return nil + } + + test.setup(handler, bootstrapper, engine) + + ctx.State.Set(snow.EngineState{ + Type: test.currentEngineType, + State: snow.NormalOp, // assumed bootstrap is done + }) + + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + + handler.Start(context.Background(), false) + handler.Push(context.TODO(), Message{ + InboundMessage: message.InboundChits( + ids.Empty, + uint32(0), + nil, + nil, + ids.EmptyNodeID, + ), + EngineType: test.requestedEngineType, + }) + + <-messageReceived + }) + } +} diff --git a/avalanchego/snow/networking/handler/message_queue.go b/avalanchego/snow/networking/handler/message_queue.go index 38f41688..1dfeea5e 100644 --- a/avalanchego/snow/networking/handler/message_queue.go +++ b/avalanchego/snow/networking/handler/message_queue.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package handler import ( + "context" "sync" "github.com/prometheus/client_golang/prometheus" @@ -12,26 +13,37 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) -var _ MessageQueue = &messageQueue{} +var _ MessageQueue = (*messageQueue)(nil) + +// Message defines individual messages that have been parsed from the network +// and are now pending execution from the chain. +type Message struct { + // The original message from the peer + message.InboundMessage + // The desired engine type to execute this message. If not specified, + // the current executing engine type is used. + EngineType p2p.EngineType +} type MessageQueue interface { // Add a message. // // If called after [Shutdown], the message will immediately be marked as // having been handled. - Push(message.InboundMessage) + Push(context.Context, Message) - // Get and remove a message. + // Remove and return a message and its context. // // If there are no available messages, this function will block until a // message becomes available or the queue is [Shutdown]. 
- Pop() (message.InboundMessage, bool) + Pop() (context.Context, Message, bool) // Returns the number of messages currently on the queue Len() int @@ -59,7 +71,7 @@ type messageQueue struct { // Node ID --> Messages this node has in [msgs] nodeToUnprocessedMsgs map[ids.NodeID]int // Unprocessed messages - msgs []message.InboundMessage + msgAndCtxs []*msgAndContext } func NewMessageQueue( @@ -80,7 +92,7 @@ func NewMessageQueue( return m, m.metrics.initialize(metricsNamespace, metricsRegisterer, ops) } -func (m *messageQueue) Push(msg message.InboundMessage) { +func (m *messageQueue) Push(ctx context.Context, msg Message) { m.cond.L.Lock() defer m.cond.L.Unlock() @@ -90,7 +102,10 @@ func (m *messageQueue) Push(msg message.InboundMessage) { } // Add the message to the queue - m.msgs = append(m.msgs, msg) + m.msgAndCtxs = append(m.msgAndCtxs, &msgAndContext{ + msg: msg, + ctx: ctx, + }) m.nodeToUnprocessedMsgs[msg.NodeID()]++ // Update metrics @@ -104,21 +119,21 @@ func (m *messageQueue) Push(msg message.InboundMessage) { // FIFO, but skip over messages whose senders whose messages have caused us to // use excessive CPU recently. 
-func (m *messageQueue) Pop() (message.InboundMessage, bool) { +func (m *messageQueue) Pop() (context.Context, Message, bool) { m.cond.L.Lock() defer m.cond.L.Unlock() for { if m.closed { - return nil, false + return nil, Message{}, false } - if len(m.msgs) != 0 { + if len(m.msgAndCtxs) != 0 { break } m.cond.Wait() } - n := len(m.msgs) + n := len(m.msgAndCtxs) i := 0 for { if i == n { @@ -126,15 +141,21 @@ func (m *messageQueue) Pop() (message.InboundMessage, bool) { zap.Int("numMessages", n), ) } - msg := m.msgs[0] - m.msgs[0] = nil - nodeID := msg.NodeID() + + var ( + msgAndCtx = m.msgAndCtxs[0] + msg = msgAndCtx.msg + ctx = msgAndCtx.ctx + nodeID = msg.NodeID() + ) + m.msgAndCtxs[0] = nil + // See if it's OK to process [msg] next if m.canPop(msg) || i == n { // i should never == n but handle anyway as a fail-safe - if cap(m.msgs) == 1 { - m.msgs = nil // Give back memory if possible + if cap(m.msgAndCtxs) == 1 { + m.msgAndCtxs = nil // Give back memory if possible } else { - m.msgs = m.msgs[1:] + m.msgAndCtxs = m.msgAndCtxs[1:] } m.nodeToUnprocessedMsgs[nodeID]-- if m.nodeToUnprocessedMsgs[nodeID] == 0 { @@ -143,12 +164,12 @@ func (m *messageQueue) Pop() (message.InboundMessage, bool) { m.metrics.nodesWithMessages.Set(float64(len(m.nodeToUnprocessedMsgs))) m.metrics.len.Dec() m.metrics.ops[msg.Op()].Dec() - return msg, true + return ctx, msg, true } // [msg.nodeID] is causing excessive CPU usage. // Push [msg] to back of [m.msgs] and handle it later. 
- m.msgs = append(m.msgs, msg) - m.msgs = m.msgs[1:] + m.msgAndCtxs = append(m.msgAndCtxs, msgAndCtx) + m.msgAndCtxs = m.msgAndCtxs[1:] i++ m.metrics.numExcessiveCPU.Inc() } @@ -158,7 +179,7 @@ func (m *messageQueue) Len() int { m.cond.L.Lock() defer m.cond.L.Unlock() - return len(m.msgs) + return len(m.msgAndCtxs) } func (m *messageQueue) Shutdown() { @@ -166,10 +187,10 @@ func (m *messageQueue) Shutdown() { defer m.cond.L.Unlock() // Remove all the current messages from the queue - for _, msg := range m.msgs { - msg.OnFinishedHandling() + for _, msg := range m.msgAndCtxs { + msg.msg.OnFinishedHandling() } - m.msgs = nil + m.msgAndCtxs = nil m.nodeToUnprocessedMsgs = nil // Update metrics @@ -184,23 +205,20 @@ func (m *messageQueue) Shutdown() { // canPop will return true for at least one message in [m.msgs] func (m *messageQueue) canPop(msg message.InboundMessage) bool { // Always pop connected and disconnected messages. - if op := msg.Op(); op == message.Connected || op == message.Disconnected { + if op := msg.Op(); op == message.ConnectedOp || op == message.DisconnectedOp || op == message.ConnectedSubnetOp { return true } // If the deadline to handle [msg] has passed, always pop it. // It will be dropped immediately. - if expirationTime := msg.ExpirationTime(); !expirationTime.IsZero() && m.clock.Time().After(expirationTime) { + if expiration := msg.Expiration(); m.clock.Time().After(expiration) { return true } // Every node has some allowed CPU allocation depending on // the number of nodes with unprocessed messages. baseMaxCPU := 1 / float64(len(m.nodeToUnprocessedMsgs)) nodeID := msg.NodeID() - weight, isVdr := m.vdrs.GetWeight(nodeID) - if !isVdr { - weight = 0 - } + weight := m.vdrs.GetWeight(nodeID) // The sum of validator weights should never be 0, but handle // that case for completeness here to avoid divide by 0. 
portionWeight := float64(0) @@ -213,3 +231,8 @@ func (m *messageQueue) canPop(msg message.InboundMessage) bool { maxCPU := baseMaxCPU + (1.0-baseMaxCPU)*portionWeight return recentCPUUsage <= maxCPU } + +type msgAndContext struct { + msg Message + ctx context.Context +} diff --git a/avalanchego/snow/networking/handler/message_queue_metrics.go b/avalanchego/snow/networking/handler/message_queue_metrics.go index 245d0c7e..e165d045 100644 --- a/avalanchego/snow/networking/handler/message_queue_metrics.go +++ b/avalanchego/snow/networking/handler/message_queue_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler diff --git a/avalanchego/snow/networking/handler/message_queue_test.go b/avalanchego/snow/networking/handler/message_queue_test.go index e94c4fa3..8e1bf550 100644 --- a/avalanchego/snow/networking/handler/message_queue_test.go +++ b/avalanchego/snow/networking/handler/message_queue_test.go @@ -1,10 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package handler import ( - "fmt" + "context" "testing" "time" @@ -16,143 +16,153 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" ) +const engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN + func TestQueue(t *testing.T) { - for _, useProto := range []bool{false, true} { - t.Run(fmt.Sprintf("use proto buf message creator %v", useProto), func(tt *testing.T) { - ctrl := gomock.NewController(tt) - defer ctrl.Finish() - - require := require.New(tt) - cpuTracker := tracker.NewMockTracker(ctrl) - vdrs := validators.NewSet() - vdr1ID, vdr2ID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - require.NoError(vdrs.AddWeight(vdr1ID, 1)) - require.NoError(vdrs.AddWeight(vdr2ID, 1)) - mIntf, err := NewMessageQueue(logging.NoLog{}, vdrs, cpuTracker, "", prometheus.NewRegistry(), message.SynchronousOps) - require.NoError(err) - u := mIntf.(*messageQueue) - currentTime := time.Now() - u.clock.Set(currentTime) - - metrics := prometheus.NewRegistry() - var mc message.Creator - if !useProto { - mc, err = message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) - } else { - mc, err = message.NewCreatorWithProto(metrics, "dummyNamespace", true, 10*time.Second) - } - require.NoError(err) - - mc.SetTime(currentTime) - msg1 := mc.InboundPut( - ids.Empty, - 0, - nil, - vdr1ID, - ) - - // Push then pop should work regardless of usage when there are no other - // messages on [u.msgs] - cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.1).Times(1) - u.Push(msg1) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) - gotMsg1, ok := u.Pop() - require.True(ok) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) - require.EqualValues(msg1, gotMsg1) - - 
cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) - u.Push(msg1) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) - gotMsg1, ok = u.Pop() - require.True(ok) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) - require.EqualValues(msg1, gotMsg1) - - cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(1.0).Times(1) - u.Push(msg1) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) - gotMsg1, ok = u.Pop() - require.True(ok) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) - require.EqualValues(msg1, gotMsg1) - - cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) - u.Push(msg1) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) - gotMsg1, ok = u.Pop() - require.True(ok) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) - require.EqualValues(msg1, gotMsg1) - - // Push msg1 from vdr1ID - u.Push(msg1) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) - require.EqualValues(1, u.Len()) - - msg2 := mc.InboundGet(ids.Empty, 0, 0, ids.Empty, vdr2ID) - - // Push msg2 from vdr2ID - u.Push(msg2) - require.EqualValues(2, u.Len()) - require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr2ID]) - // Set vdr1's usage to 99% and vdr2's to .01 - cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(.99).Times(2) - cpuTracker.EXPECT().Usage(vdr2ID, gomock.Any()).Return(.01).Times(1) - // Pop should return msg2 first because vdr1 has exceeded it's portion of CPU time - gotMsg2, ok := u.Pop() - require.True(ok) - require.EqualValues(1, u.Len()) - require.EqualValues(msg2, gotMsg2) - gotMsg1, ok = u.Pop() - require.True(ok) - require.EqualValues(msg1, gotMsg1) - require.Len(u.nodeToUnprocessedMsgs, 0) - require.EqualValues(0, u.Len()) - - // u is now empty - // Non-validators should be able to put messages onto [u] - nonVdrNodeID1, nonVdrNodeID2 := 
ids.GenerateTestNodeID(), ids.GenerateTestNodeID() - msg3 := mc.InboundPullQuery(ids.Empty, 0, 0, ids.Empty, nonVdrNodeID1) - msg4 := mc.InboundPushQuery(ids.Empty, 0, 0, nil, nonVdrNodeID2) - u.Push(msg3) - u.Push(msg4) - u.Push(msg1) - require.EqualValues(3, u.Len()) - - // msg1 should get popped first because nonVdrNodeID1 and nonVdrNodeID2 - // exceeded their limit - cpuTracker.EXPECT().Usage(nonVdrNodeID1, gomock.Any()).Return(.34).Times(1) - cpuTracker.EXPECT().Usage(nonVdrNodeID2, gomock.Any()).Return(.34).Times(2) - cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) - - // u.msgs is [msg3, msg4, msg1] - gotMsg1, ok = u.Pop() - require.True(ok) - require.EqualValues(msg1, gotMsg1) - // u.msgs is [msg3, msg4] - cpuTracker.EXPECT().Usage(nonVdrNodeID1, gomock.Any()).Return(.51).Times(2) - gotMsg4, ok := u.Pop() - require.True(ok) - require.EqualValues(msg4, gotMsg4) - // u.msgs is [msg3] - gotMsg3, ok := u.Pop() - require.True(ok) - require.EqualValues(msg3, gotMsg3) - require.EqualValues(0, u.Len()) - }) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + require := require.New(t) + cpuTracker := tracker.NewMockTracker(ctrl) + vdrs := validators.NewSet() + vdr1ID, vdr2ID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() + require.NoError(vdrs.Add(vdr1ID, nil, ids.Empty, 1)) + require.NoError(vdrs.Add(vdr2ID, nil, ids.Empty, 1)) + mIntf, err := NewMessageQueue(logging.NoLog{}, vdrs, cpuTracker, "", prometheus.NewRegistry(), message.SynchronousOps) + require.NoError(err) + u := mIntf.(*messageQueue) + currentTime := time.Now() + u.clock.Set(currentTime) + + msg1 := Message{ + InboundMessage: message.InboundPullQuery( + ids.Empty, + 0, + time.Second, + ids.GenerateTestID(), + vdr1ID, + engineType, + ), + EngineType: engineType, + } + + // Push then pop should work regardless of usage when there are no other + // messages on [u.msgs] + cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.1).Times(1) + u.Push(context.Background(), 
msg1) + require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.EqualValues(1, u.Len()) + _, gotMsg1, ok := u.Pop() + require.True(ok) + require.Len(u.nodeToUnprocessedMsgs, 0) + require.EqualValues(0, u.Len()) + require.EqualValues(msg1, gotMsg1) + + cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) + u.Push(context.Background(), msg1) + require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.EqualValues(1, u.Len()) + _, gotMsg1, ok = u.Pop() + require.True(ok) + require.Len(u.nodeToUnprocessedMsgs, 0) + require.EqualValues(0, u.Len()) + require.EqualValues(msg1, gotMsg1) + + cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(1.0).Times(1) + u.Push(context.Background(), msg1) + require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.EqualValues(1, u.Len()) + _, gotMsg1, ok = u.Pop() + require.True(ok) + require.Len(u.nodeToUnprocessedMsgs, 0) + require.EqualValues(0, u.Len()) + require.EqualValues(msg1, gotMsg1) + + cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) + u.Push(context.Background(), msg1) + require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.EqualValues(1, u.Len()) + _, gotMsg1, ok = u.Pop() + require.True(ok) + require.Len(u.nodeToUnprocessedMsgs, 0) + require.EqualValues(0, u.Len()) + require.EqualValues(msg1, gotMsg1) + + // Push msg1 from vdr1ID + u.Push(context.Background(), msg1) + require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr1ID]) + require.EqualValues(1, u.Len()) + + msg2 := Message{ + InboundMessage: message.InboundPullQuery( + ids.Empty, + 0, + time.Second, + ids.GenerateTestID(), + vdr2ID, + engineType, + ), + EngineType: engineType, + } + + // Push msg2 from vdr2ID + u.Push(context.Background(), msg2) + require.EqualValues(2, u.Len()) + require.EqualValues(1, u.nodeToUnprocessedMsgs[vdr2ID]) + // Set vdr1's usage to 99% and vdr2's to .01 + cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(.99).Times(2) + cpuTracker.EXPECT().Usage(vdr2ID, 
gomock.Any()).Return(.01).Times(1) + // Pop should return msg2 first because vdr1 has exceeded it's portion of CPU time + _, gotMsg2, ok := u.Pop() + require.True(ok) + require.EqualValues(1, u.Len()) + require.EqualValues(msg2, gotMsg2) + _, gotMsg1, ok = u.Pop() + require.True(ok) + require.EqualValues(msg1, gotMsg1) + require.Len(u.nodeToUnprocessedMsgs, 0) + require.EqualValues(0, u.Len()) + + // u is now empty + // Non-validators should be able to put messages onto [u] + nonVdrNodeID1, nonVdrNodeID2 := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() + msg3 := Message{ + InboundMessage: message.InboundPullQuery(ids.Empty, 0, 0, ids.Empty, nonVdrNodeID1, engineType), + EngineType: engineType, + } + msg4 := Message{ + InboundMessage: message.InboundPushQuery(ids.Empty, 0, 0, nil, nonVdrNodeID2, engineType), + EngineType: engineType, } + u.Push(context.Background(), msg3) + u.Push(context.Background(), msg4) + u.Push(context.Background(), msg1) + require.EqualValues(3, u.Len()) + + // msg1 should get popped first because nonVdrNodeID1 and nonVdrNodeID2 + // exceeded their limit + cpuTracker.EXPECT().Usage(nonVdrNodeID1, gomock.Any()).Return(.34).Times(1) + cpuTracker.EXPECT().Usage(nonVdrNodeID2, gomock.Any()).Return(.34).Times(2) + cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) + + // u.msgs is [msg3, msg4, msg1] + _, gotMsg1, ok = u.Pop() + require.True(ok) + require.EqualValues(msg1, gotMsg1) + // u.msgs is [msg3, msg4] + cpuTracker.EXPECT().Usage(nonVdrNodeID1, gomock.Any()).Return(.51).Times(2) + _, gotMsg4, ok := u.Pop() + require.True(ok) + require.EqualValues(msg4, gotMsg4) + // u.msgs is [msg3] + _, gotMsg3, ok := u.Pop() + require.True(ok) + require.EqualValues(msg3, gotMsg3) + require.EqualValues(0, u.Len()) } diff --git a/avalanchego/snow/networking/handler/metrics.go b/avalanchego/snow/networking/handler/metrics.go index 853c0999..a8776b30 100644 --- a/avalanchego/snow/networking/handler/metrics.go +++ 
b/avalanchego/snow/networking/handler/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package handler @@ -16,7 +16,12 @@ import ( type metrics struct { expired prometheus.Counter asyncExpired prometheus.Counter - messages map[message.Op]metric.Averager + messages map[message.Op]*messageProcessing +} + +type messageProcessing struct { + processingTime metric.Averager + msgHandlingTime metric.Averager } func newMetrics(namespace string, reg prometheus.Registerer) (*metrics, error) { @@ -37,16 +42,26 @@ func newMetrics(namespace string, reg prometheus.Registerer) (*metrics, error) { reg.Register(asyncExpired), ) - messages := make(map[message.Op]metric.Averager, len(message.ConsensusOps)) + messages := make(map[message.Op]*messageProcessing, len(message.ConsensusOps)) for _, op := range message.ConsensusOps { opStr := op.String() - messages[op] = metric.NewAveragerWithErrs( - namespace, - opStr, - fmt.Sprintf("time (in ns) of processing a %s", opStr), - reg, - &errs, - ) + messageProcessing := &messageProcessing{ + processingTime: metric.NewAveragerWithErrs( + namespace, + opStr, + fmt.Sprintf("time (in ns) spent handling a %s", opStr), + reg, + &errs, + ), + msgHandlingTime: metric.NewAveragerWithErrs( + namespace, + fmt.Sprintf("%s_msg_handling", opStr), + fmt.Sprintf("time (in ns) spent handling a %s after grabbing the lock", opStr), + reg, + &errs, + ), + } + messages[op] = messageProcessing } return &metrics{ diff --git a/avalanchego/snow/networking/handler/mock_handler.go b/avalanchego/snow/networking/handler/mock_handler.go new file mode 100644 index 00000000..a94fd997 --- /dev/null +++ b/avalanchego/snow/networking/handler/mock_handler.go @@ -0,0 +1,210 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. 
DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/networking/handler (interfaces: Handler) + +// Package handler is a generated GoMock package. +package handler + +import ( + context "context" + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + snow "github.com/ava-labs/avalanchego/snow" + gomock "github.com/golang/mock/gomock" +) + +// MockHandler is a mock of Handler interface. +type MockHandler struct { + ctrl *gomock.Controller + recorder *MockHandlerMockRecorder +} + +// MockHandlerMockRecorder is the mock recorder for MockHandler. +type MockHandlerMockRecorder struct { + mock *MockHandler +} + +// NewMockHandler creates a new mock instance. +func NewMockHandler(ctrl *gomock.Controller) *MockHandler { + mock := &MockHandler{ctrl: ctrl} + mock.recorder = &MockHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockHandler) EXPECT() *MockHandlerMockRecorder { + return m.recorder +} + +// Context mocks base method. +func (m *MockHandler) Context() *snow.ConsensusContext { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Context") + ret0, _ := ret[0].(*snow.ConsensusContext) + return ret0 +} + +// Context indicates an expected call of Context. +func (mr *MockHandlerMockRecorder) Context() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Context", reflect.TypeOf((*MockHandler)(nil).Context)) +} + +// GetEngineManager mocks base method. +func (m *MockHandler) GetEngineManager() *EngineManager { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEngineManager") + ret0, _ := ret[0].(*EngineManager) + return ret0 +} + +// GetEngineManager indicates an expected call of GetEngineManager. 
+func (mr *MockHandlerMockRecorder) GetEngineManager() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEngineManager", reflect.TypeOf((*MockHandler)(nil).GetEngineManager)) +} + +// HealthCheck mocks base method. +func (m *MockHandler) HealthCheck(arg0 context.Context) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HealthCheck", arg0) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HealthCheck indicates an expected call of HealthCheck. +func (mr *MockHandlerMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockHandler)(nil).HealthCheck), arg0) +} + +// Len mocks base method. +func (m *MockHandler) Len() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Len") + ret0, _ := ret[0].(int) + return ret0 +} + +// Len indicates an expected call of Len. +func (mr *MockHandlerMockRecorder) Len() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockHandler)(nil).Len)) +} + +// Push mocks base method. +func (m *MockHandler) Push(arg0 context.Context, arg1 Message) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Push", arg0, arg1) +} + +// Push indicates an expected call of Push. +func (mr *MockHandlerMockRecorder) Push(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Push", reflect.TypeOf((*MockHandler)(nil).Push), arg0, arg1) +} + +// RegisterTimeout mocks base method. +func (m *MockHandler) RegisterTimeout(arg0 time.Duration) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RegisterTimeout", arg0) +} + +// RegisterTimeout indicates an expected call of RegisterTimeout. 
+func (mr *MockHandlerMockRecorder) RegisterTimeout(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterTimeout", reflect.TypeOf((*MockHandler)(nil).RegisterTimeout), arg0) +} + +// SetEngineManager mocks base method. +func (m *MockHandler) SetEngineManager(arg0 *EngineManager) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetEngineManager", arg0) +} + +// SetEngineManager indicates an expected call of SetEngineManager. +func (mr *MockHandlerMockRecorder) SetEngineManager(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetEngineManager", reflect.TypeOf((*MockHandler)(nil).SetEngineManager), arg0) +} + +// SetOnStopped mocks base method. +func (m *MockHandler) SetOnStopped(arg0 func()) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetOnStopped", arg0) +} + +// SetOnStopped indicates an expected call of SetOnStopped. +func (mr *MockHandlerMockRecorder) SetOnStopped(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetOnStopped", reflect.TypeOf((*MockHandler)(nil).SetOnStopped), arg0) +} + +// ShouldHandle mocks base method. +func (m *MockHandler) ShouldHandle(arg0 ids.NodeID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShouldHandle", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// ShouldHandle indicates an expected call of ShouldHandle. +func (mr *MockHandlerMockRecorder) ShouldHandle(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShouldHandle", reflect.TypeOf((*MockHandler)(nil).ShouldHandle), arg0) +} + +// Start mocks base method. +func (m *MockHandler) Start(arg0 context.Context, arg1 bool) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Start", arg0, arg1) +} + +// Start indicates an expected call of Start. 
+func (mr *MockHandlerMockRecorder) Start(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockHandler)(nil).Start), arg0, arg1) +} + +// Stop mocks base method. +func (m *MockHandler) Stop(arg0 context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Stop", arg0) +} + +// Stop indicates an expected call of Stop. +func (mr *MockHandlerMockRecorder) Stop(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stop", reflect.TypeOf((*MockHandler)(nil).Stop), arg0) +} + +// StopWithError mocks base method. +func (m *MockHandler) StopWithError(arg0 context.Context, arg1 error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "StopWithError", arg0, arg1) +} + +// StopWithError indicates an expected call of StopWithError. +func (mr *MockHandlerMockRecorder) StopWithError(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopWithError", reflect.TypeOf((*MockHandler)(nil).StopWithError), arg0, arg1) +} + +// Stopped mocks base method. +func (m *MockHandler) Stopped() chan struct{} { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Stopped") + ret0, _ := ret[0].(chan struct{}) + return ret0 +} + +// Stopped indicates an expected call of Stopped. +func (mr *MockHandlerMockRecorder) Stopped() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stopped", reflect.TypeOf((*MockHandler)(nil).Stopped)) +} diff --git a/avalanchego/snow/networking/handler/parser.go b/avalanchego/snow/networking/handler/parser.go index d55759f3..9349b073 100644 --- a/avalanchego/snow/networking/handler/parser.go +++ b/avalanchego/snow/networking/handler/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package handler @@ -7,23 +7,14 @@ import ( "errors" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/utils/set" ) -var ( - errDuplicatedID = errors.New("inbound message contains duplicated ID") - errDuplicatedHeight = errors.New("inbound message contains duplicated height") -) - -func getIDs(field message.Field, msg message.InboundMessage) ([]ids.ID, error) { - idsBytesIntf, err := msg.Get(field) - if err != nil { - return nil, err - } - idsBytes := idsBytesIntf.([][]byte) +var errDuplicatedID = errors.New("inbound message contains duplicated ID") +func getIDs(idsBytes [][]byte) ([]ids.ID, error) { res := make([]ids.ID, len(idsBytes)) - idSet := ids.NewSet(len(idsBytes)) + idSet := set.NewSet[ids.ID](len(idsBytes)) for i, bytes := range idsBytes { id, err := ids.ToID(bytes) if err != nil { @@ -37,20 +28,3 @@ func getIDs(field message.Field, msg message.InboundMessage) ([]ids.ID, error) { } return res, nil } - -func getSummaryHeights(msg message.InboundMessage) ([]uint64, error) { - heightsIntf, err := msg.Get(message.SummaryHeights) - if err != nil { - return nil, err - } - heights := heightsIntf.([]uint64) - - heightsSet := make(map[uint64]struct{}, len(heights)) - for _, height := range heights { - if _, found := heightsSet[height]; found { - return nil, errDuplicatedHeight - } - heightsSet[height] = struct{}{} - } - return heights, nil -} diff --git a/avalanchego/snow/networking/router/chain_router.go b/avalanchego/snow/networking/router/chain_router.go index 172e8261..d8e7eced 100644 --- a/avalanchego/snow/networking/router/chain_router.go +++ b/avalanchego/snow/networking/router/chain_router.go @@ -1,10 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package router import ( - "encoding/binary" + "context" "errors" "fmt" "strings" @@ -17,23 +17,24 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/networking/handler" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" ) var ( - errUnknownChain = errors.New("received message for unknown chain") + errUnknownChain = errors.New("received message for unknown chain") + errUnallowedNode = errors.New("received message from non-allowed node") - _ Router = &ChainRouter{} - _ benchlist.Benchable = &ChainRouter{} + _ Router = (*ChainRouter)(nil) + _ benchlist.Benchable = (*ChainRouter)(nil) ) type requestEntry struct { @@ -41,23 +42,29 @@ type requestEntry struct { time time.Time // The type of request that was made op message.Op + // The engine type of the request that was made + engineType p2p.EngineType } type peer struct { - version *version.Application - trackedSubnets ids.Set + version *version.Application + // The subnets that this peer is currently tracking + trackedSubnets set.Set[ids.ID] + // The subnets that this peer actually has a connection to. + // This is a subset of trackedSubnets. + connectedSubnets set.Set[ids.ID] } // ChainRouter routes incoming messages from the validator network // to the consensus engines that the messages are intended for. // Note that consensus engines are uniquely identified by the ID of the chain // that they are working on. 
+// Invariant: P-chain must be registered before processing any messages type ChainRouter struct { - clock mockable.Clock - log logging.Logger - msgCreator message.InternalMsgBuilder - lock sync.Mutex - chains map[ids.ID]handler.Handler + clock mockable.Clock + log logging.Logger + lock sync.Mutex + chainHandlers map[ids.ID]handler.Handler // It is only safe to call [RegisterResponse] with the router lock held. Any // other calls to the timeout manager with the router lock held could cause @@ -65,20 +72,19 @@ type ChainRouter struct { timeoutManager timeout.Manager closeTimeout time.Duration + myNodeID ids.NodeID peers map[ids.NodeID]*peer // node ID --> chains that node is benched on // invariant: if a node is benched on any chain, it is treated as disconnected on all chains - benched map[ids.NodeID]ids.Set - criticalChains ids.Set + benched map[ids.NodeID]set.Set[ids.ID] + criticalChains set.Set[ids.ID] + stakingEnabled bool onFatal func(exitCode int) metrics *routerMetrics // Parameters for doing health checks healthConfig HealthConfig // aggregator of requests based on their time - timedRequests linkedhashmap.LinkedHashmap[ids.ID, requestEntry] - // Must only be accessed in method [createRequestID]. - // [lock] must be held when [requestIDBytes] is accessed. - requestIDBytes []byte + timedRequests linkedhashmap.LinkedHashmap[ids.RequestID, requestEntry] } // Initialize the router. 
@@ -89,34 +95,34 @@ type ChainRouter struct { func (cr *ChainRouter) Initialize( nodeID ids.NodeID, log logging.Logger, - msgCreator message.InternalMsgBuilder, timeoutManager timeout.Manager, closeTimeout time.Duration, - criticalChains ids.Set, - whitelistedSubnets ids.Set, + criticalChains set.Set[ids.ID], + stakingEnabled bool, + trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, metricsNamespace string, metricsRegisterer prometheus.Registerer, ) error { cr.log = log - cr.msgCreator = msgCreator - cr.chains = make(map[ids.ID]handler.Handler) + cr.chainHandlers = make(map[ids.ID]handler.Handler) cr.timeoutManager = timeoutManager cr.closeTimeout = closeTimeout - cr.benched = make(map[ids.NodeID]ids.Set) + cr.benched = make(map[ids.NodeID]set.Set[ids.ID]) cr.criticalChains = criticalChains + cr.stakingEnabled = stakingEnabled cr.onFatal = onFatal - cr.timedRequests = linkedhashmap.New[ids.ID, requestEntry]() + cr.timedRequests = linkedhashmap.New[ids.RequestID, requestEntry]() cr.peers = make(map[ids.NodeID]*peer) cr.healthConfig = healthConfig - cr.requestIDBytes = make([]byte, hashing.AddrLen+hashing.HashLen+wrappers.IntLen+wrappers.ByteLen) // Validator ID, Chain ID, Request ID, Msg Type // Mark myself as connected + cr.myNodeID = nodeID myself := &peer{ version: version.CurrentApp, } - myself.trackedSubnets.Union(whitelistedSubnets) + myself.trackedSubnets.Union(trackedSubnets) myself.trackedSubnets.Add(constants.PrimaryNetworkID) cr.peers[nodeID] = myself @@ -129,72 +135,95 @@ func (cr *ChainRouter) Initialize( return nil } -// RegisterRequest marks that we should expect to receive a reply from the given -// validator regarding the given chain and the reply should have the given -// requestID. +// RegisterRequest marks that we should expect to receive a reply for a request +// issued by [requestingChainID] from the given node's [respondingChainID] and +// the reply should have the given requestID. 
+// // The type of message we expect is [op]. +// // Every registered request must be cleared either by receiving a valid reply // and passing it to the appropriate chain or by a timeout. // This method registers a timeout that calls such methods if we don't get a // reply in time. func (cr *ChainRouter) RegisterRequest( + ctx context.Context, nodeID ids.NodeID, - chainID ids.ID, + requestingChainID ids.ID, + respondingChainID ids.ID, requestID uint32, op message.Op, + timeoutMsg message.InboundMessage, + engineType p2p.EngineType, ) { cr.lock.Lock() // When we receive a response message type (Chits, Put, Accepted, etc.) // we validate that we actually sent the corresponding request. // Give this request a unique ID so we can do that validation. - uniqueRequestID := cr.createRequestID(nodeID, chainID, requestID, op) + // + // For cross-chain messages, the responding chain is the source of the + // response which is sent to the requester which is the destination, + // which is why we flip the two in request id generation. + uniqueRequestID := ids.RequestID{ + NodeID: nodeID, + SourceChainID: respondingChainID, + DestinationChainID: requestingChainID, + RequestID: requestID, + Op: byte(op), + } // Add to the set of unfulfilled requests cr.timedRequests.Put(uniqueRequestID, requestEntry{ - time: cr.clock.Time(), - op: op, + time: cr.clock.Time(), + op: op, + engineType: engineType, }) cr.metrics.outstandingRequests.Set(float64(cr.timedRequests.Len())) cr.lock.Unlock() - failedOp, exists := message.ResponseToFailedOps[op] - if !exists { - // This should never happen - cr.log.Error("failed to convert message operation", - zap.Stringer("messageOp", op), - ) - return - } + // Determine whether we should include the latency of this request in our + // measurements. + // - Don't measure messages from ourself since these don't go over the + // network. 
+ // - Don't measure Puts because an adversary can cause us to issue a Get + // request to them and not respond, causing a timeout, skewing latency + // measurements. + shouldMeasureLatency := nodeID != cr.myNodeID && op != message.PutOp // Register a timeout to fire if we don't get a reply in time. - cr.timeoutManager.RegisterRequest(nodeID, chainID, op, uniqueRequestID, func() { - msg := cr.msgCreator.InternalFailedRequest(failedOp, nodeID, chainID, requestID) - cr.HandleInbound(msg) - }) + cr.timeoutManager.RegisterRequest( + nodeID, + respondingChainID, + shouldMeasureLatency, + uniqueRequestID, + func() { + cr.HandleInbound(ctx, timeoutMsg) + }, + ) } -func (cr *ChainRouter) HandleInbound(msg message.InboundMessage) { +func (cr *ChainRouter) HandleInbound(ctx context.Context, msg message.InboundMessage) { nodeID := msg.NodeID() op := msg.Op() - chainIDIntf, err := msg.Get(message.ChainID) + m := msg.Message() + destinationChainID, err := message.GetChainID(m) if err != nil { cr.log.Debug("dropping message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), - zap.Stringer("field", message.ChainID), + zap.String("field", "ChainID"), zap.Error(err), ) msg.OnFinishedHandling() return } - chainIDBytes := chainIDIntf.([]byte) - chainID, err := ids.ToID(chainIDBytes) + + sourceChainID, err := message.GetSourceChainID(m) if err != nil { cr.log.Debug("dropping message with invalid field", zap.Stringer("nodeID", nodeID), zap.Stringer("messageOp", op), - zap.Stringer("field", message.ChainID), + zap.String("field", "SourceChainID"), zap.Error(err), ) @@ -202,71 +231,78 @@ func (cr *ChainRouter) HandleInbound(msg message.InboundMessage) { return } - // AppGossip is the only message currently not containing a requestID - // Here we assign the requestID already in use for gossiped containers - // to allow a uniform handling of all messages - var requestID uint32 - if op == message.AppGossip { - requestID = constants.GossipMsgRequestID - } 
else { - // Invariant: Getting a [RequestID] must never error in the handler. Any - // verification performed by the message is done here. - requestIDIntf, err := msg.Get(message.RequestID) - if err != nil { - cr.log.Debug("dropping message with invalid field", - zap.Stringer("nodeID", nodeID), - zap.Stringer("messageOp", op), - zap.Stringer("field", message.RequestID), - zap.Error(err), - ) + requestID, ok := message.GetRequestID(m) + if !ok { + cr.log.Debug("dropping message with invalid field", + zap.Stringer("nodeID", nodeID), + zap.Stringer("messageOp", op), + zap.String("field", "RequestID"), + ) - msg.OnFinishedHandling() - return - } - requestID = requestIDIntf.(uint32) + msg.OnFinishedHandling() + return } cr.lock.Lock() defer cr.lock.Unlock() // Get the chain, if it exists - chain, exists := cr.chains[chainID] - if !exists || !chain.IsValidator(nodeID) { + chain, exists := cr.chainHandlers[destinationChainID] + if !exists { cr.log.Debug("dropping message", zap.Stringer("messageOp", op), zap.Stringer("nodeID", nodeID), - zap.Stringer("chainID", chainID), + zap.Stringer("chainID", destinationChainID), zap.Error(errUnknownChain), ) + msg.OnFinishedHandling() + return + } + if !chain.ShouldHandle(nodeID) { + cr.log.Debug("dropping message", + zap.Stringer("messageOp", op), + zap.Stringer("nodeID", nodeID), + zap.Stringer("chainID", destinationChainID), + zap.Error(errUnallowedNode), + ) msg.OnFinishedHandling() return } - ctx := chain.Context() + chainCtx := chain.Context() // TODO: [requestID] can overflow, which means a timeout on the request // before the overflow may not be handled properly. 
- if _, notRequested := message.UnrequestedOps[op]; notRequested || - (op == message.Put && requestID == constants.GossipMsgRequestID) { - if ctx.IsExecuting() { + if notRequested := message.UnrequestedOps.Contains(op); notRequested || + (op == message.PutOp && requestID == constants.GossipMsgRequestID) { + if chainCtx.Executing.Get() { cr.log.Debug("dropping message and skipping queue", zap.String("reason", "the chain is currently executing"), zap.Stringer("messageOp", op), ) cr.metrics.droppedRequests.Inc() - msg.OnFinishedHandling() return } - chain.Push(msg) + + // Note: engineType is not guaranteed to be one of the explicitly named + // enum values. If it was not specified it defaults to UNSPECIFIED. + engineType, _ := message.GetEngineType(m) + chain.Push( + ctx, + handler.Message{ + InboundMessage: msg, + EngineType: engineType, + }, + ) return } if expectedResponse, isFailed := message.FailedToResponseOps[op]; isFailed { // Create the request ID of the request we sent that this message is in // response to. - uniqueRequestID, req := cr.clearRequest(expectedResponse, nodeID, chainID, requestID) + uniqueRequestID, req := cr.clearRequest(expectedResponse, nodeID, sourceChainID, destinationChainID, requestID) if req == nil { // This was a duplicated response. 
msg.OnFinishedHandling() @@ -277,22 +313,27 @@ func (cr *ChainRouter) HandleInbound(msg message.InboundMessage) { cr.timeoutManager.RemoveRequest(uniqueRequestID) // Pass the failure to the chain - chain.Push(msg) + chain.Push( + ctx, + handler.Message{ + InboundMessage: msg, + EngineType: req.engineType, + }, + ) return } - if ctx.IsExecuting() { + if chainCtx.Executing.Get() { cr.log.Debug("dropping message and skipping queue", zap.String("reason", "the chain is currently executing"), zap.Stringer("messageOp", op), ) cr.metrics.droppedRequests.Inc() - msg.OnFinishedHandling() return } - uniqueRequestID, req := cr.clearRequest(op, nodeID, chainID, requestID) + uniqueRequestID, req := cr.clearRequest(op, nodeID, sourceChainID, destinationChainID, requestID) if req == nil { // We didn't request this message. msg.OnFinishedHandling() @@ -303,22 +344,28 @@ func (cr *ChainRouter) HandleInbound(msg message.InboundMessage) { latency := cr.clock.Time().Sub(req.time) // Tell the timeout manager we got a response - cr.timeoutManager.RegisterResponse(nodeID, chainID, uniqueRequestID, req.op, latency) + cr.timeoutManager.RegisterResponse(nodeID, destinationChainID, uniqueRequestID, req.op, latency) // Pass the response to the chain - chain.Push(msg) + chain.Push( + ctx, + handler.Message{ + InboundMessage: msg, + EngineType: req.engineType, + }, + ) } // Shutdown shuts down this router -func (cr *ChainRouter) Shutdown() { +func (cr *ChainRouter) Shutdown(ctx context.Context) { cr.log.Info("shutting down chain router") cr.lock.Lock() - prevChains := cr.chains - cr.chains = map[ids.ID]handler.Handler{} + prevChains := cr.chainHandlers + cr.chainHandlers = map[ids.ID]handler.Handler{} cr.lock.Unlock() for _, chain := range prevChains { - chain.Stop() + chain.Stop(ctx) } ticker := time.NewTicker(cr.closeTimeout) @@ -336,7 +383,7 @@ func (cr *ChainRouter) Shutdown() { // AddChain registers the specified chain so that incoming // messages can be routed to it -func (cr *ChainRouter) 
AddChain(chain handler.Handler) { +func (cr *ChainRouter) AddChain(ctx context.Context, chain handler.Handler) { cr.lock.Lock() defer cr.lock.Unlock() @@ -345,18 +392,52 @@ func (cr *ChainRouter) AddChain(chain handler.Handler) { zap.Stringer("chainID", chainID), ) chain.SetOnStopped(func() { - cr.removeChain(chainID) + cr.removeChain(ctx, chainID) }) - cr.chains[chainID] = chain + cr.chainHandlers[chainID] = chain // Notify connected validators subnetID := chain.Context().SubnetID for validatorID, peer := range cr.peers { - // If this validator is benched on any chain, treat them as disconnected on all chains - if _, benched := cr.benched[validatorID]; !benched && peer.trackedSubnets.Contains(subnetID) { - msg := cr.msgCreator.InternalConnected(validatorID, peer.version) - chain.Push(msg) + // If this validator is benched on any chain, treat them as disconnected + // on all chains + _, benched := cr.benched[validatorID] + if benched { + continue } + + // If this peer isn't running this chain, then we shouldn't mark them as + // connected + if !peer.trackedSubnets.Contains(subnetID) && cr.stakingEnabled { + continue + } + + msg := message.InternalConnected(validatorID, peer.version) + chain.Push(ctx, + handler.Message{ + InboundMessage: msg, + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + }, + ) + } + + // When we register the P-chain, we mark ourselves as connected on all of + // the subnets that we have tracked. + if chainID != constants.PlatformChainID { + return + } + + // If we have currently benched ourselves, we will mark ourselves as + // connected when we unbench. So skip connecting now. + // This is not "theoretically" possible, but keeping this here prevents us + // from keeping an invariant that we never bench ourselves. 
+ if _, benched := cr.benched[cr.myNodeID]; benched { + return + } + + myself := cr.peers[cr.myNodeID] + for subnetID := range myself.trackedSubnets { + cr.connectedSubnet(myself, cr.myNodeID, subnetID) } } @@ -379,15 +460,32 @@ func (cr *ChainRouter) Connected(nodeID ids.NodeID, nodeVersion *version.Applica return } - msg := cr.msgCreator.InternalConnected(nodeID, nodeVersion) - - // TODO: fire up an event when validator state changes i.e when they leave set, disconnect. - // we cannot put a subnet-only validator check here since Disconnected would not be handled properly. - for _, chain := range cr.chains { - if subnetID == chain.Context().SubnetID { - chain.Push(msg) + msg := message.InternalConnected(nodeID, nodeVersion) + + // TODO: fire up an event when validator state changes i.e when they leave + // set, disconnect. we cannot put a subnet-only validator check here since + // Disconnected would not be handled properly. + // + // When staking is disabled, we only want this clause to happen once. + // Therefore, we only update the chains during the connection of the primary + // network, which is guaranteed to happen for every peer. + if cr.stakingEnabled || subnetID == constants.PrimaryNetworkID { + for _, chain := range cr.chainHandlers { + // If staking is disabled, send a Connected message to every chain + // when connecting to the primary network + if subnetID == chain.Context().SubnetID || !cr.stakingEnabled { + chain.Push( + context.TODO(), + handler.Message{ + InboundMessage: msg, + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + }, + ) + } } } + + cr.connectedSubnet(connectedPeer, nodeID, subnetID) } // Disconnected routes an incoming notification that a validator was connected @@ -401,13 +499,20 @@ func (cr *ChainRouter) Disconnected(nodeID ids.NodeID) { return } - msg := cr.msgCreator.InternalDisconnected(nodeID) - - // TODO: fire up an event when validator state changes i.e when they leave set, disconnect. 
- // we cannot put a subnet-only validator check here since if a validator connects then it leaves validator-set, it would not be disconnected properly. - for _, chain := range cr.chains { - if peer.trackedSubnets.Contains(chain.Context().SubnetID) { - chain.Push(msg) + msg := message.InternalDisconnected(nodeID) + + // TODO: fire up an event when validator state changes i.e when they leave + // set, disconnect. we cannot put a subnet-only validator check here since + // if a validator connects then it leaves validator-set, it would not be + // disconnected properly. + for _, chain := range cr.chainHandlers { + if peer.trackedSubnets.Contains(chain.Context().SubnetID) || !cr.stakingEnabled { + chain.Push( + context.TODO(), + handler.Message{ + InboundMessage: msg, + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + }) } } } @@ -426,13 +531,22 @@ func (cr *ChainRouter) Benched(chainID ids.ID, nodeID ids.NodeID) { return } - msg := cr.msgCreator.InternalDisconnected(nodeID) - - for _, chain := range cr.chains { - if peer.trackedSubnets.Contains(chain.Context().SubnetID) { - chain.Push(msg) + // This will disconnect the node from all subnets when issued to P-chain. + // Even if there is no chain in the subnet. 
+ msg := message.InternalDisconnected(nodeID) + + for _, chain := range cr.chainHandlers { + if peer.trackedSubnets.Contains(chain.Context().SubnetID) || !cr.stakingEnabled { + chain.Push( + context.TODO(), + handler.Message{ + InboundMessage: msg, + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + }) } } + + peer.connectedSubnets.Clear() } // Unbenched routes an incoming notification that a validator was just unbenched @@ -442,31 +556,43 @@ func (cr *ChainRouter) Unbenched(chainID ids.ID, nodeID ids.NodeID) { benchedChains := cr.benched[nodeID] benchedChains.Remove(chainID) - if benchedChains.Len() == 0 { - delete(cr.benched, nodeID) - } else { + if benchedChains.Len() != 0 { cr.benched[nodeID] = benchedChains return // This node is still benched } + delete(cr.benched, nodeID) + peer, found := cr.peers[nodeID] if !found { return } - msg := cr.msgCreator.InternalConnected(nodeID, peer.version) + msg := message.InternalConnected(nodeID, peer.version) - for _, chain := range cr.chains { - if peer.trackedSubnets.Contains(chain.Context().SubnetID) { - chain.Push(msg) + for _, chain := range cr.chainHandlers { + if peer.trackedSubnets.Contains(chain.Context().SubnetID) || !cr.stakingEnabled { + chain.Push( + context.TODO(), + handler.Message{ + InboundMessage: msg, + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + }) } } + + // This will unbench the node from all its subnets. + // We handle this case separately because the node may have been benched on + // a subnet that has no chains. + for subnetID := range peer.trackedSubnets { + cr.connectedSubnet(peer, nodeID, subnetID) + } } // HealthCheck returns results of router health checks. 
Returns: // 1) Information about health check results // 2) An error if the health check reports unhealthy -func (cr *ChainRouter) HealthCheck() (interface{}, error) { +func (cr *ChainRouter) HealthCheck(context.Context) (interface{}, error) { cr.lock.Lock() defer cr.lock.Unlock() @@ -505,9 +631,9 @@ func (cr *ChainRouter) HealthCheck() (interface{}, error) { // RemoveChain removes the specified chain so that incoming // messages can't be routed to it -func (cr *ChainRouter) removeChain(chainID ids.ID) { +func (cr *ChainRouter) removeChain(ctx context.Context, chainID ids.ID) { cr.lock.Lock() - chain, exists := cr.chains[chainID] + chain, exists := cr.chainHandlers[chainID] if !exists { cr.log.Debug("can't remove unknown chain", zap.Stringer("chainID", chainID), @@ -515,10 +641,10 @@ func (cr *ChainRouter) removeChain(chainID ids.ID) { cr.lock.Unlock() return } - delete(cr.chains, chainID) + delete(cr.chainHandlers, chainID) cr.lock.Unlock() - chain.Stop() + chain.Stop(ctx) ticker := time.NewTicker(cr.closeTimeout) defer ticker.Stop() @@ -536,11 +662,18 @@ func (cr *ChainRouter) removeChain(chainID ids.ID) { func (cr *ChainRouter) clearRequest( op message.Op, nodeID ids.NodeID, - chainID ids.ID, + sourceChainID ids.ID, + destinationChainID ids.ID, requestID uint32, -) (ids.ID, *requestEntry) { +) (ids.RequestID, *requestEntry) { // Create the request ID of the request we sent that this message is (allegedly) in response to. - uniqueRequestID := cr.createRequestID(nodeID, chainID, requestID, op) + uniqueRequestID := ids.RequestID{ + NodeID: nodeID, + SourceChainID: sourceChainID, + DestinationChainID: destinationChainID, + RequestID: requestID, + Op: byte(op), + } // Mark that an outstanding request has been fulfilled request, exists := cr.timedRequests.Get(uniqueRequestID) if !exists { @@ -552,12 +685,45 @@ func (cr *ChainRouter) clearRequest( return uniqueRequestID, &request } -// Assumes [cr.lock] is held. -// Assumes [message.Op] is an alias of byte. 
-func (cr *ChainRouter) createRequestID(nodeID ids.NodeID, chainID ids.ID, requestID uint32, op message.Op) ids.ID { - copy(cr.requestIDBytes, nodeID[:]) - copy(cr.requestIDBytes[hashing.AddrLen:], chainID[:]) - binary.BigEndian.PutUint32(cr.requestIDBytes[hashing.AddrLen+hashing.HashLen:], requestID) - cr.requestIDBytes[hashing.AddrLen+hashing.HashLen+wrappers.IntLen] = byte(op) - return hashing.ComputeHash256Array(cr.requestIDBytes) +// connectedSubnet pushes an InternalSubnetConnected message with [nodeID] and +// [subnetID] to the P-chain. This should be called when a node is either first +// connecting to [subnetID] or when a node that was already connected is +// unbenched on [subnetID]. This is a noop if [subnetID] is the Primary Network +// or if the peer is already marked as connected to the subnet. +// Invariant: should be called after *message.Connected is pushed to the P-chain +// Invariant: should be called after the P-chain was provided in [AddChain] +func (cr *ChainRouter) connectedSubnet(peer *peer, nodeID ids.NodeID, subnetID ids.ID) { + // if connected to primary network, we can skip this + // because Connected has its own internal message + if subnetID == constants.PrimaryNetworkID { + return + } + + // peer already connected to this subnet + if peer.connectedSubnets.Contains(subnetID) { + return + } + + msg := message.InternalConnectedSubnet(nodeID, subnetID) + // We only push this message to the P-chain because it is the only chain + // that cares about the connectivity of all subnets. Others chains learn + // about the connectivity of their own subnet when they receive a + // *message.Connected. 
+ platformChain, ok := cr.chainHandlers[constants.PlatformChainID] + if !ok { + cr.log.Error("trying to issue InternalConnectedSubnet message, but platform chain is not registered", + zap.Stringer("nodeID", nodeID), + zap.Stringer("subnetID", subnetID), + ) + return + } + platformChain.Push( + context.TODO(), + handler.Message{ + InboundMessage: msg, + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + }, + ) + + peer.connectedSubnets.Add(subnetID) } diff --git a/avalanchego/snow/networking/router/chain_router_metrics.go b/avalanchego/snow/networking/router/chain_router_metrics.go index 487771df..cfcc9613 100644 --- a/avalanchego/snow/networking/router/chain_router_metrics.go +++ b/avalanchego/snow/networking/router/chain_router_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/avalanchego/snow/networking/router/chain_router_test.go b/avalanchego/snow/networking/router/chain_router_test.go index 124aa190..f235064b 100644 --- a/avalanchego/snow/networking/router/chain_router_test.go +++ b/avalanchego/snow/networking/router/chain_router_test.go @@ -1,19 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package router import ( + "context" "sync" "testing" "time" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/benchlist" @@ -21,16 +26,24 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" ) +const ( + engineType = p2p.EngineType_ENGINE_TYPE_AVALANCHE + testThreadPoolSize = 2 +) + func TestShutdown(t *testing.T) { vdrs := validators.NewSet() - err := vdrs.AddWeight(ids.GenerateTestNodeID(), 1) + err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) require.NoError(t, err) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( @@ -49,24 +62,40 @@ func TestShutdown(t *testing.T) { go tm.Dispatch() chainRouter := ChainRouter{} - - mc := message.NewInternalBuilder() - err = chainRouter.Initialize(ids.EmptyNodeID, logging.NoLog{}, mc, tm, time.Second, ids.Set{}, ids.Set{}, nil, HealthConfig{}, "", prometheus.NewRegistry()) + err = chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Second, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + ) require.NoError(t, err) shutdownCalled := 
make(chan struct{}, 1) ctx := snow.DefaultConsensusContextTest() - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) require.NoError(t, err) - handler, err := handler.New( - mc, + h, err := handler.New( ctx, vdrs, nil, - nil, time.Second, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) require.NoError(t, err) @@ -80,28 +109,57 @@ func TestShutdown(t *testing.T) { } bootstrapper.Default(true) bootstrapper.CantGossip = false - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.ShutdownF = func() error { shutdownCalled <- struct{}{}; return nil } - bootstrapper.ConnectedF = func(nodeID ids.NodeID, nodeVersion *version.Application) error { return nil } - bootstrapper.HaltF = func() {} - handler.SetBootstrapper(bootstrapper) + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + bootstrapper.ShutdownF = func(context.Context) error { + shutdownCalled <- struct{}{} + return nil + } + bootstrapper.ConnectedF = func(context.Context, ids.NodeID, *version.Application) error { + return nil + } + bootstrapper.HaltF = func(context.Context) {} engine := &common.EngineTest{T: t} engine.Default(true) engine.CantGossip = false - engine.ContextF = func() *snow.ConsensusContext { return ctx } - engine.ShutdownF = func() error { shutdownCalled <- struct{}{}; return nil } - engine.ConnectedF = func(nodeID ids.NodeID, nodeVersion *version.Application) error { return nil } - engine.HaltF = func() {} - handler.SetConsensus(engine) - ctx.SetState(snow.NormalOp) // assumed bootstrap is done + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + engine.ShutdownF = func(context.Context) error { + shutdownCalled <- 
struct{}{} + return nil + } + engine.ConnectedF = func(context.Context, ids.NodeID, *version.Application) error { + return nil + } + engine.HaltF = func(context.Context) {} + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) + ctx.State.Set(snow.EngineState{ + Type: engineType, + State: snow.NormalOp, // assumed bootstrapping is done + }) - chainRouter.AddChain(handler) + chainRouter.AddChain(context.Background(), h) - bootstrapper.StartF = func(startReqID uint32) error { return nil } - handler.Start(false) + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + h.Start(context.Background(), false) - chainRouter.Shutdown() + chainRouter.Shutdown(context.Background()) ticker := time.NewTicker(250 * time.Millisecond) select { @@ -111,7 +169,7 @@ func TestShutdown(t *testing.T) { } select { - case <-handler.Stopped(): + case <-h.Stopped(): default: t.Fatal("handler shutdown but never closed its closing channel") } @@ -120,7 +178,7 @@ func TestShutdown(t *testing.T) { func TestShutdownTimesOut(t *testing.T) { nodeID := ids.EmptyNodeID vdrs := validators.NewSet() - err := vdrs.AddWeight(ids.GenerateTestNodeID(), 1) + err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) require.NoError(t, err) benchlist := benchlist.NewNoBenchlist() metrics := prometheus.NewRegistry() @@ -142,16 +200,14 @@ func TestShutdownTimesOut(t *testing.T) { chainRouter := ChainRouter{} - mc, err := message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) - require.NoError(t, err) - - err = chainRouter.Initialize(ids.EmptyNodeID, + err = chainRouter.Initialize( + ids.EmptyNodeID, logging.NoLog{}, - mc, tm, time.Millisecond, - ids.Set{}, - ids.Set{}, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, nil, HealthConfig{}, "", @@ -160,16 +216,22 @@ func 
TestShutdownTimesOut(t *testing.T) { require.NoError(t, err) ctx := snow.DefaultConsensusContextTest() - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) require.NoError(t, err) - handler, err := handler.New( - mc, + h, err := handler.New( ctx, vdrs, nil, - nil, time.Second, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) require.NoError(t, err) @@ -184,40 +246,67 @@ func TestShutdownTimesOut(t *testing.T) { } bootstrapper.Default(true) bootstrapper.CantGossip = false - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.ConnectedF = func(nodeID ids.NodeID, nodeVersion *version.Application) error { return nil } - bootstrapper.HaltF = func() {} - bootstrapper.AncestorsF = func(nodeID ids.NodeID, requestID uint32, containers [][]byte) error { + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + bootstrapper.ConnectedF = func(context.Context, ids.NodeID, *version.Application) error { + return nil + } + bootstrapper.HaltF = func(context.Context) {} + bootstrapper.PullQueryF = func(context.Context, ids.NodeID, uint32, ids.ID) error { // Ancestors blocks for two seconds time.Sleep(2 * time.Second) bootstrapFinished <- struct{}{} return nil } - handler.SetBootstrapper(bootstrapper) engine := &common.EngineTest{T: t} engine.Default(false) - engine.ContextF = func() *snow.ConsensusContext { return ctx } + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } closed := new(int) - engine.ShutdownF = func() error { *closed++; return nil } - handler.SetConsensus(engine) - ctx.SetState(snow.NormalOp) // assumed bootstrapping is done + engine.ShutdownF = func(context.Context) error { + *closed++ + return 
nil + } + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) + ctx.State.Set(snow.EngineState{ + Type: engineType, + State: snow.NormalOp, // assumed bootstrapping is done + }) - chainRouter.AddChain(handler) + chainRouter.AddChain(context.Background(), h) - bootstrapper.StartF = func(startReqID uint32) error { return nil } - handler.Start(false) + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + h.Start(context.Background(), false) shutdownFinished := make(chan struct{}, 1) go func() { chainID := ids.ID{} - msg := mc.InboundAncestors(chainID, 1, nil, nodeID) - handler.Push(msg) + msg := handler.Message{ + InboundMessage: message.InboundPullQuery(chainID, 1, time.Hour, ids.GenerateTestID(), nodeID, engineType), + EngineType: engineType, + } + h.Push(context.Background(), msg) time.Sleep(50 * time.Millisecond) // Pause to ensure message gets processed - chainRouter.Shutdown() + chainRouter.Shutdown(context.Background()) shutdownFinished <- struct{}{} }() @@ -230,6 +319,7 @@ func TestShutdownTimesOut(t *testing.T) { // Ensure that a timeout fires if we don't get a response to a request func TestRouterTimeout(t *testing.T) { + require := require.New(t) // Create a timeout manager maxTimeout := 25 * time.Millisecond tm, err := timeout.NewManager( @@ -244,42 +334,63 @@ func TestRouterTimeout(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() // Create a router chainRouter := ChainRouter{} - mc := message.NewInternalBuilder() - err = chainRouter.Initialize(ids.EmptyNodeID, logging.NoLog{}, mc, tm, time.Millisecond, ids.Set{}, ids.Set{}, nil, HealthConfig{}, "", prometheus.NewRegistry()) - require.NoError(t, err) + err = chainRouter.Initialize( + ids.EmptyNodeID, + 
logging.NoLog{}, + tm, + time.Millisecond, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + ) + require.NoError(err) // Create bootstrapper, engine and handler var ( - calledGetFailed, calledGetAncestorsFailed, - calledQueryFailed, calledQueryFailed2, - calledGetAcceptedFailed, calledGetAcceptedFrontierFailed bool + calledGetStateSummaryFrontierFailed, calledGetAcceptedStateSummaryFailed, + calledGetAcceptedFrontierFailed, calledGetAcceptedFailed, + calledGetAncestorsFailed, + calledGetFailed, calledQueryFailed, + calledAppRequestFailed, + calledCrossChainAppRequestFailed bool wg = sync.WaitGroup{} ) ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err = vdrs.AddWeight(ids.GenerateTestNodeID(), 1) - require.NoError(t, err) + err = vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) + require.NoError(err) - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) - require.NoError(t, err) - handler, err := handler.New( - mc, + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(err) + + h, err := handler.New( ctx, vdrs, nil, - nil, time.Second, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ @@ -291,66 +402,283 @@ func TestRouterTimeout(t *testing.T) { } bootstrapper.Default(true) bootstrapper.CantGossip = false - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.ConnectedF = func(nodeID ids.NodeID, nodeVersion *version.Application) error { return nil } - bootstrapper.HaltF = func() {} - bootstrapper.GetFailedF = func(nodeID ids.NodeID, requestID 
uint32) error { wg.Done(); calledGetFailed = true; return nil } - bootstrapper.GetAncestorsFailedF = func(nodeID ids.NodeID, requestID uint32) error { + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + bootstrapper.ConnectedF = func(context.Context, ids.NodeID, *version.Application) error { + return nil + } + bootstrapper.HaltF = func(context.Context) {} + + bootstrapper.GetStateSummaryFrontierFailedF = func(context.Context, ids.NodeID, uint32) error { defer wg.Done() - calledGetAncestorsFailed = true + calledGetStateSummaryFrontierFailed = true return nil } - bootstrapper.QueryFailedF = func(nodeID ids.NodeID, requestID uint32) error { + bootstrapper.GetAcceptedStateSummaryFailedF = func(context.Context, ids.NodeID, uint32) error { defer wg.Done() - if !calledQueryFailed { - calledQueryFailed = true - return nil - } - calledQueryFailed2 = true + calledGetAcceptedStateSummaryFailed = true + return nil + } + bootstrapper.GetAcceptedFrontierFailedF = func(context.Context, ids.NodeID, uint32) error { + defer wg.Done() + calledGetAcceptedFrontierFailed = true return nil } - bootstrapper.GetAcceptedFailedF = func(nodeID ids.NodeID, requestID uint32) error { + bootstrapper.GetAncestorsFailedF = func(context.Context, ids.NodeID, uint32) error { + defer wg.Done() + calledGetAncestorsFailed = true + return nil + } + bootstrapper.GetAcceptedFailedF = func(context.Context, ids.NodeID, uint32) error { defer wg.Done() calledGetAcceptedFailed = true return nil } - bootstrapper.GetAcceptedFrontierFailedF = func(nodeID ids.NodeID, requestID uint32) error { + bootstrapper.GetFailedF = func(context.Context, ids.NodeID, uint32) error { defer wg.Done() - calledGetAcceptedFrontierFailed = true + calledGetFailed = true return nil } - handler.SetBootstrapper(bootstrapper) - ctx.SetState(snow.Bootstrapping) // assumed bootstrapping is ongoing + bootstrapper.QueryFailedF = func(context.Context, ids.NodeID, uint32) error { + defer wg.Done() + calledQueryFailed = 
true + return nil + } + bootstrapper.AppRequestFailedF = func(context.Context, ids.NodeID, uint32) error { + defer wg.Done() + calledAppRequestFailed = true + return nil + } + bootstrapper.CrossChainAppRequestFailedF = func(context.Context, ids.ID, uint32) error { + defer wg.Done() + calledCrossChainAppRequestFailed = true + return nil + } + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, // assumed bootstrapping is ongoing + }) + + chainRouter.AddChain(context.Background(), h) + + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: nil, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: nil, + }, + }) + h.Start(context.Background(), false) + + nodeID := ids.GenerateTestNodeID() + requestID := uint32(0) + { + wg.Add(1) + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.StateSummaryFrontierOp, + message.InternalGetStateSummaryFrontierFailed( + nodeID, + ctx.ChainID, + requestID, + ), + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) + } - chainRouter.AddChain(handler) + { + wg.Add(1) + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AcceptedStateSummaryOp, + message.InternalGetAcceptedStateSummaryFailed( + nodeID, + ctx.ChainID, + requestID, + ), + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) + } - bootstrapper.StartF = func(startReqID uint32) error { return nil } - handler.Start(false) + { + wg.Add(1) + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AcceptedFrontierOp, + message.InternalGetAcceptedFrontierFailed( + nodeID, + ctx.ChainID, + requestID, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + 
), + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) + } - // Register requests for each request type - msgs := []message.Op{ - message.Put, - message.Ancestors, - message.Chits, - message.Chits, - message.Accepted, - message.AcceptedFrontier, + { + wg.Add(1) + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AcceptedOp, + message.InternalGetAcceptedFailed( + nodeID, + ctx.ChainID, + requestID, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ), + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) } - wg.Add(len(msgs)) + { + wg.Add(1) + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AncestorsOp, + message.InternalGetAncestorsFailed( + nodeID, + ctx.ChainID, + requestID, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ), + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) + } - for i, msg := range msgs { - chainRouter.RegisterRequest(ids.GenerateTestNodeID(), ctx.ChainID, uint32(i), msg) + { + wg.Add(1) + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.PutOp, + message.InternalGetFailed( + nodeID, + ctx.ChainID, + requestID, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ), + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) + } + + { + wg.Add(1) + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.ChitsOp, + message.InternalQueryFailed( + nodeID, + ctx.ChainID, + requestID, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ), + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) + } + + { + wg.Add(1) + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AppResponseOp, + message.InternalAppRequestFailed( + nodeID, + ctx.ChainID, + requestID, + ), + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) + } + + { + wg.Add(1) + requestID++ + chainRouter.RegisterRequest( 
+ context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.CrossChainAppResponseOp, + message.InternalCrossChainAppRequestFailed( + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + ), + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + ) } wg.Wait() chainRouter.lock.Lock() defer chainRouter.lock.Unlock() - require.True(t, calledGetFailed && calledGetAncestorsFailed && calledQueryFailed2 && calledGetAcceptedFailed && calledGetAcceptedFrontierFailed) + + require.True(calledGetStateSummaryFrontierFailed) + require.True(calledGetAcceptedStateSummaryFailed) + require.True(calledGetAcceptedFrontierFailed) + require.True(calledGetAcceptedFailed) + require.True(calledGetAncestorsFailed) + require.True(calledGetFailed) + require.True(calledQueryFailed) + require.True(calledAppRequestFailed) + require.True(calledCrossChainAppRequestFailed) } -func TestRouterClearTimeouts(t *testing.T) { +func TestRouterHonorsRequestedEngine(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + require := require.New(t) + // Create a timeout manager tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -364,35 +692,172 @@ func TestRouterClearTimeouts(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() // Create a router chainRouter := ChainRouter{} + err = chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Millisecond, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + ) + require.NoError(err) - metrics := prometheus.NewRegistry() - mc, err := message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) + h := handler.NewMockHandler(ctrl) + + ctx := snow.DefaultConsensusContextTest() + h.EXPECT().Context().Return(ctx).AnyTimes() + h.EXPECT().SetOnStopped(gomock.Any()).AnyTimes() + + h.EXPECT().Push(gomock.Any(), gomock.Any()).Times(1) + chainRouter.AddChain(context.Background(), h) + + 
h.EXPECT().ShouldHandle(gomock.Any()).Return(true).AnyTimes() + + nodeID := ids.GenerateTestNodeID() + requestID := uint32(0) + { + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.StateSummaryFrontierOp, + message.InternalGetStateSummaryFrontierFailed( + nodeID, + ctx.ChainID, + requestID, + ), + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + ) + msg := message.InboundStateSummaryFrontier( + ctx.ChainID, + requestID, + nil, + nodeID, + ) + + h.EXPECT().Push(gomock.Any(), gomock.Any()).Do(func(_ context.Context, msg handler.Message) { + require.Equal(p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, msg.EngineType) + }) + chainRouter.HandleInbound(context.Background(), msg) + } + + { + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AcceptedStateSummaryOp, + message.InternalGetAcceptedStateSummaryFailed( + nodeID, + ctx.ChainID, + requestID, + ), + engineType, + ) + msg := message.InboundAcceptedStateSummary( + ctx.ChainID, + requestID, + nil, + nodeID, + ) + + h.EXPECT().Push(gomock.Any(), gomock.Any()).Do(func(_ context.Context, msg handler.Message) { + require.Equal(engineType, msg.EngineType) + }) + chainRouter.HandleInbound(context.Background(), msg) + } + + { + requestID++ + msg := message.InboundPushQuery( + ctx.ChainID, + requestID, + 0, + nil, + nodeID, + 100, + ) + + h.EXPECT().Push(gomock.Any(), gomock.Any()).Do(func(_ context.Context, msg handler.Message) { + require.EqualValues(100, msg.EngineType) + }) + chainRouter.HandleInbound(context.Background(), msg) + } + + require.Equal(0, chainRouter.timedRequests.Len()) +} + +func TestRouterClearTimeouts(t *testing.T) { + // Create a timeout manager + tm, err := timeout.NewManager( + &timer.AdaptiveTimeoutConfig{ + InitialTimeout: 3 * time.Second, + MinimumTimeout: 3 * time.Second, + MaximumTimeout: 5 * time.Minute, + TimeoutCoefficient: 1, + TimeoutHalflife: 5 * time.Minute, 
+ }, + benchlist.NewNoBenchlist(), + "", + prometheus.NewRegistry(), + ) require.NoError(t, err) + go tm.Dispatch() - err = chainRouter.Initialize(ids.EmptyNodeID, logging.NoLog{}, mc, tm, time.Millisecond, ids.Set{}, ids.Set{}, nil, HealthConfig{}, "", prometheus.NewRegistry()) + // Create a router + chainRouter := ChainRouter{} + err = chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Millisecond, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + ) require.NoError(t, err) // Create bootstrapper, engine and handler ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err = vdrs.AddWeight(ids.GenerateTestNodeID(), 1) + err = vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) require.NoError(t, err) - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) require.NoError(t, err) - handler, err := handler.New( - mc, + h, err := handler.New( ctx, vdrs, nil, - nil, time.Second, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) require.NoError(t, err) @@ -405,59 +870,222 @@ func TestRouterClearTimeouts(t *testing.T) { }, } bootstrapper.Default(false) - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - handler.SetBootstrapper(bootstrapper) + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } engine := &common.EngineTest{T: t} engine.Default(false) - engine.ContextF = func() *snow.ConsensusContext { return ctx } - handler.SetConsensus(engine) - ctx.SetState(snow.NormalOp) // assumed bootstrapping is done - - chainRouter.AddChain(handler) + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + 
h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, // assumed bootstrapping is done + }) - bootstrapper.StartF = func(startReqID uint32) error { return nil } - handler.Start(false) + chainRouter.AddChain(context.Background(), h) - // Register requests for each request type - ops := []message.Op{ - message.Put, - message.Ancestors, - message.Chits, - message.Accepted, - message.AcceptedFrontier, + bootstrapper.StartF = func(context.Context, uint32) error { + return nil } - - vID := ids.GenerateTestNodeID() - for i, op := range ops { - chainRouter.RegisterRequest(vID, ctx.ChainID, uint32(i), op) + h.Start(context.Background(), false) + + nodeID := ids.GenerateTestNodeID() + requestID := uint32(0) + { + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.StateSummaryFrontierOp, + message.InternalGetStateSummaryFrontierFailed( + nodeID, + ctx.ChainID, + requestID, + ), + engineType, + ) + msg := message.InboundStateSummaryFrontier( + ctx.ChainID, + requestID, + nil, + nodeID, + ) + chainRouter.HandleInbound(context.Background(), msg) } - // Clear each timeout by simulating responses to the queries - // Note: Depends on the ordering of [msgs] - var inMsg message.InboundMessage + { + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AcceptedStateSummaryOp, + message.InternalGetAcceptedStateSummaryFailed( + nodeID, + ctx.ChainID, + requestID, + ), + engineType, + ) + msg := message.InboundAcceptedStateSummary( + ctx.ChainID, + requestID, + nil, + nodeID, + ) + chainRouter.HandleInbound(context.Background(), msg) + } - // Put - 
inMsg = mc.InboundPut(ctx.ChainID, 0, nil, vID) - chainRouter.HandleInbound(inMsg) + { + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AcceptedFrontierOp, + message.InternalGetAcceptedFrontierFailed( + nodeID, + ctx.ChainID, + requestID, + engineType, + ), + engineType, + ) + msg := message.InboundAcceptedFrontier( + ctx.ChainID, + requestID, + nil, + nodeID, + ) + chainRouter.HandleInbound(context.Background(), msg) + } - // Ancestors - inMsg = mc.InboundAncestors(ctx.ChainID, 1, nil, vID) - chainRouter.HandleInbound(inMsg) + { + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AcceptedOp, + message.InternalGetAcceptedFailed( + nodeID, + ctx.ChainID, + requestID, + engineType, + ), + engineType, + ) + msg := message.InboundAccepted( + ctx.ChainID, + requestID, + nil, + nodeID, + ) + chainRouter.HandleInbound(context.Background(), msg) + } - // Chits - inMsg = mc.InboundChits(ctx.ChainID, 2, nil, vID) - chainRouter.HandleInbound(inMsg) + { + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.ChitsOp, + message.InternalQueryFailed( + nodeID, + ctx.ChainID, + requestID, + engineType, + ), + engineType, + ) + msg := message.InboundChits( + ctx.ChainID, + requestID, + nil, + nil, + nodeID, + ) + chainRouter.HandleInbound(context.Background(), msg) + } - // Accepted - inMsg = mc.InboundAccepted(ctx.ChainID, 3, nil, vID) - chainRouter.HandleInbound(inMsg) + { + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.AppResponseOp, + message.InternalAppRequestFailed( + nodeID, + ctx.ChainID, + requestID, + ), + engineType, + ) + msg := message.InboundAppResponse( + ctx.ChainID, + requestID, + nil, + nodeID, + ) + chainRouter.HandleInbound(context.Background(), 
msg) + } - // Accepted Frontier - inMsg = mc.InboundAcceptedFrontier(ctx.ChainID, 4, nil, vID) - chainRouter.HandleInbound(inMsg) + { + requestID++ + chainRouter.RegisterRequest( + context.Background(), + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + message.CrossChainAppResponseOp, + message.InternalCrossChainAppRequestFailed( + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + ), + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + ) + msg := message.InternalCrossChainAppResponse( + nodeID, + ctx.ChainID, + ctx.ChainID, + requestID, + nil, + ) + chainRouter.HandleInbound(context.Background(), msg) + } - require.Equal(t, chainRouter.timedRequests.Len(), 0) + require.Equal(t, 0, chainRouter.timedRequests.Len()) } func TestValidatorOnlyMessageDrops(t *testing.T) { @@ -480,12 +1108,19 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { // Create a router chainRouter := ChainRouter{} - - metrics := prometheus.NewRegistry() - mc, err := message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) - require.NoError(t, err) - - err = chainRouter.Initialize(ids.EmptyNodeID, logging.NoLog{}, mc, tm, time.Millisecond, ids.Set{}, ids.Set{}, nil, HealthConfig{}, "", prometheus.NewRegistry()) + err = chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Millisecond, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + ) require.NoError(t, err) // Create bootstrapper, engine and handler @@ -493,21 +1128,27 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { wg := sync.WaitGroup{} ctx := snow.DefaultConsensusContextTest() - ctx.SetValidatorOnly() + sb := subnets.New(ctx.NodeID, subnets.Config{ValidatorOnly: true}) vdrs := validators.NewSet() vID := ids.GenerateTestNodeID() - err = vdrs.AddWeight(vID, 1) + err = vdrs.Add(vID, nil, ids.Empty, 1) require.NoError(t, err) - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, 
time.Second) + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) require.NoError(t, err) - handler, err := handler.New( - mc, + h, err := handler.New( ctx, vdrs, nil, - nil, time.Second, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + sb, ) require.NoError(t, err) @@ -520,24 +1161,43 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { }, } bootstrapper.Default(false) - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.PullQueryF = func(nodeID ids.NodeID, requestID uint32, containerID ids.ID) error { + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + bootstrapper.PullQueryF = func(context.Context, ids.NodeID, uint32, ids.ID) error { defer wg.Done() calledF = true return nil } - handler.SetBootstrapper(bootstrapper) - ctx.SetState(snow.Bootstrapping) // assumed bootstrapping is ongoing + ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, // assumed bootstrapping is ongoing + }) engine := &common.EngineTest{T: t} - engine.ContextF = func() *snow.ConsensusContext { return ctx } + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } engine.Default(false) - handler.SetConsensus(engine) + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) - chainRouter.AddChain(handler) + chainRouter.AddChain(context.Background(), h) - bootstrapper.StartF = func(startReqID uint32) error { return nil } - handler.Start(false) + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + h.Start(context.Background(), false) var inMsg message.InboundMessage dummyContainerID := ids.GenerateTestID() @@ -547,38 
+1207,460 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { nID := ids.GenerateTestNodeID() calledF = false - inMsg = mc.InboundPullQuery(ctx.ChainID, reqID, time.Hour, dummyContainerID, + inMsg = message.InboundPullQuery( + ctx.ChainID, + reqID, + time.Hour, + dummyContainerID, nID, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, ) - chainRouter.HandleInbound(inMsg) + chainRouter.HandleInbound(context.Background(), inMsg) require.False(t, calledF) // should not be called // Validator case calledF = false reqID++ - inMsg = mc.InboundPullQuery(ctx.ChainID, reqID, time.Hour, dummyContainerID, + inMsg = message.InboundPullQuery( + ctx.ChainID, + reqID, + time.Hour, + dummyContainerID, vID, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, ) wg.Add(1) - chainRouter.HandleInbound(inMsg) + chainRouter.HandleInbound(context.Background(), inMsg) wg.Wait() require.True(t, calledF) // should be called since this is a validator request +} - // register a validator request - reqID++ - chainRouter.RegisterRequest(vID, ctx.ChainID, reqID, message.Get) - require.Equal(t, 1, chainRouter.timedRequests.Len()) +func TestRouterCrossChainMessages(t *testing.T) { + tm, err := timeout.NewManager( + &timer.AdaptiveTimeoutConfig{ + InitialTimeout: 3 * time.Second, + MinimumTimeout: 3 * time.Second, + MaximumTimeout: 5 * time.Minute, + TimeoutCoefficient: 1, + TimeoutHalflife: 5 * time.Minute, + }, + benchlist.NewNoBenchlist(), + "timeoutManager", + prometheus.NewRegistry(), + ) + require.NoError(t, err) + go tm.Dispatch() + + // Create chain router + nodeID := ids.GenerateTestNodeID() + chainRouter := ChainRouter{} + err = chainRouter.Initialize( + nodeID, + logging.NoLog{}, + tm, + time.Millisecond, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + // Set up validators + vdrs := validators.NewSet() + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + + // Create bootstrapper, engine and 
handler + requester := snow.DefaultConsensusContextTest() + requester.ChainID = ids.GenerateTestID() + requester.Registerer = prometheus.NewRegistry() + requester.Metrics = metrics.NewOptionalGatherer() + requester.Executing.Set(false) + + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(t, err) + + requesterHandler, err := handler.New( + requester, + vdrs, + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(requester.NodeID, subnets.Config{}), + ) + require.NoError(t, err) + + responder := snow.DefaultConsensusContextTest() + responder.ChainID = ids.GenerateTestID() + responder.Registerer = prometheus.NewRegistry() + responder.Metrics = metrics.NewOptionalGatherer() + responder.Executing.Set(false) + + responderHandler, err := handler.New( + responder, + vdrs, + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(responder.NodeID, subnets.Config{}), + ) + require.NoError(t, err) + + // assumed bootstrapping is done + responder.State.Set(snow.EngineState{ + Type: engineType, + State: snow.NormalOp, + }) + requester.State.Set(snow.EngineState{ + Type: engineType, + State: snow.NormalOp, + }) + + // router tracks two chains - one will send a message to the other + chainRouter.AddChain(context.Background(), requesterHandler) + chainRouter.AddChain(context.Background(), responderHandler) + + // Each chain should start off with a connected message + require.Equal(t, 1, chainRouter.chainHandlers[requester.ChainID].Len()) + require.Equal(t, 1, chainRouter.chainHandlers[responder.ChainID].Len()) + + // Requester sends a request to the responder + msgBytes := []byte("foobar") + msg := message.InternalCrossChainAppRequest( + requester.NodeID, + requester.ChainID, + responder.ChainID, + uint32(1), + time.Minute, + msgBytes, + ) + 
chainRouter.HandleInbound(context.Background(), msg) + require.Equal(t, 2, chainRouter.chainHandlers[responder.ChainID].Len()) + + // We register the cross-chain response on the requester-side so we don't + // drop it. + chainRouter.RegisterRequest( + context.Background(), + nodeID, + requester.ChainID, + responder.ChainID, + uint32(1), + message.CrossChainAppResponseOp, + message.InternalCrossChainAppRequestFailed( + nodeID, + responder.ChainID, + requester.ChainID, + uint32(1), + ), + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + ) + // Responder sends a response back to the requester. + msg = message.InternalCrossChainAppResponse( + nodeID, + responder.ChainID, + requester.ChainID, + uint32(1), + msgBytes, + ) + chainRouter.HandleInbound(context.Background(), msg) + require.Equal(t, 2, chainRouter.chainHandlers[requester.ChainID].Len()) +} + +func TestConnectedSubnet(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() - // remove it from validators - err = vdrs.Set(validators.NewSet().List()) + tm, err := timeout.NewManager( + &timer.AdaptiveTimeoutConfig{ + InitialTimeout: 3 * time.Second, + MinimumTimeout: 3 * time.Second, + MaximumTimeout: 5 * time.Minute, + TimeoutCoefficient: 1, + TimeoutHalflife: 5 * time.Minute, + }, + benchlist.NewNoBenchlist(), + "timeoutManager", + prometheus.NewRegistry(), + ) + require.NoError(t, err) + go tm.Dispatch() + + // Create chain router + myNodeID := ids.GenerateTestNodeID() + peerNodeID := ids.GenerateTestNodeID() + subnetID0 := ids.GenerateTestID() + subnetID1 := ids.GenerateTestID() + trackedSubnets := set.Set[ids.ID]{} + trackedSubnets.Add(subnetID0, subnetID1) + chainRouter := ChainRouter{} + err = chainRouter.Initialize( + myNodeID, + logging.NoLog{}, + tm, + time.Millisecond, + set.Set[ids.ID]{}, + true, + trackedSubnets, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + ) require.NoError(t, err) - inMsg = mc.InboundPut(ctx.ChainID, reqID, nil, nID) - chainRouter.HandleInbound(inMsg) + // 
Create bootstrapper, engine and handler + platform := snow.DefaultConsensusContextTest() + platform.ChainID = constants.PlatformChainID + platform.SubnetID = constants.PrimaryNetworkID + platform.Registerer = prometheus.NewRegistry() + platform.Metrics = metrics.NewOptionalGatherer() + platform.Executing.Set(false) + platform.State.Set(snow.EngineState{ + Type: engineType, + State: snow.NormalOp, + }) + + myConnectedMsg := handler.Message{ + InboundMessage: message.InternalConnected(myNodeID, version.CurrentApp), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + } + mySubnetConnectedMsg0 := handler.Message{ + InboundMessage: message.InternalConnectedSubnet(myNodeID, subnetID0), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + } + mySubnetConnectedMsg1 := handler.Message{ + InboundMessage: message.InternalConnectedSubnet(myNodeID, subnetID1), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + } - // shouldn't clear out timed request, as the request should be cleared when - // the GetFailed message is sent - require.Equal(t, 1, chainRouter.timedRequests.Len()) + platformHandler := handler.NewMockHandler(ctrl) + platformHandler.EXPECT().Context().Return(platform).AnyTimes() + platformHandler.EXPECT().SetOnStopped(gomock.Any()).AnyTimes() + platformHandler.EXPECT().Push(gomock.Any(), myConnectedMsg).Times(1) + platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg0).Times(1) + platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg1).Times(1) + + chainRouter.AddChain(context.Background(), platformHandler) + + peerConnectedMsg := handler.Message{ + InboundMessage: message.InternalConnected(peerNodeID, version.CurrentApp), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + } + platformHandler.EXPECT().Push(gomock.Any(), peerConnectedMsg).Times(1) + chainRouter.Connected(peerNodeID, version.CurrentApp, constants.PrimaryNetworkID) + + peerSubnetConnectedMsg0 := handler.Message{ + InboundMessage: 
message.InternalConnectedSubnet(peerNodeID, subnetID0), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + } + platformHandler.EXPECT().Push(gomock.Any(), peerSubnetConnectedMsg0).Times(1) + chainRouter.Connected(peerNodeID, version.CurrentApp, subnetID0) + + myDisconnectedMsg := handler.Message{ + InboundMessage: message.InternalDisconnected(myNodeID), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + } + platformHandler.EXPECT().Push(gomock.Any(), myDisconnectedMsg).Times(1) + chainRouter.Benched(constants.PlatformChainID, myNodeID) + + peerDisconnectedMsg := handler.Message{ + InboundMessage: message.InternalDisconnected(peerNodeID), + EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + } + platformHandler.EXPECT().Push(gomock.Any(), peerDisconnectedMsg).Times(1) + chainRouter.Benched(constants.PlatformChainID, peerNodeID) + + platformHandler.EXPECT().Push(gomock.Any(), myConnectedMsg).Times(1) + platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg0).Times(1) + platformHandler.EXPECT().Push(gomock.Any(), mySubnetConnectedMsg1).Times(1) + + chainRouter.Unbenched(constants.PlatformChainID, myNodeID) + + platformHandler.EXPECT().Push(gomock.Any(), peerConnectedMsg).Times(1) + platformHandler.EXPECT().Push(gomock.Any(), peerSubnetConnectedMsg0).Times(1) + + chainRouter.Unbenched(constants.PlatformChainID, peerNodeID) + + platformHandler.EXPECT().Push(gomock.Any(), peerDisconnectedMsg).Times(1) + chainRouter.Disconnected(peerNodeID) +} + +func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { + // Create a timeout manager + maxTimeout := 25 * time.Millisecond + tm, err := timeout.NewManager( + &timer.AdaptiveTimeoutConfig{ + InitialTimeout: 10 * time.Millisecond, + MinimumTimeout: 10 * time.Millisecond, + MaximumTimeout: maxTimeout, + TimeoutCoefficient: 1, + TimeoutHalflife: 5 * time.Minute, + }, + benchlist.NewNoBenchlist(), + "", + prometheus.NewRegistry(), + ) + require.NoError(t, err) + go tm.Dispatch() + + // Create a router + 
chainRouter := ChainRouter{} + err = chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Millisecond, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + HealthConfig{}, + "", + prometheus.NewRegistry(), + ) + require.NoError(t, err) + + // Create bootstrapper, engine and handler + calledF := false + wg := sync.WaitGroup{} + + ctx := snow.DefaultConsensusContextTest() + allowedID := ids.GenerateTestNodeID() + allowedSet := set.NewSet[ids.NodeID](1) + allowedSet.Add(allowedID) + sb := subnets.New(ctx.NodeID, subnets.Config{ValidatorOnly: true, AllowedNodes: allowedSet}) + + vdrs := validators.NewSet() + vID := ids.GenerateTestNodeID() + err = vdrs.Add(vID, nil, ids.Empty, 1) + require.NoError(t, err) + + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(t, err) + + h, err := handler.New( + ctx, + vdrs, + nil, + time.Second, + testThreadPoolSize, + resourceTracker, + validators.UnhandledSubnetConnector, + sb, + ) + require.NoError(t, err) + + bootstrapper := &common.BootstrapperTest{ + BootstrapableTest: common.BootstrapableTest{ + T: t, + }, + EngineTest: common.EngineTest{ + T: t, + }, + } + bootstrapper.Default(false) + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + bootstrapper.PullQueryF = func(context.Context, ids.NodeID, uint32, ids.ID) error { + defer wg.Done() + calledF = true + return nil + } + ctx.State.Set(snow.EngineState{ + Type: engineType, + State: snow.Bootstrapping, // assumed bootstrapping is ongoing + }) + engine := &common.EngineTest{T: t} + engine.ContextF = func() *snow.ConsensusContext { + return ctx + } + engine.Default(false) + + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) + + chainRouter.AddChain(context.Background(), h) + + bootstrapper.StartF = func(context.Context, uint32) error 
{ + return nil + } + h.Start(context.Background(), false) + + var inMsg message.InboundMessage + dummyContainerID := ids.GenerateTestID() + reqID := uint32(0) + + // Non-validator case + nID := ids.GenerateTestNodeID() + + calledF = false + inMsg = message.InboundPullQuery( + ctx.ChainID, + reqID, + time.Hour, + dummyContainerID, + nID, + engineType, + ) + chainRouter.HandleInbound(context.Background(), inMsg) + + require.False(t, calledF) // should not be called for unallowed node ID + + // Allowed NodeID case + calledF = false + reqID++ + inMsg = message.InboundPullQuery( + ctx.ChainID, + reqID, + time.Hour, + dummyContainerID, + allowedID, + engineType, + ) + wg.Add(1) + chainRouter.HandleInbound(context.Background(), inMsg) + + wg.Wait() + require.True(t, calledF) // should be called since this is a allowed node request + + // Validator case + calledF = false + reqID++ + inMsg = message.InboundPullQuery( + ctx.ChainID, + reqID, + time.Hour, + dummyContainerID, + vID, + engineType, + ) + wg.Add(1) + chainRouter.HandleInbound(context.Background(), inMsg) + + wg.Wait() + require.True(t, calledF) // should be called since this is a validator request } diff --git a/avalanchego/snow/networking/router/health.go b/avalanchego/snow/networking/router/health.go index 8721de6a..d678f0f1 100644 --- a/avalanchego/snow/networking/router/health.go +++ b/avalanchego/snow/networking/router/health.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router diff --git a/avalanchego/snow/networking/router/inbound_handler.go b/avalanchego/snow/networking/router/inbound_handler.go index 5c5aa6f5..cfd6d5fa 100644 --- a/avalanchego/snow/networking/router/inbound_handler.go +++ b/avalanchego/snow/networking/router/inbound_handler.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package router import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/version" @@ -13,16 +15,16 @@ var _ InboundHandler = InboundHandlerFunc(nil) // InboundHandler handles inbound messages type InboundHandler interface { - HandleInbound(msg message.InboundMessage) + HandleInbound(context.Context, message.InboundMessage) } // The ExternalRouterFunc type is an adapter to allow the use of ordinary // functions as ExternalRouters. If f is a function with the appropriate // signature, ExternalRouterFunc(f) is an ExternalRouter that calls f. -type InboundHandlerFunc func(msg message.InboundMessage) +type InboundHandlerFunc func(context.Context, message.InboundMessage) -func (f InboundHandlerFunc) HandleInbound(msg message.InboundMessage) { - f(msg) +func (f InboundHandlerFunc) HandleInbound(ctx context.Context, msg message.InboundMessage) { + f(ctx, msg) } // ExternalHandler handles messages from external parties diff --git a/avalanchego/snow/networking/router/mock_router.go b/avalanchego/snow/networking/router/mock_router.go new file mode 100644 index 00000000..60d1d393 --- /dev/null +++ b/avalanchego/snow/networking/router/mock_router.go @@ -0,0 +1,173 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/networking/router (interfaces: Router) + +// Package router is a generated GoMock package. 
+package router + +import ( + context "context" + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + message "github.com/ava-labs/avalanchego/message" + p2p "github.com/ava-labs/avalanchego/proto/pb/p2p" + handler "github.com/ava-labs/avalanchego/snow/networking/handler" + timeout "github.com/ava-labs/avalanchego/snow/networking/timeout" + logging "github.com/ava-labs/avalanchego/utils/logging" + set "github.com/ava-labs/avalanchego/utils/set" + version "github.com/ava-labs/avalanchego/version" + gomock "github.com/golang/mock/gomock" + prometheus "github.com/prometheus/client_golang/prometheus" +) + +// MockRouter is a mock of Router interface. +type MockRouter struct { + ctrl *gomock.Controller + recorder *MockRouterMockRecorder +} + +// MockRouterMockRecorder is the mock recorder for MockRouter. +type MockRouterMockRecorder struct { + mock *MockRouter +} + +// NewMockRouter creates a new mock instance. +func NewMockRouter(ctrl *gomock.Controller) *MockRouter { + mock := &MockRouter{ctrl: ctrl} + mock.recorder = &MockRouterMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRouter) EXPECT() *MockRouterMockRecorder { + return m.recorder +} + +// AddChain mocks base method. +func (m *MockRouter) AddChain(arg0 context.Context, arg1 handler.Handler) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddChain", arg0, arg1) +} + +// AddChain indicates an expected call of AddChain. +func (mr *MockRouterMockRecorder) AddChain(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddChain", reflect.TypeOf((*MockRouter)(nil).AddChain), arg0, arg1) +} + +// Benched mocks base method. +func (m *MockRouter) Benched(arg0 ids.ID, arg1 ids.NodeID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Benched", arg0, arg1) +} + +// Benched indicates an expected call of Benched. 
+func (mr *MockRouterMockRecorder) Benched(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Benched", reflect.TypeOf((*MockRouter)(nil).Benched), arg0, arg1) +} + +// Connected mocks base method. +func (m *MockRouter) Connected(arg0 ids.NodeID, arg1 *version.Application, arg2 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Connected", arg0, arg1, arg2) +} + +// Connected indicates an expected call of Connected. +func (mr *MockRouterMockRecorder) Connected(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Connected", reflect.TypeOf((*MockRouter)(nil).Connected), arg0, arg1, arg2) +} + +// Disconnected mocks base method. +func (m *MockRouter) Disconnected(arg0 ids.NodeID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Disconnected", arg0) +} + +// Disconnected indicates an expected call of Disconnected. +func (mr *MockRouterMockRecorder) Disconnected(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnected", reflect.TypeOf((*MockRouter)(nil).Disconnected), arg0) +} + +// HandleInbound mocks base method. +func (m *MockRouter) HandleInbound(arg0 context.Context, arg1 message.InboundMessage) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "HandleInbound", arg0, arg1) +} + +// HandleInbound indicates an expected call of HandleInbound. +func (mr *MockRouterMockRecorder) HandleInbound(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleInbound", reflect.TypeOf((*MockRouter)(nil).HandleInbound), arg0, arg1) +} + +// HealthCheck mocks base method. 
+func (m *MockRouter) HealthCheck(arg0 context.Context) (interface{}, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HealthCheck", arg0) + ret0, _ := ret[0].(interface{}) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HealthCheck indicates an expected call of HealthCheck. +func (mr *MockRouterMockRecorder) HealthCheck(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HealthCheck", reflect.TypeOf((*MockRouter)(nil).HealthCheck), arg0) +} + +// Initialize mocks base method. +func (m *MockRouter) Initialize(arg0 ids.NodeID, arg1 logging.Logger, arg2 timeout.Manager, arg3 time.Duration, arg4 set.Set[ids.ID], arg5 bool, arg6 set.Set[ids.ID], arg7 func(int), arg8 HealthConfig, arg9 string, arg10 prometheus.Registerer) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Initialize", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) + ret0, _ := ret[0].(error) + return ret0 +} + +// Initialize indicates an expected call of Initialize. +func (mr *MockRouterMockRecorder) Initialize(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockRouter)(nil).Initialize), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10) +} + +// RegisterRequest mocks base method. +func (m *MockRouter) RegisterRequest(arg0 context.Context, arg1 ids.NodeID, arg2, arg3 ids.ID, arg4 uint32, arg5 message.Op, arg6 message.InboundMessage, arg7 p2p.EngineType) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RegisterRequest", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +} + +// RegisterRequest indicates an expected call of RegisterRequest. 
+func (mr *MockRouterMockRecorder) RegisterRequest(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRequest", reflect.TypeOf((*MockRouter)(nil).RegisterRequest), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) +} + +// Shutdown mocks base method. +func (m *MockRouter) Shutdown(arg0 context.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Shutdown", arg0) +} + +// Shutdown indicates an expected call of Shutdown. +func (mr *MockRouterMockRecorder) Shutdown(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Shutdown", reflect.TypeOf((*MockRouter)(nil).Shutdown), arg0) +} + +// Unbenched mocks base method. +func (m *MockRouter) Unbenched(arg0 ids.ID, arg1 ids.NodeID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Unbenched", arg0, arg1) +} + +// Unbenched indicates an expected call of Unbenched. +func (mr *MockRouterMockRecorder) Unbenched(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Unbenched", reflect.TypeOf((*MockRouter)(nil).Unbenched), arg0, arg1) +} diff --git a/avalanchego/snow/networking/router/router.go b/avalanchego/snow/networking/router/router.go index 8eac6d5f..dca02f39 100644 --- a/avalanchego/snow/networking/router/router.go +++ b/avalanchego/snow/networking/router/router.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package router import ( + "context" "time" "github.com/prometheus/client_golang/prometheus" @@ -11,10 +12,12 @@ import ( "github.com/ava-labs/avalanchego/api/health" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/snow/networking/handler" "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" ) // Router routes consensus messages to the Handler of the consensus @@ -26,18 +29,18 @@ type Router interface { Initialize( nodeID ids.NodeID, log logging.Logger, - msgCreator message.InternalMsgBuilder, timeouts timeout.Manager, shutdownTimeout time.Duration, - criticalChains ids.Set, - whiteListedSubnets ids.Set, + criticalChains set.Set[ids.ID], + stakingEnabled bool, + trackedSubnets set.Set[ids.ID], onFatal func(exitCode int), healthConfig HealthConfig, metricsNamespace string, metricsRegisterer prometheus.Registerer, ) error - Shutdown() - AddChain(chain handler.Handler) + Shutdown(context.Context) + AddChain(ctx context.Context, chain handler.Handler) health.Checker } @@ -46,9 +49,13 @@ type InternalHandler interface { benchlist.Benchable RegisterRequest( + ctx context.Context, nodeID ids.NodeID, - chainID ids.ID, + sourceChainID ids.ID, + destinationChainID ids.ID, requestID uint32, op message.Op, + failedMsg message.InboundMessage, + engineType p2p.EngineType, ) } diff --git a/avalanchego/snow/networking/router/traced_router.go b/avalanchego/snow/networking/router/traced_router.go new file mode 100644 index 00000000..13e3c1bf --- /dev/null +++ b/avalanchego/snow/networking/router/traced_router.go @@ -0,0 +1,155 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package router + +import ( + "context" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" + "github.com/ava-labs/avalanchego/snow/networking/handler" + "github.com/ava-labs/avalanchego/snow/networking/timeout" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +var _ Router = (*tracedRouter)(nil) + +type tracedRouter struct { + router Router + tracer trace.Tracer +} + +func Trace(router Router, tracer trace.Tracer) Router { + return &tracedRouter{ + router: router, + tracer: tracer, + } +} + +func (r *tracedRouter) Initialize( + nodeID ids.NodeID, + log logging.Logger, + timeoutManager timeout.Manager, + closeTimeout time.Duration, + criticalChains set.Set[ids.ID], + stakingEnabled bool, + trackedSubnets set.Set[ids.ID], + onFatal func(exitCode int), + healthConfig HealthConfig, + metricsNamespace string, + metricsRegisterer prometheus.Registerer, +) error { + return r.router.Initialize( + nodeID, + log, + timeoutManager, + closeTimeout, + criticalChains, + stakingEnabled, + trackedSubnets, + onFatal, + healthConfig, + metricsNamespace, + metricsRegisterer, + ) +} + +func (r *tracedRouter) RegisterRequest( + ctx context.Context, + nodeID ids.NodeID, + requestingChainID ids.ID, + respondingChainID ids.ID, + requestID uint32, + op message.Op, + failedMsg message.InboundMessage, + engineType p2p.EngineType, +) { + r.router.RegisterRequest( + ctx, + nodeID, + requestingChainID, + respondingChainID, + requestID, + op, + failedMsg, + engineType, + ) +} + +func (r *tracedRouter) HandleInbound(ctx context.Context, msg message.InboundMessage) { + m := msg.Message() + destinationChainID, err := 
message.GetChainID(m) + if err != nil { + r.router.HandleInbound(ctx, msg) + return + } + + sourceChainID, err := message.GetSourceChainID(m) + if err != nil { + r.router.HandleInbound(ctx, msg) + return + } + + ctx, span := r.tracer.Start(ctx, "tracedRouter.HandleInbound", oteltrace.WithAttributes( + attribute.Stringer("nodeID", msg.NodeID()), + attribute.Stringer("messageOp", msg.Op()), + attribute.Stringer("chainID", destinationChainID), + attribute.Stringer("sourceChainID", sourceChainID), + )) + defer span.End() + + r.router.HandleInbound(ctx, msg) +} + +func (r *tracedRouter) Shutdown(ctx context.Context) { + ctx, span := r.tracer.Start(ctx, "tracedRouter.Shutdown") + defer span.End() + + r.router.Shutdown(ctx) +} + +func (r *tracedRouter) AddChain(ctx context.Context, chain handler.Handler) { + chainCtx := chain.Context() + ctx, span := r.tracer.Start(ctx, "tracedRouter.AddChain", oteltrace.WithAttributes( + attribute.Stringer("subnetID", chainCtx.SubnetID), + attribute.Stringer("chainID", chainCtx.ChainID), + )) + defer span.End() + + r.router.AddChain(ctx, chain) +} + +func (r *tracedRouter) Connected(nodeID ids.NodeID, nodeVersion *version.Application, subnetID ids.ID) { + r.router.Connected(nodeID, nodeVersion, subnetID) +} + +func (r *tracedRouter) Disconnected(nodeID ids.NodeID) { + r.router.Disconnected(nodeID) +} + +func (r *tracedRouter) Benched(chainID ids.ID, nodeID ids.NodeID) { + r.router.Benched(chainID, nodeID) +} + +func (r *tracedRouter) Unbenched(chainID ids.ID, nodeID ids.NodeID) { + r.router.Unbenched(chainID, nodeID) +} + +func (r *tracedRouter) HealthCheck(ctx context.Context) (interface{}, error) { + ctx, span := r.tracer.Start(ctx, "tracedRouter.HealthCheck") + defer span.End() + + return r.router.HealthCheck(ctx) +} diff --git a/avalanchego/snow/networking/sender/external_sender.go b/avalanchego/snow/networking/sender/external_sender.go index e772b97b..72d9539d 100644 --- a/avalanchego/snow/networking/sender/external_sender.go +++ 
b/avalanchego/snow/networking/sender/external_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sender @@ -6,6 +6,8 @@ package sender import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/set" ) // ExternalSender sends consensus messages to other validators @@ -14,19 +16,19 @@ type ExternalSender interface { // Send a message to a specific set of nodes Send( msg message.OutboundMessage, - nodeIDs ids.NodeIDSet, + nodeIDs set.Set[ids.NodeID], subnetID ids.ID, - validatorOnly bool, - ) ids.NodeIDSet + allower subnets.Allower, + ) set.Set[ids.NodeID] // Send a message to a random group of nodes in a subnet. // Nodes are sampled based on their validator status. Gossip( msg message.OutboundMessage, subnetID ids.ID, - validatorOnly bool, numValidatorsToSend int, numNonValidatorsToSend int, numPeersToSend int, - ) ids.NodeIDSet + allower subnets.Allower, + ) set.Set[ids.NodeID] } diff --git a/avalanchego/snow/networking/sender/mock_external_sender.go b/avalanchego/snow/networking/sender/mock_external_sender.go new file mode 100644 index 00000000..322a7d17 --- /dev/null +++ b/avalanchego/snow/networking/sender/mock_external_sender.go @@ -0,0 +1,69 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/networking/sender (interfaces: ExternalSender) + +// Package sender is a generated GoMock package. 
+package sender + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + message "github.com/ava-labs/avalanchego/message" + subnets "github.com/ava-labs/avalanchego/subnets" + set "github.com/ava-labs/avalanchego/utils/set" + gomock "github.com/golang/mock/gomock" +) + +// MockExternalSender is a mock of ExternalSender interface. +type MockExternalSender struct { + ctrl *gomock.Controller + recorder *MockExternalSenderMockRecorder +} + +// MockExternalSenderMockRecorder is the mock recorder for MockExternalSender. +type MockExternalSenderMockRecorder struct { + mock *MockExternalSender +} + +// NewMockExternalSender creates a new mock instance. +func NewMockExternalSender(ctrl *gomock.Controller) *MockExternalSender { + mock := &MockExternalSender{ctrl: ctrl} + mock.recorder = &MockExternalSenderMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExternalSender) EXPECT() *MockExternalSenderMockRecorder { + return m.recorder +} + +// Gossip mocks base method. +func (m *MockExternalSender) Gossip(arg0 message.OutboundMessage, arg1 ids.ID, arg2, arg3, arg4 int, arg5 subnets.Allower) set.Set[ids.NodeID] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Gossip", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(set.Set[ids.NodeID]) + return ret0 +} + +// Gossip indicates an expected call of Gossip. +func (mr *MockExternalSenderMockRecorder) Gossip(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Gossip", reflect.TypeOf((*MockExternalSender)(nil).Gossip), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// Send mocks base method. 
+func (m *MockExternalSender) Send(arg0 message.OutboundMessage, arg1 set.Set[ids.NodeID], arg2 ids.ID, arg3 subnets.Allower) set.Set[ids.NodeID] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Send", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(set.Set[ids.NodeID]) + return ret0 +} + +// Send indicates an expected call of Send. +func (mr *MockExternalSenderMockRecorder) Send(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Send", reflect.TypeOf((*MockExternalSender)(nil).Send), arg0, arg1, arg2, arg3) +} diff --git a/avalanchego/snow/networking/sender/sender.go b/avalanchego/snow/networking/sender/sender.go index e58db4d7..014c4b8a 100644 --- a/avalanchego/snow/networking/sender/sender.go +++ b/avalanchego/snow/networking/sender/sender.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sender import ( + "context" "fmt" - "time" "github.com/prometheus/client_golang/prometheus" @@ -13,27 +13,18 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/router" "github.com/ava-labs/avalanchego/snow/networking/timeout" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/set" ) -var _ common.Sender = &sender{} - -type GossipConfig struct { - AcceptedFrontierValidatorSize uint `json:"gossipAcceptedFrontierValidatorSize" yaml:"gossipAcceptedFrontierValidatorSize"` - AcceptedFrontierNonValidatorSize uint `json:"gossipAcceptedFrontierNonValidatorSize" yaml:"gossipAcceptedFrontierNonValidatorSize"` - AcceptedFrontierPeerSize uint `json:"gossipAcceptedFrontierPeerSize" yaml:"gossipAcceptedFrontierPeerSize"` - OnAcceptValidatorSize uint `json:"gossipOnAcceptValidatorSize" yaml:"gossipOnAcceptValidatorSize"` - OnAcceptNonValidatorSize uint `json:"gossipOnAcceptNonValidatorSize" yaml:"gossipOnAcceptNonValidatorSize"` - OnAcceptPeerSize uint `json:"gossipOnAcceptPeerSize" yaml:"gossipOnAcceptPeerSize"` - AppGossipValidatorSize uint `json:"appGossipValidatorSize" yaml:"appGossipValidatorSize"` - AppGossipNonValidatorSize uint `json:"appGossipNonValidatorSize" yaml:"appGossipNonValidatorSize"` - AppGossipPeerSize uint `json:"appGossipPeerSize" yaml:"appGossipPeerSize"` -} +var _ common.Sender = (*sender)(nil) // sender is a wrapper around an ExternalSender. 
// Messages to this node are put directly into [router] rather than @@ -41,46 +32,38 @@ type GossipConfig struct { // sender registers outbound requests with [router] so that [router] // fires a timeout if we don't get a response to the request. type sender struct { - ctx *snow.ConsensusContext - msgCreator message.Creator - msgCreatorWithProto message.Creator - - // TODO: remove this once we complete banff migration - banffTime time.Time - - clock mockable.Clock + ctx *snow.ConsensusContext + msgCreator message.OutboundMsgBuilder sender ExternalSender // Actually does the sending over the network router router.Router timeouts timeout.Manager - gossipConfig GossipConfig - // Request message type --> Counts how many of that request // have failed because the node was benched failedDueToBench map[message.Op]prometheus.Counter + engineType p2p.EngineType + subnet subnets.Subnet } func New( ctx *snow.ConsensusContext, - msgCreator message.Creator, - msgCreatorWithProto message.Creator, - banffTime time.Time, + msgCreator message.OutboundMsgBuilder, externalSender ExternalSender, router router.Router, timeouts timeout.Manager, - gossipConfig GossipConfig, + engineType p2p.EngineType, + subnet subnets.Subnet, ) (common.Sender, error) { s := &sender{ - ctx: ctx, - msgCreator: msgCreator, - msgCreatorWithProto: msgCreatorWithProto, - banffTime: banffTime, - sender: externalSender, - router: router, - timeouts: timeouts, - gossipConfig: gossipConfig, - failedDueToBench: make(map[message.Op]prometheus.Counter, len(message.ConsensusRequestOps)), + ctx: ctx, + msgCreator: msgCreator, + sender: externalSender, + router: router, + timeouts: timeouts, + failedDueToBench: make(map[message.Op]prometheus.Counter, len(message.ConsensusRequestOps)), + engineType: engineType, + subnet: subnet, } for _, op := range message.ConsensusRequestOps { @@ -90,23 +73,28 @@ func New( Help: fmt.Sprintf("# of times a %s request was not sent because the node was benched", op), }, ) - if err := 
ctx.Registerer.Register(counter); err != nil { - return nil, fmt.Errorf("couldn't register metric for %s: %w", op, err) + + switch engineType { + case p2p.EngineType_ENGINE_TYPE_SNOWMAN: + if err := ctx.Registerer.Register(counter); err != nil { + return nil, fmt.Errorf("couldn't register metric for %s: %w", op, err) + } + case p2p.EngineType_ENGINE_TYPE_AVALANCHE: + if err := ctx.AvalancheRegisterer.Register(counter); err != nil { + return nil, fmt.Errorf("couldn't register metric for %s: %w", op, err) + } + default: + return nil, fmt.Errorf("unknown engine type %s", engineType) } + s.failedDueToBench[op] = counter } return s, nil } -func (s *sender) getMsgCreator() message.Creator { - now := s.clock.Time() - if now.Before(s.banffTime) { - return s.msgCreator - } - return s.msgCreatorWithProto -} +func (s *sender) SendGetStateSummaryFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { + ctx = utils.Detach(ctx) -func (s *sender) SendGetStateSummaryFrontier(nodeIDs ids.NodeIDSet, requestID uint32) { // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. deadline := s.timeouts.TimeoutDuration() @@ -117,29 +105,55 @@ func (s *sender) SendGetStateSummaryFrontier(nodeIDs ids.NodeIDSet, requestID ui // to send them a message, to avoid busy looping when disconnected from // the internet. for nodeID := range nodeIDs { - s.router.RegisterRequest(nodeID, s.ctx.ChainID, requestID, message.StateSummaryFrontier) + inMsg := message.InternalGetStateSummaryFrontierFailed( + nodeID, + s.ctx.ChainID, + requestID, + ) + s.router.RegisterRequest( + ctx, + nodeID, + s.ctx.ChainID, + s.ctx.ChainID, + requestID, + message.StateSummaryFrontierOp, + inMsg, + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + ) } - msgCreator := s.getMsgCreator() - // Sending a message to myself. No need to send it over the network. // Just put it right into the router. Asynchronously to avoid deadlock. 
if nodeIDs.Contains(s.ctx.NodeID) { nodeIDs.Remove(s.ctx.NodeID) - inMsg := msgCreator.InboundGetStateSummaryFrontier(s.ctx.ChainID, requestID, deadline, s.ctx.NodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundGetStateSummaryFrontier( + s.ctx.ChainID, + requestID, + deadline, + s.ctx.NodeID, + ) + go s.router.HandleInbound(ctx, inMsg) } // Create the outbound message. - outMsg, err := msgCreator.GetStateSummaryFrontier(s.ctx.ChainID, requestID, deadline) + outMsg, err := s.msgCreator.GetStateSummaryFrontier( + s.ctx.ChainID, + requestID, + deadline, + ) // Send the message over the network. - var sentTo ids.NodeIDSet + var sentTo set.Set[ids.NodeID] if err == nil { - sentTo = s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()) + sentTo = s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) } else { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.GetStateSummaryFrontier), + zap.Stringer("messageOp", message.GetStateSummaryFrontierOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Duration("deadline", deadline), @@ -150,7 +164,7 @@ func (s *sender) SendGetStateSummaryFrontier(nodeIDs ids.NodeIDSet, requestID ui for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.GetStateSummaryFrontier), + zap.Stringer("messageOp", message.GetStateSummaryFrontierOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -159,21 +173,30 @@ func (s *sender) SendGetStateSummaryFrontier(nodeIDs ids.NodeIDSet, requestID ui } } -func (s *sender) SendStateSummaryFrontier(nodeID ids.NodeID, requestID uint32, summary []byte) { - msgCreator := s.getMsgCreator() +func (s *sender) SendStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) { + ctx = utils.Detach(ctx) // Sending this message to 
myself. if nodeID == s.ctx.NodeID { - inMsg := msgCreator.InboundStateSummaryFrontier(s.ctx.ChainID, requestID, summary, nodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundStateSummaryFrontier( + s.ctx.ChainID, + requestID, + summary, + nodeID, + ) + go s.router.HandleInbound(ctx, inMsg) return } // Create the outbound message. - outMsg, err := msgCreator.StateSummaryFrontier(s.ctx.ChainID, requestID, summary) + outMsg, err := s.msgCreator.StateSummaryFrontier( + s.ctx.ChainID, + requestID, + summary, + ) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.StateSummaryFrontier), + zap.Stringer("messageOp", message.StateSummaryFrontierOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Binary("summaryBytes", summary), @@ -183,17 +206,23 @@ func (s *sender) SendStateSummaryFrontier(nodeID ids.NodeID, requestID uint32, s } // Send the message over the network. - nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.StateSummaryFrontier), + zap.Stringer("messageOp", message.StateSummaryFrontierOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), ) s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.StateSummaryFrontier), + zap.Stringer("messageOp", message.StateSummaryFrontierOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -202,7 +231,9 @@ func (s *sender) SendStateSummaryFrontier(nodeID ids.NodeID, requestID uint32, s } } -func (s *sender) SendGetAcceptedStateSummary(nodeIDs ids.NodeIDSet, requestID 
uint32, heights []uint64) { +func (s *sender) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, heights []uint64) { + ctx = utils.Detach(ctx) + // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. deadline := s.timeouts.TimeoutDuration() @@ -213,29 +244,57 @@ func (s *sender) SendGetAcceptedStateSummary(nodeIDs ids.NodeIDSet, requestID ui // to send them a message, to avoid busy looping when disconnected from // the internet. for nodeID := range nodeIDs { - s.router.RegisterRequest(nodeID, s.ctx.ChainID, requestID, message.AcceptedStateSummary) + inMsg := message.InternalGetAcceptedStateSummaryFailed( + nodeID, + s.ctx.ChainID, + requestID, + ) + s.router.RegisterRequest( + ctx, + nodeID, + s.ctx.ChainID, + s.ctx.ChainID, + requestID, + message.AcceptedStateSummaryOp, + inMsg, + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + ) } - msgCreator := s.getMsgCreator() - // Sending a message to myself. No need to send it over the network. // Just put it right into the router. Asynchronously to avoid deadlock. if nodeIDs.Contains(s.ctx.NodeID) { nodeIDs.Remove(s.ctx.NodeID) - inMsg := msgCreator.InboundGetAcceptedStateSummary(s.ctx.ChainID, requestID, heights, deadline, s.ctx.NodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundGetAcceptedStateSummary( + s.ctx.ChainID, + requestID, + heights, + deadline, + s.ctx.NodeID, + ) + go s.router.HandleInbound(ctx, inMsg) } // Create the outbound message. - outMsg, err := msgCreator.GetAcceptedStateSummary(s.ctx.ChainID, requestID, deadline, heights) + outMsg, err := s.msgCreator.GetAcceptedStateSummary( + s.ctx.ChainID, + requestID, + deadline, + heights, + ) // Send the message over the network. 
- var sentTo ids.NodeIDSet + var sentTo set.Set[ids.NodeID] if err == nil { - sentTo = s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()) + sentTo = s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) } else { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.GetAcceptedStateSummary), + zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Uint64s("heights", heights), @@ -246,7 +305,7 @@ func (s *sender) SendGetAcceptedStateSummary(nodeIDs ids.NodeIDSet, requestID ui for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.GetAcceptedStateSummary), + zap.Stringer("messageOp", message.GetAcceptedStateSummaryOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -256,43 +315,60 @@ func (s *sender) SendGetAcceptedStateSummary(nodeIDs ids.NodeIDSet, requestID ui } } -func (s *sender) SendAcceptedStateSummary(nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { - msgCreator := s.getMsgCreator() +func (s *sender) SendAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { + ctx = utils.Detach(ctx) if nodeID == s.ctx.NodeID { - inMsg := msgCreator.InboundAcceptedStateSummary(s.ctx.ChainID, requestID, summaryIDs, nodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundAcceptedStateSummary( + s.ctx.ChainID, + requestID, + summaryIDs, + nodeID, + ) + go s.router.HandleInbound(ctx, inMsg) return } // Create the outbound message. 
- outMsg, err := msgCreator.AcceptedStateSummary(s.ctx.ChainID, requestID, summaryIDs) + outMsg, err := s.msgCreator.AcceptedStateSummary( + s.ctx.ChainID, + requestID, + summaryIDs, + ) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.AcceptedStateSummary), + zap.Stringer("messageOp", message.AcceptedStateSummaryOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("summaryIDs", ids.SliceStringer(summaryIDs)), + zap.Stringers("summaryIDs", summaryIDs), zap.Error(err), ) return } // Send the message over the network. - nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AcceptedStateSummary), + zap.Stringer("messageOp", message.AcceptedStateSummaryOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("summaryIDs", ids.SliceStringer(summaryIDs)), + zap.Stringers("summaryIDs", summaryIDs), ) } } -func (s *sender) SendGetAcceptedFrontier(nodeIDs ids.NodeIDSet, requestID uint32) { +func (s *sender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { + ctx = utils.Detach(ctx) + // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. deadline := s.timeouts.TimeoutDuration() @@ -303,29 +379,58 @@ func (s *sender) SendGetAcceptedFrontier(nodeIDs ids.NodeIDSet, requestID uint32 // to send them a message, to avoid busy looping when disconnected from // the internet. 
for nodeID := range nodeIDs { - s.router.RegisterRequest(nodeID, s.ctx.ChainID, requestID, message.AcceptedFrontier) + inMsg := message.InternalGetAcceptedFrontierFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + s.router.RegisterRequest( + ctx, + nodeID, + s.ctx.ChainID, + s.ctx.ChainID, + requestID, + message.AcceptedFrontierOp, + inMsg, + s.engineType, + ) } - msgCreator := s.getMsgCreator() - // Sending a message to myself. No need to send it over the network. // Just put it right into the router. Asynchronously to avoid deadlock. if nodeIDs.Contains(s.ctx.NodeID) { nodeIDs.Remove(s.ctx.NodeID) - inMsg := msgCreator.InboundGetAcceptedFrontier(s.ctx.ChainID, requestID, deadline, s.ctx.NodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundGetAcceptedFrontier( + s.ctx.ChainID, + requestID, + deadline, + s.ctx.NodeID, + s.engineType, + ) + go s.router.HandleInbound(ctx, inMsg) } // Create the outbound message. - outMsg, err := msgCreator.GetAcceptedFrontier(s.ctx.ChainID, requestID, deadline) + outMsg, err := s.msgCreator.GetAcceptedFrontier( + s.ctx.ChainID, + requestID, + deadline, + s.engineType, + ) // Send the message over the network. 
- var sentTo ids.NodeIDSet + var sentTo set.Set[ids.NodeID] if err == nil { - sentTo = s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()) + sentTo = s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) } else { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.GetAcceptedFrontier), + zap.Stringer("messageOp", message.GetAcceptedFrontierOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Duration("deadline", deadline), @@ -336,7 +441,7 @@ func (s *sender) SendGetAcceptedFrontier(nodeIDs ids.NodeIDSet, requestID uint32 for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.GetAcceptedFrontier), + zap.Stringer("messageOp", message.GetAcceptedFrontierOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -345,44 +450,61 @@ func (s *sender) SendGetAcceptedFrontier(nodeIDs ids.NodeIDSet, requestID uint32 } } -func (s *sender) SendAcceptedFrontier(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { - msgCreator := s.getMsgCreator() +func (s *sender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { + ctx = utils.Detach(ctx) // Sending this message to myself. if nodeID == s.ctx.NodeID { - inMsg := msgCreator.InboundAcceptedFrontier(s.ctx.ChainID, requestID, containerIDs, nodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundAcceptedFrontier( + s.ctx.ChainID, + requestID, + containerIDs, + nodeID, + ) + go s.router.HandleInbound(ctx, inMsg) return } // Create the outbound message. 
- outMsg, err := msgCreator.AcceptedFrontier(s.ctx.ChainID, requestID, containerIDs) + outMsg, err := s.msgCreator.AcceptedFrontier( + s.ctx.ChainID, + requestID, + containerIDs, + ) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.AcceptedFrontier), + zap.Stringer("messageOp", message.AcceptedFrontierOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("containerIDs", ids.SliceStringer(containerIDs)), + zap.Stringers("containerIDs", containerIDs), zap.Error(err), ) return } // Send the message over the network. - nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AcceptedFrontier), + zap.Stringer("messageOp", message.AcceptedFrontierOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("containerIDs", ids.SliceStringer(containerIDs)), + zap.Stringers("containerIDs", containerIDs), ) } } -func (s *sender) SendGetAccepted(nodeIDs ids.NodeIDSet, requestID uint32, containerIDs []ids.ID) { +func (s *sender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerIDs []ids.ID) { + ctx = utils.Detach(ctx) + // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. deadline := s.timeouts.TimeoutDuration() @@ -393,32 +515,63 @@ func (s *sender) SendGetAccepted(nodeIDs ids.NodeIDSet, requestID uint32, contai // to send them a message, to avoid busy looping when disconnected from // the internet. 
for nodeID := range nodeIDs { - s.router.RegisterRequest(nodeID, s.ctx.ChainID, requestID, message.Accepted) + inMsg := message.InternalGetAcceptedFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + s.router.RegisterRequest( + ctx, + nodeID, + s.ctx.ChainID, + s.ctx.ChainID, + requestID, + message.AcceptedOp, + inMsg, + s.engineType, + ) } - msgCreator := s.getMsgCreator() - // Sending a message to myself. No need to send it over the network. // Just put it right into the router. Asynchronously to avoid deadlock. if nodeIDs.Contains(s.ctx.NodeID) { nodeIDs.Remove(s.ctx.NodeID) - inMsg := msgCreator.InboundGetAccepted(s.ctx.ChainID, requestID, deadline, containerIDs, s.ctx.NodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundGetAccepted( + s.ctx.ChainID, + requestID, + deadline, + containerIDs, + s.ctx.NodeID, + s.engineType, + ) + go s.router.HandleInbound(ctx, inMsg) } // Create the outbound message. - outMsg, err := msgCreator.GetAccepted(s.ctx.ChainID, requestID, deadline, containerIDs) + outMsg, err := s.msgCreator.GetAccepted( + s.ctx.ChainID, + requestID, + deadline, + containerIDs, + s.engineType, + ) // Send the message over the network. 
- var sentTo ids.NodeIDSet + var sentTo set.Set[ids.NodeID] if err == nil { - sentTo = s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()) + sentTo = s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) } else { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.GetAccepted), + zap.Stringer("messageOp", message.GetAcceptedOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("containerIDs", ids.SliceStringer(containerIDs)), + zap.Stringers("containerIDs", containerIDs), zap.Error(err), ) } @@ -426,73 +579,97 @@ func (s *sender) SendGetAccepted(nodeIDs ids.NodeIDSet, requestID uint32, contai for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.GetAccepted), + zap.Stringer("messageOp", message.GetAcceptedOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("containerIDs", ids.SliceStringer(containerIDs)), + zap.Stringers("containerIDs", containerIDs), ) } } } -func (s *sender) SendAccepted(nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { - msgCreator := s.getMsgCreator() +func (s *sender) SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { + ctx = utils.Detach(ctx) if nodeID == s.ctx.NodeID { - inMsg := msgCreator.InboundAccepted(s.ctx.ChainID, requestID, containerIDs, nodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundAccepted( + s.ctx.ChainID, + requestID, + containerIDs, + nodeID, + ) + go s.router.HandleInbound(ctx, inMsg) return } // Create the outbound message. 
- outMsg, err := msgCreator.Accepted(s.ctx.ChainID, requestID, containerIDs) + outMsg, err := s.msgCreator.Accepted(s.ctx.ChainID, requestID, containerIDs) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.Accepted), + zap.Stringer("messageOp", message.AcceptedOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("containerIDs", ids.SliceStringer(containerIDs)), + zap.Stringers("containerIDs", containerIDs), zap.Error(err), ) return } // Send the message over the network. - nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.Accepted), + zap.Stringer("messageOp", message.AcceptedOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("containerIDs", ids.SliceStringer(containerIDs)), + zap.Stringers("containerIDs", containerIDs), ) } } -func (s *sender) SendGetAncestors(nodeID ids.NodeID, requestID uint32, containerID ids.ID) { +func (s *sender) SendGetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { + ctx = utils.Detach(ctx) + // Tell the router to expect a response message or a message notifying // that we won't get a response from this node. 
- s.router.RegisterRequest(nodeID, s.ctx.ChainID, requestID, message.Ancestors) - - msgCreator := s.getMsgCreator() + inMsg := message.InternalGetAncestorsFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + s.router.RegisterRequest( + ctx, + nodeID, + s.ctx.ChainID, + s.ctx.ChainID, + requestID, + message.AncestorsOp, + inMsg, + s.engineType, + ) // Sending a GetAncestors to myself always fails. if nodeID == s.ctx.NodeID { - inMsg := msgCreator.InternalFailedRequest(message.GetAncestorsFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + go s.router.HandleInbound(ctx, inMsg) return } - // [nodeID] may be benched. That is, they've been unresponsive - // so we don't even bother sending requests to them. We just have them immediately fail. + // [nodeID] may be benched. That is, they've been unresponsive so we don't + // even bother sending requests to them. We just have them immediately fail. if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.GetAncestors].Inc() // update metric + s.failedDueToBench[message.GetAncestorsOp].Inc() // update metric s.timeouts.RegisterRequestToUnreachableValidator() - inMsg := msgCreator.InternalFailedRequest(message.GetAncestorsFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + go s.router.HandleInbound(ctx, inMsg) return } @@ -500,27 +677,38 @@ func (s *sender) SendGetAncestors(nodeID ids.NodeID, requestID uint32, container // registered. That's OK. deadline := s.timeouts.TimeoutDuration() // Create the outbound message. 
- outMsg, err := msgCreator.GetAncestors(s.ctx.ChainID, requestID, deadline, containerID) + outMsg, err := s.msgCreator.GetAncestors( + s.ctx.ChainID, + requestID, + deadline, + containerID, + s.engineType, + ) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.GetAncestors), + zap.Stringer("messageOp", message.GetAncestorsOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Stringer("containerID", containerID), zap.Error(err), ) - inMsg := msgCreator.InternalFailedRequest(message.GetAncestorsFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + go s.router.HandleInbound(ctx, inMsg) return } // Send the message over the network. - nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.GetAncestors), + zap.Stringer("messageOp", message.GetAncestorsOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -528,22 +716,19 @@ func (s *sender) SendGetAncestors(nodeID ids.NodeID, requestID uint32, container ) s.timeouts.RegisterRequestToUnreachableValidator() - inMsg := msgCreator.InternalFailedRequest(message.GetAncestorsFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + go s.router.HandleInbound(ctx, inMsg) } } -// SendAncestors sends an Ancestors message to the consensus engine running on the specified chain -// on the specified node. +// SendAncestors sends an Ancestors message to the consensus engine running on +// the specified chain on the specified node. // The Ancestors message gives the recipient the contents of several containers. 
-func (s *sender) SendAncestors(nodeID ids.NodeID, requestID uint32, containers [][]byte) { - msgCreator := s.getMsgCreator() - +func (s *sender) SendAncestors(_ context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) { // Create the outbound message. - outMsg, err := msgCreator.Ancestors(s.ctx.ChainID, requestID, containers) + outMsg, err := s.msgCreator.Ancestors(s.ctx.ChainID, requestID, containers) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.Ancestors), + zap.Stringer("messageOp", message.AncestorsOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Int("numContainers", len(containers)), @@ -553,11 +738,17 @@ func (s *sender) SendAncestors(nodeID ids.NodeID, requestID uint32, containers [ } // Send the message over the network. - nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.Ancestors), + zap.Stringer("messageOp", message.AncestorsOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -570,27 +761,40 @@ func (s *sender) SendAncestors(nodeID ids.NodeID, requestID uint32, containers [ // chain to the specified node. The Get message signifies that this // consensus engine would like the recipient to send this consensus engine the // specified container. 
-func (s *sender) SendGet(nodeID ids.NodeID, requestID uint32, containerID ids.ID) { +func (s *sender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { + ctx = utils.Detach(ctx) + // Tell the router to expect a response message or a message notifying // that we won't get a response from this node. - s.router.RegisterRequest(nodeID, s.ctx.ChainID, requestID, message.Put) - - msgCreator := s.getMsgCreator() + inMsg := message.InternalGetFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + s.router.RegisterRequest( + ctx, + nodeID, + s.ctx.ChainID, + s.ctx.ChainID, + requestID, + message.PutOp, + inMsg, + s.engineType, + ) // Sending a Get to myself always fails. if nodeID == s.ctx.NodeID { - inMsg := msgCreator.InternalFailedRequest(message.GetFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + go s.router.HandleInbound(ctx, inMsg) return } - // [nodeID] may be benched. That is, they've been unresponsive - // so we don't even bother sending requests to them. We just have them immediately fail. + // [nodeID] may be benched. That is, they've been unresponsive so we don't + // even bother sending requests to them. We just have them immediately fail. if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.Get].Inc() // update metric + s.failedDueToBench[message.GetOp].Inc() // update metric s.timeouts.RegisterRequestToUnreachableValidator() - inMsg := msgCreator.InternalFailedRequest(message.GetFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + go s.router.HandleInbound(ctx, inMsg) return } @@ -598,17 +802,28 @@ func (s *sender) SendGet(nodeID ids.NodeID, requestID uint32, containerID ids.ID // registered. That's OK. deadline := s.timeouts.TimeoutDuration() // Create the outbound message. 
- outMsg, err := msgCreator.Get(s.ctx.ChainID, requestID, deadline, containerID) + outMsg, err := s.msgCreator.Get( + s.ctx.ChainID, + requestID, + deadline, + containerID, + s.engineType, + ) // Send the message over the network. - var sentTo ids.NodeIDSet + var sentTo set.Set[ids.NodeID] if err == nil { - nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - sentTo = s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()) + sentTo = s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) } else { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.Get), + zap.Stringer("messageOp", message.GetOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Duration("deadline", deadline), @@ -619,7 +834,7 @@ func (s *sender) SendGet(nodeID ids.NodeID, requestID uint32, containerID ids.ID if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.Get), + zap.Stringer("messageOp", message.GetOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -627,23 +842,20 @@ func (s *sender) SendGet(nodeID ids.NodeID, requestID uint32, containerID ids.ID ) s.timeouts.RegisterRequestToUnreachableValidator() - inMsg := msgCreator.InternalFailedRequest(message.GetFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + go s.router.HandleInbound(ctx, inMsg) } } -// SendPut sends a Put message to the consensus engine running on the specified chain -// on the specified node. -// The Put message signifies that this consensus engine is giving to the recipient -// the contents of the specified container. -func (s *sender) SendPut(nodeID ids.NodeID, requestID uint32, container []byte) { - msgCreator := s.getMsgCreator() - +// SendPut sends a Put message to the consensus engine running on the specified +// chain on the specified node. 
+// The Put message signifies that this consensus engine is giving to the +// recipient the contents of the specified container. +func (s *sender) SendPut(_ context.Context, nodeID ids.NodeID, requestID uint32, container []byte) { // Create the outbound message. - outMsg, err := msgCreator.Put(s.ctx.ChainID, requestID, container) + outMsg, err := s.msgCreator.Put(s.ctx.ChainID, requestID, container, s.engineType) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Binary("container", container), @@ -653,17 +865,23 @@ func (s *sender) SendPut(nodeID ids.NodeID, requestID uint32, container []byte) } // Send the message over the network. - nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), ) s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -672,59 +890,100 @@ func (s *sender) SendPut(nodeID ids.NodeID, requestID uint32, container []byte) } } -// SendPushQuery sends a PushQuery message to the consensus engines running on the specified chains -// on the specified nodes. 
-// The PushQuery message signifies that this consensus engine would like each node to send -// their preferred frontier given the existence of the specified container. -func (s *sender) SendPushQuery(nodeIDs ids.NodeIDSet, requestID uint32, container []byte) { +// SendPushQuery sends a PushQuery message to the consensus engines running on +// the specified chains on the specified nodes. +// The PushQuery message signifies that this consensus engine would like each +// node to send their preferred frontier given the existence of the specified +// container. +func (s *sender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, container []byte) { + ctx = utils.Detach(ctx) + // Tell the router to expect a response message or a message notifying // that we won't get a response from each of these nodes. // We register timeouts for all nodes, regardless of whether we fail // to send them a message, to avoid busy looping when disconnected from // the internet. for nodeID := range nodeIDs { - s.router.RegisterRequest(nodeID, s.ctx.ChainID, requestID, message.Chits) + inMsg := message.InternalQueryFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + s.router.RegisterRequest( + ctx, + nodeID, + s.ctx.ChainID, + s.ctx.ChainID, + requestID, + message.ChitsOp, + inMsg, + s.engineType, + ) } // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. deadline := s.timeouts.TimeoutDuration() - msgCreator := s.getMsgCreator() - - // Sending a message to myself. No need to send it over the network. - // Just put it right into the router. Do so asynchronously to avoid deadlock. + // Sending a message to myself. No need to send it over the network. Just + // put it right into the router. Do so asynchronously to avoid deadlock. 
if nodeIDs.Contains(s.ctx.NodeID) { nodeIDs.Remove(s.ctx.NodeID) - inMsg := msgCreator.InboundPushQuery(s.ctx.ChainID, requestID, deadline, container, s.ctx.NodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundPushQuery( + s.ctx.ChainID, + requestID, + deadline, + container, + s.ctx.NodeID, + s.engineType, + ) + go s.router.HandleInbound(ctx, inMsg) } - // Some of [nodeIDs] may be benched. That is, they've been unresponsive - // so we don't even bother sending messages to them. We just have them immediately fail. + // Some of [nodeIDs] may be benched. That is, they've been unresponsive so + // we don't even bother sending messages to them. We just have them + // immediately fail. for nodeID := range nodeIDs { if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.PushQuery].Inc() // update metric + s.failedDueToBench[message.PushQueryOp].Inc() // update metric nodeIDs.Remove(nodeID) s.timeouts.RegisterRequestToUnreachableValidator() - // Immediately register a failure. Do so asynchronously to avoid deadlock. - inMsg := msgCreator.InternalFailedRequest(message.QueryFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + // Immediately register a failure. Do so asynchronously to avoid + // deadlock. + inMsg := message.InternalQueryFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + go s.router.HandleInbound(ctx, inMsg) } } // Create the outbound message. - // [sentTo] are the IDs of validators who may receive the message. - outMsg, err := msgCreator.PushQuery(s.ctx.ChainID, requestID, deadline, container) + outMsg, err := s.msgCreator.PushQuery( + s.ctx.ChainID, + requestID, + deadline, + container, + s.engineType, + ) // Send the message over the network. - var sentTo ids.NodeIDSet + // [sentTo] are the IDs of validators who may receive the message. 
+ var sentTo set.Set[ids.NodeID] if err == nil { - sentTo = s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()) + sentTo = s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) } else { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.PushQuery), + zap.Stringer("messageOp", message.PushQueryOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Binary("container", container), @@ -735,13 +994,13 @@ func (s *sender) SendPushQuery(nodeIDs ids.NodeIDSet, requestID uint32, containe for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.PushQuery), + zap.Stringer("messageOp", message.PushQueryOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), ) s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.PushQuery), + zap.Stringer("messageOp", message.PushQueryOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -750,63 +1009,108 @@ func (s *sender) SendPushQuery(nodeIDs ids.NodeIDSet, requestID uint32, containe // Register failures for nodes we didn't send a request to. s.timeouts.RegisterRequestToUnreachableValidator() - inMsg := msgCreator.InternalFailedRequest(message.QueryFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + inMsg := message.InternalQueryFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + go s.router.HandleInbound(ctx, inMsg) } } } -// SendPullQuery sends a PullQuery message to the consensus engines running on the specified chains -// on the specified nodes. -// The PullQuery message signifies that this consensus engine would like each node to send -// their preferred frontier. 
-func (s *sender) SendPullQuery(nodeIDs ids.NodeIDSet, requestID uint32, containerID ids.ID) { +// SendPullQuery sends a PullQuery message to the consensus engines running on +// the specified chains on the specified nodes. +// The PullQuery message signifies that this consensus engine would like each +// node to send their preferred frontier. +func (s *sender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID) { + ctx = utils.Detach(ctx) + // Tell the router to expect a response message or a message notifying // that we won't get a response from each of these nodes. // We register timeouts for all nodes, regardless of whether we fail // to send them a message, to avoid busy looping when disconnected from // the internet. for nodeID := range nodeIDs { - s.router.RegisterRequest(nodeID, s.ctx.ChainID, requestID, message.Chits) + inMsg := message.InternalQueryFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + s.router.RegisterRequest( + ctx, + nodeID, + s.ctx.ChainID, + s.ctx.ChainID, + requestID, + message.ChitsOp, + inMsg, + s.engineType, + ) } // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. deadline := s.timeouts.TimeoutDuration() - msgCreator := s.getMsgCreator() - - // Sending a message to myself. No need to send it over the network. - // Just put it right into the router. Do so asynchronously to avoid deadlock. + // Sending a message to myself. No need to send it over the network. Just + // put it right into the router. Do so asynchronously to avoid deadlock. 
if nodeIDs.Contains(s.ctx.NodeID) { nodeIDs.Remove(s.ctx.NodeID) - inMsg := msgCreator.InboundPullQuery(s.ctx.ChainID, requestID, deadline, containerID, s.ctx.NodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundPullQuery( + s.ctx.ChainID, + requestID, + deadline, + containerID, + s.ctx.NodeID, + s.engineType, + ) + go s.router.HandleInbound(ctx, inMsg) } - // Some of the nodes in [nodeIDs] may be benched. That is, they've been unresponsive - // so we don't even bother sending messages to them. We just have them immediately fail. + // Some of the nodes in [nodeIDs] may be benched. That is, they've been + // unresponsive so we don't even bother sending messages to them. We just + // have them immediately fail. for nodeID := range nodeIDs { if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.PullQuery].Inc() // update metric + s.failedDueToBench[message.PullQueryOp].Inc() // update metric nodeIDs.Remove(nodeID) s.timeouts.RegisterRequestToUnreachableValidator() - // Immediately register a failure. Do so asynchronously to avoid deadlock. - inMsg := msgCreator.InternalFailedRequest(message.QueryFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + // Immediately register a failure. Do so asynchronously to avoid + // deadlock. + inMsg := message.InternalQueryFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + go s.router.HandleInbound(ctx, inMsg) } } // Create the outbound message. - outMsg, err := msgCreator.PullQuery(s.ctx.ChainID, requestID, deadline, containerID) + outMsg, err := s.msgCreator.PullQuery( + s.ctx.ChainID, + requestID, + deadline, + containerID, + s.engineType, + ) // Send the message over the network. 
- var sentTo ids.NodeIDSet + var sentTo set.Set[ids.NodeID] if err == nil { - sentTo = s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()) + sentTo = s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) } else { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.PullQuery), + zap.Stringer("messageOp", message.PullQueryOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Duration("deadline", deadline), @@ -818,7 +1122,7 @@ func (s *sender) SendPullQuery(nodeIDs ids.NodeIDSet, requestID uint32, containe for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.PullQuery), + zap.Stringer("messageOp", message.PullQueryOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -827,102 +1131,203 @@ func (s *sender) SendPullQuery(nodeIDs ids.NodeIDSet, requestID uint32, containe // Register failures for nodes we didn't send a request to. 
s.timeouts.RegisterRequestToUnreachableValidator() - inMsg := msgCreator.InternalFailedRequest(message.QueryFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + inMsg := message.InternalQueryFailed( + nodeID, + s.ctx.ChainID, + requestID, + s.engineType, + ) + go s.router.HandleInbound(ctx, inMsg) } } } // SendChits sends chits -func (s *sender) SendChits(nodeID ids.NodeID, requestID uint32, votes []ids.ID) { - msgCreator := s.getMsgCreator() +func (s *sender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes, accepted []ids.ID) { + ctx = utils.Detach(ctx) // If [nodeID] is myself, send this message directly // to my own router rather than sending it over the network if nodeID == s.ctx.NodeID { - inMsg := msgCreator.InboundChits(s.ctx.ChainID, requestID, votes, nodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundChits( + s.ctx.ChainID, + requestID, + votes, + accepted, + nodeID, + ) + go s.router.HandleInbound(ctx, inMsg) return } // Create the outbound message. - outMsg, err := msgCreator.Chits(s.ctx.ChainID, requestID, votes) + outMsg, err := s.msgCreator.Chits(s.ctx.ChainID, requestID, votes, accepted) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.Chits), + zap.Stringer("messageOp", message.ChitsOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("containerIDs", ids.SliceStringer(votes)), + zap.Stringers("containerIDs", votes), zap.Error(err), ) return } // Send the message over the network. 
- nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.Chits), + zap.Stringer("messageOp", message.ChitsOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), - zap.Stringer("containerIDs", ids.SliceStringer(votes)), + zap.Stringers("containerIDs", votes), ) } } +func (s *sender) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { + ctx = utils.Detach(ctx) + + // The failed message is treated as if it was sent by the requested chain + failedMsg := message.InternalCrossChainAppRequestFailed( + s.ctx.NodeID, + chainID, + s.ctx.ChainID, + requestID, + ) + s.router.RegisterRequest( + ctx, + s.ctx.NodeID, + s.ctx.ChainID, + chainID, + requestID, + message.CrossChainAppResponseOp, + failedMsg, + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + ) + + inMsg := message.InternalCrossChainAppRequest( + s.ctx.NodeID, + s.ctx.ChainID, + chainID, + requestID, + s.timeouts.TimeoutDuration(), + appRequestBytes, + ) + go s.router.HandleInbound(ctx, inMsg) + return nil +} + +func (s *sender) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error { + ctx = utils.Detach(ctx) + + inMsg := message.InternalCrossChainAppResponse( + s.ctx.NodeID, + s.ctx.ChainID, + chainID, + requestID, + appResponseBytes, + ) + go s.router.HandleInbound(ctx, inMsg) + return nil +} + // SendAppRequest sends an application-level request to the given nodes. -// The meaning of this request, and how it should be handled, is defined by the VM. 
-func (s *sender) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, appRequestBytes []byte) error { +// The meaning of this request, and how it should be handled, is defined by the +// VM. +func (s *sender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error { + ctx = utils.Detach(ctx) + // Tell the router to expect a response message or a message notifying // that we won't get a response from each of these nodes. // We register timeouts for all nodes, regardless of whether we fail // to send them a message, to avoid busy looping when disconnected from // the internet. for nodeID := range nodeIDs { - s.router.RegisterRequest(nodeID, s.ctx.ChainID, requestID, message.AppResponse) + inMsg := message.InternalAppRequestFailed( + nodeID, + s.ctx.ChainID, + requestID, + ) + s.router.RegisterRequest( + ctx, + nodeID, + s.ctx.ChainID, + s.ctx.ChainID, + requestID, + message.AppResponseOp, + inMsg, + p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, + ) } // Note that this timeout duration won't exactly match the one that gets // registered. That's OK. deadline := s.timeouts.TimeoutDuration() - msgCreator := s.getMsgCreator() - - // Sending a message to myself. No need to send it over the network. - // Just put it right into the router. Do so asynchronously to avoid deadlock. + // Sending a message to myself. No need to send it over the network. Just + // put it right into the router. Do so asynchronously to avoid deadlock. if nodeIDs.Contains(s.ctx.NodeID) { nodeIDs.Remove(s.ctx.NodeID) - inMsg := msgCreator.InboundAppRequest(s.ctx.ChainID, requestID, deadline, appRequestBytes, s.ctx.NodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundAppRequest( + s.ctx.ChainID, + requestID, + deadline, + appRequestBytes, + s.ctx.NodeID, + ) + go s.router.HandleInbound(ctx, inMsg) } - // Some of the nodes in [nodeIDs] may be benched. 
That is, they've been unresponsive - // so we don't even bother sending messages to them. We just have them immediately fail. + // Some of the nodes in [nodeIDs] may be benched. That is, they've been + // unresponsive so we don't even bother sending messages to them. We just + // have them immediately fail. for nodeID := range nodeIDs { if s.timeouts.IsBenched(nodeID, s.ctx.ChainID) { - s.failedDueToBench[message.AppRequest].Inc() // update metric + s.failedDueToBench[message.AppRequestOp].Inc() // update metric nodeIDs.Remove(nodeID) s.timeouts.RegisterRequestToUnreachableValidator() - // Immediately register a failure. Do so asynchronously to avoid deadlock. - inMsg := msgCreator.InternalFailedRequest(message.AppRequestFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + // Immediately register a failure. Do so asynchronously to avoid + // deadlock. + inMsg := message.InternalAppRequestFailed( + nodeID, + s.ctx.ChainID, + requestID, + ) + go s.router.HandleInbound(ctx, inMsg) } } // Create the outbound message. - // [sentTo] are the IDs of nodes who may receive the message. - outMsg, err := msgCreator.AppRequest(s.ctx.ChainID, requestID, deadline, appRequestBytes) + outMsg, err := s.msgCreator.AppRequest( + s.ctx.ChainID, + requestID, + deadline, + appRequestBytes, + ) // Send the message over the network. - var sentTo ids.NodeIDSet + // [sentTo] are the IDs of nodes who may receive the message. 
+ var sentTo set.Set[ids.NodeID] if err == nil { - sentTo = s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()) + sentTo = s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) } else { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.AppRequest), + zap.Stringer("messageOp", message.AppRequestOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Binary("payload", appRequestBytes), @@ -933,13 +1338,13 @@ func (s *sender) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, appRequ for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AppRequest), + zap.Stringer("messageOp", message.AppRequestOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), ) s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.AppRequest), + zap.Stringer("messageOp", message.AppRequestOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -948,8 +1353,12 @@ func (s *sender) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, appRequ // Register failures for nodes we didn't send a request to. 
s.timeouts.RegisterRequestToUnreachableValidator() - inMsg := msgCreator.InternalFailedRequest(message.AppRequestFailed, nodeID, s.ctx.ChainID, requestID) - go s.router.HandleInbound(inMsg) + inMsg := message.InternalAppRequestFailed( + nodeID, + s.ctx.ChainID, + requestID, + ) + go s.router.HandleInbound(ctx, inMsg) } } return nil @@ -957,20 +1366,29 @@ func (s *sender) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, appRequ // SendAppResponse sends a response to an application-level request from the // given node -func (s *sender) SendAppResponse(nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { - msgCreator := s.getMsgCreator() +func (s *sender) SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { + ctx = utils.Detach(ctx) if nodeID == s.ctx.NodeID { - inMsg := msgCreator.InboundAppResponse(s.ctx.ChainID, requestID, appResponseBytes, nodeID) - go s.router.HandleInbound(inMsg) + inMsg := message.InboundAppResponse( + s.ctx.ChainID, + requestID, + appResponseBytes, + nodeID, + ) + go s.router.HandleInbound(ctx, inMsg) return nil } // Create the outbound message. - outMsg, err := msgCreator.AppResponse(s.ctx.ChainID, requestID, appResponseBytes) + outMsg, err := s.msgCreator.AppResponse( + s.ctx.ChainID, + requestID, + appResponseBytes, + ) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.AppResponse), + zap.Stringer("messageOp", message.AppResponseOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), zap.Binary("payload", appResponseBytes), @@ -980,17 +1398,23 @@ func (s *sender) SendAppResponse(nodeID ids.NodeID, requestID uint32, appRespons } // Send the message over the network. 
- nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AppResponse), + zap.Stringer("messageOp", message.AppResponseOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), ) s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.AppResponse), + zap.Stringer("messageOp", message.AppResponseOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Uint32("requestID", requestID), @@ -1000,14 +1424,12 @@ func (s *sender) SendAppResponse(nodeID ids.NodeID, requestID uint32, appRespons return nil } -func (s *sender) SendAppGossipSpecific(nodeIDs ids.NodeIDSet, appGossipBytes []byte) error { - msgCreator := s.getMsgCreator() - +func (s *sender) SendAppGossipSpecific(_ context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { // Create the outbound message. - outMsg, err := msgCreator.AppGossip(s.ctx.ChainID, appGossipBytes) + outMsg, err := s.msgCreator.AppGossip(s.ctx.ChainID, appGossipBytes) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.AppGossip), + zap.Stringer("messageOp", message.AppGossipOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Binary("payload", appGossipBytes), zap.Error(err), @@ -1016,16 +1438,22 @@ func (s *sender) SendAppGossipSpecific(nodeIDs ids.NodeIDSet, appGossipBytes []b } // Send the message over the network. 
- if sentTo := s.sender.Send(outMsg, nodeIDs, s.ctx.SubnetID, s.ctx.IsValidatorOnly()); sentTo.Len() == 0 { + sentTo := s.sender.Send( + outMsg, + nodeIDs, + s.ctx.SubnetID, + s.subnet, + ) + if sentTo.Len() == 0 { for nodeID := range nodeIDs { if !sentTo.Contains(nodeID) { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AppGossip), + zap.Stringer("messageOp", message.AppGossipOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), ) s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.AppGossip), + zap.Stringer("messageOp", message.AppGossipOp), zap.Stringer("nodeID", nodeID), zap.Stringer("chainID", s.ctx.ChainID), zap.Binary("payload", appGossipBytes), @@ -1037,14 +1465,12 @@ func (s *sender) SendAppGossipSpecific(nodeIDs ids.NodeIDSet, appGossipBytes []b } // SendAppGossip sends an application-level gossip message. -func (s *sender) SendAppGossip(appGossipBytes []byte) error { - msgCreator := s.getMsgCreator() - +func (s *sender) SendAppGossip(_ context.Context, appGossipBytes []byte) error { // Create the outbound message. 
- outMsg, err := msgCreator.AppGossip(s.ctx.ChainID, appGossipBytes) + outMsg, err := s.msgCreator.AppGossip(s.ctx.ChainID, appGossipBytes) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.AppGossip), + zap.Stringer("messageOp", message.AppGossipOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Binary("payload", appGossipBytes), zap.Error(err), @@ -1052,18 +1478,26 @@ func (s *sender) SendAppGossip(appGossipBytes []byte) error { return nil } - validatorSize := int(s.gossipConfig.AppGossipValidatorSize) - nonValidatorSize := int(s.gossipConfig.AppGossipNonValidatorSize) - peerSize := int(s.gossipConfig.AppGossipPeerSize) + gossipConfig := s.subnet.Config().GossipConfig + validatorSize := int(gossipConfig.AppGossipValidatorSize) + nonValidatorSize := int(gossipConfig.AppGossipNonValidatorSize) + peerSize := int(gossipConfig.AppGossipPeerSize) - sentTo := s.sender.Gossip(outMsg, s.ctx.SubnetID, s.ctx.IsValidatorOnly(), validatorSize, nonValidatorSize, peerSize) + sentTo := s.sender.Gossip( + outMsg, + s.ctx.SubnetID, + validatorSize, + nonValidatorSize, + peerSize, + s.subnet, + ) if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.AppGossip), + zap.Stringer("messageOp", message.AppGossipOp), zap.Stringer("chainID", s.ctx.ChainID), ) s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.AppGossip), + zap.Stringer("messageOp", message.AppGossipOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Binary("payload", appGossipBytes), ) @@ -1072,14 +1506,17 @@ func (s *sender) SendAppGossip(appGossipBytes []byte) error { } // SendGossip gossips the provided container -func (s *sender) SendGossip(container []byte) { - msgCreator := s.getMsgCreator() - +func (s *sender) SendGossip(_ context.Context, container []byte) { // Create the outbound message. 
- outMsg, err := msgCreator.Put(s.ctx.ChainID, constants.GossipMsgRequestID, container) + outMsg, err := s.msgCreator.Put( + s.ctx.ChainID, + constants.GossipMsgRequestID, + container, + s.engineType, + ) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Binary("container", container), zap.Error(err), @@ -1087,21 +1524,22 @@ func (s *sender) SendGossip(container []byte) { return } + gossipConfig := s.subnet.Config().GossipConfig sentTo := s.sender.Gossip( outMsg, s.ctx.SubnetID, - s.ctx.IsValidatorOnly(), - int(s.gossipConfig.AcceptedFrontierValidatorSize), - int(s.gossipConfig.AcceptedFrontierNonValidatorSize), - int(s.gossipConfig.AcceptedFrontierPeerSize), + int(gossipConfig.AcceptedFrontierValidatorSize), + int(gossipConfig.AcceptedFrontierNonValidatorSize), + int(gossipConfig.AcceptedFrontierPeerSize), + s.subnet, ) if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("chainID", s.ctx.ChainID), ) s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Binary("container", container), ) @@ -1110,18 +1548,21 @@ func (s *sender) SendGossip(container []byte) { // Accept is called after every consensus decision func (s *sender) Accept(ctx *snow.ConsensusContext, _ ids.ID, container []byte) error { - if ctx.GetState() != snow.NormalOp { + if ctx.State.Get().State != snow.NormalOp { // don't gossip during bootstrapping return nil } - msgCreator := s.getMsgCreator() - // Create the outbound message. 
- outMsg, err := msgCreator.Put(s.ctx.ChainID, constants.GossipMsgRequestID, container) + outMsg, err := s.msgCreator.Put( + s.ctx.ChainID, + constants.GossipMsgRequestID, + container, + s.engineType, + ) if err != nil { s.ctx.Log.Error("failed to build message", - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Binary("container", container), zap.Error(err), @@ -1129,21 +1570,22 @@ func (s *sender) Accept(ctx *snow.ConsensusContext, _ ids.ID, container []byte) return nil } + gossipConfig := s.subnet.Config().GossipConfig sentTo := s.sender.Gossip( outMsg, s.ctx.SubnetID, - s.ctx.IsValidatorOnly(), - int(s.gossipConfig.OnAcceptValidatorSize), - int(s.gossipConfig.OnAcceptNonValidatorSize), - int(s.gossipConfig.OnAcceptPeerSize), + int(gossipConfig.OnAcceptValidatorSize), + int(gossipConfig.OnAcceptNonValidatorSize), + int(gossipConfig.OnAcceptPeerSize), + s.subnet, ) if sentTo.Len() == 0 { s.ctx.Log.Debug("failed to send message", - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("chainID", s.ctx.ChainID), ) s.ctx.Log.Verbo("failed to send message", - zap.Stringer("messageOp", message.Put), + zap.Stringer("messageOp", message.PutOp), zap.Stringer("chainID", s.ctx.ChainID), zap.Binary("container", container), ) diff --git a/avalanchego/snow/networking/sender/sender_test.go b/avalanchego/snow/networking/sender/sender_test.go index 7171f86e..04bfeea6 100644 --- a/avalanchego/snow/networking/sender/sender_test.go +++ b/avalanchego/snow/networking/sender/sender_test.go @@ -1,20 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sender import ( + "context" "math/rand" "sync" "testing" "time" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/networking/benchlist" @@ -23,24 +27,32 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/version" ) -var defaultGossipConfig = GossipConfig{ - AcceptedFrontierPeerSize: 2, - OnAcceptPeerSize: 2, - AppGossipValidatorSize: 2, - AppGossipNonValidatorSize: 2, +const testThreadPoolSize = 2 + +var defaultSubnetConfig = subnets.Config{ + GossipConfig: subnets.GossipConfig{ + AcceptedFrontierPeerSize: 2, + OnAcceptPeerSize: 2, + AppGossipValidatorSize: 2, + AppGossipNonValidatorSize: 2, + }, } func TestTimeout(t *testing.T) { + require := require.New(t) vdrs := validators.NewSet() - err := vdrs.AddWeight(ids.GenerateTestNodeID(), 1) - require.NoError(t, err) + err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) + require.NoError(err) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -54,43 +66,70 @@ func TestTimeout(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() chainRouter := router.ChainRouter{} metrics := 
prometheus.NewRegistry() - mc, err := message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) - require.NoError(t, err) - mcProto, err := message.NewCreatorWithProto(metrics, "dummyNamespace", true, 10*time.Second) - require.NoError(t, err) + mc, err := message.NewCreator( + logging.NoLog{}, + metrics, + "dummyNamespace", + constants.DefaultNetworkCompressionType, + 10*time.Second, + ) + require.NoError(err) - err = chainRouter.Initialize(ids.EmptyNodeID, logging.NoLog{}, mc, tm, time.Second, ids.Set{}, ids.Set{}, nil, router.HealthConfig{}, "", prometheus.NewRegistry()) - require.NoError(t, err) + err = chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Second, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + router.HealthConfig{}, + "", + prometheus.NewRegistry(), + ) + require.NoError(err) - context := snow.DefaultConsensusContextTest() + ctx := snow.DefaultConsensusContextTest() externalSender := &ExternalSenderTest{TB: t} externalSender.Default(false) - sender, err := New(context, mc, mcProto, time.Now().Add(time.Hour) /* TODO: test with banff accepted */, externalSender, &chainRouter, tm, defaultGossipConfig) - require.NoError(t, err) - - wg := sync.WaitGroup{} - wg.Add(2) - failedVDRs := ids.NodeIDSet{} - ctx := snow.DefaultConsensusContextTest() - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) - require.NoError(t, err) - handler, err := handler.New( - mc, + sender, err := New( ctx, + mc, + externalSender, + &chainRouter, + tm, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(ctx.NodeID, defaultSubnetConfig), + ) + require.NoError(err) + + ctx2 := snow.DefaultConsensusContextTest() + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(err) + h, err := handler.New( + ctx2, vdrs, nil, - nil, time.Hour, + 
testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ @@ -102,37 +141,189 @@ func TestTimeout(t *testing.T) { } bootstrapper.Default(true) bootstrapper.CantGossip = false - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.ConnectedF = func(nodeID ids.NodeID, nodeVersion *version.Application) error { return nil } - bootstrapper.QueryFailedF = func(nodeID ids.NodeID, _ uint32) error { + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx + } + bootstrapper.ConnectedF = func(context.Context, ids.NodeID, *version.Application) error { + return nil + } + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: nil, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: nil, + }, + }) + ctx2.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, // assumed bootstrap is ongoing + }) + + chainRouter.AddChain(context.Background(), h) + + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + h.Start(context.Background(), false) + + var ( + wg = sync.WaitGroup{} + vdrIDs = set.Set[ids.NodeID]{} + chains = set.Set[ids.ID]{} + requestID uint32 + failedLock sync.Mutex + failedVDRs = set.Set[ids.NodeID]{} + failedChains = set.Set[ids.ID]{} + ) + + cancelledCtx, cancel := context.WithCancel(context.Background()) + cancel() + + failed := func(ctx context.Context, nodeID ids.NodeID, _ uint32) error { + require.NoError(ctx.Err()) + + failedLock.Lock() + defer failedLock.Unlock() + failedVDRs.Add(nodeID) wg.Done() return nil } - handler.SetBootstrapper(bootstrapper) - ctx.SetState(snow.Bootstrapping) // assumed bootstrap is ongoing - 
chainRouter.AddChain(handler) + bootstrapper.GetStateSummaryFrontierFailedF = failed + bootstrapper.GetAcceptedStateSummaryFailedF = failed + bootstrapper.GetAcceptedFrontierFailedF = failed + bootstrapper.GetAcceptedFailedF = failed + bootstrapper.GetAncestorsFailedF = failed + bootstrapper.GetFailedF = failed + bootstrapper.QueryFailedF = failed + bootstrapper.AppRequestFailedF = failed + bootstrapper.CrossChainAppRequestFailedF = func(ctx context.Context, chainID ids.ID, _ uint32) error { + require.NoError(ctx.Err()) - bootstrapper.StartF = func(startReqID uint32) error { return nil } - handler.Start(false) + failedLock.Lock() + defer failedLock.Unlock() + + failedChains.Add(chainID) + wg.Done() + return nil + } + + sendAll := func() { + { + nodeIDs := set.Set[ids.NodeID]{ + ids.GenerateTestNodeID(): struct{}{}, + } + vdrIDs.Union(nodeIDs) + wg.Add(1) + requestID++ + sender.SendGetStateSummaryFrontier(cancelledCtx, nodeIDs, requestID) + } + { + nodeIDs := set.Set[ids.NodeID]{ + ids.GenerateTestNodeID(): struct{}{}, + } + vdrIDs.Union(nodeIDs) + wg.Add(1) + requestID++ + sender.SendGetAcceptedStateSummary(cancelledCtx, nodeIDs, requestID, nil) + } + { + nodeIDs := set.Set[ids.NodeID]{ + ids.GenerateTestNodeID(): struct{}{}, + } + vdrIDs.Union(nodeIDs) + wg.Add(1) + requestID++ + sender.SendGetAcceptedFrontier(cancelledCtx, nodeIDs, requestID) + } + { + nodeIDs := set.Set[ids.NodeID]{ + ids.GenerateTestNodeID(): struct{}{}, + } + vdrIDs.Union(nodeIDs) + wg.Add(1) + requestID++ + sender.SendGetAccepted(cancelledCtx, nodeIDs, requestID, nil) + } + { + nodeID := ids.GenerateTestNodeID() + vdrIDs.Add(nodeID) + wg.Add(1) + requestID++ + sender.SendGetAncestors(cancelledCtx, nodeID, requestID, ids.Empty) + } + { + nodeID := ids.GenerateTestNodeID() + vdrIDs.Add(nodeID) + wg.Add(1) + requestID++ + sender.SendGet(cancelledCtx, nodeID, requestID, ids.Empty) + } + { + nodeIDs := set.Set[ids.NodeID]{ + ids.GenerateTestNodeID(): struct{}{}, + } + vdrIDs.Union(nodeIDs) + 
wg.Add(1) + requestID++ + sender.SendPullQuery(cancelledCtx, nodeIDs, requestID, ids.Empty) + } + { + nodeIDs := set.Set[ids.NodeID]{ + ids.GenerateTestNodeID(): struct{}{}, + } + vdrIDs.Union(nodeIDs) + wg.Add(1) + requestID++ + sender.SendPushQuery(cancelledCtx, nodeIDs, requestID, nil) + } + { + nodeIDs := set.Set[ids.NodeID]{ + ids.GenerateTestNodeID(): struct{}{}, + } + vdrIDs.Union(nodeIDs) + wg.Add(1) + requestID++ + err := sender.SendAppRequest(cancelledCtx, nodeIDs, requestID, nil) + require.NoError(err) + } + { + chainID := ids.GenerateTestID() + chains.Add(chainID) + wg.Add(1) + requestID++ + err := sender.SendCrossChainAppRequest(cancelledCtx, chainID, requestID, nil) + require.NoError(err) + } + } - vdrIDs := ids.NodeIDSet{} - vdrIDs.Add(ids.NodeID{255}) - vdrIDs.Add(ids.NodeID{254}) + // Send messages to disconnected peers + externalSender.SendF = func(_ message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + return nil + } + sendAll() - sender.SendPullQuery(vdrIDs, 0, ids.Empty) + // Send messages to connected peers + externalSender.SendF = func(_ message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + return nodeIDs + } + sendAll() wg.Wait() - if !failedVDRs.Equals(vdrIDs) { - t.Fatalf("Timeouts should have fired") - } + require.Equal(vdrIDs, failedVDRs) + require.Equal(chains, failedChains) } func TestReliableMessages(t *testing.T) { vdrs := validators.NewSet() - err := vdrs.AddWeight(ids.NodeID{1}, 1) + err := vdrs.Add(ids.NodeID{1}, nil, ids.Empty, 1) require.NoError(t, err) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( @@ -154,33 +345,63 @@ func TestReliableMessages(t *testing.T) { chainRouter := router.ChainRouter{} metrics := prometheus.NewRegistry() - mc, err := message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) - require.NoError(t, err) - mcProto, err := message.NewCreatorWithProto(metrics, 
"dummyNamespace", true, 10*time.Second) + mc, err := message.NewCreator( + logging.NoLog{}, + metrics, + "dummyNamespace", + constants.DefaultNetworkCompressionType, + 10*time.Second, + ) require.NoError(t, err) - err = chainRouter.Initialize(ids.EmptyNodeID, logging.NoLog{}, mc, tm, time.Second, ids.Set{}, ids.Set{}, nil, router.HealthConfig{}, "", prometheus.NewRegistry()) + err = chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Second, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + router.HealthConfig{}, + "", + prometheus.NewRegistry(), + ) require.NoError(t, err) - context := snow.DefaultConsensusContextTest() + ctx := snow.DefaultConsensusContextTest() externalSender := &ExternalSenderTest{TB: t} externalSender.Default(false) - sender, err := New(context, mc, mcProto, time.Now().Add(time.Hour) /* TODO: test with banff accepted */, externalSender, &chainRouter, tm, defaultGossipConfig) + sender, err := New( + ctx, + mc, + externalSender, + &chainRouter, + tm, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(ctx.NodeID, defaultSubnetConfig), + ) require.NoError(t, err) - ctx := snow.DefaultConsensusContextTest() - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) + ctx2 := snow.DefaultConsensusContextTest() + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) require.NoError(t, err) - handler, err := handler.New( - mc, - ctx, + h, err := handler.New( + ctx2, vdrs, nil, - nil, 1, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) require.NoError(t, err) @@ -194,32 +415,52 @@ func TestReliableMessages(t *testing.T) { } bootstrapper.Default(true) bootstrapper.CantGossip = false - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.ConnectedF 
= func(nodeID ids.NodeID, nodeVersion *version.Application) error { return nil } + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx2 + } + bootstrapper.ConnectedF = func(context.Context, ids.NodeID, *version.Application) error { + return nil + } queriesToSend := 1000 awaiting := make([]chan struct{}, queriesToSend) for i := 0; i < queriesToSend; i++ { awaiting[i] = make(chan struct{}, 1) } - bootstrapper.QueryFailedF = func(nodeID ids.NodeID, reqID uint32) error { + bootstrapper.QueryFailedF = func(_ context.Context, _ ids.NodeID, reqID uint32) error { close(awaiting[int(reqID)]) return nil } bootstrapper.CantGossip = false - handler.SetBootstrapper(bootstrapper) - ctx.SetState(snow.Bootstrapping) // assumed bootstrap is ongoing + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: nil, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: nil, + }, + }) + ctx2.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, // assumed bootstrap is ongoing + }) - chainRouter.AddChain(handler) + chainRouter.AddChain(context.Background(), h) - bootstrapper.StartF = func(startReqID uint32) error { return nil } - handler.Start(false) + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + h.Start(context.Background(), false) go func() { for i := 0; i < queriesToSend; i++ { - vdrIDs := ids.NodeIDSet{} + vdrIDs := set.Set[ids.NodeID]{} vdrIDs.Add(ids.NodeID{1}) - sender.SendPullQuery(vdrIDs, uint32(i), ids.Empty) + sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty) time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) // #nosec G404 } }() @@ -232,7 +473,7 @@ func TestReliableMessages(t *testing.T) { func TestReliableMessagesToMyself(t *testing.T) { benchlist := benchlist.NewNoBenchlist() vdrs := validators.NewSet() - err := 
vdrs.AddWeight(ids.GenerateTestNodeID(), 1) + err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) require.NoError(t, err) tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -253,33 +494,63 @@ func TestReliableMessagesToMyself(t *testing.T) { chainRouter := router.ChainRouter{} metrics := prometheus.NewRegistry() - mc, err := message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) - require.NoError(t, err) - mcProto, err := message.NewCreatorWithProto(metrics, "dummyNamespace", true, 10*time.Second) + mc, err := message.NewCreator( + logging.NoLog{}, + metrics, + "dummyNamespace", + constants.DefaultNetworkCompressionType, + 10*time.Second, + ) require.NoError(t, err) - err = chainRouter.Initialize(ids.EmptyNodeID, logging.NoLog{}, mc, tm, time.Second, ids.Set{}, ids.Set{}, nil, router.HealthConfig{}, "", prometheus.NewRegistry()) + err = chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + tm, + time.Second, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + router.HealthConfig{}, + "", + prometheus.NewRegistry(), + ) require.NoError(t, err) - context := snow.DefaultConsensusContextTest() + ctx := snow.DefaultConsensusContextTest() externalSender := &ExternalSenderTest{TB: t} externalSender.Default(false) - sender, err := New(context, mc, mcProto, time.Now().Add(time.Hour) /* TODO: test with banff accepted */, externalSender, &chainRouter, tm, defaultGossipConfig) + sender, err := New( + ctx, + mc, + externalSender, + &chainRouter, + tm, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(ctx.NodeID, defaultSubnetConfig), + ) require.NoError(t, err) - ctx := snow.DefaultConsensusContextTest() - resourceTracker, err := tracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) + ctx2 := snow.DefaultConsensusContextTest() + resourceTracker, err := tracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + 
time.Second, + ) require.NoError(t, err) - handler, err := handler.New( - mc, - ctx, + h, err := handler.New( + ctx2, vdrs, nil, - nil, time.Second, + testThreadPoolSize, resourceTracker, + validators.UnhandledSubnetConnector, + subnets.New(ctx.NodeID, subnets.Config{}), ) require.NoError(t, err) @@ -293,33 +564,53 @@ func TestReliableMessagesToMyself(t *testing.T) { } bootstrapper.Default(true) bootstrapper.CantGossip = false - bootstrapper.ContextF = func() *snow.ConsensusContext { return ctx } - bootstrapper.ConnectedF = func(nodeID ids.NodeID, nodeVersion *version.Application) error { return nil } + bootstrapper.ContextF = func() *snow.ConsensusContext { + return ctx2 + } + bootstrapper.ConnectedF = func(context.Context, ids.NodeID, *version.Application) error { + return nil + } queriesToSend := 2 awaiting := make([]chan struct{}, queriesToSend) for i := 0; i < queriesToSend; i++ { awaiting[i] = make(chan struct{}, 1) } - bootstrapper.QueryFailedF = func(nodeID ids.NodeID, reqID uint32) error { + bootstrapper.QueryFailedF = func(_ context.Context, _ ids.NodeID, reqID uint32) error { close(awaiting[int(reqID)]) return nil } - handler.SetBootstrapper(bootstrapper) - ctx.SetState(snow.Bootstrapping) // assumed bootstrap is ongoing + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: nil, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: nil, + }, + }) + ctx2.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.Bootstrapping, // assumed bootstrap is ongoing + }) - chainRouter.AddChain(handler) + chainRouter.AddChain(context.Background(), h) - bootstrapper.StartF = func(startReqID uint32) error { return nil } - handler.Start(false) + bootstrapper.StartF = func(context.Context, uint32) error { + return nil + } + h.Start(context.Background(), false) go func() { for i := 0; i < queriesToSend; i++ { // Send a 
pull query to some random peer that won't respond // because they don't exist. This will almost immediately trigger // a query failed message - vdrIDs := ids.NodeIDSet{} + vdrIDs := set.Set[ids.NodeID]{} vdrIDs.Add(ids.GenerateTestNodeID()) - sender.SendPullQuery(vdrIDs, uint32(i), ids.Empty) + sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty) } }() @@ -327,3 +618,760 @@ func TestReliableMessagesToMyself(t *testing.T) { <-await } } + +func TestSender_Bootstrap_Requests(t *testing.T) { + var ( + chainID = ids.GenerateTestID() + subnetID = ids.GenerateTestID() + myNodeID = ids.GenerateTestNodeID() + successNodeID = ids.GenerateTestNodeID() + failedNodeID = ids.GenerateTestNodeID() + deadline = time.Second + requestID = uint32(1337) + ctx = snow.DefaultContextTest() + heights = []uint64{1, 2, 3} + containerIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN + ) + ctx.ChainID = chainID + ctx.SubnetID = subnetID + ctx.NodeID = myNodeID + snowCtx := &snow.ConsensusContext{ + Context: ctx, + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), + } + + type test struct { + name string + failedMsgF func(nodeID ids.NodeID) message.InboundMessage + assertMsgToMyself func(require *require.Assertions, msg message.InboundMessage) + expectedResponseOp message.Op + setMsgCreatorExpect func(msgCreator *message.MockOutboundMsgBuilder) + setExternalSenderExpect func(externalSender *MockExternalSender) + sendF func(require *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) + engineType p2p.EngineType + } + + tests := []test{ + { + name: "GetStateSummaryFrontier", + failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { + return message.InternalGetStateSummaryFrontierFailed( + nodeID, + chainID, + requestID, + ) + }, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := 
msg.Message().(*p2p.GetStateSummaryFrontier) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(uint64(deadline), innerMsg.Deadline) + }, + expectedResponseOp: message.StateSummaryFrontierOp, + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().GetStateSummaryFrontier( + chainID, + requestID, + deadline, + ).Return(nil, nil) + }, + setExternalSenderExpect: func(externalSender *MockExternalSender) { + externalSender.EXPECT().Send( + gomock.Any(), // Outbound message + set.Set[ids.NodeID]{ // Note [myNodeID] is not in this set + successNodeID: struct{}{}, + failedNodeID: struct{}{}, + }, // Node IDs + subnetID, // Subnet ID + gomock.Any(), + ).Return(set.Set[ids.NodeID]{ + successNodeID: struct{}{}, + }) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { + sender.SendGetStateSummaryFrontier( + context.Background(), + nodeIDs, + requestID, + ) + }, + }, + { + name: "GetAcceptedStateSummary", + failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { + return message.InternalGetAcceptedStateSummaryFailed( + nodeID, + chainID, + requestID, + ) + }, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := msg.Message().(*p2p.GetAcceptedStateSummary) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(uint64(deadline), innerMsg.Deadline) + require.Equal(heights, innerMsg.Heights) + }, + expectedResponseOp: message.AcceptedStateSummaryOp, + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().GetAcceptedStateSummary( + chainID, + requestID, + deadline, + heights, + ).Return(nil, nil) + }, + setExternalSenderExpect: func(externalSender *MockExternalSender) { + externalSender.EXPECT().Send( + gomock.Any(), // Outbound message + set.Set[ids.NodeID]{ 
// Note [myNodeID] is not in this set + successNodeID: struct{}{}, + failedNodeID: struct{}{}, + }, // Node IDs + subnetID, // Subnet ID + gomock.Any(), + ).Return(set.Set[ids.NodeID]{ + successNodeID: struct{}{}, + }) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { + sender.SendGetAcceptedStateSummary(context.Background(), nodeIDs, requestID, heights) + }, + }, + { + name: "GetAcceptedFrontier", + failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { + return message.InternalGetAcceptedFrontierFailed( + nodeID, + chainID, + requestID, + engineType, + ) + }, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := msg.Message().(*p2p.GetAcceptedFrontier) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(uint64(deadline), innerMsg.Deadline) + require.Equal(engineType, innerMsg.EngineType) + }, + expectedResponseOp: message.AcceptedFrontierOp, + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().GetAcceptedFrontier( + chainID, + requestID, + deadline, + engineType, + ).Return(nil, nil) + }, + setExternalSenderExpect: func(externalSender *MockExternalSender) { + externalSender.EXPECT().Send( + gomock.Any(), // Outbound message + set.Set[ids.NodeID]{ // Note [myNodeID] is not in this set + successNodeID: struct{}{}, + failedNodeID: struct{}{}, + }, // Node IDs + subnetID, // Subnet ID + gomock.Any(), + ).Return(set.Set[ids.NodeID]{ + successNodeID: struct{}{}, + }) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { + sender.SendGetAcceptedFrontier(context.Background(), nodeIDs, requestID) + }, + engineType: engineType, + }, + { + name: "GetAccepted", + failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { + return message.InternalGetAcceptedFailed( + nodeID, + chainID, + requestID, + engineType, + ) + 
}, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := msg.Message().(*p2p.GetAccepted) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(uint64(deadline), innerMsg.Deadline) + require.Equal(engineType, innerMsg.EngineType) + }, + expectedResponseOp: message.AcceptedOp, + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().GetAccepted( + chainID, + requestID, + deadline, + containerIDs, + engineType, + ).Return(nil, nil) + }, + setExternalSenderExpect: func(externalSender *MockExternalSender) { + externalSender.EXPECT().Send( + gomock.Any(), // Outbound message + set.Set[ids.NodeID]{ // Note [myNodeID] is not in this set + successNodeID: struct{}{}, + failedNodeID: struct{}{}, + }, // Node IDs + subnetID, // Subnet ID + gomock.Any(), + ).Return(set.Set[ids.NodeID]{ + successNodeID: struct{}{}, + }) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { + sender.SendGetAccepted(context.Background(), nodeIDs, requestID, containerIDs) + }, + engineType: engineType, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var ( + msgCreator = message.NewMockOutboundMsgBuilder(ctrl) + externalSender = NewMockExternalSender(ctrl) + timeoutManager = timeout.NewMockManager(ctrl) + router = router.NewMockRouter(ctrl) + nodeIDs = set.Set[ids.NodeID]{ + successNodeID: struct{}{}, + failedNodeID: struct{}{}, + myNodeID: struct{}{}, + } + nodeIDsCopy set.Set[ids.NodeID] + ) + nodeIDsCopy.Union(nodeIDs) + snowCtx.Registerer = prometheus.NewRegistry() + + sender, err := New( + snowCtx, + msgCreator, + externalSender, + router, + timeoutManager, + engineType, + subnets.New(ctx.NodeID, defaultSubnetConfig), + ) + require.NoError(err) + + // Set the timeout (deadline) 
+ timeoutManager.EXPECT().TimeoutDuration().Return(deadline).AnyTimes() + + // Make sure we register requests with the router + for nodeID := range nodeIDs { + expectedFailedMsg := tt.failedMsgF(nodeID) + router.EXPECT().RegisterRequest( + gomock.Any(), // Context + nodeID, // Node ID + chainID, // Source Chain + chainID, // Destination Chain + requestID, // Request ID + tt.expectedResponseOp, // Operation + expectedFailedMsg, // Failure Message + tt.engineType, + ) + } + + // Make sure we send a message to ourselves since [myNodeID] + // is in [nodeIDs]. + // Note that HandleInbound is called in a separate goroutine + // so we need to use a channel to synchronize the test. + calledHandleInbound := make(chan struct{}) + router.EXPECT().HandleInbound(gomock.Any(), gomock.Any()).Do( + func(_ context.Context, msg message.InboundMessage) { + // Make sure we're sending ourselves + // the expected message. + tt.assertMsgToMyself(require, msg) + close(calledHandleInbound) + }, + ) + + // Make sure we're making the correct outbound message. 
+ tt.setMsgCreatorExpect(msgCreator) + + // Make sure we're sending the message + tt.setExternalSenderExpect(externalSender) + + tt.sendF(require, sender, nodeIDsCopy) + + <-calledHandleInbound + }) + } +} + +func TestSender_Bootstrap_Responses(t *testing.T) { + var ( + chainID = ids.GenerateTestID() + subnetID = ids.GenerateTestID() + myNodeID = ids.GenerateTestNodeID() + destinationNodeID = ids.GenerateTestNodeID() + deadline = time.Second + requestID = uint32(1337) + ctx = snow.DefaultContextTest() + summaryIDs = []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()} + summary = []byte{1, 2, 3} + engineType = p2p.EngineType_ENGINE_TYPE_AVALANCHE + ) + ctx.ChainID = chainID + ctx.SubnetID = subnetID + ctx.NodeID = myNodeID + snowCtx := &snow.ConsensusContext{ + Context: ctx, + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), + } + + type test struct { + name string + assertMsgToMyself func(require *require.Assertions, msg message.InboundMessage) + setMsgCreatorExpect func(msgCreator *message.MockOutboundMsgBuilder) + setExternalSenderExpect func(externalSender *MockExternalSender) + sendF func(require *require.Assertions, sender common.Sender, nodeID ids.NodeID) + } + + tests := []test{ + { + name: "StateSummaryFrontier", + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().StateSummaryFrontier( + chainID, + requestID, + summary, + ).Return(nil, nil) // Don't care about the message + }, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := msg.Message().(*p2p.StateSummaryFrontier) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + require.Equal(summary, innerMsg.Summary) + }, + setExternalSenderExpect: func(externalSender *MockExternalSender) { + externalSender.EXPECT().Send( + gomock.Any(), // Outbound message + set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs 
+ subnetID, // Subnet ID + gomock.Any(), + ).Return(nil) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { + sender.SendStateSummaryFrontier(context.Background(), nodeID, requestID, summary) + }, + }, + { + name: "AcceptedStateSummary", + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().AcceptedStateSummary( + chainID, + requestID, + summaryIDs, + ).Return(nil, nil) // Don't care about the message + }, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := msg.Message().(*p2p.AcceptedStateSummary) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + for i, summaryID := range summaryIDs { + require.Equal(summaryID[:], innerMsg.SummaryIds[i]) + } + }, + setExternalSenderExpect: func(externalSender *MockExternalSender) { + externalSender.EXPECT().Send( + gomock.Any(), // Outbound message + set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs + subnetID, // Subnet ID + gomock.Any(), + ).Return(nil) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { + sender.SendAcceptedStateSummary(context.Background(), nodeID, requestID, summaryIDs) + }, + }, + { + name: "AcceptedFrontier", + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().AcceptedFrontier( + chainID, + requestID, + summaryIDs, + ).Return(nil, nil) // Don't care about the message + }, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := msg.Message().(*p2p.AcceptedFrontier) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + for i, summaryID := range summaryIDs { + require.Equal(summaryID[:], innerMsg.ContainerIds[i]) + } + }, + setExternalSenderExpect: func(externalSender *MockExternalSender) { + externalSender.EXPECT().Send( + 
gomock.Any(), // Outbound message + set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs + subnetID, // Subnet ID + gomock.Any(), + ).Return(nil) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { + sender.SendAcceptedFrontier(context.Background(), nodeID, requestID, summaryIDs) + }, + }, + { + name: "Accepted", + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().Accepted( + chainID, + requestID, + summaryIDs, + ).Return(nil, nil) // Don't care about the message + }, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := msg.Message().(*p2p.Accepted) + require.True(ok) + require.Equal(chainID[:], innerMsg.ChainId) + require.Equal(requestID, innerMsg.RequestId) + for i, summaryID := range summaryIDs { + require.Equal(summaryID[:], innerMsg.ContainerIds[i]) + } + }, + setExternalSenderExpect: func(externalSender *MockExternalSender) { + externalSender.EXPECT().Send( + gomock.Any(), // Outbound message + set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs + subnetID, // Subnet ID + gomock.Any(), + ).Return(nil) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { + sender.SendAccepted(context.Background(), nodeID, requestID, summaryIDs) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var ( + msgCreator = message.NewMockOutboundMsgBuilder(ctrl) + externalSender = NewMockExternalSender(ctrl) + timeoutManager = timeout.NewMockManager(ctrl) + router = router.NewMockRouter(ctrl) + ) + + // Instantiate new registerers to avoid duplicate metrics + // registration + snowCtx.Registerer = prometheus.NewRegistry() + snowCtx.AvalancheRegisterer = prometheus.NewRegistry() + + sender, err := New( + snowCtx, + msgCreator, + externalSender, + router, + timeoutManager, + engineType, + 
subnets.New(ctx.NodeID, defaultSubnetConfig), + ) + require.NoError(err) + + // Set the timeout (deadline) + timeoutManager.EXPECT().TimeoutDuration().Return(deadline).AnyTimes() + + // Case: sending to ourselves + { + calledHandleInbound := make(chan struct{}) + router.EXPECT().HandleInbound(gomock.Any(), gomock.Any()).Do( + func(_ context.Context, msg message.InboundMessage) { + // Make sure we're sending ourselves + // the expected message. + tt.assertMsgToMyself(require, msg) + close(calledHandleInbound) + }, + ) + tt.sendF(require, sender, myNodeID) + <-calledHandleInbound + } + + // Case: not sending to ourselves + + // Make sure we're making the correct outbound message. + tt.setMsgCreatorExpect(msgCreator) + + // Make sure we're sending the message + tt.setExternalSenderExpect(externalSender) + + tt.sendF(require, sender, destinationNodeID) + }) + } +} + +func TestSender_Single_Request(t *testing.T) { + var ( + chainID = ids.GenerateTestID() + subnetID = ids.GenerateTestID() + myNodeID = ids.GenerateTestNodeID() + destinationNodeID = ids.GenerateTestNodeID() + deadline = time.Second + requestID = uint32(1337) + ctx = snow.DefaultContextTest() + containerID = ids.GenerateTestID() + engineType = p2p.EngineType_ENGINE_TYPE_SNOWMAN + ) + ctx.ChainID = chainID + ctx.SubnetID = subnetID + ctx.NodeID = myNodeID + snowCtx := &snow.ConsensusContext{ + Context: ctx, + Registerer: prometheus.NewRegistry(), + AvalancheRegisterer: prometheus.NewRegistry(), + } + + type test struct { + name string + failedMsgF func(nodeID ids.NodeID) message.InboundMessage + assertMsgToMyself func(require *require.Assertions, msg message.InboundMessage) + expectedResponseOp message.Op + setMsgCreatorExpect func(msgCreator *message.MockOutboundMsgBuilder) + setExternalSenderExpect func(externalSender *MockExternalSender, sentTo set.Set[ids.NodeID]) + sendF func(require *require.Assertions, sender common.Sender, nodeID ids.NodeID) + } + + tests := []test{ + { + name: "GetAncestors", + 
failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { + return message.InternalGetAncestorsFailed( + nodeID, + chainID, + requestID, + engineType, + ) + }, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := msg.Message().(*message.GetAncestorsFailed) + require.True(ok) + require.Equal(chainID, innerMsg.ChainID) + require.Equal(requestID, innerMsg.RequestID) + require.Equal(engineType, innerMsg.EngineType) + }, + expectedResponseOp: message.AncestorsOp, + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().GetAncestors( + chainID, + requestID, + deadline, + containerID, + engineType, + ).Return(nil, nil) + }, + setExternalSenderExpect: func(externalSender *MockExternalSender, sentTo set.Set[ids.NodeID]) { + externalSender.EXPECT().Send( + gomock.Any(), // Outbound message + set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs + subnetID, + gomock.Any(), + ).Return(sentTo) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { + sender.SendGetAncestors(context.Background(), nodeID, requestID, containerID) + }, + }, + { + name: "Get", + failedMsgF: func(nodeID ids.NodeID) message.InboundMessage { + return message.InternalGetFailed( + nodeID, + chainID, + requestID, + engineType, + ) + }, + assertMsgToMyself: func(require *require.Assertions, msg message.InboundMessage) { + innerMsg, ok := msg.Message().(*message.GetFailed) + require.True(ok) + require.Equal(chainID, innerMsg.ChainID) + require.Equal(requestID, innerMsg.RequestID) + require.Equal(engineType, innerMsg.EngineType) + }, + expectedResponseOp: message.PutOp, + setMsgCreatorExpect: func(msgCreator *message.MockOutboundMsgBuilder) { + msgCreator.EXPECT().Get( + chainID, + requestID, + deadline, + containerID, + engineType, + ).Return(nil, nil) + }, + setExternalSenderExpect: func(externalSender *MockExternalSender, sentTo set.Set[ids.NodeID]) { + 
externalSender.EXPECT().Send( + gomock.Any(), // Outbound message + set.Set[ids.NodeID]{destinationNodeID: struct{}{}}, // Node IDs + subnetID, + gomock.Any(), + ).Return(sentTo) + }, + sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { + sender.SendGet(context.Background(), nodeID, requestID, containerID) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + var ( + msgCreator = message.NewMockOutboundMsgBuilder(ctrl) + externalSender = NewMockExternalSender(ctrl) + timeoutManager = timeout.NewMockManager(ctrl) + router = router.NewMockRouter(ctrl) + ) + snowCtx.Registerer = prometheus.NewRegistry() + + sender, err := New( + snowCtx, + msgCreator, + externalSender, + router, + timeoutManager, + engineType, + subnets.New(ctx.NodeID, defaultSubnetConfig), + ) + require.NoError(err) + + // Set the timeout (deadline) + timeoutManager.EXPECT().TimeoutDuration().Return(deadline).AnyTimes() + + // Case: sending to myself + { + // Make sure we register requests with the router + expectedFailedMsg := tt.failedMsgF(myNodeID) + router.EXPECT().RegisterRequest( + gomock.Any(), // Context + myNodeID, // Node ID + chainID, // Source Chain + chainID, // Destination Chain + requestID, // Request ID + tt.expectedResponseOp, // Operation + expectedFailedMsg, // Failure Message + engineType, // Engine Type + ) + + // Note that HandleInbound is called in a separate goroutine + // so we need to use a channel to synchronize the test. + calledHandleInbound := make(chan struct{}) + router.EXPECT().HandleInbound(gomock.Any(), gomock.Any()).Do( + func(_ context.Context, msg message.InboundMessage) { + // Make sure we're sending ourselves + // the expected message. 
+ tt.assertMsgToMyself(require, msg) + close(calledHandleInbound) + }, + ) + + tt.sendF(require, sender, myNodeID) + + <-calledHandleInbound + } + + // Case: Node is benched + { + timeoutManager.EXPECT().IsBenched(destinationNodeID, chainID).Return(true) + + timeoutManager.EXPECT().RegisterRequestToUnreachableValidator() + + // Make sure we register requests with the router + expectedFailedMsg := tt.failedMsgF(destinationNodeID) + router.EXPECT().RegisterRequest( + gomock.Any(), // Context + destinationNodeID, // Node ID + chainID, // Source Chain + chainID, // Destination Chain + requestID, // Request ID + tt.expectedResponseOp, // Operation + expectedFailedMsg, // Failure Message + engineType, // Engine Type + ) + + // Note that HandleInbound is called in a separate goroutine + // so we need to use a channel to synchronize the test. + calledHandleInbound := make(chan struct{}) + router.EXPECT().HandleInbound(gomock.Any(), gomock.Any()).Do( + func(_ context.Context, msg message.InboundMessage) { + // Make sure we're sending ourselves + // the expected message. + tt.assertMsgToMyself(require, msg) + close(calledHandleInbound) + }, + ) + + tt.sendF(require, sender, destinationNodeID) + + <-calledHandleInbound + } + + // Case: Node is not myself, not benched and send fails + { + timeoutManager.EXPECT().IsBenched(destinationNodeID, chainID).Return(false) + + timeoutManager.EXPECT().RegisterRequestToUnreachableValidator() + + // Make sure we register requests with the router + expectedFailedMsg := tt.failedMsgF(destinationNodeID) + router.EXPECT().RegisterRequest( + gomock.Any(), // Context + destinationNodeID, // Node ID + chainID, // Source Chain + chainID, // Destination Chain + requestID, // Request ID + tt.expectedResponseOp, // Operation + expectedFailedMsg, // Failure Message + engineType, // Engine Type + ) + + // Note that HandleInbound is called in a separate goroutine + // so we need to use a channel to synchronize the test. 
+ calledHandleInbound := make(chan struct{}) + router.EXPECT().HandleInbound(gomock.Any(), gomock.Any()).Do( + func(_ context.Context, msg message.InboundMessage) { + // Make sure we're sending ourselves + // the expected message. + tt.assertMsgToMyself(require, msg) + close(calledHandleInbound) + }, + ) + + // Make sure we're making the correct outbound message. + tt.setMsgCreatorExpect(msgCreator) + + // Make sure we're sending the message + tt.setExternalSenderExpect(externalSender, set.Set[ids.NodeID]{}) + + tt.sendF(require, sender, destinationNodeID) + + <-calledHandleInbound + } + }) + } +} diff --git a/avalanchego/snow/networking/sender/test_external_sender.go b/avalanchego/snow/networking/sender/test_external_sender.go index 03b497a1..7b8bef90 100644 --- a/avalanchego/snow/networking/sender/test_external_sender.go +++ b/avalanchego/snow/networking/sender/test_external_sender.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sender @@ -9,6 +9,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/subnets" + "github.com/ava-labs/avalanchego/utils/set" ) var ( @@ -22,8 +24,8 @@ type ExternalSenderTest struct { CantSend, CantGossip bool - SendF func(msg message.OutboundMessage, nodeIDs ids.NodeIDSet, subnetID ids.ID, validatorOnly bool) ids.NodeIDSet - GossipF func(msg message.OutboundMessage, subnetID ids.ID, validatorOnly bool, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend int) ids.NodeIDSet + SendF func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], subnetID ids.ID, allower subnets.Allower) set.Set[ids.NodeID] + GossipF func(msg message.OutboundMessage, subnetID ids.ID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend int, allower subnets.Allower) set.Set[ids.NodeID] } // Default set the default callable value to [cant] @@ -34,12 +36,12 @@ func (s *ExternalSenderTest) Default(cant bool) { func (s *ExternalSenderTest) Send( msg message.OutboundMessage, - nodeIDs ids.NodeIDSet, + nodeIDs set.Set[ids.NodeID], subnetID ids.ID, - validatorOnly bool, -) ids.NodeIDSet { + allower subnets.Allower, +) set.Set[ids.NodeID] { if s.SendF != nil { - return s.SendF(msg, nodeIDs, subnetID, validatorOnly) + return s.SendF(msg, nodeIDs, subnetID, allower) } if s.CantSend { if s.TB != nil { @@ -56,13 +58,13 @@ func (s *ExternalSenderTest) Send( func (s *ExternalSenderTest) Gossip( msg message.OutboundMessage, subnetID ids.ID, - validatorOnly bool, numValidatorsToSend int, numNonValidatorsToSend int, numPeersToSend int, -) ids.NodeIDSet { + allower subnets.Allower, +) set.Set[ids.NodeID] { if s.GossipF != nil { - return s.GossipF(msg, subnetID, validatorOnly, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend) + return s.GossipF(msg, subnetID, numValidatorsToSend, numNonValidatorsToSend, numPeersToSend, allower) } if s.CantGossip { if s.TB != nil { diff --git 
a/avalanchego/snow/networking/sender/traced_sender.go b/avalanchego/snow/networking/sender/traced_sender.go new file mode 100644 index 00000000..a6ffe7f4 --- /dev/null +++ b/avalanchego/snow/networking/sender/traced_sender.go @@ -0,0 +1,264 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sender + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ common.Sender = (*tracedSender)(nil) + +type tracedSender struct { + sender common.Sender + tracer trace.Tracer +} + +func Trace(sender common.Sender, tracer trace.Tracer) common.Sender { + return &tracedSender{ + sender: sender, + tracer: tracer, + } +} + +func (s *tracedSender) SendGetStateSummaryFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendGetStateSummaryFrontier", oteltrace.WithAttributes( + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + s.sender.SendGetStateSummaryFrontier(ctx, nodeIDs, requestID) +} + +func (s *tracedSender) SendStateSummaryFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, summary []byte) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendStateSummaryFrontier", oteltrace.WithAttributes( + attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("summaryLen", len(summary)), + )) + defer span.End() + + s.sender.SendStateSummaryFrontier(ctx, nodeID, requestID, summary) +} + +func (s *tracedSender) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, heights []uint64) { + ctx, span := s.tracer.Start(ctx, 
"tracedSender.SendGetAcceptedStateSummary", oteltrace.WithAttributes( + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numHeights", len(heights)), + )) + defer span.End() + + s.sender.SendGetAcceptedStateSummary(ctx, nodeIDs, requestID, heights) +} + +func (s *tracedSender) SendAcceptedStateSummary(ctx context.Context, nodeID ids.NodeID, requestID uint32, summaryIDs []ids.ID) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendAcceptedStateSummary", oteltrace.WithAttributes( + attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numSummaryIDs", len(summaryIDs)), + )) + defer span.End() + + s.sender.SendAcceptedStateSummary(ctx, nodeID, requestID, summaryIDs) +} + +func (s *tracedSender) SendGetAcceptedFrontier(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendGetAcceptedFrontier", oteltrace.WithAttributes( + attribute.Int64("requestID", int64(requestID)), + )) + defer span.End() + + s.sender.SendGetAcceptedFrontier(ctx, nodeIDs, requestID) +} + +func (s *tracedSender) SendAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendAcceptedFrontier", oteltrace.WithAttributes( + attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numContainerIDs", len(containerIDs)), + )) + defer span.End() + + s.sender.SendAcceptedFrontier(ctx, nodeID, requestID, containerIDs) +} + +func (s *tracedSender) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerIDs []ids.ID) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendGetAccepted", oteltrace.WithAttributes( + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numContainerIDs", len(containerIDs)), + )) + defer span.End() + + s.sender.SendGetAccepted(ctx, nodeIDs, requestID, 
containerIDs) +} + +func (s *tracedSender) SendAccepted(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendAccepted", oteltrace.WithAttributes( + attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numContainerIDs", len(containerIDs)), + )) + defer span.End() + + s.sender.SendAccepted(ctx, nodeID, requestID, containerIDs) +} + +func (s *tracedSender) SendGetAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendGetAncestors", oteltrace.WithAttributes( + attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Stringer("containerID", containerID), + )) + defer span.End() + + s.sender.SendGetAncestors(ctx, nodeID, requestID, containerID) +} + +func (s *tracedSender) SendAncestors(ctx context.Context, nodeID ids.NodeID, requestID uint32, containers [][]byte) { + _, span := s.tracer.Start(ctx, "tracedSender.SendAncestors", oteltrace.WithAttributes( + attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numContainers", len(containers)), + )) + defer span.End() + + s.sender.SendAncestors(ctx, nodeID, requestID, containers) +} + +func (s *tracedSender) SendGet(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerID ids.ID) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendGet", oteltrace.WithAttributes( + attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Stringer("containerID", containerID), + )) + defer span.End() + + s.sender.SendGet(ctx, nodeID, requestID, containerID) +} + +func (s *tracedSender) SendPut(ctx context.Context, nodeID ids.NodeID, requestID uint32, container []byte) { + _, span := s.tracer.Start(ctx, "tracedSender.SendPut", oteltrace.WithAttributes( + 
attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("containerLen", len(container)), + )) + defer span.End() + + s.sender.SendPut(ctx, nodeID, requestID, container) +} + +func (s *tracedSender) SendPushQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, container []byte) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendPushQuery", oteltrace.WithAttributes( + attribute.Int64("requestID", int64(requestID)), + attribute.Int("containerLen", len(container)), + )) + defer span.End() + + s.sender.SendPushQuery(ctx, nodeIDs, requestID, container) +} + +func (s *tracedSender) SendPullQuery(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, containerID ids.ID) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendPullQuery", oteltrace.WithAttributes( + attribute.Int64("requestID", int64(requestID)), + attribute.Stringer("containerID", containerID), + )) + defer span.End() + + s.sender.SendPullQuery(ctx, nodeIDs, requestID, containerID) +} + +func (s *tracedSender) SendChits(ctx context.Context, nodeID ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendChits", oteltrace.WithAttributes( + attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("numVotes", len(votes)), + attribute.Int("numAccepted", len(accepted)), + )) + defer span.End() + + s.sender.SendChits(ctx, nodeID, requestID, votes, accepted) +} + +func (s *tracedSender) SendCrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendCrossChainAppRequest", oteltrace.WithAttributes( + attribute.Stringer("chainID", chainID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("requestLen", len(appRequestBytes)), + )) + defer span.End() + + return s.sender.SendCrossChainAppRequest(ctx, 
chainID, requestID, appRequestBytes) +} + +func (s *tracedSender) SendCrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendCrossChainAppResponse", oteltrace.WithAttributes( + attribute.Stringer("chainID", chainID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("responseLen", len(appResponseBytes)), + )) + defer span.End() + + return s.sender.SendCrossChainAppResponse(ctx, chainID, requestID, appResponseBytes) +} + +func (s *tracedSender) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, appRequestBytes []byte) error { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendAppRequest", oteltrace.WithAttributes( + attribute.Int64("requestID", int64(requestID)), + attribute.Int("requestLen", len(appRequestBytes)), + )) + defer span.End() + + return s.sender.SendAppRequest(ctx, nodeIDs, requestID, appRequestBytes) +} + +func (s *tracedSender) SendAppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { + ctx, span := s.tracer.Start(ctx, "tracedSender.SendAppResponse", oteltrace.WithAttributes( + attribute.Stringer("recipients", nodeID), + attribute.Int64("requestID", int64(requestID)), + attribute.Int("responseLen", len(appResponseBytes)), + )) + defer span.End() + + return s.sender.SendAppResponse(ctx, nodeID, requestID, appResponseBytes) +} + +func (s *tracedSender) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ids.NodeID], appGossipBytes []byte) error { + _, span := s.tracer.Start(ctx, "tracedSender.SendAppGossipSpecific", oteltrace.WithAttributes( + attribute.Int("gossipLen", len(appGossipBytes)), + )) + defer span.End() + + return s.sender.SendAppGossipSpecific(ctx, nodeIDs, appGossipBytes) +} + +func (s *tracedSender) SendAppGossip(ctx context.Context, appGossipBytes []byte) error { + _, span := s.tracer.Start(ctx, "tracedSender.SendAppGossip", 
oteltrace.WithAttributes( + attribute.Int("gossipLen", len(appGossipBytes)), + )) + defer span.End() + + return s.sender.SendAppGossip(ctx, appGossipBytes) +} + +func (s *tracedSender) SendGossip(ctx context.Context, container []byte) { + _, span := s.tracer.Start(ctx, "tracedSender.SendGossip", oteltrace.WithAttributes( + attribute.Int("containerLen", len(container)), + )) + defer span.End() + + s.sender.SendGossip(ctx, container) +} + +func (s *tracedSender) Accept(ctx *snow.ConsensusContext, containerID ids.ID, container []byte) error { + return s.sender.Accept(ctx, containerID, container) +} diff --git a/avalanchego/snow/networking/timeout/manager.go b/avalanchego/snow/networking/timeout/manager.go index bf78e4bf..6846151b 100644 --- a/avalanchego/snow/networking/timeout/manager.go +++ b/avalanchego/snow/networking/timeout/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timeout @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/utils/timer" ) -var _ Manager = &manager{} +var _ Manager = (*manager)(nil) // Manages timeouts for requests sent to peers. type Manager interface { @@ -38,8 +38,8 @@ type Manager interface { RegisterRequest( nodeID ids.NodeID, chainID ids.ID, - op message.Op, - requestID ids.ID, + measureLatency bool, + requestID ids.RequestID, timeoutHandler func(), ) // Registers that we would have sent a request to a validator but they @@ -55,13 +55,13 @@ type Manager interface { RegisterResponse( nodeID ids.NodeID, chainID ids.ID, - requestID ids.ID, + requestID ids.RequestID, op message.Op, latency time.Duration, ) // Mark that we no longer expect a response to this request we sent. // Does not modify the timeout. 
- RemoveRequest(requestID ids.ID) + RemoveRequest(requestID ids.RequestID) } func NewManager( @@ -120,8 +120,8 @@ func (m *manager) RegisterChain(ctx *snow.ConsensusContext) error { func (m *manager) RegisterRequest( nodeID ids.NodeID, chainID ids.ID, - op message.Op, - requestID ids.ID, + measureLatency bool, + requestID ids.RequestID, timeoutHandler func(), ) { newTimeoutHandler := func() { @@ -129,7 +129,7 @@ func (m *manager) RegisterRequest( m.benchlistMgr.RegisterFailure(chainID, nodeID) timeoutHandler() } - m.tm.Put(requestID, op, newTimeoutHandler) + m.tm.Put(requestID, measureLatency, newTimeoutHandler) } // RegisterResponse registers that we received a response from [nodeID] @@ -137,7 +137,7 @@ func (m *manager) RegisterRequest( func (m *manager) RegisterResponse( nodeID ids.NodeID, chainID ids.ID, - requestID ids.ID, + requestID ids.RequestID, op message.Op, latency time.Duration, ) { @@ -146,7 +146,7 @@ func (m *manager) RegisterResponse( m.tm.Remove(requestID) } -func (m *manager) RemoveRequest(requestID ids.ID) { +func (m *manager) RemoveRequest(requestID ids.RequestID) { m.tm.Remove(requestID) } diff --git a/avalanchego/snow/networking/timeout/manager_test.go b/avalanchego/snow/networking/timeout/manager_test.go index 1bc52273..19866817 100644 --- a/avalanchego/snow/networking/timeout/manager_test.go +++ b/avalanchego/snow/networking/timeout/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package timeout @@ -11,7 +11,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/utils/timer" ) @@ -38,45 +37,13 @@ func TestManagerFire(t *testing.T) { wg := sync.WaitGroup{} wg.Add(1) - manager.RegisterRequest(ids.NodeID{}, ids.ID{}, message.PullQuery, ids.GenerateTestID(), wg.Done) - - wg.Wait() -} - -func TestManagerCancel(t *testing.T) { - benchlist := benchlist.NewNoBenchlist() - manager, err := NewManager( - &timer.AdaptiveTimeoutConfig{ - InitialTimeout: time.Millisecond, - MinimumTimeout: time.Millisecond, - MaximumTimeout: 10 * time.Second, - TimeoutCoefficient: 1.25, - TimeoutHalflife: 5 * time.Minute, - }, - benchlist, - "", - prometheus.NewRegistry(), + manager.RegisterRequest( + ids.NodeID{}, + ids.ID{}, + true, + ids.RequestID{}, + wg.Done, ) - if err != nil { - t.Fatal(err) - } - go manager.Dispatch() - - wg := sync.WaitGroup{} - wg.Add(1) - - fired := new(bool) - - id := ids.GenerateTestID() - manager.RegisterRequest(ids.NodeID{}, ids.ID{}, message.PullQuery, id, func() { *fired = true }) - - manager.RegisterResponse(ids.NodeID{}, ids.ID{}, id, message.Get, 1*time.Second) - - manager.RegisterRequest(ids.NodeID{}, ids.ID{}, message.PullQuery, ids.GenerateTestID(), wg.Done) wg.Wait() - - if *fired { - t.Fatalf("Should have cancelled the function") - } } diff --git a/avalanchego/snow/networking/timeout/metrics.go b/avalanchego/snow/networking/timeout/metrics.go index e650b8fd..6be45fd2 100644 --- a/avalanchego/snow/networking/timeout/metrics.go +++ b/avalanchego/snow/networking/timeout/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package timeout diff --git a/avalanchego/snow/networking/timeout/mock_manager.go b/avalanchego/snow/networking/timeout/mock_manager.go new file mode 100644 index 00000000..f53a5f85 --- /dev/null +++ b/avalanchego/snow/networking/timeout/mock_manager.go @@ -0,0 +1,143 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/networking/timeout (interfaces: Manager) + +// Package timeout is a generated GoMock package. +package timeout + +import ( + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + message "github.com/ava-labs/avalanchego/message" + snow "github.com/ava-labs/avalanchego/snow" + gomock "github.com/golang/mock/gomock" +) + +// MockManager is a mock of Manager interface. +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager. +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance. +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// Dispatch mocks base method. +func (m *MockManager) Dispatch() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Dispatch") +} + +// Dispatch indicates an expected call of Dispatch. +func (mr *MockManagerMockRecorder) Dispatch() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Dispatch", reflect.TypeOf((*MockManager)(nil).Dispatch)) +} + +// IsBenched mocks base method. 
+func (m *MockManager) IsBenched(arg0 ids.NodeID, arg1 ids.ID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsBenched", arg0, arg1) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsBenched indicates an expected call of IsBenched. +func (mr *MockManagerMockRecorder) IsBenched(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsBenched", reflect.TypeOf((*MockManager)(nil).IsBenched), arg0, arg1) +} + +// RegisterChain mocks base method. +func (m *MockManager) RegisterChain(arg0 *snow.ConsensusContext) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterChain", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// RegisterChain indicates an expected call of RegisterChain. +func (mr *MockManagerMockRecorder) RegisterChain(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterChain", reflect.TypeOf((*MockManager)(nil).RegisterChain), arg0) +} + +// RegisterRequest mocks base method. +func (m *MockManager) RegisterRequest(arg0 ids.NodeID, arg1 ids.ID, arg2 bool, arg3 ids.RequestID, arg4 func()) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RegisterRequest", arg0, arg1, arg2, arg3, arg4) +} + +// RegisterRequest indicates an expected call of RegisterRequest. +func (mr *MockManagerMockRecorder) RegisterRequest(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRequest", reflect.TypeOf((*MockManager)(nil).RegisterRequest), arg0, arg1, arg2, arg3, arg4) +} + +// RegisterRequestToUnreachableValidator mocks base method. +func (m *MockManager) RegisterRequestToUnreachableValidator() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RegisterRequestToUnreachableValidator") +} + +// RegisterRequestToUnreachableValidator indicates an expected call of RegisterRequestToUnreachableValidator. 
+func (mr *MockManagerMockRecorder) RegisterRequestToUnreachableValidator() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterRequestToUnreachableValidator", reflect.TypeOf((*MockManager)(nil).RegisterRequestToUnreachableValidator)) +} + +// RegisterResponse mocks base method. +func (m *MockManager) RegisterResponse(arg0 ids.NodeID, arg1 ids.ID, arg2 ids.RequestID, arg3 message.Op, arg4 time.Duration) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RegisterResponse", arg0, arg1, arg2, arg3, arg4) +} + +// RegisterResponse indicates an expected call of RegisterResponse. +func (mr *MockManagerMockRecorder) RegisterResponse(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterResponse", reflect.TypeOf((*MockManager)(nil).RegisterResponse), arg0, arg1, arg2, arg3, arg4) +} + +// RemoveRequest mocks base method. +func (m *MockManager) RemoveRequest(arg0 ids.RequestID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RemoveRequest", arg0) +} + +// RemoveRequest indicates an expected call of RemoveRequest. +func (mr *MockManagerMockRecorder) RemoveRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveRequest", reflect.TypeOf((*MockManager)(nil).RemoveRequest), arg0) +} + +// TimeoutDuration mocks base method. +func (m *MockManager) TimeoutDuration() time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "TimeoutDuration") + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// TimeoutDuration indicates an expected call of TimeoutDuration. 
+func (mr *MockManagerMockRecorder) TimeoutDuration() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeoutDuration", reflect.TypeOf((*MockManager)(nil).TimeoutDuration)) +} diff --git a/avalanchego/snow/networking/tracker/mock_resource_tracker.go b/avalanchego/snow/networking/tracker/mock_resource_tracker.go index 57db287d..924b4f84 100644 --- a/avalanchego/snow/networking/tracker/mock_resource_tracker.go +++ b/avalanchego/snow/networking/tracker/mock_resource_tracker.go @@ -1,5 +1,8 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: snow/networking/tracker/resource_tracker.go +// Source: github.com/ava-labs/avalanchego/snow/networking/tracker (interfaces: Tracker) // Package tracker is a generated GoMock package. package tracker @@ -36,17 +39,17 @@ func (m *MockTracker) EXPECT() *MockTrackerMockRecorder { } // TimeUntilUsage mocks base method. -func (m *MockTracker) TimeUntilUsage(nodeID ids.NodeID, now time.Time, value float64) time.Duration { +func (m *MockTracker) TimeUntilUsage(arg0 ids.NodeID, arg1 time.Time, arg2 float64) time.Duration { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TimeUntilUsage", nodeID, now, value) + ret := m.ctrl.Call(m, "TimeUntilUsage", arg0, arg1, arg2) ret0, _ := ret[0].(time.Duration) return ret0 } // TimeUntilUsage indicates an expected call of TimeUntilUsage. 
-func (mr *MockTrackerMockRecorder) TimeUntilUsage(nodeID, now, value interface{}) *gomock.Call { +func (mr *MockTrackerMockRecorder) TimeUntilUsage(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeUntilUsage", reflect.TypeOf((*MockTracker)(nil).TimeUntilUsage), nodeID, now, value) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TimeUntilUsage", reflect.TypeOf((*MockTracker)(nil).TimeUntilUsage), arg0, arg1, arg2) } // TotalUsage mocks base method. @@ -64,90 +67,15 @@ func (mr *MockTrackerMockRecorder) TotalUsage() *gomock.Call { } // Usage mocks base method. -func (m *MockTracker) Usage(nodeID ids.NodeID, now time.Time) float64 { +func (m *MockTracker) Usage(arg0 ids.NodeID, arg1 time.Time) float64 { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Usage", nodeID, now) + ret := m.ctrl.Call(m, "Usage", arg0, arg1) ret0, _ := ret[0].(float64) return ret0 } // Usage indicates an expected call of Usage. -func (mr *MockTrackerMockRecorder) Usage(nodeID, now interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Usage", reflect.TypeOf((*MockTracker)(nil).Usage), nodeID, now) -} - -// MockResourceTracker is a mock of ResourceTracker interface. -type MockResourceTracker struct { - ctrl *gomock.Controller - recorder *MockResourceTrackerMockRecorder -} - -// MockResourceTrackerMockRecorder is the mock recorder for MockResourceTracker. -type MockResourceTrackerMockRecorder struct { - mock *MockResourceTracker -} - -// NewMockResourceTracker creates a new mock instance. -func NewMockResourceTracker(ctrl *gomock.Controller) *MockResourceTracker { - mock := &MockResourceTracker{ctrl: ctrl} - mock.recorder = &MockResourceTrackerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *MockResourceTracker) EXPECT() *MockResourceTrackerMockRecorder { - return m.recorder -} - -// CPUTracker mocks base method. -func (m *MockResourceTracker) CPUTracker() Tracker { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CPUTracker") - ret0, _ := ret[0].(Tracker) - return ret0 -} - -// CPUTracker indicates an expected call of CPUTracker. -func (mr *MockResourceTrackerMockRecorder) CPUTracker() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CPUTracker", reflect.TypeOf((*MockResourceTracker)(nil).CPUTracker)) -} - -// DiskTracker mocks base method. -func (m *MockResourceTracker) DiskTracker() Tracker { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "DiskTracker") - ret0, _ := ret[0].(Tracker) - return ret0 -} - -// DiskTracker indicates an expected call of DiskTracker. -func (mr *MockResourceTrackerMockRecorder) DiskTracker() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiskTracker", reflect.TypeOf((*MockResourceTracker)(nil).DiskTracker)) -} - -// StartProcessing mocks base method. -func (m *MockResourceTracker) StartProcessing(arg0 ids.NodeID, arg1 time.Time) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "StartProcessing", arg0, arg1) -} - -// StartProcessing indicates an expected call of StartProcessing. -func (mr *MockResourceTrackerMockRecorder) StartProcessing(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartProcessing", reflect.TypeOf((*MockResourceTracker)(nil).StartProcessing), arg0, arg1) -} - -// StopProcessing mocks base method. -func (m *MockResourceTracker) StopProcessing(arg0 ids.NodeID, arg1 time.Time) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "StopProcessing", arg0, arg1) -} - -// StopProcessing indicates an expected call of StopProcessing. 
-func (mr *MockResourceTrackerMockRecorder) StopProcessing(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockTrackerMockRecorder) Usage(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StopProcessing", reflect.TypeOf((*MockResourceTracker)(nil).StopProcessing), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Usage", reflect.TypeOf((*MockTracker)(nil).Usage), arg0, arg1) } diff --git a/avalanchego/snow/networking/tracker/mock_targeter.go b/avalanchego/snow/networking/tracker/mock_targeter.go index cfb2f27d..f8b545fd 100644 --- a/avalanchego/snow/networking/tracker/mock_targeter.go +++ b/avalanchego/snow/networking/tracker/mock_targeter.go @@ -1,5 +1,8 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: snow/networking/tracker/targeter.go +// Source: github.com/ava-labs/avalanchego/snow/networking/tracker (interfaces: Targeter) // Package tracker is a generated GoMock package. package tracker @@ -35,15 +38,15 @@ func (m *MockTargeter) EXPECT() *MockTargeterMockRecorder { } // TargetUsage mocks base method. -func (m *MockTargeter) TargetUsage(nodeID ids.NodeID) float64 { +func (m *MockTargeter) TargetUsage(arg0 ids.NodeID) float64 { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "TargetUsage", nodeID) + ret := m.ctrl.Call(m, "TargetUsage", arg0) ret0, _ := ret[0].(float64) return ret0 } // TargetUsage indicates an expected call of TargetUsage. 
-func (mr *MockTargeterMockRecorder) TargetUsage(nodeID interface{}) *gomock.Call { +func (mr *MockTargeterMockRecorder) TargetUsage(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TargetUsage", reflect.TypeOf((*MockTargeter)(nil).TargetUsage), nodeID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "TargetUsage", reflect.TypeOf((*MockTargeter)(nil).TargetUsage), arg0) } diff --git a/avalanchego/snow/networking/tracker/resource_tracker.go b/avalanchego/snow/networking/tracker/resource_tracker.go index b6758c4d..721d531e 100644 --- a/avalanchego/snow/networking/tracker/resource_tracker.go +++ b/avalanchego/snow/networking/tracker/resource_tracker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -19,7 +19,7 @@ import ( const epsilon = 1e-9 -var _ ResourceTracker = &resourceTracker{} +var _ ResourceTracker = (*resourceTracker)(nil) type Tracker interface { // Returns the current usage for the given node. diff --git a/avalanchego/snow/networking/tracker/resource_tracker_test.go b/avalanchego/snow/networking/tracker/resource_tracker_test.go index c7a6bc68..11904e48 100644 --- a/avalanchego/snow/networking/tracker/resource_tracker_test.go +++ b/avalanchego/snow/networking/tracker/resource_tracker_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker diff --git a/avalanchego/snow/networking/tracker/targeter.go b/avalanchego/snow/networking/tracker/targeter.go index e9c863ef..216bb9ec 100644 --- a/avalanchego/snow/networking/tracker/targeter.go +++ b/avalanchego/snow/networking/tracker/targeter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" ) -var _ Targeter = &targeter{} +var _ Targeter = (*targeter)(nil) type Targeter interface { // Returns the target usage of the given node. @@ -60,7 +60,7 @@ func (t *targeter) TargetUsage(nodeID ids.NodeID) float64 { baseAlloc = math.Min(baseAlloc, t.maxNonVdrNodeUsage) // This node gets a stake-weighted portion of the validator allocation. - weight, _ := t.vdrs.GetWeight(nodeID) + weight := t.vdrs.GetWeight(nodeID) vdrAlloc := t.vdrAlloc * float64(weight) / float64(t.vdrs.Weight()) return vdrAlloc + baseAlloc } diff --git a/avalanchego/snow/networking/tracker/targeter_test.go b/avalanchego/snow/networking/tracker/targeter_test.go index 1f363c6b..11d2cca4 100644 --- a/avalanchego/snow/networking/tracker/targeter_test.go +++ b/avalanchego/snow/networking/tracker/targeter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tracker @@ -50,10 +50,10 @@ func TestTarget(t *testing.T) { totalVdrWeight := uint64(10) nonVdr := ids.NodeID{2} vdrs := validators.NewSet() - if err := vdrs.AddWeight(vdr, 1); err != nil { + if err := vdrs.Add(vdr, nil, ids.Empty, 1); err != nil { t.Fatal(err) } - if err := vdrs.AddWeight(ids.GenerateTestNodeID(), totalVdrWeight-vdrWeight); err != nil { + if err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, totalVdrWeight-vdrWeight); err != nil { t.Fatal(err) } diff --git a/avalanchego/snow/networking/worker/mock_pool.go b/avalanchego/snow/networking/worker/mock_pool.go deleted file mode 100644 index 3d3dac00..00000000 --- a/avalanchego/snow/networking/worker/mock_pool.go +++ /dev/null @@ -1,22 +0,0 @@ -// Code generated by mockery v2.9.4. DO NOT EDIT. 
- -package worker - -import ( - mock "github.com/stretchr/testify/mock" -) - -// Pool is an autogenerated mock type for the Pool type -type MockPool struct { - mock.Mock -} - -// Send provides a mock function with given fields: _a0 -func (_m *MockPool) Send(_a0 Request) { - _m.Called(_a0) -} - -// Shutdown provides a mock function with given fields: -func (_m *MockPool) Shutdown() { - _m.Called() -} diff --git a/avalanchego/snow/networking/worker/pool.go b/avalanchego/snow/networking/worker/pool.go index 8617fd4c..b574f7dd 100644 --- a/avalanchego/snow/networking/worker/pool.go +++ b/avalanchego/snow/networking/worker/pool.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package worker @@ -7,7 +7,7 @@ import ( "sync" ) -var _ Pool = &pool{} +var _ Pool = (*pool)(nil) type Request func() diff --git a/avalanchego/snow/state.go b/avalanchego/snow/state.go index 845819d6..97fd18df 100644 --- a/avalanchego/snow/state.go +++ b/avalanchego/snow/state.go @@ -1,9 +1,13 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package snow -import "errors" +import ( + "errors" + + "github.com/ava-labs/avalanchego/proto/pb/p2p" +) const ( Initializing = iota @@ -30,3 +34,8 @@ func (st State) String() string { return "Unknown state" } } + +type EngineState struct { + Type p2p.EngineType + State State +} diff --git a/avalanchego/snow/uptime/locked_calculator.go b/avalanchego/snow/uptime/locked_calculator.go index 0dd353fa..687b5f59 100644 --- a/avalanchego/snow/uptime/locked_calculator.go +++ b/avalanchego/snow/uptime/locked_calculator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package uptime @@ -13,20 +13,20 @@ import ( ) var ( - errNotReady = errors.New("should not be called") + errStillBootstrapping = errors.New("still bootstrapping") - _ LockedCalculator = &lockedCalculator{} + _ LockedCalculator = (*lockedCalculator)(nil) ) type LockedCalculator interface { Calculator - SetCalculator(isBootstrapped *utils.AtomicBool, lock sync.Locker, newC Calculator) + SetCalculator(isBootstrapped *utils.Atomic[bool], lock sync.Locker, newC Calculator) } type lockedCalculator struct { lock sync.RWMutex - isBootstrapped *utils.AtomicBool + isBootstrapped *utils.Atomic[bool] calculatorLock sync.Locker c Calculator } @@ -35,49 +35,49 @@ func NewLockedCalculator() LockedCalculator { return &lockedCalculator{} } -func (c *lockedCalculator) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { +func (c *lockedCalculator) CalculateUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Duration, time.Time, error) { c.lock.RLock() defer c.lock.RUnlock() - if c.isBootstrapped == nil || !c.isBootstrapped.GetValue() { - return 0, time.Time{}, errNotReady + if c.isBootstrapped == nil || !c.isBootstrapped.Get() { + return 0, time.Time{}, errStillBootstrapping } c.calculatorLock.Lock() defer c.calculatorLock.Unlock() - return c.c.CalculateUptime(nodeID) + return c.c.CalculateUptime(nodeID, subnetID) } -func (c *lockedCalculator) CalculateUptimePercent(nodeID ids.NodeID) (float64, error) { +func (c *lockedCalculator) CalculateUptimePercent(nodeID ids.NodeID, subnetID ids.ID) (float64, error) { c.lock.RLock() defer c.lock.RUnlock() - if c.isBootstrapped == nil || !c.isBootstrapped.GetValue() { - return 0, errNotReady + if c.isBootstrapped == nil || !c.isBootstrapped.Get() { + return 0, errStillBootstrapping } c.calculatorLock.Lock() defer c.calculatorLock.Unlock() - return c.c.CalculateUptimePercent(nodeID) + return c.c.CalculateUptimePercent(nodeID, subnetID) } -func (c *lockedCalculator) 
CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.Time) (float64, error) { +func (c *lockedCalculator) CalculateUptimePercentFrom(nodeID ids.NodeID, subnetID ids.ID, startTime time.Time) (float64, error) { c.lock.RLock() defer c.lock.RUnlock() - if c.isBootstrapped == nil || !c.isBootstrapped.GetValue() { - return 0, errNotReady + if c.isBootstrapped == nil || !c.isBootstrapped.Get() { + return 0, errStillBootstrapping } c.calculatorLock.Lock() defer c.calculatorLock.Unlock() - return c.c.CalculateUptimePercentFrom(nodeID, startTime) + return c.c.CalculateUptimePercentFrom(nodeID, subnetID, startTime) } -func (c *lockedCalculator) SetCalculator(isBootstrapped *utils.AtomicBool, lock sync.Locker, newC Calculator) { +func (c *lockedCalculator) SetCalculator(isBootstrapped *utils.Atomic[bool], lock sync.Locker, newC Calculator) { c.lock.Lock() defer c.lock.Unlock() diff --git a/avalanchego/snow/uptime/locked_calculator_test.go b/avalanchego/snow/uptime/locked_calculator_test.go index 30fd1f69..3123a5b7 100644 --- a/avalanchego/snow/uptime/locked_calculator_test.go +++ b/avalanchego/snow/uptime/locked_calculator_test.go @@ -1,59 +1,67 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package uptime import ( - "errors" "sync" "testing" "time" - "github.com/stretchr/testify/mock" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/uptime/mocks" "github.com/ava-labs/avalanchego/utils" ) func TestLockedCalculator(t *testing.T) { require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + lc := NewLockedCalculator() require.NotNil(t) // Should still error because ctx is nil nodeID := ids.GenerateTestNodeID() - _, _, err := lc.CalculateUptime(nodeID) - require.EqualValues(errNotReady, err) - _, err = lc.CalculateUptimePercent(nodeID) - require.EqualValues(errNotReady, err) - _, err = lc.CalculateUptimePercentFrom(nodeID, time.Now()) - require.EqualValues(errNotReady, err) + subnetID := ids.GenerateTestID() + _, _, err := lc.CalculateUptime(nodeID, subnetID) + require.ErrorIs(err, errStillBootstrapping) + + _, err = lc.CalculateUptimePercent(nodeID, subnetID) + require.ErrorIs(err, errStillBootstrapping) + + _, err = lc.CalculateUptimePercentFrom(nodeID, subnetID, time.Now()) + require.ErrorIs(err, errStillBootstrapping) - var isBootstrapped utils.AtomicBool - mockCalc := &mocks.Calculator{} + var isBootstrapped utils.Atomic[bool] + mockCalc := NewMockCalculator(ctrl) // Should still error because ctx is not bootstrapped lc.SetCalculator(&isBootstrapped, &sync.Mutex{}, mockCalc) - _, _, err = lc.CalculateUptime(nodeID) - require.EqualValues(errNotReady, err) - _, err = lc.CalculateUptimePercent(nodeID) - require.EqualValues(errNotReady, err) - _, err = lc.CalculateUptimePercentFrom(nodeID, time.Now()) - require.EqualValues(errNotReady, err) + _, _, err = lc.CalculateUptime(nodeID, subnetID) + require.ErrorIs(err, errStillBootstrapping) - isBootstrapped.SetValue(true) + _, err = lc.CalculateUptimePercent(nodeID, subnetID) + require.ErrorIs(err, errStillBootstrapping) + + _, err = lc.CalculateUptimePercentFrom(nodeID, subnetID, 
time.Now()) + require.EqualValues(errStillBootstrapping, err) + + isBootstrapped.Set(true) // Should return the value from the mocked inner calculator - mockErr := errors.New("mock error") - mockCalc.On("CalculateUptime", mock.Anything).Return(time.Duration(0), time.Time{}, mockErr) - _, _, err = lc.CalculateUptime(nodeID) - require.EqualValues(mockErr, err) - mockCalc.On("CalculateUptimePercent", mock.Anything).Return(float64(0), mockErr) - _, err = lc.CalculateUptimePercent(nodeID) - require.EqualValues(mockErr, err) - mockCalc.On("CalculateUptimePercentFrom", mock.Anything, mock.Anything).Return(float64(0), mockErr) - _, err = lc.CalculateUptimePercentFrom(nodeID, time.Now()) - require.EqualValues(mockErr, err) + mockCalc.EXPECT().CalculateUptime(gomock.Any(), gomock.Any()).AnyTimes().Return(time.Duration(0), time.Time{}, errTest) + _, _, err = lc.CalculateUptime(nodeID, subnetID) + require.ErrorIs(err, errTest) + + mockCalc.EXPECT().CalculateUptimePercent(gomock.Any(), gomock.Any()).AnyTimes().Return(float64(0), errTest) + _, err = lc.CalculateUptimePercent(nodeID, subnetID) + require.ErrorIs(err, errTest) + + mockCalc.EXPECT().CalculateUptimePercentFrom(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(float64(0), errTest) + _, err = lc.CalculateUptimePercentFrom(nodeID, subnetID, time.Now()) + require.ErrorIs(err, errTest) } diff --git a/avalanchego/snow/uptime/manager.go b/avalanchego/snow/uptime/manager.go index 2aa694f4..1cee24b6 100644 --- a/avalanchego/snow/uptime/manager.go +++ b/avalanchego/snow/uptime/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package uptime @@ -8,10 +8,11 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" ) -var _ TestManager = &manager{} +var _ TestManager = (*manager)(nil) type Manager interface { Tracker @@ -19,22 +20,19 @@ type Manager interface { } type Tracker interface { - // Should only be called once - StartTracking(nodeIDs []ids.NodeID) error + StartTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error + StopTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error - // Should only be called once - Shutdown(nodeIDs []ids.NodeID) error - - Connect(nodeID ids.NodeID) error - IsConnected(nodeID ids.NodeID) bool + Connect(nodeID ids.NodeID, subnetID ids.ID) error + IsConnected(nodeID ids.NodeID, subnetID ids.ID) bool Disconnect(nodeID ids.NodeID) error } type Calculator interface { - CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) - CalculateUptimePercent(nodeID ids.NodeID) (float64, error) + CalculateUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Duration, time.Time, error) + CalculateUptimePercent(nodeID ids.NodeID, subnetID ids.ID) (float64, error) // CalculateUptimePercentFrom expects [startTime] to be truncated (floored) to the nearest second - CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.Time) (float64, error) + CalculateUptimePercentFrom(nodeID ids.NodeID, subnetID ids.ID, startTime time.Time) (float64, error) } type TestManager interface { @@ -46,22 +44,22 @@ type manager struct { // Used to get time. Useful for faking time during tests. 
clock mockable.Clock - state State - connections map[ids.NodeID]time.Time - startedTracking bool + state State + connections map[ids.NodeID]map[ids.ID]time.Time // nodeID -> subnetID -> time + trackedSubnets set.Set[ids.ID] } func NewManager(state State) Manager { return &manager{ state: state, - connections: make(map[ids.NodeID]time.Time), + connections: make(map[ids.NodeID]map[ids.ID]time.Time), } } -func (m *manager) StartTracking(nodeIDs []ids.NodeID) error { +func (m *manager) StartTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error { now := m.clock.UnixTime() for _, nodeID := range nodeIDs { - upDuration, lastUpdated, err := m.state.GetUptime(nodeID) + upDuration, lastUpdated, err := m.state.GetUptime(nodeID, subnetID) if err != nil { return err } @@ -74,25 +72,33 @@ func (m *manager) StartTracking(nodeIDs []ids.NodeID) error { durationOffline := now.Sub(lastUpdated) newUpDuration := upDuration + durationOffline - if err := m.state.SetUptime(nodeID, newUpDuration, now); err != nil { + if err := m.state.SetUptime(nodeID, subnetID, newUpDuration, now); err != nil { return err } } - m.startedTracking = true + m.trackedSubnets.Add(subnetID) return nil } -func (m *manager) Shutdown(nodeIDs []ids.NodeID) error { +func (m *manager) StopTracking(nodeIDs []ids.NodeID, subnetID ids.ID) error { now := m.clock.UnixTime() for _, nodeID := range nodeIDs { - if _, connected := m.connections[nodeID]; connected { - if err := m.Disconnect(nodeID); err != nil { + connectedSubnets := m.connections[nodeID] + // If the node is already connected to this subnet, then we can just + // update the uptime in the state and remove the connection + if _, isConnected := connectedSubnets[subnetID]; isConnected { + if err := m.updateSubnetUptime(nodeID, subnetID); err != nil { + delete(connectedSubnets, subnetID) return err } + delete(connectedSubnets, subnetID) continue } - upDuration, lastUpdated, err := m.state.GetUptime(nodeID) + // if the node is not connected to this subnet, then we 
need to update + // the uptime in the state from the last time the node was connected to + // this subnet to now. + upDuration, lastUpdated, err := m.state.GetUptime(nodeID, subnetID) if err != nil { return err } @@ -103,43 +109,41 @@ func (m *manager) Shutdown(nodeIDs []ids.NodeID) error { continue } - if err := m.state.SetUptime(nodeID, upDuration, now); err != nil { + if err := m.state.SetUptime(nodeID, subnetID, upDuration, now); err != nil { return err } } return nil } -func (m *manager) Connect(nodeID ids.NodeID) error { - m.connections[nodeID] = m.clock.UnixTime() +func (m *manager) Connect(nodeID ids.NodeID, subnetID ids.ID) error { + subnetConnections, ok := m.connections[nodeID] + if !ok { + subnetConnections = make(map[ids.ID]time.Time) + m.connections[nodeID] = subnetConnections + } + subnetConnections[subnetID] = m.clock.UnixTime() return nil } -func (m *manager) IsConnected(nodeID ids.NodeID) bool { - _, connected := m.connections[nodeID] +func (m *manager) IsConnected(nodeID ids.NodeID, subnetID ids.ID) bool { + _, connected := m.connections[nodeID][subnetID] return connected } func (m *manager) Disconnect(nodeID ids.NodeID) error { - if !m.startedTracking { - delete(m.connections, nodeID) - return nil + // Update every subnet that this node was connected to + for subnetID := range m.connections[nodeID] { + if err := m.updateSubnetUptime(nodeID, subnetID); err != nil { + return err + } } - - newDuration, newLastUpdated, err := m.CalculateUptime(nodeID) delete(m.connections, nodeID) - if err == database.ErrNotFound { - // If a non-validator disconnects, we don't care - return nil - } - if err != nil { - return err - } - return m.state.SetUptime(nodeID, newDuration, newLastUpdated) + return nil } -func (m *manager) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { - upDuration, lastUpdated, err := m.state.GetUptime(nodeID) +func (m *manager) CalculateUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Duration, time.Time, error) { + 
upDuration, lastUpdated, err := m.state.GetUptime(nodeID, subnetID) if err != nil { return 0, time.Time{}, err } @@ -151,7 +155,13 @@ func (m *manager) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, return upDuration, lastUpdated, nil } - timeConnected, isConnected := m.connections[nodeID] + if !m.trackedSubnets.Contains(subnetID) { + durationOffline := now.Sub(lastUpdated) + newUpDuration := upDuration + durationOffline + return newUpDuration, now, nil + } + + timeConnected, isConnected := m.connections[nodeID][subnetID] if !isConnected { return upDuration, now, nil } @@ -175,16 +185,16 @@ func (m *manager) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, return newUpDuration, now, nil } -func (m *manager) CalculateUptimePercent(nodeID ids.NodeID) (float64, error) { - startTime, err := m.state.GetStartTime(nodeID) +func (m *manager) CalculateUptimePercent(nodeID ids.NodeID, subnetID ids.ID) (float64, error) { + startTime, err := m.state.GetStartTime(nodeID, subnetID) if err != nil { return 0, err } - return m.CalculateUptimePercentFrom(nodeID, startTime) + return m.CalculateUptimePercentFrom(nodeID, subnetID, startTime) } -func (m *manager) CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.Time) (float64, error) { - upDuration, now, err := m.CalculateUptime(nodeID) +func (m *manager) CalculateUptimePercentFrom(nodeID ids.NodeID, subnetID ids.ID, startTime time.Time) (float64, error) { + upDuration, now, err := m.CalculateUptime(nodeID, subnetID) if err != nil { return 0, err } @@ -199,3 +209,23 @@ func (m *manager) CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.T func (m *manager) SetTime(newTime time.Time) { m.clock.Set(newTime) } + +// updateSubnetUptime updates the subnet uptime of the node on the state by the amount +// of time that the node has been connected to the subnet. 
+func (m *manager) updateSubnetUptime(nodeID ids.NodeID, subnetID ids.ID) error { + // we're not tracking this subnet, skip updating it. + if !m.trackedSubnets.Contains(subnetID) { + return nil + } + + newDuration, newLastUpdated, err := m.CalculateUptime(nodeID, subnetID) + if err == database.ErrNotFound { + // If a non-validator disconnects, we don't care + return nil + } + if err != nil { + return err + } + + return m.state.SetUptime(nodeID, subnetID, newDuration, newLastUpdated) +} diff --git a/avalanchego/snow/uptime/manager_test.go b/avalanchego/snow/uptime/manager_test.go index f5a9addc..bea1533e 100644 --- a/avalanchego/snow/uptime/manager_test.go +++ b/avalanchego/snow/uptime/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime @@ -13,24 +13,27 @@ import ( "github.com/ava-labs/avalanchego/ids" ) +var errTest = errors.New("non-nil error") + func TestStartTracking(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) currentTime := startTime.Add(time.Second) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - duration, lastUpdated, err := up.CalculateUptime(nodeID0) + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Second, duration) require.Equal(up.clock.UnixTime(), lastUpdated) @@ -40,18 +43,19 @@ func TestStartTrackingDBError(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() - s.dbWriteError = errors.New("err") - 
s.AddNode(nodeID0, startTime) + s.dbWriteError = errTest + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) currentTime := startTime.Add(time.Second) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.Error(err) } @@ -62,7 +66,9 @@ func TestStartTrackingNonValidator(t *testing.T) { up := NewManager(s).(*manager) nodeID0 := ids.GenerateTestNodeID() - err := up.StartTracking([]ids.NodeID{nodeID0}) + subnetID := ids.GenerateTestID() + + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.Error(err) } @@ -70,253 +76,286 @@ func TestStartTrackingInThePast(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) currentTime := startTime.Add(-time.Second) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - duration, lastUpdated, err := up.CalculateUptime(nodeID0) + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(startTime.Truncate(time.Second), lastUpdated) } -func TestShutdownDecreasesUptime(t *testing.T) { +func TestStopTrackingDecreasesUptime(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) - 
err = up.Shutdown([]ids.NodeID{nodeID0}) + err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) up = NewManager(s).(*manager) up.clock.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}) + err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - duration, lastUpdated, err := up.CalculateUptime(nodeID0) + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(up.clock.UnixTime(), lastUpdated) } -func TestShutdownIncreasesUptime(t *testing.T) { +func TestStopTrackingIncreasesUptime(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - err = up.Connect(nodeID0) + err = up.Connect(nodeID0, subnetID) require.NoError(err) currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) - err = up.Shutdown([]ids.NodeID{nodeID0}) + err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) up = NewManager(s).(*manager) up.clock.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}) + err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - duration, lastUpdated, err := up.CalculateUptime(nodeID0) + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Second, duration) require.Equal(up.clock.UnixTime(), lastUpdated) } -func TestShutdownDisconnectedNonValidator(t *testing.T) { +func TestStopTrackingDisconnectedNonValidator(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() s := 
NewTestState() up := NewManager(s).(*manager) - err := up.StartTracking(nil) + err := up.StartTracking(nil, subnetID) require.NoError(err) - err = up.Shutdown([]ids.NodeID{nodeID0}) + err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.Error(err) } -func TestShutdownConnectedDBError(t *testing.T) { +func TestStopTrackingConnectedDBError(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) - err := up.StartTracking(nil) + err := up.StartTracking(nil, subnetID) require.NoError(err) - err = up.Connect(nodeID0) + err = up.Connect(nodeID0, subnetID) require.NoError(err) - s.dbReadError = errors.New("err") - err = up.Shutdown([]ids.NodeID{nodeID0}) + s.dbReadError = errTest + err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.Error(err) } -func TestShutdownNonConnectedPast(t *testing.T) { +func TestStopTrackingNonConnectedPast(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) currentTime = currentTime.Add(-time.Second) up.clock.Set(currentTime) - err = up.Shutdown([]ids.NodeID{nodeID0}) + err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - duration, lastUpdated, err := s.GetUptime(nodeID0) + duration, lastUpdated, err := s.GetUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(startTime.Truncate(time.Second), lastUpdated) } -func TestShutdownNonConnectedDBError(t *testing.T) { 
+func TestStopTrackingNonConnectedDBError(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - s.dbWriteError = errors.New("err") - err = up.Shutdown([]ids.NodeID{nodeID0}) + s.dbWriteError = errTest + err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.Error(err) } func TestConnectAndDisconnect(t *testing.T) { - require := require.New(t) - - nodeID0 := ids.GenerateTestNodeID() - currentTime := time.Now() - startTime := currentTime - - s := NewTestState() - s.AddNode(nodeID0, startTime) - - up := NewManager(s).(*manager) - up.clock.Set(currentTime) - - connected := up.IsConnected(nodeID0) - require.False(connected) - - err := up.StartTracking([]ids.NodeID{nodeID0}) - require.NoError(err) - - connected = up.IsConnected(nodeID0) - require.False(connected) - - duration, lastUpdated, err := up.CalculateUptime(nodeID0) - require.NoError(err) - require.Equal(time.Duration(0), duration) - require.Equal(up.clock.UnixTime(), lastUpdated) - - err = up.Connect(nodeID0) - require.NoError(err) - - connected = up.IsConnected(nodeID0) - require.True(connected) - - currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) - - duration, lastUpdated, err = up.CalculateUptime(nodeID0) - require.NoError(err) - require.Equal(time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) - - err = up.Disconnect(nodeID0) - require.NoError(err) - - connected = up.IsConnected(nodeID0) - require.False(connected) - - currentTime = currentTime.Add(time.Second) - up.clock.Set(currentTime) - - duration, 
lastUpdated, err = up.CalculateUptime(nodeID0) - require.NoError(err) - require.Equal(time.Second, duration) - require.Equal(up.clock.UnixTime(), lastUpdated) + tests := []struct { + name string + subnetIDs []ids.ID + }{ + { + name: "Single Subnet", + subnetIDs: []ids.ID{ids.GenerateTestID()}, + }, + { + name: "Multiple Subnets", + subnetIDs: []ids.ID{ids.GenerateTestID(), ids.GenerateTestID()}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.GenerateTestNodeID() + currentTime := time.Now() + startTime := currentTime + + s := NewTestState() + up := NewManager(s).(*manager) + up.clock.Set(currentTime) + + for _, subnetID := range tt.subnetIDs { + s.AddNode(nodeID0, subnetID, startTime) + + connected := up.IsConnected(nodeID0, subnetID) + require.False(connected) + + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) + require.NoError(err) + + connected = up.IsConnected(nodeID0, subnetID) + require.False(connected) + + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + require.NoError(err) + require.Equal(time.Duration(0), duration) + require.Equal(up.clock.UnixTime(), lastUpdated) + + err = up.Connect(nodeID0, subnetID) + require.NoError(err) + + connected = up.IsConnected(nodeID0, subnetID) + require.True(connected) + } + + currentTime = currentTime.Add(time.Second) + up.clock.Set(currentTime) + + for _, subnetID := range tt.subnetIDs { + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + require.NoError(err) + require.Equal(time.Second, duration) + require.Equal(up.clock.UnixTime(), lastUpdated) + } + + err := up.Disconnect(nodeID0) + require.NoError(err) + + for _, subnetID := range tt.subnetIDs { + connected := up.IsConnected(nodeID0, subnetID) + require.False(connected) + } + + currentTime = currentTime.Add(time.Second) + up.clock.Set(currentTime) + + for _, subnetID := range tt.subnetIDs { + duration, lastUpdated, err := 
up.CalculateUptime(nodeID0, subnetID) + require.NoError(err) + require.Equal(time.Second, duration) + require.Equal(up.clock.UnixTime(), lastUpdated) + } + }) + } } func TestConnectAndDisconnectBeforeTracking(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - err := up.Connect(nodeID0) + err := up.Connect(nodeID0, subnetID) require.NoError(err) currentTime = currentTime.Add(time.Second) @@ -325,10 +364,10 @@ func TestConnectAndDisconnectBeforeTracking(t *testing.T) { err = up.Disconnect(nodeID0) require.NoError(err) - err = up.StartTracking([]ids.NodeID{nodeID0}) + err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - duration, lastUpdated, err := up.CalculateUptime(nodeID0) + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(2*time.Second, duration) require.Equal(up.clock.UnixTime(), lastUpdated) @@ -338,34 +377,35 @@ func TestUnrelatedNodeDisconnect(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() nodeID1 := ids.GenerateTestNodeID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - duration, lastUpdated, err := up.CalculateUptime(nodeID0) + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(up.clock.UnixTime(), lastUpdated) - err = up.Connect(nodeID0) 
+ err = up.Connect(nodeID0, subnetID) require.NoError(err) - err = up.Connect(nodeID1) + err = up.Connect(nodeID1, subnetID) require.NoError(err) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - duration, lastUpdated, err = up.CalculateUptime(nodeID0) + duration, lastUpdated, err = up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Second, duration) require.Equal(up.clock.UnixTime(), lastUpdated) @@ -376,62 +416,93 @@ func TestUnrelatedNodeDisconnect(t *testing.T) { currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - duration, lastUpdated, err = up.CalculateUptime(nodeID0) + duration, lastUpdated, err = up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(2*time.Second, duration) require.Equal(up.clock.UnixTime(), lastUpdated) } +func TestCalculateUptimeWhenNeverTracked(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() + startTime := time.Now() + + s := NewTestState() + s.AddNode(nodeID0, subnetID, startTime) + + up := NewManager(s).(*manager) + + currentTime := startTime.Add(time.Second) + up.clock.Set(currentTime) + + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) + require.NoError(err) + require.Equal(time.Second, duration) + require.Equal(up.clock.UnixTime(), lastUpdated) + + uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime.Truncate(time.Second)) + require.NoError(err) + require.Equal(float64(1), uptime) +} + func TestCalculateUptimeWhenNeverConnected(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() - s.AddNode(nodeID0, startTime) up := NewManager(s).(*manager) + err := up.StartTracking([]ids.NodeID{}, subnetID) + require.NoError(err) + + s.AddNode(nodeID0, subnetID, startTime) + currentTime := startTime.Add(time.Second) up.clock.Set(currentTime) - 
duration, lastUpdated, err := up.CalculateUptime(nodeID0) + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(up.clock.UnixTime(), lastUpdated) - uptime, err := up.CalculateUptimePercentFrom(nodeID0, startTime) + uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime) require.NoError(err) - require.Equal(0., uptime) + require.Equal(float64(0), uptime) } func TestCalculateUptimeWhenConnectedBeforeTracking(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.Connect(nodeID0) + err := up.Connect(nodeID0, subnetID) require.NoError(err) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}) + err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - duration, lastUpdated, err := up.CalculateUptime(nodeID0) + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(2*time.Second, duration) require.Equal(up.clock.UnixTime(), lastUpdated) @@ -441,28 +512,29 @@ func TestCalculateUptimeWhenConnectedInFuture(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) currentTime = currentTime.Add(2 * time.Second) 
up.clock.Set(currentTime) - err = up.Connect(nodeID0) + err = up.Connect(nodeID0, subnetID) require.NoError(err) currentTime = currentTime.Add(-time.Second) up.clock.Set(currentTime) - duration, lastUpdated, err := up.CalculateUptime(nodeID0) + duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(up.clock.UnixTime(), lastUpdated) @@ -472,13 +544,14 @@ func TestCalculateUptimeNonValidator(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() startTime := time.Now() s := NewTestState() up := NewManager(s).(*manager) - _, err := up.CalculateUptimePercentFrom(nodeID0, startTime) + _, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime) require.Error(err) } @@ -486,16 +559,17 @@ func TestCalculateUptimePercentageDivBy0(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() currentTime := time.Now() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) up.clock.Set(currentTime) - uptime, err := up.CalculateUptimePercentFrom(nodeID0, startTime.Truncate(time.Second)) + uptime, err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime.Truncate(time.Second)) require.NoError(err) require.Equal(float64(1), uptime) } @@ -505,44 +579,49 @@ func TestCalculateUptimePercentage(t *testing.T) { nodeID0 := ids.GenerateTestNodeID() currentTime := time.Now() + subnetID := ids.GenerateTestID() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) + require.NoError(err) + currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - uptime, err := up.CalculateUptimePercentFrom(nodeID0, startTime) + uptime, 
err := up.CalculateUptimePercentFrom(nodeID0, subnetID, startTime.Truncate(time.Second)) require.NoError(err) require.Equal(float64(0), uptime) } -func TestShutdownUnixTimeRegression(t *testing.T) { +func TestStopTrackingUnixTimeRegression(t *testing.T) { require := require.New(t) nodeID0 := ids.GenerateTestNodeID() currentTime := time.Now() + subnetID := ids.GenerateTestID() startTime := currentTime s := NewTestState() - s.AddNode(nodeID0, startTime) + s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}) + err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - err = up.Connect(nodeID0) + err = up.Connect(nodeID0, subnetID) require.NoError(err) currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) - err = up.Shutdown([]ids.NodeID{nodeID0}) + err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) currentTime = startTime.Add(time.Second) @@ -553,16 +632,16 @@ func TestShutdownUnixTimeRegression(t *testing.T) { currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}) + err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) require.NoError(err) - err = up.Connect(nodeID0) + err = up.Connect(nodeID0, subnetID) require.NoError(err) currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) - perc, err := up.CalculateUptimePercent(nodeID0) + perc, err := up.CalculateUptimePercent(nodeID0, subnetID) require.NoError(err) require.GreaterOrEqual(float64(1), perc) } diff --git a/avalanchego/snow/uptime/mock_calculator.go b/avalanchego/snow/uptime/mock_calculator.go new file mode 100644 index 00000000..02f5a85c --- /dev/null +++ b/avalanchego/snow/uptime/mock_calculator.go @@ -0,0 +1,85 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/ava-labs/avalanchego/snow/uptime (interfaces: Calculator) + +// Package uptime is a generated GoMock package. +package uptime + +import ( + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + gomock "github.com/golang/mock/gomock" +) + +// MockCalculator is a mock of Calculator interface. +type MockCalculator struct { + ctrl *gomock.Controller + recorder *MockCalculatorMockRecorder +} + +// MockCalculatorMockRecorder is the mock recorder for MockCalculator. +type MockCalculatorMockRecorder struct { + mock *MockCalculator +} + +// NewMockCalculator creates a new mock instance. +func NewMockCalculator(ctrl *gomock.Controller) *MockCalculator { + mock := &MockCalculator{ctrl: ctrl} + mock.recorder = &MockCalculatorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockCalculator) EXPECT() *MockCalculatorMockRecorder { + return m.recorder +} + +// CalculateUptime mocks base method. +func (m *MockCalculator) CalculateUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Duration, time.Time, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CalculateUptime", arg0, arg1) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(time.Time) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// CalculateUptime indicates an expected call of CalculateUptime. +func (mr *MockCalculatorMockRecorder) CalculateUptime(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptime", reflect.TypeOf((*MockCalculator)(nil).CalculateUptime), arg0, arg1) +} + +// CalculateUptimePercent mocks base method. 
+func (m *MockCalculator) CalculateUptimePercent(arg0 ids.NodeID, arg1 ids.ID) (float64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CalculateUptimePercent", arg0, arg1) + ret0, _ := ret[0].(float64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CalculateUptimePercent indicates an expected call of CalculateUptimePercent. +func (mr *MockCalculatorMockRecorder) CalculateUptimePercent(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercent", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercent), arg0, arg1) +} + +// CalculateUptimePercentFrom mocks base method. +func (m *MockCalculator) CalculateUptimePercentFrom(arg0 ids.NodeID, arg1 ids.ID, arg2 time.Time) (float64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CalculateUptimePercentFrom", arg0, arg1, arg2) + ret0, _ := ret[0].(float64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CalculateUptimePercentFrom indicates an expected call of CalculateUptimePercentFrom. +func (mr *MockCalculatorMockRecorder) CalculateUptimePercentFrom(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CalculateUptimePercentFrom", reflect.TypeOf((*MockCalculator)(nil).CalculateUptimePercentFrom), arg0, arg1, arg2) +} diff --git a/avalanchego/snow/uptime/mocks/calculator.go b/avalanchego/snow/uptime/mocks/calculator.go deleted file mode 100644 index 65897d37..00000000 --- a/avalanchego/snow/uptime/mocks/calculator.go +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated by mockery v2.9.4. DO NOT EDIT. 
- -package mocks - -import ( - ids "github.com/ava-labs/avalanchego/ids" - mock "github.com/stretchr/testify/mock" - - time "time" -) - -// Calculator is an autogenerated mock type for the Calculator type -type Calculator struct { - mock.Mock -} - -// CalculateUptime provides a mock function with given fields: nodeID -func (_m *Calculator) CalculateUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { - ret := _m.Called(nodeID) - - var r0 time.Duration - if rf, ok := ret.Get(0).(func(ids.NodeID) time.Duration); ok { - r0 = rf(nodeID) - } else { - r0 = ret.Get(0).(time.Duration) - } - - var r1 time.Time - if rf, ok := ret.Get(1).(func(ids.NodeID) time.Time); ok { - r1 = rf(nodeID) - } else { - r1 = ret.Get(1).(time.Time) - } - - var r2 error - if rf, ok := ret.Get(2).(func(ids.NodeID) error); ok { - r2 = rf(nodeID) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// CalculateUptimePercent provides a mock function with given fields: nodeID -func (_m *Calculator) CalculateUptimePercent(nodeID ids.NodeID) (float64, error) { - ret := _m.Called(nodeID) - - var r0 float64 - if rf, ok := ret.Get(0).(func(ids.NodeID) float64); ok { - r0 = rf(nodeID) - } else { - r0 = ret.Get(0).(float64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(ids.NodeID) error); ok { - r1 = rf(nodeID) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CalculateUptimePercentFrom provides a mock function with given fields: nodeID, startTime -func (_m *Calculator) CalculateUptimePercentFrom(nodeID ids.NodeID, startTime time.Time) (float64, error) { - ret := _m.Called(nodeID, startTime) - - var r0 float64 - if rf, ok := ret.Get(0).(func(ids.NodeID, time.Time) float64); ok { - r0 = rf(nodeID, startTime) - } else { - r0 = ret.Get(0).(float64) - } - - var r1 error - if rf, ok := ret.Get(1).(func(ids.NodeID, time.Time) error); ok { - r1 = rf(nodeID, startTime) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git 
a/avalanchego/snow/uptime/no_op_calculator.go b/avalanchego/snow/uptime/no_op_calculator.go new file mode 100644 index 00000000..44c688e3 --- /dev/null +++ b/avalanchego/snow/uptime/no_op_calculator.go @@ -0,0 +1,26 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package uptime + +import ( + "time" + + "github.com/ava-labs/avalanchego/ids" +) + +var NoOpCalculator Calculator = noOpCalculator{} + +type noOpCalculator struct{} + +func (noOpCalculator) CalculateUptime(ids.NodeID, ids.ID) (time.Duration, time.Time, error) { + return 0, time.Time{}, nil +} + +func (noOpCalculator) CalculateUptimePercent(ids.NodeID, ids.ID) (float64, error) { + return 0, nil +} + +func (noOpCalculator) CalculateUptimePercentFrom(ids.NodeID, ids.ID, time.Time) (float64, error) { + return 0, nil +} diff --git a/avalanchego/snow/uptime/state.go b/avalanchego/snow/uptime/state.go index 5b5c8ad4..5b2592ac 100644 --- a/avalanchego/snow/uptime/state.go +++ b/avalanchego/snow/uptime/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package uptime @@ -10,10 +10,34 @@ import ( ) type State interface { - // GetUptime returns [upDuration] and [lastUpdated] to be truncated (floored) to the nearest second - GetUptime(nodeID ids.NodeID) (upDuration time.Duration, lastUpdated time.Time, err error) - // SetUptime expects [upDuration] and [lastUpdated] to be truncated (floored) to the nearest second - SetUptime(nodeID ids.NodeID, upDuration time.Duration, lastUpdated time.Time) error - // GetStartTime returns [startTime] truncated (floored) to the nearest second - GetStartTime(nodeID ids.NodeID) (startTime time.Time, err error) + // GetUptime returns [upDuration] and [lastUpdated] of [nodeID] on + // [subnetID]. 
+ // Returns [database.ErrNotFound] if [nodeID] isn't currently a validator of + // the subnet. + GetUptime( + nodeID ids.NodeID, + subnetID ids.ID, + ) (upDuration time.Duration, lastUpdated time.Time, err error) + + // SetUptime updates [upDuration] and [lastUpdated] of [nodeID] on + // [subnetID]. + // Returns [database.ErrNotFound] if [nodeID] isn't currently a validator of + // the subnet. + // Invariant: expects [lastUpdated] to be truncated (floored) to the nearest + // second. + SetUptime( + nodeID ids.NodeID, + subnetID ids.ID, + upDuration time.Duration, + lastUpdated time.Time, + ) error + + // GetStartTime returns the time that [nodeID] started validating + // [subnetID]. + // Returns [database.ErrNotFound] if [nodeID] isn't currently a validator of + // the subnet. + GetStartTime( + nodeID ids.NodeID, + subnetID ids.ID, + ) (startTime time.Time, err error) } diff --git a/avalanchego/snow/uptime/test_state.go b/avalanchego/snow/uptime/test_state.go index 32a6da41..58687e16 100644 --- a/avalanchego/snow/uptime/test_state.go +++ b/avalanchego/snow/uptime/test_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package uptime @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ State = &TestState{} +var _ State = (*TestState)(nil) type uptime struct { upDuration time.Duration @@ -21,33 +21,38 @@ type uptime struct { type TestState struct { dbReadError error dbWriteError error - nodes map[ids.NodeID]*uptime + nodes map[ids.NodeID]map[ids.ID]*uptime } func NewTestState() *TestState { return &TestState{ - nodes: make(map[ids.NodeID]*uptime), + nodes: make(map[ids.NodeID]map[ids.ID]*uptime), } } -func (s *TestState) AddNode(nodeID ids.NodeID, startTime time.Time) { +func (s *TestState) AddNode(nodeID ids.NodeID, subnetID ids.ID, startTime time.Time) { + subnetUptimes, ok := s.nodes[nodeID] + if !ok { + subnetUptimes = make(map[ids.ID]*uptime) + s.nodes[nodeID] = subnetUptimes + } st := time.Unix(startTime.Unix(), 0) - s.nodes[nodeID] = &uptime{ + subnetUptimes[subnetID] = &uptime{ lastUpdated: st, startTime: st, } } -func (s *TestState) GetUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { - up, exists := s.nodes[nodeID] +func (s *TestState) GetUptime(nodeID ids.NodeID, subnetID ids.ID) (time.Duration, time.Time, error) { + up, exists := s.nodes[nodeID][subnetID] if !exists { return 0, time.Time{}, database.ErrNotFound } return up.upDuration, up.lastUpdated, s.dbReadError } -func (s *TestState) SetUptime(nodeID ids.NodeID, upDuration time.Duration, lastUpdated time.Time) error { - up, exists := s.nodes[nodeID] +func (s *TestState) SetUptime(nodeID ids.NodeID, subnetID ids.ID, upDuration time.Duration, lastUpdated time.Time) error { + up, exists := s.nodes[nodeID][subnetID] if !exists { return database.ErrNotFound } @@ -56,8 +61,8 @@ func (s *TestState) SetUptime(nodeID ids.NodeID, upDuration time.Duration, lastU return s.dbWriteError } -func (s *TestState) GetStartTime(nodeID ids.NodeID) (time.Time, error) { - up, exists := s.nodes[nodeID] +func (s *TestState) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { + up, exists := 
s.nodes[nodeID][subnetID] if !exists { return time.Time{}, database.ErrNotFound } diff --git a/avalanchego/snow/validators/connector.go b/avalanchego/snow/validators/connector.go index 4a1d78cd..abb28d08 100644 --- a/avalanchego/snow/validators/connector.go +++ b/avalanchego/snow/validators/connector.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/version" ) @@ -11,6 +13,10 @@ import ( // Connector represents a handler that is called when a connection is marked as // connected or disconnected type Connector interface { - Connected(id ids.NodeID, nodeVersion *version.Application) error - Disconnected(id ids.NodeID) error + Connected( + ctx context.Context, + nodeID ids.NodeID, + nodeVersion *version.Application, + ) error + Disconnected(ctx context.Context, nodeID ids.NodeID) error } diff --git a/avalanchego/snow/validators/custom.go b/avalanchego/snow/validators/custom.go deleted file mode 100644 index 7bc98aa2..00000000 --- a/avalanchego/snow/validators/custom.go +++ /dev/null @@ -1,192 +0,0 @@ -package validators - -import ( - "errors" - "fmt" - "os" - "strings" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" -) - -const ( - songbirdValidatorWeight = 50_000 - costonValidatorWeight = 200_000 - customValidatorWeight = 200_000 - customValidatorEnv = "CUSTOM_VALIDATORS" - customValidatorExpEnv = "CUSTOM_VALIDATORS_EXPIRATION" -) - -var ( - // Set dates before release - songbirdValidatorsExpTime = time.Date(10000, time.January, 1, 0, 0, 0, 0, time.UTC) - costonValidatorsExpTime = time.Date(10000, time.January, 1, 0, 0, 0, 0, time.UTC) - customValidatorsExpTime = time.Date(10000, time.January, 1, 0, 0, 0, 0, time.UTC) -) - -var ( - defaultValidators = 
defaultValidatorSet{} - errNotInitialized = errors.New("default validator set not initialized") -) - -func DefaultValidatorList() []Validator { - return defaultValidators.list() -} - -func IsDefaultValidator(vdrID ids.NodeID) bool { - return defaultValidators.isValidator(vdrID) -} - -func InitializeDefaultValidators(networkID uint32, timestamp time.Time) { - defaultValidators.initialize(networkID, timestamp) -} - -func ExpiredDefaultValidators(networkID uint32, timestamp time.Time) []Validator { - return defaultValidators.expiredValidators(networkID, timestamp) -} - -type defaultValidatorSet struct { - initialized bool - vdrMap map[ids.NodeID]Validator -} - -func (dvs *defaultValidatorSet) initialize(networkID uint32, timestamp time.Time) { - if dvs.initialized { - return - } - - var vdrs []Validator - switch networkID { - case constants.LocalID: - vdrs = loadCustomValidators(timestamp) - case constants.SongbirdID: - vdrs = loadSongbirdValidators(timestamp) - case constants.CostonID: - vdrs = loadCostonValidators(timestamp) - } - dvs.vdrMap = make(map[ids.NodeID]Validator) - for _, vdr := range vdrs { - dvs.vdrMap[vdr.ID()] = vdr - } - dvs.initialized = true -} - -func (dvs *defaultValidatorSet) expiredValidators(networkID uint32, timestamp time.Time) []Validator { - if !dvs.initialized { - panic(errNotInitialized) - } - - switch networkID { - case constants.LocalID: - if !timestamp.Before(customValidatorsExpTime) { - return dvs.list() - } - case constants.SongbirdID: - if !timestamp.Before(songbirdValidatorsExpTime) { - return dvs.list() - } - case constants.CostonID: - if !timestamp.Before(costonValidatorsExpTime) { - return dvs.list() - } - } - return nil -} - -func (dvs *defaultValidatorSet) list() []Validator { - if !dvs.initialized { - panic(errNotInitialized) - } - vdrs := make([]Validator, 0, len(dvs.vdrMap)) - for _, vdr := range dvs.vdrMap { - vdrs = append(vdrs, vdr) - } - return vdrs -} - -func (dvs *defaultValidatorSet) isValidator(vdrID ids.NodeID) 
bool { - if !dvs.initialized { - panic(errNotInitialized) - } - _, ok := dvs.vdrMap[vdrID] - return ok -} - -func loadCustomValidators(timestamp time.Time) []Validator { - customValidatorList := os.Getenv(customValidatorEnv) - customValidatorExpString := os.Getenv(customValidatorExpEnv) - if len(customValidatorExpString) > 0 { - if t, err := time.Parse(time.RFC3339, customValidatorExpString); err == nil { - customValidatorsExpTime = t - } - // Ignore if error occurs, use default expiration time - } - if !timestamp.Before(customValidatorsExpTime) { - return nil - } - nodeIDs := strings.Split(customValidatorList, ",") - return createValidators(nodeIDs, uint64(customValidatorWeight)) -} - -func loadCostonValidators(timestamp time.Time) []Validator { - if !timestamp.Before(costonValidatorsExpTime) { - return nil - } - nodeIDs := []string{ - "NodeID-5dDZXn99LCkDoEi6t9gTitZuQmhokxQTc", - "NodeID-EkH8wyEshzEQBToAdR7Fexxcj9rrmEEHZ", - "NodeID-FPAwqHjs8Mw8Cuki5bkm3vSVisZr8t2Lu", - "NodeID-AQghDJTU3zuQj73itPtfTZz6CxsTQVD3R", - "NodeID-HaZ4HpanjndqSuN252chFsTysmdND5meA", - } - return createValidators(nodeIDs, uint64(costonValidatorWeight)) -} - -func loadSongbirdValidators(timestamp time.Time) []Validator { - if !timestamp.Before(songbirdValidatorsExpTime) { - return nil - } - nodeIDs := []string{ - "NodeID-3M9KVT6ixi4gVMisbm5TnPXYXgFN5LHuv", - "NodeID-NnX4fajAmyvpL9RLfheNdc47FKKDuQW8i", - "NodeID-AzdF8JNU468uwZYGquHt7bhDrsggZpK67", - "NodeID-FqeGcnLAXbDTthd382aP9uyu1i47paRRh", - "NodeID-B9HuZ5hDkRodyRRsiMEHWgMmmMF7xSKbj", - "NodeID-Jx3E1F7mfkseZmqnFgDUFV3eusMxVdT6Z", - "NodeID-FnvWuwvJGezs4uaBLujkfeM8U3gmAUY3Z", - "NodeID-LhVs6hzHjBcEkzA1Eu8Qxb9nEQAk1Qbgf", - "NodeID-9SqDo3MxpvEDN4bE4rLTyM7HkkKAw4h96", - "NodeID-4tStYRTi3KDxFmv1YHTZAQxbzeyMA7z52", - "NodeID-8XnMh17zo6pB8Pa2zptRBi9TbbMZgij2t", - "NodeID-Cn9P5wgg7d9RNLqm4dFLCUV2diCxpkj7f", - "NodeID-PEDdah7g7Efiii1xw8ex2dH58oMfByzjb", - "NodeID-QCt9AxMPt5nn445CQGoA3yktqkChnKmPY", - "NodeID-9bWz6J61B8WbQtzeSyA1jsXosyVbuUJd1", - 
"NodeID-DLMnewsEwtSH8Qk7p9RGzUVyZAaZVMKsk", - "NodeID-7meEpyjmGbL577th58dm4nvvtVZiJusFp", - "NodeID-JeYnnrUkuArAAe2Sjo47Z3X5yfeF7cw43", - "NodeID-Fdwp9Wtjh5rxzuTCF9z4zrSM31y7ZzBQS", - "NodeID-JdEBRLS98PansyFKQUzFKqk4xqrVZ41nC", - } - return createValidators(nodeIDs, uint64(songbirdValidatorWeight)) -} - -func createValidators(nodeIDs []string, weight uint64) (vdrs []Validator) { - for _, nodeID := range nodeIDs { - if nodeID == "" { - continue - } - - shortID, err := ids.ShortFromPrefixedString(nodeID, ids.NodeIDPrefix) - if err != nil { - panic(fmt.Sprintf("invalid validator node ID: %s", nodeID)) - } - vdrs = append(vdrs, &validator{ - nodeID: ids.NodeID(shortID), - weight: weight, - }) - } - return -} diff --git a/avalanchego/snow/validators/custom_test.go b/avalanchego/snow/validators/custom_test.go deleted file mode 100644 index 0b2b1370..00000000 --- a/avalanchego/snow/validators/custom_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package validators - -import ( - "testing" - "time" - - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/stretchr/testify/require" -) - -func TestValidatorsBeforeExpiration(t *testing.T) { - songbirdValidatorsExpTime = time.Date(2024, time.February, 1, 0, 0, 0, 0, time.UTC) - - vs := defaultValidatorSet{} - vs.initialize(constants.SongbirdID, time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC)) - - vds := vs.list() - require.Len(t, vds, 20) - - expVdrs := vs.expiredValidators(constants.SongbirdID, time.Date(2024, time.February, 2, 0, 0, 0, 0, time.UTC)) - require.Len(t, expVdrs, 20) -} - -func TestValidatorsAfterExpiration(t *testing.T) { - songbirdValidatorsExpTime = time.Date(2024, time.February, 1, 0, 0, 0, 0, time.UTC) - - vs := defaultValidatorSet{} - vs.initialize(constants.SongbirdID, time.Date(2024, time.March, 1, 0, 0, 0, 0, time.UTC)) - - vds := vs.list() - require.Len(t, vds, 0) -} diff --git a/avalanchego/snow/validators/gvalidators/validator_state_client.go 
b/avalanchego/snow/validators/gvalidators/validator_state_client.go new file mode 100644 index 00000000..4760212a --- /dev/null +++ b/avalanchego/snow/validators/gvalidators/validator_state_client.go @@ -0,0 +1,87 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gvalidators + +import ( + "context" + + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + + pb "github.com/ava-labs/avalanchego/proto/pb/validatorstate" +) + +var _ validators.State = (*Client)(nil) + +type Client struct { + client pb.ValidatorStateClient +} + +func NewClient(client pb.ValidatorStateClient) *Client { + return &Client{client: client} +} + +func (c *Client) GetMinimumHeight(ctx context.Context) (uint64, error) { + resp, err := c.client.GetMinimumHeight(ctx, &emptypb.Empty{}) + if err != nil { + return 0, err + } + return resp.Height, nil +} + +func (c *Client) GetCurrentHeight(ctx context.Context) (uint64, error) { + resp, err := c.client.GetCurrentHeight(ctx, &emptypb.Empty{}) + if err != nil { + return 0, err + } + return resp.Height, nil +} + +func (c *Client) GetSubnetID(ctx context.Context, chainID ids.ID) (ids.ID, error) { + resp, err := c.client.GetSubnetID(ctx, &pb.GetSubnetIDRequest{ + ChainId: chainID[:], + }) + if err != nil { + return ids.Empty, err + } + return ids.ToID(resp.SubnetId) +} + +func (c *Client) GetValidatorSet( + ctx context.Context, + height uint64, + subnetID ids.ID, +) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + resp, err := c.client.GetValidatorSet(ctx, &pb.GetValidatorSetRequest{ + Height: height, + SubnetId: subnetID[:], + }) + if err != nil { + return nil, err + } + + vdrs := make(map[ids.NodeID]*validators.GetValidatorOutput, len(resp.Validators)) + for _, validator := range resp.Validators { + nodeID, err := 
ids.ToNodeID(validator.NodeId) + if err != nil { + return nil, err + } + var publicKey *bls.PublicKey + if len(validator.PublicKey) > 0 { + publicKey, err = bls.PublicKeyFromBytes(validator.PublicKey) + if err != nil { + return nil, err + } + } + vdrs[nodeID] = &validators.GetValidatorOutput{ + NodeID: nodeID, + PublicKey: publicKey, + Weight: validator.Weight, + } + } + return vdrs, nil +} diff --git a/avalanchego/snow/validators/gvalidators/validator_state_server.go b/avalanchego/snow/validators/gvalidators/validator_state_server.go new file mode 100644 index 00000000..949022ca --- /dev/null +++ b/avalanchego/snow/validators/gvalidators/validator_state_server.go @@ -0,0 +1,79 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gvalidators + +import ( + "context" + + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + + pb "github.com/ava-labs/avalanchego/proto/pb/validatorstate" +) + +var _ pb.ValidatorStateServer = (*Server)(nil) + +type Server struct { + pb.UnsafeValidatorStateServer + state validators.State +} + +func NewServer(state validators.State) *Server { + return &Server{state: state} +} + +func (s *Server) GetMinimumHeight(ctx context.Context, _ *emptypb.Empty) (*pb.GetMinimumHeightResponse, error) { + height, err := s.state.GetMinimumHeight(ctx) + return &pb.GetMinimumHeightResponse{Height: height}, err +} + +func (s *Server) GetCurrentHeight(ctx context.Context, _ *emptypb.Empty) (*pb.GetCurrentHeightResponse, error) { + height, err := s.state.GetCurrentHeight(ctx) + return &pb.GetCurrentHeightResponse{Height: height}, err +} + +func (s *Server) GetSubnetID(ctx context.Context, req *pb.GetSubnetIDRequest) (*pb.GetSubnetIDResponse, error) { + chainID, err := ids.ToID(req.ChainId) + if err != nil { + return nil, err + } + + subnetID, err 
:= s.state.GetSubnetID(ctx, chainID) + return &pb.GetSubnetIDResponse{ + SubnetId: subnetID[:], + }, err +} + +func (s *Server) GetValidatorSet(ctx context.Context, req *pb.GetValidatorSetRequest) (*pb.GetValidatorSetResponse, error) { + subnetID, err := ids.ToID(req.SubnetId) + if err != nil { + return nil, err + } + + vdrs, err := s.state.GetValidatorSet(ctx, req.Height, subnetID) + if err != nil { + return nil, err + } + + resp := &pb.GetValidatorSetResponse{ + Validators: make([]*pb.Validator, len(vdrs)), + } + + i := 0 + for _, vdr := range vdrs { + vdrPB := &pb.Validator{ + NodeId: vdr.NodeID[:], + Weight: vdr.Weight, + } + if vdr.PublicKey != nil { + vdrPB.PublicKey = bls.PublicKeyToBytes(vdr.PublicKey) + } + resp.Validators[i] = vdrPB + i++ + } + return resp, nil +} diff --git a/avalanchego/snow/validators/gvalidators/validator_state_test.go b/avalanchego/snow/validators/gvalidators/validator_state_test.go new file mode 100644 index 00000000..d9a6d409 --- /dev/null +++ b/avalanchego/snow/validators/gvalidators/validator_state_test.go @@ -0,0 +1,183 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package gvalidators + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" + + pb "github.com/ava-labs/avalanchego/proto/pb/validatorstate" +) + +var errCustom = errors.New("custom") + +type testState struct { + client *Client + server *validators.MockState + closeFn func() +} + +func setupState(t testing.TB, ctrl *gomock.Controller) *testState { + t.Helper() + + state := &testState{ + server: validators.NewMockState(ctrl), + } + + listener, err := grpcutils.NewListener() + if err != nil { + t.Fatalf("Failed to create listener: %s", err) + } + serverCloser := grpcutils.ServerCloser{} + + server := grpcutils.NewServer() + pb.RegisterValidatorStateServer(server, NewServer(state.server)) + serverCloser.Add(server) + + go grpcutils.Serve(listener, server) + + conn, err := grpcutils.Dial(listener.Addr().String()) + if err != nil { + t.Fatalf("Failed to dial: %s", err) + } + + state.client = NewClient(pb.NewValidatorStateClient(conn)) + state.closeFn = func() { + serverCloser.Stop() + _ = conn.Close() + _ = listener.Close() + } + return state +} + +func TestGetMinimumHeight(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := setupState(t, ctrl) + defer state.closeFn() + + // Happy path + expectedHeight := uint64(1337) + state.server.EXPECT().GetMinimumHeight(gomock.Any()).Return(expectedHeight, nil) + + height, err := state.client.GetMinimumHeight(context.Background()) + require.NoError(err) + require.Equal(expectedHeight, height) + + // Error path + state.server.EXPECT().GetMinimumHeight(gomock.Any()).Return(expectedHeight, errCustom) + + _, err = state.client.GetMinimumHeight(context.Background()) + require.Error(err) +} + +func 
TestGetCurrentHeight(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := setupState(t, ctrl) + defer state.closeFn() + + // Happy path + expectedHeight := uint64(1337) + state.server.EXPECT().GetCurrentHeight(gomock.Any()).Return(expectedHeight, nil) + + height, err := state.client.GetCurrentHeight(context.Background()) + require.NoError(err) + require.Equal(expectedHeight, height) + + // Error path + state.server.EXPECT().GetCurrentHeight(gomock.Any()).Return(expectedHeight, errCustom) + + _, err = state.client.GetCurrentHeight(context.Background()) + require.Error(err) +} + +func TestGetSubnetID(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := setupState(t, ctrl) + defer state.closeFn() + + // Happy path + chainID := ids.GenerateTestID() + expectedSubnetID := ids.GenerateTestID() + state.server.EXPECT().GetSubnetID(gomock.Any(), chainID).Return(expectedSubnetID, nil) + + subnetID, err := state.client.GetSubnetID(context.Background(), chainID) + require.NoError(err) + require.Equal(expectedSubnetID, subnetID) + + // Error path + state.server.EXPECT().GetSubnetID(gomock.Any(), chainID).Return(expectedSubnetID, errCustom) + + _, err = state.client.GetSubnetID(context.Background(), chainID) + require.Error(err) +} + +func TestGetValidatorSet(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := setupState(t, ctrl) + defer state.closeFn() + + // Happy path + sk0, err := bls.NewSecretKey() + require.NoError(err) + vdr0 := &validators.GetValidatorOutput{ + NodeID: ids.GenerateTestNodeID(), + PublicKey: bls.PublicFromSecretKey(sk0), + Weight: 1, + } + + sk1, err := bls.NewSecretKey() + require.NoError(err) + vdr1 := &validators.GetValidatorOutput{ + NodeID: ids.GenerateTestNodeID(), + PublicKey: bls.PublicFromSecretKey(sk1), + Weight: 2, + } + + vdr2 := &validators.GetValidatorOutput{ + 
NodeID: ids.GenerateTestNodeID(), + PublicKey: nil, + Weight: 3, + } + + expectedVdrs := map[ids.NodeID]*validators.GetValidatorOutput{ + vdr0.NodeID: vdr0, + vdr1.NodeID: vdr1, + vdr2.NodeID: vdr2, + } + height := uint64(1337) + subnetID := ids.GenerateTestID() + state.server.EXPECT().GetValidatorSet(gomock.Any(), height, subnetID).Return(expectedVdrs, nil) + + vdrs, err := state.client.GetValidatorSet(context.Background(), height, subnetID) + require.NoError(err) + require.Equal(expectedVdrs, vdrs) + + // Error path + state.server.EXPECT().GetValidatorSet(gomock.Any(), height, subnetID).Return(expectedVdrs, errCustom) + + _, err = state.client.GetValidatorSet(context.Background(), height, subnetID) + require.Error(err) +} diff --git a/avalanchego/snow/validators/logger.go b/avalanchego/snow/validators/logger.go new file mode 100644 index 00000000..98d47d30 --- /dev/null +++ b/avalanchego/snow/validators/logger.go @@ -0,0 +1,91 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/types" +) + +var _ SetCallbackListener = (*logger)(nil) + +type logger struct { + log logging.Logger + enabled *utils.Atomic[bool] + subnetID ids.ID + nodeIDs set.Set[ids.NodeID] +} + +// NewLogger returns a callback listener that will log validator set changes for +// the specified validators +func NewLogger( + log logging.Logger, + enabled *utils.Atomic[bool], + subnetID ids.ID, + nodeIDs ...ids.NodeID, +) SetCallbackListener { + nodeIDSet := set.NewSet[ids.NodeID](len(nodeIDs)) + nodeIDSet.Add(nodeIDs...) 
+ return &logger{ + log: log, + enabled: enabled, + subnetID: subnetID, + nodeIDs: nodeIDSet, + } +} + +func (l *logger) OnValidatorAdded( + nodeID ids.NodeID, + pk *bls.PublicKey, + txID ids.ID, + weight uint64, +) { + if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + var pkBytes []byte + if pk != nil { + pkBytes = bls.PublicKeyToBytes(pk) + } + l.log.Info("node added to validator set", + zap.Stringer("subnetID", l.subnetID), + zap.Stringer("nodeID", nodeID), + zap.Reflect("publicKey", types.JSONByteSlice(pkBytes)), + zap.Stringer("txID", txID), + zap.Uint64("weight", weight), + ) + } +} + +func (l *logger) OnValidatorRemoved( + nodeID ids.NodeID, + weight uint64, +) { + if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + l.log.Info("node removed from validator set", + zap.Stringer("subnetID", l.subnetID), + zap.Stringer("nodeID", nodeID), + zap.Uint64("weight", weight), + ) + } +} + +func (l *logger) OnValidatorWeightChanged( + nodeID ids.NodeID, + oldWeight uint64, + newWeight uint64, +) { + if l.enabled.Get() && l.nodeIDs.Contains(nodeID) { + l.log.Info("validator weight changed", + zap.Stringer("subnetID", l.subnetID), + zap.Stringer("nodeID", nodeID), + zap.Uint64("previousWeight ", oldWeight), + zap.Uint64("newWeight ", newWeight), + ) + } +} diff --git a/avalanchego/snow/validators/manager.go b/avalanchego/snow/validators/manager.go index 45e91006..0d0bc563 100644 --- a/avalanchego/snow/validators/manager.go +++ b/avalanchego/snow/validators/manager.go @@ -1,45 +1,40 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package validators import ( + "errors" "fmt" "strings" "sync" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" ) -var _ Manager = &manager{} +var ( + _ Manager = (*manager)(nil) + + errMissingValidators = errors.New("missing validators") +) // Manager holds the validator set of each subnet type Manager interface { fmt.Stringer - // Set a subnet's validator set - Set(ids.ID, Set) error - - // AddWeight adds weight to a given validator on the given subnet - AddWeight(ids.ID, ids.NodeID, uint64) error + // Add a subnet's validator set to the manager. + // + // If the subnet had previously registered a validator set, false will be + // returned and the manager will not be modified. + Add(subnetID ids.ID, set Set) bool - // RemoveWeight removes weight from a given validator on a given subnet - RemoveWeight(ids.ID, ids.NodeID, uint64) error - - // GetValidators returns the validator set for the given subnet + // Get returns the validator set for the given subnet // Returns false if the subnet doesn't exist - GetValidators(ids.ID) (Set, bool) - - // MaskValidator hides the named validator from future samplings - MaskValidator(ids.NodeID) error - - // RevealValidator ensures the named validator is not hidden from future - // samplings - RevealValidator(ids.NodeID) error - - // Contains returns true if there is a validator with the specified ID - // currently in the set. 
- Contains(ids.ID, ids.NodeID) bool + Get(ids.ID) (Set, bool) } // NewManager returns a new, empty manager @@ -55,50 +50,21 @@ type manager struct { // Key: Subnet ID // Value: The validators that validate the subnet subnetToVdrs map[ids.ID]Set - - maskedVdrs ids.NodeIDSet } -func (m *manager) Set(subnetID ids.ID, newSet Set) error { +func (m *manager) Add(subnetID ids.ID, set Set) bool { m.lock.Lock() defer m.lock.Unlock() - oldSet, exists := m.subnetToVdrs[subnetID] - if !exists { - m.subnetToVdrs[subnetID] = newSet - return nil + if _, exists := m.subnetToVdrs[subnetID]; exists { + return false } - return oldSet.Set(newSet.List()) -} - -func (m *manager) AddWeight(subnetID ids.ID, vdrID ids.NodeID, weight uint64) error { - m.lock.Lock() - defer m.lock.Unlock() - vdrs, ok := m.subnetToVdrs[subnetID] - if !ok { - vdrs = NewSet() - for _, maskedVdrID := range m.maskedVdrs.List() { - if err := vdrs.MaskValidator(maskedVdrID); err != nil { - return err - } - } - m.subnetToVdrs[subnetID] = vdrs - } - return vdrs.AddWeight(vdrID, weight) + m.subnetToVdrs[subnetID] = set + return true } -func (m *manager) RemoveWeight(subnetID ids.ID, vdrID ids.NodeID, weight uint64) error { - m.lock.Lock() - defer m.lock.Unlock() - - if vdrs, ok := m.subnetToVdrs[subnetID]; ok { - return vdrs.RemoveWeight(vdrID, weight) - } - return nil -} - -func (m *manager) GetValidators(subnetID ids.ID) (Set, bool) { +func (m *manager) Get(subnetID ids.ID) (Set, bool) { m.lock.RLock() defer m.lock.RUnlock() @@ -106,60 +72,12 @@ func (m *manager) GetValidators(subnetID ids.ID) (Set, bool) { return vdrs, ok } -func (m *manager) MaskValidator(vdrID ids.NodeID) error { - m.lock.Lock() - defer m.lock.Unlock() - - if m.maskedVdrs.Contains(vdrID) { - return nil - } - m.maskedVdrs.Add(vdrID) - - for _, vdrs := range m.subnetToVdrs { - if err := vdrs.MaskValidator(vdrID); err != nil { - return err - } - } - return nil -} - -func (m *manager) RevealValidator(vdrID ids.NodeID) error { - m.lock.Lock() - defer 
m.lock.Unlock() - - if !m.maskedVdrs.Contains(vdrID) { - return nil - } - m.maskedVdrs.Remove(vdrID) - - for _, vdrs := range m.subnetToVdrs { - if err := vdrs.RevealValidator(vdrID); err != nil { - return err - } - } - return nil -} - -func (m *manager) Contains(subnetID ids.ID, vdrID ids.NodeID) bool { - m.lock.RLock() - defer m.lock.RUnlock() - - vdrs, ok := m.subnetToVdrs[subnetID] - if ok { - return vdrs.Contains(vdrID) - } - return false -} - func (m *manager) String() string { m.lock.RLock() defer m.lock.RUnlock() - subnets := make([]ids.ID, 0, len(m.subnetToVdrs)) - for subnetID := range m.subnetToVdrs { - subnets = append(subnets, subnetID) - } - ids.SortIDs(subnets) + subnets := maps.Keys(m.subnetToVdrs) + utils.Sort(subnets) sb := strings.Builder{} @@ -177,3 +95,53 @@ func (m *manager) String() string { return sb.String() } + +// Add is a helper that fetches the validator set of [subnetID] from [m] and +// adds [nodeID] to the validator set. +// Returns an error if: +// - [subnetID] does not have a registered validator set in [m] +// - adding [nodeID] to the validator set returns an error +func Add(m Manager, subnetID ids.ID, nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { + vdrs, ok := m.Get(subnetID) + if !ok { + return fmt.Errorf("%w: %s", errMissingValidators, subnetID) + } + return vdrs.Add(nodeID, pk, txID, weight) +} + +// AddWeight is a helper that fetches the validator set of [subnetID] from [m] +// and adds [weight] to [nodeID] in the validator set. 
+// Returns an error if: +// - [subnetID] does not have a registered validator set in [m] +// - adding [weight] to [nodeID] in the validator set returns an error +func AddWeight(m Manager, subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { + vdrs, ok := m.Get(subnetID) + if !ok { + return fmt.Errorf("%w: %s", errMissingValidators, subnetID) + } + return vdrs.AddWeight(nodeID, weight) +} + +// RemoveWeight is a helper that fetches the validator set of [subnetID] from +// [m] and removes [weight] from [nodeID] in the validator set. +// Returns an error if: +// - [subnetID] does not have a registered validator set in [m] +// - removing [weight] from [nodeID] in the validator set returns an error +func RemoveWeight(m Manager, subnetID ids.ID, nodeID ids.NodeID, weight uint64) error { + vdrs, ok := m.Get(subnetID) + if !ok { + return fmt.Errorf("%w: %s", errMissingValidators, subnetID) + } + return vdrs.RemoveWeight(nodeID, weight) +} + +// Contains is a helper that fetches the validator set of [subnetID] from [m] +// and returns if the validator set contains [nodeID]. If [m] does not contain a +// validator set for [subnetID], false is returned. +func Contains(m Manager, subnetID ids.ID, nodeID ids.NodeID) bool { + vdrs, ok := m.Get(subnetID) + if !ok { + return false + } + return vdrs.Contains(nodeID) +} diff --git a/avalanchego/snow/validators/manager_test.go b/avalanchego/snow/validators/manager_test.go new file mode 100644 index 00000000..433033ca --- /dev/null +++ b/avalanchego/snow/validators/manager_test.go @@ -0,0 +1,120 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package validators + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestAdd(t *testing.T) { + require := require.New(t) + + m := NewManager() + + subnetID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + + err := Add(m, subnetID, nodeID, nil, ids.Empty, 1) + require.ErrorIs(err, errMissingValidators) + + s := NewSet() + m.Add(subnetID, s) + + err = Add(m, subnetID, nodeID, nil, ids.Empty, 1) + require.NoError(err) + + weight := s.Weight() + require.EqualValues(1, weight) +} + +func TestAddWeight(t *testing.T) { + require := require.New(t) + + m := NewManager() + + subnetID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + + err := AddWeight(m, subnetID, nodeID, 1) + require.ErrorIs(err, errMissingValidators) + + s := NewSet() + m.Add(subnetID, s) + + err = AddWeight(m, subnetID, nodeID, 1) + require.ErrorIs(err, errMissingValidator) + + err = Add(m, subnetID, nodeID, nil, ids.Empty, 1) + require.NoError(err) + + err = AddWeight(m, subnetID, nodeID, 1) + require.NoError(err) + + weight := s.Weight() + require.EqualValues(2, weight) +} + +func TestRemoveWeight(t *testing.T) { + require := require.New(t) + + m := NewManager() + + subnetID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + + err := RemoveWeight(m, subnetID, nodeID, 1) + require.ErrorIs(err, errMissingValidators) + + s := NewSet() + m.Add(subnetID, s) + + err = Add(m, subnetID, nodeID, nil, ids.Empty, 2) + require.NoError(err) + + err = RemoveWeight(m, subnetID, nodeID, 1) + require.NoError(err) + + weight := s.Weight() + require.EqualValues(1, weight) + + err = RemoveWeight(m, subnetID, nodeID, 1) + require.NoError(err) + + weight = s.Weight() + require.Zero(weight) +} + +func TestContains(t *testing.T) { + require := require.New(t) + + m := NewManager() + + subnetID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + + contains := Contains(m, subnetID, nodeID) + 
require.False(contains) + + s := NewSet() + m.Add(subnetID, s) + + contains = Contains(m, subnetID, nodeID) + require.False(contains) + + err := Add(m, subnetID, nodeID, nil, ids.Empty, 1) + require.NoError(err) + + contains = Contains(m, subnetID, nodeID) + require.True(contains) + + err = RemoveWeight(m, subnetID, nodeID, 1) + require.NoError(err) + + contains = Contains(m, subnetID, nodeID) + require.False(contains) +} diff --git a/avalanchego/snow/validators/mock_manager.go b/avalanchego/snow/validators/mock_manager.go new file mode 100644 index 00000000..ef92abc2 --- /dev/null +++ b/avalanchego/snow/validators/mock_manager.go @@ -0,0 +1,81 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: Manager) + +// Package validators is a generated GoMock package. +package validators + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + gomock "github.com/golang/mock/gomock" +) + +// MockManager is a mock of Manager interface. +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager. +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance. +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// Add mocks base method. +func (m *MockManager) Add(arg0 ids.ID, arg1 Set) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", arg0, arg1) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Add indicates an expected call of Add. 
+func (mr *MockManagerMockRecorder) Add(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockManager)(nil).Add), arg0, arg1) +} + +// Get mocks base method. +func (m *MockManager) Get(arg0 ids.ID) (Set, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].(Set) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockManagerMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockManager)(nil).Get), arg0) +} + +// String mocks base method. +func (m *MockManager) String() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "String") + ret0, _ := ret[0].(string) + return ret0 +} + +// String indicates an expected call of String. +func (mr *MockManagerMockRecorder) String() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "String", reflect.TypeOf((*MockManager)(nil).String)) +} diff --git a/avalanchego/snow/validators/mock_set.go b/avalanchego/snow/validators/mock_set.go new file mode 100644 index 00000000..29b7fbcd --- /dev/null +++ b/avalanchego/snow/validators/mock_set.go @@ -0,0 +1,236 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: Set) + +// Package validators is a generated GoMock package. +package validators + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + bls "github.com/ava-labs/avalanchego/utils/crypto/bls" + set "github.com/ava-labs/avalanchego/utils/set" + gomock "github.com/golang/mock/gomock" +) + +// MockSet is a mock of Set interface. 
+type MockSet struct { + ctrl *gomock.Controller + recorder *MockSetMockRecorder +} + +// MockSetMockRecorder is the mock recorder for MockSet. +type MockSetMockRecorder struct { + mock *MockSet +} + +// NewMockSet creates a new mock instance. +func NewMockSet(ctrl *gomock.Controller) *MockSet { + mock := &MockSet{ctrl: ctrl} + mock.recorder = &MockSetMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSet) EXPECT() *MockSetMockRecorder { + return m.recorder +} + +// Add mocks base method. +func (m *MockSet) Add(arg0 ids.NodeID, arg1 *bls.PublicKey, arg2 ids.ID, arg3 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add. +func (mr *MockSetMockRecorder) Add(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockSet)(nil).Add), arg0, arg1, arg2, arg3) +} + +// AddWeight mocks base method. +func (m *MockSet) AddWeight(arg0 ids.NodeID, arg1 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddWeight", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// AddWeight indicates an expected call of AddWeight. +func (mr *MockSetMockRecorder) AddWeight(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddWeight", reflect.TypeOf((*MockSet)(nil).AddWeight), arg0, arg1) +} + +// Contains mocks base method. +func (m *MockSet) Contains(arg0 ids.NodeID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Contains", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Contains indicates an expected call of Contains. 
+func (mr *MockSetMockRecorder) Contains(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Contains", reflect.TypeOf((*MockSet)(nil).Contains), arg0) +} + +// Get mocks base method. +func (m *MockSet) Get(arg0 ids.NodeID) (*Validator, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].(*Validator) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// Get indicates an expected call of Get. +func (mr *MockSetMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockSet)(nil).Get), arg0) +} + +// GetWeight mocks base method. +func (m *MockSet) GetWeight(arg0 ids.NodeID) uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetWeight", arg0) + ret0, _ := ret[0].(uint64) + return ret0 +} + +// GetWeight indicates an expected call of GetWeight. +func (mr *MockSetMockRecorder) GetWeight(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetWeight", reflect.TypeOf((*MockSet)(nil).GetWeight), arg0) +} + +// Len mocks base method. +func (m *MockSet) Len() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Len") + ret0, _ := ret[0].(int) + return ret0 +} + +// Len indicates an expected call of Len. +func (mr *MockSetMockRecorder) Len() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Len", reflect.TypeOf((*MockSet)(nil).Len)) +} + +// List mocks base method. +func (m *MockSet) List() []*Validator { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List") + ret0, _ := ret[0].([]*Validator) + return ret0 +} + +// List indicates an expected call of List. 
+func (mr *MockSetMockRecorder) List() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockSet)(nil).List)) +} + +// PrefixedString mocks base method. +func (m *MockSet) PrefixedString(arg0 string) string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PrefixedString", arg0) + ret0, _ := ret[0].(string) + return ret0 +} + +// PrefixedString indicates an expected call of PrefixedString. +func (mr *MockSetMockRecorder) PrefixedString(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrefixedString", reflect.TypeOf((*MockSet)(nil).PrefixedString), arg0) +} + +// RegisterCallbackListener mocks base method. +func (m *MockSet) RegisterCallbackListener(arg0 SetCallbackListener) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RegisterCallbackListener", arg0) +} + +// RegisterCallbackListener indicates an expected call of RegisterCallbackListener. +func (mr *MockSetMockRecorder) RegisterCallbackListener(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterCallbackListener", reflect.TypeOf((*MockSet)(nil).RegisterCallbackListener), arg0) +} + +// RemoveWeight mocks base method. +func (m *MockSet) RemoveWeight(arg0 ids.NodeID, arg1 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveWeight", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveWeight indicates an expected call of RemoveWeight. +func (mr *MockSetMockRecorder) RemoveWeight(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveWeight", reflect.TypeOf((*MockSet)(nil).RemoveWeight), arg0, arg1) +} + +// Sample mocks base method. 
+func (m *MockSet) Sample(arg0 int) ([]ids.NodeID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Sample", arg0) + ret0, _ := ret[0].([]ids.NodeID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Sample indicates an expected call of Sample. +func (mr *MockSetMockRecorder) Sample(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sample", reflect.TypeOf((*MockSet)(nil).Sample), arg0) +} + +// String mocks base method. +func (m *MockSet) String() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "String") + ret0, _ := ret[0].(string) + return ret0 +} + +// String indicates an expected call of String. +func (mr *MockSetMockRecorder) String() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "String", reflect.TypeOf((*MockSet)(nil).String)) +} + +// SubsetWeight mocks base method. +func (m *MockSet) SubsetWeight(arg0 set.Set[ids.NodeID]) uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubsetWeight", arg0) + ret0, _ := ret[0].(uint64) + return ret0 +} + +// SubsetWeight indicates an expected call of SubsetWeight. +func (mr *MockSetMockRecorder) SubsetWeight(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubsetWeight", reflect.TypeOf((*MockSet)(nil).SubsetWeight), arg0) +} + +// Weight mocks base method. +func (m *MockSet) Weight() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Weight") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Weight indicates an expected call of Weight. 
+func (mr *MockSetMockRecorder) Weight() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Weight", reflect.TypeOf((*MockSet)(nil).Weight)) +} diff --git a/avalanchego/snow/validators/mock_state.go b/avalanchego/snow/validators/mock_state.go new file mode 100644 index 00000000..6efc0896 --- /dev/null +++ b/avalanchego/snow/validators/mock_state.go @@ -0,0 +1,99 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: State) + +// Package validators is a generated GoMock package. +package validators + +import ( + context "context" + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + gomock "github.com/golang/mock/gomock" +) + +// MockState is a mock of State interface. +type MockState struct { + ctrl *gomock.Controller + recorder *MockStateMockRecorder +} + +// MockStateMockRecorder is the mock recorder for MockState. +type MockStateMockRecorder struct { + mock *MockState +} + +// NewMockState creates a new mock instance. +func NewMockState(ctrl *gomock.Controller) *MockState { + mock := &MockState{ctrl: ctrl} + mock.recorder = &MockStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockState) EXPECT() *MockStateMockRecorder { + return m.recorder +} + +// GetCurrentHeight mocks base method. +func (m *MockState) GetCurrentHeight(arg0 context.Context) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCurrentHeight", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCurrentHeight indicates an expected call of GetCurrentHeight. 
+func (mr *MockStateMockRecorder) GetCurrentHeight(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentHeight", reflect.TypeOf((*MockState)(nil).GetCurrentHeight), arg0) +} + +// GetMinimumHeight mocks base method. +func (m *MockState) GetMinimumHeight(arg0 context.Context) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetMinimumHeight", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetMinimumHeight indicates an expected call of GetMinimumHeight. +func (mr *MockStateMockRecorder) GetMinimumHeight(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetMinimumHeight", reflect.TypeOf((*MockState)(nil).GetMinimumHeight), arg0) +} + +// GetSubnetID mocks base method. +func (m *MockState) GetSubnetID(arg0 context.Context, arg1 ids.ID) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSubnetID", arg0, arg1) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSubnetID indicates an expected call of GetSubnetID. +func (mr *MockStateMockRecorder) GetSubnetID(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetID", reflect.TypeOf((*MockState)(nil).GetSubnetID), arg0, arg1) +} + +// GetValidatorSet mocks base method. +func (m *MockState) GetValidatorSet(arg0 context.Context, arg1 uint64, arg2 ids.ID) (map[ids.NodeID]*GetValidatorOutput, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValidatorSet", arg0, arg1, arg2) + ret0, _ := ret[0].(map[ids.NodeID]*GetValidatorOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetValidatorSet indicates an expected call of GetValidatorSet. 
+func (mr *MockStateMockRecorder) GetValidatorSet(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorSet", reflect.TypeOf((*MockState)(nil).GetValidatorSet), arg0, arg1, arg2) +} diff --git a/avalanchego/snow/validators/mock_subnet_connector.go b/avalanchego/snow/validators/mock_subnet_connector.go new file mode 100644 index 00000000..2f8cd3bf --- /dev/null +++ b/avalanchego/snow/validators/mock_subnet_connector.go @@ -0,0 +1,53 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/snow/validators (interfaces: SubnetConnector) + +// Package validators is a generated GoMock package. +package validators + +import ( + context "context" + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + gomock "github.com/golang/mock/gomock" +) + +// MockSubnetConnector is a mock of SubnetConnector interface. +type MockSubnetConnector struct { + ctrl *gomock.Controller + recorder *MockSubnetConnectorMockRecorder +} + +// MockSubnetConnectorMockRecorder is the mock recorder for MockSubnetConnector. +type MockSubnetConnectorMockRecorder struct { + mock *MockSubnetConnector +} + +// NewMockSubnetConnector creates a new mock instance. +func NewMockSubnetConnector(ctrl *gomock.Controller) *MockSubnetConnector { + mock := &MockSubnetConnector{ctrl: ctrl} + mock.recorder = &MockSubnetConnectorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockSubnetConnector) EXPECT() *MockSubnetConnectorMockRecorder { + return m.recorder +} + +// ConnectedSubnet mocks base method. 
+func (m *MockSubnetConnector) ConnectedSubnet(arg0 context.Context, arg1 ids.NodeID, arg2 ids.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ConnectedSubnet", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ConnectedSubnet indicates an expected call of ConnectedSubnet. +func (mr *MockSubnetConnectorMockRecorder) ConnectedSubnet(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConnectedSubnet", reflect.TypeOf((*MockSubnetConnector)(nil).ConnectedSubnet), arg0, arg1, arg2) +} diff --git a/avalanchego/snow/validators/set.go b/avalanchego/snow/validators/set.go index 49738708..0e243c49 100644 --- a/avalanchego/snow/validators/set.go +++ b/avalanchego/snow/validators/set.go @@ -1,53 +1,67 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators import ( + "errors" "fmt" "strings" "sync" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/sampler" - - safemath "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" ) -const ( - // If, when the validator set is reset, cap(set)/len(set) > MaxExcessCapacityFactor, - // the underlying arrays' capacities will be reduced by a factor of capacityReductionFactor. - // Higher value for maxExcessCapacityFactor --> less aggressive array downsizing --> less memory allocations - // but more unnecessary data in the underlying array that can't be garbage collected. - // Higher value for capacityReductionFactor --> more aggressive array downsizing --> more memory allocations - // but less unnecessary data in the underlying array that can't be garbage collected. 
- maxExcessCapacityFactor = 4 - capacityReductionFactor = 2 -) +var ( + _ Set = (*vdrSet)(nil) -var _ Set = &set{} + errZeroWeight = errors.New("weight must be non-zero") + errDuplicateValidator = errors.New("duplicate validator") + errMissingValidator = errors.New("missing validator") +) // Set of validators that can be sampled type Set interface { - fmt.Stringer - PrefixedString(string) string - - // Set removes all the current validators and adds all the provided - // validators to the set. - Set([]Validator) error - - // AddWeight to a staker. - AddWeight(ids.NodeID, uint64) error + formatting.PrefixedStringer + + // Add a new staker to the set. + // Returns an error if: + // - [weight] is 0 + // - [nodeID] is already in the validator set + // - the total weight of the validator set would overflow uint64 + // If an error is returned, the set will be unmodified. + Add(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error + + // AddWeight to an existing staker. + // Returns an error if: + // - [weight] is 0 + // - [nodeID] is not already in the validator set + // - the total weight of the validator set would overflow uint64 + // If an error is returned, the set will be unmodified. + AddWeight(nodeID ids.NodeID, weight uint64) error // GetWeight retrieves the validator weight from the set. - GetWeight(ids.NodeID) (uint64, bool) + GetWeight(ids.NodeID) uint64 + + // Get returns the validator tied to the specified ID. + Get(ids.NodeID) (*Validator, bool) // SubsetWeight returns the sum of the weights of the validators. - SubsetWeight(ids.NodeIDSet) (uint64, error) + SubsetWeight(set.Set[ids.NodeID]) uint64 - // RemoveWeight from a staker. - RemoveWeight(ids.NodeID, uint64) error + // RemoveWeight from a staker. If the staker's weight becomes 0, the staker + // will be removed from the validator set. 
+ // Returns an error if: + // - [weight] is 0 + // - [nodeID] is not already in the validator set + // - the weight of the validator would become negative + // If an error is returned, the set will be unmodified. + RemoveWeight(nodeID ids.NodeID, weight uint64) error // Contains returns true if there is a validator with the specified ID // currently in the set. @@ -57,519 +71,364 @@ type Set interface { Len() int // List all the validators in this group - List() []Validator + List() []*Validator // Weight returns the cumulative weight of all validators in the set. Weight() uint64 - // Sample returns a collection of validators, potentially with duplicates. + // Sample returns a collection of validatorIDs, potentially with duplicates. // If sampling the requested size isn't possible, an error will be returned. - Sample(size int) ([]Validator, error) - - // MaskValidator hides the named validator from future samplings - MaskValidator(ids.NodeID) error + Sample(size int) ([]ids.NodeID, error) // When a validator's weight changes, or a validator is added/removed, // this listener is called. RegisterCallbackListener(SetCallbackListener) - - RevealValidator(ids.NodeID) error } type SetCallbackListener interface { - OnValidatorAdded(validatorID ids.NodeID, weight uint64) + OnValidatorAdded(validatorID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) OnValidatorRemoved(validatorID ids.NodeID, weight uint64) OnValidatorWeightChanged(validatorID ids.NodeID, oldWeight, newWeight uint64) } // NewSet returns a new, empty set of validators. func NewSet() Set { - return &set{ - vdrMap: make(map[ids.NodeID]int), + return &vdrSet{ + vdrs: make(map[ids.NodeID]*Validator), sampler: sampler.NewWeightedWithoutReplacement(), } } // NewBestSet returns a new, empty set of validators. 
func NewBestSet(expectedSampleSize int) Set { - return &set{ - vdrMap: make(map[ids.NodeID]int), + return &vdrSet{ + vdrs: make(map[ids.NodeID]*Validator), sampler: sampler.NewBestWeightedWithoutReplacement(expectedSampleSize), } } -// set of validators. Validator function results are cached. Therefore, to -// update a validators weight, one should ensure to call add with the updated -// validator. -type set struct { - initialized bool - lock sync.RWMutex - vdrMap map[ids.NodeID]int - vdrSlice []*validator - vdrWeights []uint64 - vdrMaskedWeights []uint64 - sampler sampler.WeightedWithoutReplacement - totalWeight uint64 - maskedVdrs ids.NodeIDSet +type vdrSet struct { + lock sync.RWMutex + vdrs map[ids.NodeID]*Validator + vdrSlice []*Validator + weights []uint64 + totalWeight uint64 + + samplerInitialized bool + sampler sampler.WeightedWithoutReplacement + callbackListeners []SetCallbackListener } -func (s *set) Set(vdrs []Validator) error { +func (s *vdrSet) Add(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { + if weight == 0 { + return errZeroWeight + } + s.lock.Lock() defer s.lock.Unlock() - return s.set(vdrs) + return s.add(nodeID, pk, txID, weight) } -func (s *set) set(vdrs []Validator) error { - // find all the nodes that are going to be added or have their weight changed - nodesInResultSet := ids.NewNodeIDSet(len(vdrs)) - for _, vdr := range vdrs { - vdrID := vdr.ID() - if nodesInResultSet.Contains(vdrID) { - continue - } - nodesInResultSet.Add(vdrID) - - newWeight := vdr.Weight() - index, contains := s.vdrMap[vdrID] - if !contains { - s.callValidatorAddedCallbacks(vdrID, newWeight) - continue - } - - existingWeight := s.vdrWeights[index] - if existingWeight != newWeight { - s.callWeightChangeCallbacks(vdrID, existingWeight, newWeight) - } +func (s *vdrSet) add(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) error { + _, nodeExists := s.vdrs[nodeID] + if nodeExists { + return errDuplicateValidator } - // find all 
nodes that are going to be removed - for nodeID, index := range s.vdrMap { - if !nodesInResultSet.Contains(nodeID) { - s.callValidatorRemovedCallbacks(nodeID, s.vdrWeights[index]) - } + // We first calculate the new total weight of the set, as this guarantees + // that none of the following operations can overflow. + newTotalWeight, err := math.Add64(s.totalWeight, weight) + if err != nil { + return err } - lenVdrs := len(vdrs) - // If the underlying arrays are much larger than necessary, resize them to - // allow garbage collection of unused memory - if cap(s.vdrSlice) > len(s.vdrSlice)*maxExcessCapacityFactor { - newCap := cap(s.vdrSlice) / capacityReductionFactor - if newCap < lenVdrs { - newCap = lenVdrs - } - s.vdrSlice = make([]*validator, 0, newCap) - s.vdrWeights = make([]uint64, 0, newCap) - s.vdrMaskedWeights = make([]uint64, 0, newCap) - } else { - s.vdrSlice = s.vdrSlice[:0] - s.vdrWeights = s.vdrWeights[:0] - s.vdrMaskedWeights = s.vdrMaskedWeights[:0] + vdr := &Validator{ + NodeID: nodeID, + PublicKey: pk, + TxID: txID, + Weight: weight, + index: len(s.vdrSlice), } - s.vdrMap = make(map[ids.NodeID]int, lenVdrs) - s.totalWeight = 0 - s.initialized = false - - for _, vdr := range vdrs { - vdrID := vdr.ID() - if s.contains(vdrID) { - continue - } - w := vdr.Weight() - if w == 0 { - continue // This validator would never be sampled anyway - } - - i := len(s.vdrSlice) - s.vdrMap[vdrID] = i - s.vdrSlice = append(s.vdrSlice, &validator{ - nodeID: vdr.ID(), - weight: vdr.Weight(), - }) - s.vdrWeights = append(s.vdrWeights, w) - s.vdrMaskedWeights = append(s.vdrMaskedWeights, 0) - - if s.maskedVdrs.Contains(vdrID) { - continue - } - s.vdrMaskedWeights[len(s.vdrMaskedWeights)-1] = w + s.vdrs[nodeID] = vdr + s.vdrSlice = append(s.vdrSlice, vdr) + s.weights = append(s.weights, weight) + s.totalWeight = newTotalWeight + s.samplerInitialized = false - newTotalWeight, err := safemath.Add64(s.totalWeight, w) - if err != nil { - return err - } - s.totalWeight = 
newTotalWeight - } + s.callValidatorAddedCallbacks(nodeID, pk, txID, weight) return nil } -func (s *set) AddWeight(vdrID ids.NodeID, weight uint64) error { +func (s *vdrSet) AddWeight(nodeID ids.NodeID, weight uint64) error { if weight == 0 { - return nil // This validator would never be sampled anyway + return errZeroWeight } + s.lock.Lock() defer s.lock.Unlock() - return s.addWeight(vdrID, weight) + return s.addWeight(nodeID, weight) } -func (s *set) addWeight(vdrID ids.NodeID, weight uint64) error { - var vdr *validator - i, nodeExists := s.vdrMap[vdrID] +func (s *vdrSet) addWeight(nodeID ids.NodeID, weight uint64) error { + vdr, nodeExists := s.vdrs[nodeID] if !nodeExists { - vdr = &validator{ - nodeID: vdrID, - } - i = len(s.vdrSlice) - s.vdrSlice = append(s.vdrSlice, vdr) - s.vdrWeights = append(s.vdrWeights, 0) - s.vdrMaskedWeights = append(s.vdrMaskedWeights, 0) - s.vdrMap[vdrID] = i - s.callValidatorAddedCallbacks(vdrID, weight) - } else { - vdr = s.vdrSlice[i] - } - - oldWeight := s.vdrWeights[i] - s.vdrWeights[i] += weight - vdr.addWeight(weight) - - if nodeExists { - s.callWeightChangeCallbacks(vdrID, oldWeight, vdr.weight) + return errMissingValidator } - if s.maskedVdrs.Contains(vdrID) { - return nil - } - s.vdrMaskedWeights[i] += weight - - newTotalWeight, err := safemath.Add64(s.totalWeight, weight) + // We first calculate the new total weight of the set, as this guarantees + // that none of the following operations can overflow. 
+ newTotalWeight, err := math.Add64(s.totalWeight, weight) if err != nil { - return nil + return err } + + oldWeight := vdr.Weight + vdr.Weight += weight + s.weights[vdr.index] += weight s.totalWeight = newTotalWeight - s.initialized = false + s.samplerInitialized = false + + s.callWeightChangeCallbacks(nodeID, oldWeight, vdr.Weight) return nil } -func (s *set) GetWeight(vdrID ids.NodeID) (uint64, bool) { +func (s *vdrSet) GetWeight(nodeID ids.NodeID) uint64 { s.lock.RLock() defer s.lock.RUnlock() - return s.getWeight(vdrID) + return s.getWeight(nodeID) } -func (s *set) getWeight(vdrID ids.NodeID) (uint64, bool) { - if index, ok := s.vdrMap[vdrID]; ok { - return s.vdrMaskedWeights[index], true +func (s *vdrSet) getWeight(nodeID ids.NodeID) uint64 { + if vdr, ok := s.vdrs[nodeID]; ok { + return vdr.Weight } - return 0, false + return 0 } -func (s *set) SubsetWeight(subset ids.NodeIDSet) (uint64, error) { +func (s *vdrSet) SubsetWeight(subset set.Set[ids.NodeID]) uint64 { s.lock.RLock() defer s.lock.RUnlock() - totalWeight := uint64(0) - for vdrID := range subset { - weight, ok := s.getWeight(vdrID) - if !ok { - continue - } - newWeight, err := safemath.Add64(totalWeight, weight) - if err != nil { - return 0, err - } - totalWeight = newWeight + return s.subsetWeight(subset) +} + +func (s *vdrSet) subsetWeight(subset set.Set[ids.NodeID]) uint64 { + var totalWeight uint64 + for nodeID := range subset { + // Because [totalWeight] will be <= [s.totalWeight], we are guaranteed + // this will not overflow. 
+ totalWeight += s.getWeight(nodeID) } - return totalWeight, nil + return totalWeight } -func (s *set) RemoveWeight(vdrID ids.NodeID, weight uint64) error { +func (s *vdrSet) RemoveWeight(nodeID ids.NodeID, weight uint64) error { if weight == 0 { - return nil + return errZeroWeight } + s.lock.Lock() defer s.lock.Unlock() - return s.removeWeight(vdrID, weight) + return s.removeWeight(nodeID, weight) } -func (s *set) removeWeight(vdrID ids.NodeID, weight uint64) error { - i, ok := s.vdrMap[vdrID] +func (s *vdrSet) removeWeight(nodeID ids.NodeID, weight uint64) error { + vdr, ok := s.vdrs[nodeID] if !ok { - return nil + return errMissingValidator } - // Validator exists - vdr := s.vdrSlice[i] - - oldWeight := s.vdrWeights[i] - weight = safemath.Min64(oldWeight, weight) - s.vdrWeights[i] -= weight - vdr.removeWeight(weight) - if !s.maskedVdrs.Contains(vdrID) { - s.totalWeight -= weight - s.vdrMaskedWeights[i] -= weight + oldWeight := vdr.Weight + // We first calculate the new weight of the validator, as this guarantees + // that none of the following operations can underflow. 
+ newWeight, err := math.Sub(oldWeight, weight) + if err != nil { + return err } - if vdr.Weight() == 0 { - s.callValidatorRemovedCallbacks(vdrID, oldWeight) - if err := s.remove(vdrID); err != nil { - return err - } + if newWeight == 0 { + // Get the last element + lastIndex := len(s.vdrSlice) - 1 + vdrToSwap := s.vdrSlice[lastIndex] + + // Move element at last index --> index of removed validator + vdrToSwap.index = vdr.index + s.vdrSlice[vdr.index] = vdrToSwap + s.weights[vdr.index] = vdrToSwap.Weight + + // Remove validator + delete(s.vdrs, nodeID) + s.vdrSlice[lastIndex] = nil + s.vdrSlice = s.vdrSlice[:lastIndex] + s.weights = s.weights[:lastIndex] + + s.callValidatorRemovedCallbacks(nodeID, oldWeight) } else { - s.callWeightChangeCallbacks(vdrID, oldWeight, vdr.weight) + vdr.Weight = newWeight + s.weights[vdr.index] = newWeight + + s.callWeightChangeCallbacks(nodeID, oldWeight, newWeight) } - s.initialized = false + s.totalWeight -= weight + s.samplerInitialized = false return nil } -func (s *set) Get(vdrID ids.NodeID) (Validator, bool) { +func (s *vdrSet) Get(nodeID ids.NodeID) (*Validator, bool) { s.lock.RLock() defer s.lock.RUnlock() - return s.get(vdrID) + return s.get(nodeID) } -func (s *set) get(vdrID ids.NodeID) (Validator, bool) { - index, ok := s.vdrMap[vdrID] +func (s *vdrSet) get(nodeID ids.NodeID) (*Validator, bool) { + vdr, ok := s.vdrs[nodeID] if !ok { return nil, false } - return s.vdrSlice[index], true -} - -func (s *set) remove(vdrID ids.NodeID) error { - // Get the element to remove - i, contains := s.vdrMap[vdrID] - if !contains { - return nil - } - - // Get the last element - e := len(s.vdrSlice) - 1 - eVdr := s.vdrSlice[e] - - // Move e -> i - iElem := s.vdrSlice[i] - s.vdrMap[eVdr.ID()] = i - s.vdrSlice[i] = eVdr - s.vdrWeights[i] = s.vdrWeights[e] - s.vdrMaskedWeights[i] = s.vdrMaskedWeights[e] - - // Remove i - delete(s.vdrMap, vdrID) - s.vdrSlice[e] = nil - s.vdrSlice = s.vdrSlice[:e] - s.vdrWeights = s.vdrWeights[:e] - 
s.vdrMaskedWeights = s.vdrMaskedWeights[:e] - - if !s.maskedVdrs.Contains(vdrID) { - newTotalWeight, err := safemath.Sub64(s.totalWeight, iElem.Weight()) - if err != nil { - return err - } - s.totalWeight = newTotalWeight - } - s.initialized = false - return nil + copiedVdr := *vdr + return &copiedVdr, true } -func (s *set) Contains(vdrID ids.NodeID) bool { +func (s *vdrSet) Contains(nodeID ids.NodeID) bool { s.lock.RLock() defer s.lock.RUnlock() - return s.contains(vdrID) + return s.contains(nodeID) } -func (s *set) contains(vdrID ids.NodeID) bool { - _, contains := s.vdrMap[vdrID] +func (s *vdrSet) contains(nodeID ids.NodeID) bool { + _, contains := s.vdrs[nodeID] return contains } -func (s *set) Len() int { +func (s *vdrSet) Len() int { s.lock.RLock() defer s.lock.RUnlock() return s.len() } -func (s *set) len() int { return len(s.vdrSlice) } +func (s *vdrSet) len() int { + return len(s.vdrSlice) +} -func (s *set) List() []Validator { +func (s *vdrSet) List() []*Validator { s.lock.RLock() defer s.lock.RUnlock() return s.list() } -func (s *set) list() []Validator { - list := make([]Validator, len(s.vdrSlice)) +func (s *vdrSet) list() []*Validator { + list := make([]*Validator, len(s.vdrSlice)) for i, vdr := range s.vdrSlice { - list[i] = vdr + copiedVdr := *vdr + list[i] = &copiedVdr } return list } -func (s *set) Sample(size int) ([]Validator, error) { +func (s *vdrSet) Sample(size int) ([]ids.NodeID, error) { if size == 0 { return nil, nil } + s.lock.Lock() defer s.lock.Unlock() return s.sample(size) } -func (s *set) sample(size int) ([]Validator, error) { - if !s.initialized { - if err := s.sampler.Initialize(s.vdrMaskedWeights); err != nil { +func (s *vdrSet) sample(size int) ([]ids.NodeID, error) { + if !s.samplerInitialized { + if err := s.sampler.Initialize(s.weights); err != nil { return nil, err } - s.initialized = true + s.samplerInitialized = true } + indices, err := s.sampler.Sample(size) if err != nil { return nil, err } - list := make([]Validator, 
size) + list := make([]ids.NodeID, size) for i, index := range indices { - list[i] = s.vdrSlice[index] + list[i] = s.vdrSlice[index].NodeID } return list, nil } -func (s *set) Weight() uint64 { +func (s *vdrSet) Weight() uint64 { s.lock.RLock() defer s.lock.RUnlock() return s.totalWeight } -func (s *set) String() string { +func (s *vdrSet) String() string { return s.PrefixedString("") } -func (s *set) PrefixedString(prefix string) string { +func (s *vdrSet) PrefixedString(prefix string) string { s.lock.RLock() defer s.lock.RUnlock() return s.prefixedString(prefix) } -func (s *set) prefixedString(prefix string) string { +func (s *vdrSet) prefixedString(prefix string) string { sb := strings.Builder{} - totalWeight := uint64(0) - for _, weight := range s.vdrWeights { - totalWeight += weight - } - - sb.WriteString(fmt.Sprintf("Validator Set: (Size = %d, SampleableWeight = %d, Weight = %d)", + sb.WriteString(fmt.Sprintf("Validator Set: (Size = %d, Weight = %d)", len(s.vdrSlice), s.totalWeight, - totalWeight, )) - format := fmt.Sprintf("\n%s Validator[%s]: %%33s, %%d/%%d", prefix, formatting.IntFormat(len(s.vdrSlice)-1)) + format := fmt.Sprintf("\n%s Validator[%s]: %%33s, %%d", prefix, formatting.IntFormat(len(s.vdrSlice)-1)) for i, vdr := range s.vdrSlice { - sb.WriteString(fmt.Sprintf(format, + sb.WriteString(fmt.Sprintf( + format, i, - vdr.ID(), - s.vdrMaskedWeights[i], - vdr.Weight())) + vdr.NodeID, + vdr.Weight, + )) } return sb.String() } -func (s *set) MaskValidator(vdrID ids.NodeID) error { - s.lock.Lock() - defer s.lock.Unlock() - - return s.maskValidator(vdrID) -} - -func (s *set) maskValidator(vdrID ids.NodeID) error { - if s.maskedVdrs.Contains(vdrID) { - return nil - } - - s.maskedVdrs.Add(vdrID) - - // Get the element to mask - i, contains := s.vdrMap[vdrID] - if !contains { - return nil - } - - s.vdrMaskedWeights[i] = 0 - s.totalWeight -= s.vdrWeights[i] - s.initialized = false - - return nil -} - -func (s *set) RevealValidator(vdrID ids.NodeID) error { - 
s.lock.Lock() - defer s.lock.Unlock() - - return s.revealValidator(vdrID) -} - -func (s *set) revealValidator(vdrID ids.NodeID) error { - if !s.maskedVdrs.Contains(vdrID) { - return nil - } - - s.maskedVdrs.Remove(vdrID) - - // Get the element to reveal - i, contains := s.vdrMap[vdrID] - if !contains { - return nil - } - - weight := s.vdrWeights[i] - s.vdrMaskedWeights[i] = weight - newTotalWeight, err := safemath.Add64(s.totalWeight, weight) - if err != nil { - return err - } - s.totalWeight = newTotalWeight - s.initialized = false - - return nil -} - -func (s *set) RegisterCallbackListener(callbackListener SetCallbackListener) { +func (s *vdrSet) RegisterCallbackListener(callbackListener SetCallbackListener) { s.lock.Lock() defer s.lock.Unlock() s.callbackListeners = append(s.callbackListeners, callbackListener) - for node, index := range s.vdrMap { - callbackListener.OnValidatorAdded(node, s.vdrWeights[index]) + for _, vdr := range s.vdrSlice { + callbackListener.OnValidatorAdded(vdr.NodeID, vdr.PublicKey, vdr.TxID, vdr.Weight) } } // Assumes [s.lock] is held -func (s *set) callWeightChangeCallbacks(node ids.NodeID, oldWeight, newWeight uint64) { +func (s *vdrSet) callWeightChangeCallbacks(node ids.NodeID, oldWeight, newWeight uint64) { for _, callbackListener := range s.callbackListeners { callbackListener.OnValidatorWeightChanged(node, oldWeight, newWeight) } } // Assumes [s.lock] is held -func (s *set) callValidatorAddedCallbacks(node ids.NodeID, weight uint64) { +func (s *vdrSet) callValidatorAddedCallbacks(node ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { for _, callbackListener := range s.callbackListeners { - callbackListener.OnValidatorAdded(node, weight) + callbackListener.OnValidatorAdded(node, pk, txID, weight) } } // Assumes [s.lock] is held -func (s *set) callValidatorRemovedCallbacks(node ids.NodeID, weight uint64) { +func (s *vdrSet) callValidatorRemovedCallbacks(node ids.NodeID, weight uint64) { for _, callbackListener := range 
s.callbackListeners { callbackListener.OnValidatorRemoved(node, weight) } diff --git a/avalanchego/snow/validators/set_test.go b/avalanchego/snow/validators/set_test.go index 4c8fbd81..645a5215 100644 --- a/avalanchego/snow/validators/set_test.go +++ b/avalanchego/snow/validators/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators @@ -6,270 +6,438 @@ package validators import ( "math" "testing" - "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" ) -func TestSetSet(t *testing.T) { - vdr0 := NewValidator(ids.EmptyNodeID, 1) - vdr1 := NewValidator(ids.NodeID{0xFF}, math.MaxInt64-1) - // Should be discarded, because it has a weight of 0 - vdr2 := NewValidator(ids.NodeID{0xAA}, 0) +func TestSetAddZeroWeight(t *testing.T) { + require := require.New(t) s := NewSet() - err := s.Set([]Validator{vdr0, vdr1, vdr2}) - require.NoError(t, err) + err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 0) + require.ErrorIs(err, errZeroWeight) +} - length := s.Len() - require.Equal(t, 2, length, "should have two validators") +func TestSetAddDuplicate(t *testing.T) { + require := require.New(t) - contains := s.Contains(vdr0.ID()) - require.True(t, contains, "should have contained vdr0") + s := NewSet() - contains = s.Contains(vdr1.ID()) - require.True(t, contains, "should have contained vdr1") + nodeID := ids.GenerateTestNodeID() + err := s.Add(nodeID, nil, ids.Empty, 1) + require.NoError(err) - sampled, err := s.Sample(1) - require.NoError(t, err) - require.Len(t, sampled, 1, "should have only sampled one validator") - require.Equal(t, vdr1.ID(), sampled[0].ID(), "should have sampled vdr1") + err = s.Add(nodeID, nil, ids.Empty, 1) + 
require.ErrorIs(err, errDuplicateValidator) } -func TestSamplerSample(t *testing.T) { - vdr0 := ids.GenerateTestNodeID() - vdr1 := ids.GenerateTestNodeID() +func TestSetAddOverflow(t *testing.T) { + require := require.New(t) s := NewSet() - err := s.AddWeight(vdr0, 1) - require.NoError(t, err) + err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) + require.NoError(err) - sampled, err := s.Sample(1) - require.NoError(t, err) - require.Len(t, sampled, 1, "should have only sampled one validator") - require.Equal(t, vdr0, sampled[0].ID(), "should have sampled vdr0") + err = s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, math.MaxUint64) + require.Error(err) - _, err = s.Sample(2) - require.Error(t, err, "should have errored during sampling") + weight := s.Weight() + require.EqualValues(1, weight) +} - err = s.AddWeight(vdr1, math.MaxInt64-1) - require.NoError(t, err) +func TestSetAddWeightZeroWeight(t *testing.T) { + require := require.New(t) - sampled, err = s.Sample(1) - require.NoError(t, err) - require.Len(t, sampled, 1, "should have only sampled one validator") - require.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") + s := NewSet() - sampled, err = s.Sample(2) - require.NoError(t, err) - require.Len(t, sampled, 2, "should have sampled two validators") - require.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") - require.Equal(t, vdr1, sampled[1].ID(), "should have sampled vdr1") + nodeID := ids.GenerateTestNodeID() + err := s.Add(nodeID, nil, ids.Empty, 1) + require.NoError(err) - sampled, err = s.Sample(3) - require.NoError(t, err) - require.Len(t, sampled, 3, "should have sampled three validators") - require.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") - require.Equal(t, vdr1, sampled[1].ID(), "should have sampled vdr1") - require.Equal(t, vdr1, sampled[2].ID(), "should have sampled vdr1") + err = s.AddWeight(nodeID, 0) + require.ErrorIs(err, errZeroWeight) } -func TestSamplerDuplicate(t *testing.T) { - vdr0 := 
ids.GenerateTestNodeID() - vdr1 := ids.GenerateTestNodeID() +func TestSetAddWeightOverflow(t *testing.T) { + require := require.New(t) s := NewSet() - err := s.AddWeight(vdr0, 1) - require.NoError(t, err) - err = s.AddWeight(vdr1, 1) - require.NoError(t, err) + err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) + require.NoError(err) - err = s.AddWeight(vdr1, math.MaxInt64-2) - require.NoError(t, err) + nodeID := ids.GenerateTestNodeID() + err = s.Add(nodeID, nil, ids.Empty, 1) + require.NoError(err) - sampled, err := s.Sample(1) - require.NoError(t, err) - require.Len(t, sampled, 1, "should have only sampled one validator") - require.Equal(t, vdr1, sampled[0].ID(), "should have sampled vdr1") + err = s.AddWeight(nodeID, math.MaxUint64-1) + require.Error(err) + + weight := s.Weight() + require.EqualValues(2, weight) } -func TestSamplerContains(t *testing.T) { - vdr := ids.GenerateTestNodeID() +func TestSetGetWeight(t *testing.T) { + require := require.New(t) s := NewSet() - err := s.AddWeight(vdr, 1) - require.NoError(t, err) - contains := s.Contains(vdr) - require.True(t, contains, "should have contained validator") + nodeID := ids.GenerateTestNodeID() + weight := s.GetWeight(nodeID) + require.Zero(weight) - err = s.RemoveWeight(vdr, 1) - require.NoError(t, err) + err := s.Add(nodeID, nil, ids.Empty, 1) + require.NoError(err) - contains = s.Contains(vdr) - require.False(t, contains, "shouldn't have contained validator") + weight = s.GetWeight(nodeID) + require.EqualValues(1, weight) } -func TestSamplerString(t *testing.T) { - vdr0 := ids.EmptyNodeID - vdr1 := ids.NodeID{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - } +func TestSetSubsetWeight(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.GenerateTestNodeID() + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + + weight0 := uint64(93) + weight1 := uint64(123) + weight2 := uint64(810) + + 
subset := set.Set[ids.NodeID]{} + subset.Add(nodeID0) + subset.Add(nodeID1) s := NewSet() - err := s.AddWeight(vdr0, 1) - require.NoError(t, err) - err = s.AddWeight(vdr1, math.MaxInt64-1) - require.NoError(t, err) + err := s.Add(nodeID0, nil, ids.Empty, weight0) + require.NoError(err) - expected := "Validator Set: (Size = 2, SampleableWeight = 9223372036854775807, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1/1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806/9223372036854775806" - result := s.String() - require.Equal(t, expected, result, "wrong string returned") + err = s.Add(nodeID1, nil, ids.Empty, weight1) + require.NoError(err) + + err = s.Add(nodeID2, nil, ids.Empty, weight2) + require.NoError(err) + + expectedWeight := weight0 + weight1 + subsetWeight := s.SubsetWeight(subset) + require.Equal(expectedWeight, subsetWeight) } -func TestSetWeight(t *testing.T) { - vdr0 := ids.NodeID{1} - weight0 := uint64(93) - vdr1 := ids.NodeID{2} - weight1 := uint64(123) +func TestSetRemoveWeightZeroWeight(t *testing.T) { + require := require.New(t) s := NewSet() - err := s.AddWeight(vdr0, weight0) - require.NoError(t, err) - err = s.AddWeight(vdr1, weight1) - require.NoError(t, err) + nodeID := ids.GenerateTestNodeID() + err := s.Add(nodeID, nil, ids.Empty, 1) + require.NoError(err) - setWeight := s.Weight() - expectedWeight := weight0 + weight1 - require.Equal(t, expectedWeight, setWeight, "wrong set weight") + err = s.RemoveWeight(nodeID, 0) + require.ErrorIs(err, errZeroWeight) } -func TestSetSubsetWeight(t *testing.T) { +func TestSetRemoveWeightMissingValidator(t *testing.T) { + require := require.New(t) + + s := NewSet() + + err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) + require.NoError(err) + + err = s.RemoveWeight(ids.GenerateTestNodeID(), 1) + require.ErrorIs(err, errMissingValidator) +} + +func TestSetRemoveWeightUnderflow(t *testing.T) { + require := require.New(t) + + s 
:= NewSet() + + err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) + require.NoError(err) + + nodeID := ids.GenerateTestNodeID() + err = s.Add(nodeID, nil, ids.Empty, 1) + require.NoError(err) + + err = s.RemoveWeight(nodeID, 2) + require.Error(err) + + weight := s.Weight() + require.EqualValues(2, weight) +} + +func TestSetGet(t *testing.T) { + require := require.New(t) + + s := NewSet() + + nodeID := ids.GenerateTestNodeID() + _, ok := s.Get(nodeID) + require.False(ok) + + sk, err := bls.NewSecretKey() + require.NoError(err) + + pk := bls.PublicFromSecretKey(sk) + err = s.Add(nodeID, pk, ids.Empty, 1) + require.NoError(err) + + vdr0, ok := s.Get(nodeID) + require.True(ok) + require.Equal(nodeID, vdr0.NodeID) + require.Equal(pk, vdr0.PublicKey) + require.EqualValues(1, vdr0.Weight) + + err = s.AddWeight(nodeID, 1) + require.NoError(err) + + vdr1, ok := s.Get(nodeID) + require.True(ok) + require.Equal(nodeID, vdr0.NodeID) + require.Equal(pk, vdr0.PublicKey) + require.EqualValues(1, vdr0.Weight) + require.Equal(nodeID, vdr1.NodeID) + require.Equal(pk, vdr1.PublicKey) + require.EqualValues(2, vdr1.Weight) +} + +func TestSetContains(t *testing.T) { + require := require.New(t) + + s := NewSet() + + nodeID := ids.GenerateTestNodeID() + contains := s.Contains(nodeID) + require.False(contains) + + err := s.Add(nodeID, nil, ids.Empty, 1) + require.NoError(err) + + contains = s.Contains(nodeID) + require.True(contains) + + err = s.RemoveWeight(nodeID, 1) + require.NoError(err) + + contains = s.Contains(nodeID) + require.False(contains) +} + +func TestSetLen(t *testing.T) { + require := require.New(t) + + s := NewSet() + + len := s.Len() + require.Zero(len) + + nodeID0 := ids.GenerateTestNodeID() + err := s.Add(nodeID0, nil, ids.Empty, 1) + require.NoError(err) + + len = s.Len() + require.Equal(1, len) + + nodeID1 := ids.GenerateTestNodeID() + err = s.Add(nodeID1, nil, ids.Empty, 1) + require.NoError(err) + + len = s.Len() + require.Equal(2, len) + + err = 
s.RemoveWeight(nodeID1, 1) + require.NoError(err) + + len = s.Len() + require.Equal(1, len) + + err = s.RemoveWeight(nodeID0, 1) + require.NoError(err) + + len = s.Len() + require.Zero(len) +} + +func TestSetList(t *testing.T) { + require := require.New(t) + + s := NewSet() + + list := s.List() + require.Empty(list) + + sk, err := bls.NewSecretKey() + require.NoError(err) + + pk := bls.PublicFromSecretKey(sk) + nodeID0 := ids.GenerateTestNodeID() + err = s.Add(nodeID0, pk, ids.Empty, 2) + require.NoError(err) + + list = s.List() + require.Len(list, 1) + + node0 := list[0] + require.Equal(nodeID0, node0.NodeID) + require.Equal(pk, node0.PublicKey) + require.EqualValues(2, node0.Weight) + + nodeID1 := ids.GenerateTestNodeID() + err = s.Add(nodeID1, nil, ids.Empty, 1) + require.NoError(err) + + list = s.List() + require.Len(list, 2) + + node0 = list[0] + require.Equal(nodeID0, node0.NodeID) + require.Equal(pk, node0.PublicKey) + require.EqualValues(2, node0.Weight) + + node1 := list[1] + require.Equal(nodeID1, node1.NodeID) + require.Nil(node1.PublicKey) + require.EqualValues(1, node1.Weight) + + err = s.RemoveWeight(nodeID0, 1) + require.NoError(err) + require.Equal(nodeID0, node0.NodeID) + require.Equal(pk, node0.PublicKey) + require.EqualValues(2, node0.Weight) + + list = s.List() + require.Len(list, 2) + + node0 = list[0] + require.Equal(nodeID0, node0.NodeID) + require.Equal(pk, node0.PublicKey) + require.EqualValues(1, node0.Weight) + + node1 = list[1] + require.Equal(nodeID1, node1.NodeID) + require.Nil(node1.PublicKey) + require.EqualValues(1, node1.Weight) + + err = s.RemoveWeight(nodeID0, 1) + require.NoError(err) + + list = s.List() + require.Len(list, 1) + + node0 = list[0] + require.Equal(nodeID1, node0.NodeID) + require.Nil(node0.PublicKey) + require.EqualValues(1, node0.Weight) + + err = s.RemoveWeight(nodeID1, 1) + require.NoError(err) + + list = s.List() + require.Empty(list) +} + +func TestSetWeight(t *testing.T) { + require := require.New(t) + vdr0 
:= ids.NodeID{1} weight0 := uint64(93) vdr1 := ids.NodeID{2} weight1 := uint64(123) - vdr2 := ids.NodeID{3} - weight2 := uint64(810) - subset := ids.NodeIDSet{} - subset.Add(vdr0) - subset.Add(vdr1) s := NewSet() - err := s.AddWeight(vdr0, weight0) - require.NoError(t, err) + err := s.Add(vdr0, nil, ids.Empty, weight0) + require.NoError(err) - err = s.AddWeight(vdr1, weight1) - require.NoError(t, err) - err = s.AddWeight(vdr2, weight2) - require.NoError(t, err) + err = s.Add(vdr1, nil, ids.Empty, weight1) + require.NoError(err) - subsetWeight, err := s.SubsetWeight(subset) - if err != nil { - t.Fatal(err) - } + setWeight := s.Weight() expectedWeight := weight0 + weight1 - require.Equal(t, expectedWeight, subsetWeight, "wrong subset weight") + require.Equal(expectedWeight, setWeight) } -func TestSamplerMasked(t *testing.T) { - vdr0 := ids.EmptyNodeID - vdr1 := ids.NodeID{ - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, - } +func TestSetSample(t *testing.T) { + require := require.New(t) s := NewSet() - err := s.AddWeight(vdr0, 1) - require.NoError(t, err) - err = s.MaskValidator(vdr1) - require.NoError(t, err) + sampled, err := s.Sample(0) + require.NoError(err) + require.Empty(sampled) - { - expected := "Validator Set: (Size = 1, SampleableWeight = 1, Weight = 1)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1/1" - result := s.String() - require.Equal(t, expected, result, "wrong string returned") - } + sk, err := bls.NewSecretKey() + require.NoError(err) - err = s.AddWeight(vdr1, math.MaxInt64-1) - require.NoError(t, err) + nodeID0 := ids.GenerateTestNodeID() + pk := bls.PublicFromSecretKey(sk) + err = s.Add(nodeID0, pk, ids.Empty, 1) + require.NoError(err) - { - expected := "Validator Set: (Size = 2, SampleableWeight = 1, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1/1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 
0/9223372036854775806" - result := s.String() - require.Equal(t, expected, result, "wrong string returned") - } + sampled, err = s.Sample(1) + require.NoError(err) + require.Equal([]ids.NodeID{nodeID0}, sampled) - err = s.RevealValidator(vdr1) - require.NoError(t, err) + _, err = s.Sample(2) + require.Error(err) - { - expected := "Validator Set: (Size = 2, SampleableWeight = 9223372036854775807, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1/1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806/9223372036854775806" - result := s.String() - require.Equal(t, expected, result, "wrong string returned") - } + nodeID1 := ids.GenerateTestNodeID() + err = s.Add(nodeID1, nil, ids.Empty, math.MaxInt64-1) + require.NoError(err) - err = s.MaskValidator(vdr1) - require.NoError(t, err) + sampled, err = s.Sample(1) + require.NoError(err) + require.Equal([]ids.NodeID{nodeID1}, sampled) - { - expected := "Validator Set: (Size = 2, SampleableWeight = 1, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1/1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 0/9223372036854775806" - result := s.String() - require.Equal(t, expected, result, "wrong string returned") - } + sampled, err = s.Sample(2) + require.NoError(err) + require.Equal([]ids.NodeID{nodeID1, nodeID1}, sampled) - err = s.RevealValidator(vdr1) - require.NoError(t, err) + sampled, err = s.Sample(3) + require.NoError(err) + require.Equal([]ids.NodeID{nodeID1, nodeID1, nodeID1}, sampled) +} + +func TestSetString(t *testing.T) { + require := require.New(t) - { - expected := "Validator Set: (Size = 2, SampleableWeight = 9223372036854775807, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1/1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806/9223372036854775806" - result := s.String() - require.Equal(t, expected, result, "wrong 
string returned") + nodeID0 := ids.EmptyNodeID + nodeID1 := ids.NodeID{ + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, } - err = s.RevealValidator(vdr1) - require.NoError(t, err) + s := NewSet() + err := s.Add(nodeID0, nil, ids.Empty, 1) + require.NoError(err) - { - expected := "Validator Set: (Size = 2, SampleableWeight = 9223372036854775807, Weight = 9223372036854775807)\n" + - " Validator[0]: NodeID-111111111111111111116DBWJs, 1/1\n" + - " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806/9223372036854775806" - result := s.String() - require.Equal(t, expected, result, "wrong string returned") - } + err = s.Add(nodeID1, nil, ids.Empty, math.MaxInt64-1) + require.NoError(err) + + expected := "Validator Set: (Size = 2, Weight = 9223372036854775807)\n" + + " Validator[0]: NodeID-111111111111111111116DBWJs, 1\n" + + " Validator[1]: NodeID-QLbz7JHiBTspS962RLKV8GndWFwdYhk6V, 9223372036854775806" + result := s.String() + require.Equal(expected, result) } -var _ SetCallbackListener = &callbackListener{} +var _ SetCallbackListener = (*callbackListener)(nil) type callbackListener struct { t *testing.T + onAdd func(ids.NodeID, *bls.PublicKey, ids.ID, uint64) onWeight func(ids.NodeID, uint64, uint64) - onAdd func(ids.NodeID, uint64) onRemoved func(ids.NodeID, uint64) } -func (c *callbackListener) OnValidatorAdded(nodeID ids.NodeID, weight uint64) { +func (c *callbackListener) OnValidatorAdded(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { if c.onAdd != nil { - c.onAdd(nodeID, weight) + c.onAdd(nodeID, pk, txID, weight) } else { c.t.Fail() } @@ -291,163 +459,129 @@ func (c *callbackListener) OnValidatorWeightChanged(nodeID ids.NodeID, oldWeight } } -func TestSetAddWeightCallback(t *testing.T) { - vdr0 := ids.NodeID{1} +func TestSetAddCallback(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.NodeID{1} + sk0, err := bls.NewSecretKey() + 
require.NoError(err) + pk0 := bls.PublicFromSecretKey(sk0) + txID0 := ids.GenerateTestID() weight0 := uint64(1) - weight1 := uint64(93) s := NewSet() - err := s.AddWeight(vdr0, weight0) - require.NoError(t, err) - callcount := 0 + callCount := 0 s.RegisterCallbackListener(&callbackListener{ t: t, - onAdd: func(nodeID ids.NodeID, weight uint64) { - if nodeID == vdr0 { - require.Equal(t, weight0, weight) - } - callcount++ - }, - onWeight: func(nodeID ids.NodeID, oldWeight, newWeight uint64) { - require.Equal(t, vdr0, nodeID) - require.Equal(t, weight0, oldWeight) - require.Equal(t, weight0+weight1, newWeight) - callcount++ + onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Equal(nodeID0, nodeID) + require.Equal(pk0, pk) + require.Equal(txID0, txID) + require.Equal(weight0, weight) + callCount++ }, }) - err = s.AddWeight(vdr0, weight1) - require.NoError(t, err) - require.Equal(t, 2, callcount) + err = s.Add(nodeID0, pk0, txID0, weight0) + require.NoError(err) + require.Equal(1, callCount) } -func TestSetRemoveWeightCallback(t *testing.T) { - vdr0 := ids.NodeID{1} - weight0 := uint64(93) - weight1 := uint64(92) +func TestSetAddWeightCallback(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.NodeID{1} + txID0 := ids.GenerateTestID() + weight0 := uint64(1) + weight1 := uint64(93) s := NewSet() - callcount := 0 - err := s.AddWeight(vdr0, weight0) - require.NoError(t, err) + err := s.Add(nodeID0, nil, txID0, weight0) + require.NoError(err) + + callCount := 0 s.RegisterCallbackListener(&callbackListener{ t: t, - onAdd: func(nodeID ids.NodeID, weight uint64) { - if nodeID == vdr0 { - require.Equal(t, weight0, weight) - } - callcount++ + onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Equal(nodeID0, nodeID) + require.Nil(pk) + require.Equal(txID0, txID) + require.Equal(weight0, weight) + callCount++ }, onWeight: func(nodeID ids.NodeID, oldWeight, newWeight uint64) { - 
require.Equal(t, vdr0, nodeID) - require.Equal(t, weight0, oldWeight) - require.Equal(t, weight0-weight1, newWeight) - callcount++ + require.Equal(nodeID0, nodeID) + require.Equal(weight0, oldWeight) + require.Equal(weight0+weight1, newWeight) + callCount++ }, }) - err = s.RemoveWeight(vdr0, weight1) - require.NoError(t, err) - require.Equal(t, 2, callcount) + err = s.AddWeight(nodeID0, weight1) + require.NoError(err) + require.Equal(2, callCount) } -func TestSetValidatorRemovedCallback(t *testing.T) { - vdr0 := ids.NodeID{1} +func TestSetRemoveWeightCallback(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.NodeID{1} + txID0 := ids.GenerateTestID() weight0 := uint64(93) + weight1 := uint64(92) s := NewSet() - callcount := 0 - err := s.AddWeight(vdr0, weight0) - require.NoError(t, err) + err := s.Add(nodeID0, nil, txID0, weight0) + require.NoError(err) + callCount := 0 s.RegisterCallbackListener(&callbackListener{ t: t, - onAdd: func(nodeID ids.NodeID, weight uint64) { - if nodeID == vdr0 { - require.Equal(t, weight0, weight) - } - callcount++ + onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Equal(nodeID0, nodeID) + require.Nil(pk) + require.Equal(txID0, txID) + require.Equal(weight0, weight) + callCount++ }, - onRemoved: func(nodeID ids.NodeID, weight uint64) { - require.Equal(t, vdr0, nodeID) - require.Equal(t, weight0, weight) - callcount++ + onWeight: func(nodeID ids.NodeID, oldWeight, newWeight uint64) { + require.Equal(nodeID0, nodeID) + require.Equal(weight0, oldWeight) + require.Equal(weight0-weight1, newWeight) + callCount++ }, }) - err = s.RemoveWeight(vdr0, weight0) - require.NoError(t, err) - require.Equal(t, 2, callcount) + err = s.RemoveWeight(nodeID0, weight1) + require.NoError(err) + require.Equal(2, callCount) } -func TestSetValidatorSetCallback(t *testing.T) { - vdr0 := ids.NodeID{1} +func TestSetValidatorRemovedCallback(t *testing.T) { + require := require.New(t) + + nodeID0 := ids.NodeID{1} 
+ txID0 := ids.GenerateTestID() weight0 := uint64(93) - vdr1 := ids.NodeID{2} - weight1 := uint64(94) - vdr2 := ids.NodeID{3} - weight2 := uint64(95) s := NewSet() - err := s.AddWeight(vdr0, weight0) - require.NoError(t, err) - err = s.AddWeight(vdr1, weight1) - require.NoError(t, err) + err := s.Add(nodeID0, nil, txID0, weight0) + require.NoError(err) - newValidators := []Validator{&validator{nodeID: vdr0, weight: weight0}, &validator{nodeID: vdr2, weight: weight2}} - callcount := 0 + callCount := 0 s.RegisterCallbackListener(&callbackListener{ t: t, - onAdd: func(nodeID ids.NodeID, weight uint64) { - if nodeID == vdr0 { - require.Equal(t, weight0, weight) - } - if nodeID == vdr1 { - require.Equal(t, weight1, weight) - } - if nodeID == vdr2 { - require.Equal(t, weight2, weight) - } - callcount++ + onAdd: func(nodeID ids.NodeID, pk *bls.PublicKey, txID ids.ID, weight uint64) { + require.Equal(nodeID0, nodeID) + require.Nil(pk) + require.Equal(txID0, txID) + require.Equal(weight0, weight) + callCount++ }, onRemoved: func(nodeID ids.NodeID, weight uint64) { - require.Equal(t, vdr1, nodeID) - require.Equal(t, weight1, weight) - callcount++ + require.Equal(nodeID0, nodeID) + require.Equal(weight0, weight) + callCount++ }, }) - - err = s.Set(newValidators) - require.NoError(t, err) - require.Equal(t, 4, callcount) -} - -func TestDefaultValidatorExpiration(t *testing.T) { - set := NewSet() - - t.Setenv("CUSTOM_VALIDATORS", "NodeID-5dDZXn99LCkDoEi6t9gTitZuQmhokxQTc,NodeID-AQghDJTU3zuQj73itPtfTZz6CxsTQVD3R") - t.Setenv("CUSTOM_VALIDATORS_EXPIRATION", "2024-02-01T00:00:00Z") - - vs := defaultValidatorSet{} - vs.initialize(constants.LocalID, time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC)) - - require.Len(t, vs.list(), 2) - - for _, v := range vs.list() { - require.NoError(t, set.AddWeight(v.ID(), v.Weight())) - } - - require.Equal(t, 2, set.Len()) - - // Get expired validators - expVdrs := vs.expiredValidators(constants.LocalID, time.Date(2024, time.February, 2, 0, 
0, 0, 0, time.UTC)) - require.Len(t, expVdrs, 2) - - // Remove expired validators - for _, v := range expVdrs { - require.NoError(t, set.RemoveWeight(v.ID(), v.Weight())) - } - require.Equal(t, 0, set.Len()) - - // Removing expired validators again should not error - for _, v := range expVdrs { - require.NoError(t, set.RemoveWeight(v.ID(), v.Weight())) - } + err = s.RemoveWeight(nodeID0, weight0) + require.NoError(err) + require.Equal(2, callCount) } diff --git a/avalanchego/snow/validators/state.go b/avalanchego/snow/validators/state.go index 52c12cf1..fa9ef278 100644 --- a/avalanchego/snow/validators/state.go +++ b/avalanchego/snow/validators/state.go @@ -1,29 +1,37 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators import ( + "context" "sync" "github.com/ava-labs/avalanchego/ids" ) -var _ State = &lockedState{} +var _ State = (*lockedState)(nil) // State allows the lookup of validator sets on specified subnets at the // requested P-chain height. type State interface { // GetMinimumHeight returns the minimum height of the block still in the // proposal window. - GetMinimumHeight() (uint64, error) + GetMinimumHeight(context.Context) (uint64, error) // GetCurrentHeight returns the current height of the P-chain. - GetCurrentHeight() (uint64, error) + GetCurrentHeight(context.Context) (uint64, error) - // GetValidatorSet returns the weights of the nodeIDs for the provided - // subnet at the requested P-chain height. + // GetSubnetID returns the subnetID of the provided chain. + GetSubnetID(ctx context.Context, chainID ids.ID) (ids.ID, error) + + // GetValidatorSet returns the validators of the provided subnet at the + // requested P-chain height. // The returned map should not be modified. 
- GetValidatorSet(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) + GetValidatorSet( + ctx context.Context, + height uint64, + subnetID ids.ID, + ) (map[ids.NodeID]*GetValidatorOutput, error) } type lockedState struct { @@ -38,25 +46,36 @@ func NewLockedState(lock sync.Locker, s State) State { } } -func (s *lockedState) GetMinimumHeight() (uint64, error) { +func (s *lockedState) GetMinimumHeight(ctx context.Context) (uint64, error) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.s.GetMinimumHeight(ctx) +} + +func (s *lockedState) GetCurrentHeight(ctx context.Context) (uint64, error) { s.lock.Lock() defer s.lock.Unlock() - return s.s.GetMinimumHeight() + return s.s.GetCurrentHeight(ctx) } -func (s *lockedState) GetCurrentHeight() (uint64, error) { +func (s *lockedState) GetSubnetID(ctx context.Context, chainID ids.ID) (ids.ID, error) { s.lock.Lock() defer s.lock.Unlock() - return s.s.GetCurrentHeight() + return s.s.GetSubnetID(ctx, chainID) } -func (s *lockedState) GetValidatorSet(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { +func (s *lockedState) GetValidatorSet( + ctx context.Context, + height uint64, + subnetID ids.ID, +) (map[ids.NodeID]*GetValidatorOutput, error) { s.lock.Lock() defer s.lock.Unlock() - return s.s.GetValidatorSet(height, subnetID) + return s.s.GetValidatorSet(ctx, height, subnetID) } type noValidators struct { @@ -69,6 +88,6 @@ func NewNoValidatorsState(state State) State { } } -func (*noValidators) GetValidatorSet(uint64, ids.ID) (map[ids.NodeID]uint64, error) { +func (*noValidators) GetValidatorSet(context.Context, uint64, ids.ID) (map[ids.NodeID]*GetValidatorOutput, error) { return nil, nil } diff --git a/avalanchego/snow/validators/subnet_connector.go b/avalanchego/snow/validators/subnet_connector.go new file mode 100644 index 00000000..6b4a24bd --- /dev/null +++ b/avalanchego/snow/validators/subnet_connector.go @@ -0,0 +1,16 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package validators + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" +) + +// SubnetConnector represents a handler that is called when a connection is +// marked as connected to a subnet +type SubnetConnector interface { + ConnectedSubnet(ctx context.Context, nodeID ids.NodeID, subnetID ids.ID) error +} diff --git a/avalanchego/snow/validators/test_state.go b/avalanchego/snow/validators/test_state.go index d070f10c..6be85dcb 100644 --- a/avalanchego/snow/validators/test_state.go +++ b/avalanchego/snow/validators/test_state.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators import ( + "context" "errors" "testing" @@ -13,26 +14,29 @@ import ( var ( errMinimumHeight = errors.New("unexpectedly called GetMinimumHeight") errCurrentHeight = errors.New("unexpectedly called GetCurrentHeight") + errSubnetID = errors.New("unexpectedly called GetSubnetID") errGetValidatorSet = errors.New("unexpectedly called GetValidatorSet") ) -var _ State = &TestState{} +var _ State = (*TestState)(nil) type TestState struct { T *testing.T CantGetMinimumHeight, CantGetCurrentHeight, + CantGetSubnetID, CantGetValidatorSet bool - GetMinimumHeightF func() (uint64, error) - GetCurrentHeightF func() (uint64, error) - GetValidatorSetF func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) + GetMinimumHeightF func(ctx context.Context) (uint64, error) + GetCurrentHeightF func(ctx context.Context) (uint64, error) + GetSubnetIDF func(ctx context.Context, chainID ids.ID) (ids.ID, error) + GetValidatorSetF func(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*GetValidatorOutput, error) } -func (vm *TestState) GetMinimumHeight() (uint64, error) { +func (vm *TestState) GetMinimumHeight(ctx context.Context) (uint64, error) { if vm.GetMinimumHeightF 
!= nil { - return vm.GetMinimumHeightF() + return vm.GetMinimumHeightF(ctx) } if vm.CantGetMinimumHeight && vm.T != nil { vm.T.Fatal(errMinimumHeight) @@ -40,9 +44,9 @@ func (vm *TestState) GetMinimumHeight() (uint64, error) { return 0, errMinimumHeight } -func (vm *TestState) GetCurrentHeight() (uint64, error) { +func (vm *TestState) GetCurrentHeight(ctx context.Context) (uint64, error) { if vm.GetCurrentHeightF != nil { - return vm.GetCurrentHeightF() + return vm.GetCurrentHeightF(ctx) } if vm.CantGetCurrentHeight && vm.T != nil { vm.T.Fatal(errCurrentHeight) @@ -50,9 +54,23 @@ func (vm *TestState) GetCurrentHeight() (uint64, error) { return 0, errCurrentHeight } -func (vm *TestState) GetValidatorSet(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { +func (vm *TestState) GetSubnetID(ctx context.Context, chainID ids.ID) (ids.ID, error) { + if vm.GetSubnetIDF != nil { + return vm.GetSubnetIDF(ctx, chainID) + } + if vm.CantGetSubnetID && vm.T != nil { + vm.T.Fatal(errSubnetID) + } + return ids.Empty, errSubnetID +} + +func (vm *TestState) GetValidatorSet( + ctx context.Context, + height uint64, + subnetID ids.ID, +) (map[ids.NodeID]*GetValidatorOutput, error) { if vm.GetValidatorSetF != nil { - return vm.GetValidatorSetF(height, subnetID) + return vm.GetValidatorSetF(ctx, height, subnetID) } if vm.CantGetValidatorSet && vm.T != nil { vm.T.Fatal(errGetValidatorSet) diff --git a/avalanchego/snow/validators/traced_state.go b/avalanchego/snow/validators/traced_state.go new file mode 100644 index 00000000..e1f54720 --- /dev/null +++ b/avalanchego/snow/validators/traced_state.go @@ -0,0 +1,75 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package validators + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" +) + +var _ State = (*tracedState)(nil) + +type tracedState struct { + s State + getMinimumHeightTag string + getCurrentHeightTag string + getSubnetIDTag string + getValidatorSetTag string + tracer trace.Tracer +} + +func Trace(s State, name string, tracer trace.Tracer) State { + return &tracedState{ + s: s, + getMinimumHeightTag: fmt.Sprintf("%s.GetMinimumHeight", name), + getCurrentHeightTag: fmt.Sprintf("%s.GetCurrentHeight", name), + getSubnetIDTag: fmt.Sprintf("%s.GetSubnetID", name), + getValidatorSetTag: fmt.Sprintf("%s.GetValidatorSet", name), + tracer: tracer, + } +} + +func (s *tracedState) GetMinimumHeight(ctx context.Context) (uint64, error) { + ctx, span := s.tracer.Start(ctx, s.getMinimumHeightTag) + defer span.End() + + return s.s.GetMinimumHeight(ctx) +} + +func (s *tracedState) GetCurrentHeight(ctx context.Context) (uint64, error) { + ctx, span := s.tracer.Start(ctx, s.getCurrentHeightTag) + defer span.End() + + return s.s.GetCurrentHeight(ctx) +} + +func (s *tracedState) GetSubnetID(ctx context.Context, chainID ids.ID) (ids.ID, error) { + ctx, span := s.tracer.Start(ctx, s.getValidatorSetTag, oteltrace.WithAttributes( + attribute.Stringer("chainID", chainID), + )) + defer span.End() + + return s.s.GetSubnetID(ctx, chainID) +} + +func (s *tracedState) GetValidatorSet( + ctx context.Context, + height uint64, + subnetID ids.ID, +) (map[ids.NodeID]*GetValidatorOutput, error) { + ctx, span := s.tracer.Start(ctx, s.getValidatorSetTag, oteltrace.WithAttributes( + attribute.Int64("height", int64(height)), + attribute.Stringer("subnetID", subnetID), + )) + defer span.End() + + return s.s.GetValidatorSet(ctx, height, subnetID) +} diff --git a/avalanchego/snow/validators/unhandled_subnet_connector.go 
b/avalanchego/snow/validators/unhandled_subnet_connector.go new file mode 100644 index 00000000..de7225aa --- /dev/null +++ b/avalanchego/snow/validators/unhandled_subnet_connector.go @@ -0,0 +1,23 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package validators + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/ids" +) + +var UnhandledSubnetConnector SubnetConnector = &unhandledSubnetConnector{} + +type unhandledSubnetConnector struct{} + +func (unhandledSubnetConnector) ConnectedSubnet(_ context.Context, nodeID ids.NodeID, subnetID ids.ID) error { + return fmt.Errorf( + "unhandled ConnectedSubnet with nodeID=%q and subnetID=%q", + nodeID, + subnetID, + ) +} diff --git a/avalanchego/snow/validators/validator.go b/avalanchego/snow/validators/validator.go index bfee1ad9..56664ddc 100644 --- a/avalanchego/snow/validators/validator.go +++ b/avalanchego/snow/validators/validator.go @@ -1,70 +1,31 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package validators import ( - "math" - "github.com/ava-labs/avalanchego/ids" - - safemath "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/crypto/bls" ) -var _ Validator = &validator{} - -// Validator is the minimal description of someone that can be sampled. -type Validator interface { - // ID returns the node ID of this validator - ID() ids.NodeID - - // Weight that can be used for weighted sampling. If this validator is - // validating the primary network, returns the amount of AVAX staked. - Weight() uint64 -} - -// validator is a struct that contains the base values required by the validator -// interface. 
-type validator struct { - nodeID ids.NodeID - weight uint64 -} - -func (v *validator) ID() ids.NodeID { return v.nodeID } -func (v *validator) Weight() uint64 { return v.weight } - -func (v *validator) addWeight(weight uint64) { - newTotalWeight, err := safemath.Add64(weight, v.weight) - if err != nil { - newTotalWeight = math.MaxUint64 - } - v.weight = newTotalWeight -} - -func (v *validator) removeWeight(weight uint64) { - newTotalWeight, err := safemath.Sub64(v.weight, weight) - if err != nil { - newTotalWeight = 0 - } - v.weight = newTotalWeight -} - -// NewValidator returns a validator object that implements the Validator -// interface -func NewValidator( - nodeID ids.NodeID, - weight uint64, -) Validator { - return &validator{ - nodeID: nodeID, - weight: weight, - } +// Validator is a struct that contains the base values representing a validator +// of the Avalanche Network. +type Validator struct { + NodeID ids.NodeID + PublicKey *bls.PublicKey + TxID ids.ID + Weight uint64 + + // index is used to efficiently remove validators from the validator set. It + // represents the index of this validator in the vdrSlice and weights + // arrays. + index int } -// GenerateRandomValidator creates a random validator with the provided weight -func GenerateRandomValidator(weight uint64) Validator { - return NewValidator( - ids.GenerateTestNodeID(), - weight, - ) +// GetValidatorOutput is a struct that contains the publicly relevant values of +// a validator of the Avalanche Network for the output of GetValidator. +type GetValidatorOutput struct { + NodeID ids.NodeID + PublicKey *bls.PublicKey + Weight uint64 } diff --git a/avalanchego/staking/tls.go b/avalanchego/staking/tls.go index 6612d544..6a6a4640 100644 --- a/avalanchego/staking/tls.go +++ b/avalanchego/staking/tls.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package staking @@ -18,6 +18,7 @@ import ( "time" "github.com/ava-labs/avalanchego/utils/perms" + "github.com/ava-labs/avalanchego/utils/set" ) var errDuplicateExtension = errors.New("duplicate certificate extension") @@ -153,13 +154,13 @@ func NewCertAndKeyBytes() ([]byte, []byte, error) { } func VerifyCertificate(cert *x509.Certificate) error { - extensionSet := make(map[string]struct{}, len(cert.Extensions)) + extensionSet := set.NewSet[string](len(cert.Extensions)) for _, extension := range cert.Extensions { idStr := extension.Id.String() - if _, ok := extensionSet[idStr]; ok { + if extensionSet.Contains(idStr) { return errDuplicateExtension } - extensionSet[idStr] = struct{}{} + extensionSet.Add(idStr) } return nil } diff --git a/avalanchego/staking/tls_test.go b/avalanchego/staking/tls_test.go index 8e49ccf8..bcab2c98 100644 --- a/avalanchego/staking/tls_test.go +++ b/avalanchego/staking/tls_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package staking diff --git a/avalanchego/subnets/config.go b/avalanchego/subnets/config.go new file mode 100644 index 00000000..cdec292c --- /dev/null +++ b/avalanchego/subnets/config.go @@ -0,0 +1,60 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package subnets + +import ( + "errors" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/avalanche" + "github.com/ava-labs/avalanchego/utils/set" +) + +var errAllowedNodesWhenNotValidatorOnly = errors.New("allowedNodes can only be set when ValidatorOnly is true") + +type GossipConfig struct { + AcceptedFrontierValidatorSize uint `json:"gossipAcceptedFrontierValidatorSize" yaml:"gossipAcceptedFrontierValidatorSize"` + AcceptedFrontierNonValidatorSize uint `json:"gossipAcceptedFrontierNonValidatorSize" yaml:"gossipAcceptedFrontierNonValidatorSize"` + AcceptedFrontierPeerSize uint `json:"gossipAcceptedFrontierPeerSize" yaml:"gossipAcceptedFrontierPeerSize"` + OnAcceptValidatorSize uint `json:"gossipOnAcceptValidatorSize" yaml:"gossipOnAcceptValidatorSize"` + OnAcceptNonValidatorSize uint `json:"gossipOnAcceptNonValidatorSize" yaml:"gossipOnAcceptNonValidatorSize"` + OnAcceptPeerSize uint `json:"gossipOnAcceptPeerSize" yaml:"gossipOnAcceptPeerSize"` + AppGossipValidatorSize uint `json:"appGossipValidatorSize" yaml:"appGossipValidatorSize"` + AppGossipNonValidatorSize uint `json:"appGossipNonValidatorSize" yaml:"appGossipNonValidatorSize"` + AppGossipPeerSize uint `json:"appGossipPeerSize" yaml:"appGossipPeerSize"` +} + +type Config struct { + GossipConfig + + // ValidatorOnly indicates that this Subnet's Chains are available to only subnet validators. + // No chain related messages will go out to non-validators. + // Validators will drop messages received from non-validators. + // Also see [AllowedNodes] to allow non-validators to connect to this Subnet. + ValidatorOnly bool `json:"validatorOnly" yaml:"validatorOnly"` + // AllowedNodes is the set of node IDs that are explicitly allowed to connect to this Subnet when + // ValidatorOnly is enabled. 
+ AllowedNodes set.Set[ids.NodeID] `json:"allowedNodes" yaml:"allowedNodes"` + ConsensusParameters avalanche.Parameters `json:"consensusParameters" yaml:"consensusParameters"` + + // ProposerMinBlockDelay is the minimum delay this node will enforce when + // building a snowman++ block. + // TODO: Remove this flag once all VMs throttle their own block production. + ProposerMinBlockDelay time.Duration `json:"proposerMinBlockDelay" yaml:"proposerMinBlockDelay"` + + // See comment on [MinPercentConnectedStakeHealthy] in platformvm.Config + MinPercentConnectedStakeHealthy float64 `json:"minPercentConnectedStakeHealthy" yaml:"minPercentConnectedStakeHealthy"` +} + +func (c *Config) Valid() error { + if err := c.ConsensusParameters.Valid(); err != nil { + return fmt.Errorf("consensus parameters are invalid: %w", err) + } + if !c.ValidatorOnly && c.AllowedNodes.Len() > 0 { + return errAllowedNodesWhenNotValidatorOnly + } + return nil +} diff --git a/avalanchego/subnets/config_test.go b/avalanchego/subnets/config_test.go new file mode 100644 index 00000000..f745a4f4 --- /dev/null +++ b/avalanchego/subnets/config_test.go @@ -0,0 +1,77 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package subnets + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/avalanche" + "github.com/ava-labs/avalanchego/snow/consensus/snowball" + "github.com/ava-labs/avalanchego/utils/set" +) + +var validParameters = avalanche.Parameters{ + Parents: 2, + BatchSize: 1, + Parameters: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, +} + +func TestValid(t *testing.T) { + tests := []struct { + name string + s Config + err string + }{ + { + name: "invalid consensus parameters", + s: Config{ + ConsensusParameters: avalanche.Parameters{ + Parameters: snowball.Parameters{ + K: 2, + Alpha: 1, + }, + }, + }, + err: "consensus parameters are invalid", + }, + { + name: "invalid allowed node IDs", + s: Config{ + AllowedNodes: set.Set[ids.NodeID]{ids.GenerateTestNodeID(): struct{}{}}, + ValidatorOnly: false, + ConsensusParameters: validParameters, + }, + err: errAllowedNodesWhenNotValidatorOnly.Error(), + }, + { + name: "valid", + s: Config{ + ConsensusParameters: validParameters, + ValidatorOnly: false, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.s.Valid() + if tt.err != "" { + require.ErrorContains(t, err, tt.err) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/avalanchego/subnets/no_op_allower.go b/avalanchego/subnets/no_op_allower.go new file mode 100644 index 00000000..9d2d51ea --- /dev/null +++ b/avalanchego/subnets/no_op_allower.go @@ -0,0 +1,15 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package subnets + +import "github.com/ava-labs/avalanchego/ids" + +// NoOpAllower is an Allower that always returns true +var NoOpAllower Allower = noOpAllower{} + +type noOpAllower struct{} + +func (noOpAllower) IsAllowed(ids.NodeID, bool) bool { + return true +} diff --git a/avalanchego/subnets/subnet.go b/avalanchego/subnets/subnet.go new file mode 100644 index 00000000..31bc9dcb --- /dev/null +++ b/avalanchego/subnets/subnet.go @@ -0,0 +1,105 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package subnets + +import ( + "sync" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/set" +) + +var _ Subnet = (*subnet)(nil) + +type Allower interface { + // IsAllowed filters out nodes that are not allowed to connect to this subnet + IsAllowed(nodeID ids.NodeID, isValidator bool) bool +} + +// Subnet keeps track of the currently bootstrapping chains in a subnet. If no +// chains in the subnet are currently bootstrapping, the subnet is considered +// bootstrapped. 
+type Subnet interface { + common.BootstrapTracker + + // AddChain adds a chain to this Subnet + AddChain(chainID ids.ID) bool + + // Config returns config of this Subnet + Config() Config + + Allower +} + +type subnet struct { + lock sync.RWMutex + bootstrapping set.Set[ids.ID] + bootstrapped set.Set[ids.ID] + once sync.Once + bootstrappedSema chan struct{} + config Config + myNodeID ids.NodeID +} + +func New(myNodeID ids.NodeID, config Config) Subnet { + return &subnet{ + bootstrappedSema: make(chan struct{}), + config: config, + myNodeID: myNodeID, + } +} + +func (s *subnet) IsBootstrapped() bool { + s.lock.RLock() + defer s.lock.RUnlock() + + return s.bootstrapping.Len() == 0 +} + +func (s *subnet) Bootstrapped(chainID ids.ID) { + s.lock.Lock() + defer s.lock.Unlock() + + s.bootstrapping.Remove(chainID) + s.bootstrapped.Add(chainID) + if s.bootstrapping.Len() > 0 { + return + } + + s.once.Do(func() { + close(s.bootstrappedSema) + }) +} + +func (s *subnet) OnBootstrapCompleted() chan struct{} { + return s.bootstrappedSema +} + +func (s *subnet) AddChain(chainID ids.ID) bool { + s.lock.Lock() + defer s.lock.Unlock() + + if s.bootstrapping.Contains(chainID) || s.bootstrapped.Contains(chainID) { + return false + } + + s.bootstrapping.Add(chainID) + return true +} + +func (s *subnet) Config() Config { + return s.config +} + +func (s *subnet) IsAllowed(nodeID ids.NodeID, isValidator bool) bool { + // Case 1: NodeID is this node + // Case 2: This subnet is not validator-only subnet + // Case 3: NodeID is a validator for this chain + // Case 4: NodeID is explicitly allowed whether it's subnet validator or not + return nodeID == s.myNodeID || + !s.config.ValidatorOnly || + isValidator || + s.config.AllowedNodes.Contains(nodeID) +} diff --git a/avalanchego/subnets/subnet_test.go b/avalanchego/subnets/subnet_test.go new file mode 100644 index 00000000..3ae42e6f --- /dev/null +++ b/avalanchego/subnets/subnet_test.go @@ -0,0 +1,72 @@ +// Copyright (C) 2019-2023, Ava Labs, 
Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package subnets + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +func TestSubnet(t *testing.T) { + require := require.New(t) + + myNodeID := ids.GenerateTestNodeID() + chainID0 := ids.GenerateTestID() + chainID1 := ids.GenerateTestID() + chainID2 := ids.GenerateTestID() + + s := New(myNodeID, Config{}) + s.AddChain(chainID0) + require.False(s.IsBootstrapped(), "A subnet with one chain in bootstrapping shouldn't be considered bootstrapped") + + s.Bootstrapped(chainID0) + require.True(s.IsBootstrapped(), "A subnet with only bootstrapped chains should be considered bootstrapped") + + s.AddChain(chainID1) + require.False(s.IsBootstrapped(), "A subnet with one chain in bootstrapping shouldn't be considered bootstrapped") + + s.AddChain(chainID2) + require.False(s.IsBootstrapped(), "A subnet with one chain in bootstrapping shouldn't be considered bootstrapped") + + s.Bootstrapped(chainID1) + require.False(s.IsBootstrapped(), "A subnet with one chain in bootstrapping shouldn't be considered bootstrapped") + + s.Bootstrapped(chainID2) + require.True(s.IsBootstrapped(), "A subnet with only bootstrapped chains should be considered bootstrapped") +} + +func TestIsAllowed(t *testing.T) { + require := require.New(t) + + myNodeID := ids.GenerateTestNodeID() + // Test with no rules + s := New(myNodeID, Config{}) + require.True(s.IsAllowed(ids.GenerateTestNodeID(), true), "Validator should be allowed with no rules") + require.True(s.IsAllowed(ids.GenerateTestNodeID(), false), "Non-validator should be allowed with no rules") + + // Test with validator only rules + s = New(myNodeID, Config{ + ValidatorOnly: true, + }) + require.True(s.IsAllowed(ids.GenerateTestNodeID(), true), "Validator should be allowed with validator only rules") + require.True(s.IsAllowed(myNodeID, false), "Self node should be 
allowed with validator only rules") + require.False(s.IsAllowed(ids.GenerateTestNodeID(), false), "Non-validator should not be allowed with validator only rules") + + // Test with validator only rules and allowed nodes + allowedNodeID := ids.GenerateTestNodeID() + s = New(myNodeID, Config{ + ValidatorOnly: true, + AllowedNodes: set.Set[ids.NodeID]{ + allowedNodeID: struct{}{}, + }, + }) + require.True(s.IsAllowed(allowedNodeID, true), "Validator should be allowed with validator only rules and allowed nodes") + require.True(s.IsAllowed(myNodeID, false), "Self node should be allowed with validator only rules") + require.False(s.IsAllowed(ids.GenerateTestNodeID(), false), "Non-validator should not be allowed with validator only rules and allowed nodes") + require.True(s.IsAllowed(allowedNodeID, true), "Non-validator allowed node should be allowed with validator only rules and allowed nodes") +} diff --git a/avalanchego/tests/colors.go b/avalanchego/tests/colors.go index 8e0fc5d6..4c50eb3a 100644 --- a/avalanchego/tests/colors.go +++ b/avalanchego/tests/colors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tests @@ -11,13 +11,13 @@ import ( // Outputs to stdout. // -// e.g., -// Out("{{green}}{{bold}}hi there %q{{/}}", "aa") -// Out("{{magenta}}{{bold}}hi therea{{/}} {{cyan}}{{underline}}b{{/}}") +// Examples: // -// ref. -// https://github.com/onsi/ginkgo/blob/v2.0.0/formatter/formatter.go#L52-L73 +// - Out("{{green}}{{bold}}hi there %q{{/}}", "aa") +// - Out("{{magenta}}{{bold}}hi therea{{/}} {{cyan}}{{underline}}b{{/}}") // +// See https://github.com/onsi/ginkgo/blob/v2.0.0/formatter/formatter.go#L52-L73 +// for an exhaustive list of color options. func Outf(format string, args ...interface{}) { s := formatter.F(format, args...) 
fmt.Fprint(formatter.ColorableStdOut, s) diff --git a/avalanchego/tests/e2e/banff/suites.go b/avalanchego/tests/e2e/banff/suites.go index c6c6e544..ee07f50c 100644 --- a/avalanchego/tests/e2e/banff/suites.go +++ b/avalanchego/tests/e2e/banff/suites.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Implements tests for the banff network upgrade. diff --git a/avalanchego/tests/e2e/describe.go b/avalanchego/tests/e2e/describe.go index 8e9bf861..b3a849a0 100644 --- a/avalanchego/tests/e2e/describe.go +++ b/avalanchego/tests/e2e/describe.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package e2e diff --git a/avalanchego/tests/e2e/e2e.go b/avalanchego/tests/e2e/e2e.go index 3328475a..d9f3425d 100644 --- a/avalanchego/tests/e2e/e2e.go +++ b/avalanchego/tests/e2e/e2e.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // e2e implements the e2e tests. 
@@ -6,6 +6,7 @@ package e2e import ( "context" + "errors" "fmt" "os" "strings" @@ -18,7 +19,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -40,11 +41,18 @@ const ( ) // Env is the global struct containing all we need to test -var Env = &TestEnvinronment{ - testEnvironmentConfig: &testEnvironmentConfig{ - clusterType: Unknown, - }, -} +var ( + Env = &TestEnvironment{ + testEnvironmentConfig: &testEnvironmentConfig{ + clusterType: Unknown, + }, + } + + errGRPCAndURIsSpecified = errors.New("either network-runner-grpc-endpoint or uris should be specified, not both") + errNoKeyFile = errors.New("test keys file not provided") + errUnknownClusterType = errors.New("unhandled cluster type") + errNotNetworkRunnerCLI = errors.New("not network-runner cli") +) type testEnvironmentConfig struct { clusterType ClusterType @@ -59,7 +67,7 @@ type testEnvironmentConfig struct { snapshotName string } -type TestEnvinronment struct { +type TestEnvironment struct { *testEnvironmentConfig runnerMu sync.RWMutex @@ -70,7 +78,7 @@ type TestEnvinronment struct { uris []string testKeysMu sync.RWMutex - testKeys []*crypto.PrivateKeySECP256K1R + testKeys []*secp256k1.PrivateKey snapMu sync.RWMutex snapped bool @@ -79,7 +87,7 @@ type TestEnvinronment struct { // should be called only once // must be called before StartCluster // Note that either networkRunnerGRPCEp or uris must be specified -func (te *TestEnvinronment) ConfigCluster( +func (te *TestEnvironment) ConfigCluster( logLevel string, networkRunnerGRPCEp string, avalancheGoExecPath string, @@ -127,14 +135,14 @@ func (te *TestEnvinronment) ConfigCluster( return nil default: - return fmt.Errorf("either network-runner-grpc-endpoint or uris should be specified, not both") + return errGRPCAndURIsSpecified } } -func (te *TestEnvinronment) 
LoadKeys() error { +func (te *TestEnvironment) LoadKeys() error { // load test keys if len(te.testKeysFile) == 0 { - return fmt.Errorf("test keys file not provided") + return errNoKeyFile } testKeys, err := tests.LoadHexTestKeys(te.testKeysFile) if err != nil { @@ -144,7 +152,7 @@ func (te *TestEnvinronment) LoadKeys() error { return nil } -func (te *TestEnvinronment) StartCluster() error { +func (te *TestEnvironment) StartCluster() error { switch te.clusterType { case StandAlone: tests.Outf("{{magenta}}starting network-runner with %q{{/}}\n", te.avalancheGoExecPath) @@ -175,11 +183,11 @@ func (te *TestEnvinronment) StartCluster() error { return nil // nothing to do, really default: - return fmt.Errorf("unhandled cluster type") + return errUnknownClusterType } } -func (te *TestEnvinronment) refreshURIs() error { +func (te *TestEnvironment) refreshURIs() error { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) uriSlice, err := te.GetRunnerClient().URIs(ctx) cancel() @@ -191,7 +199,7 @@ func (te *TestEnvinronment) refreshURIs() error { return nil } -func (te *TestEnvinronment) setRunnerClient(logLevel string, gRPCEp string) error { +func (te *TestEnvironment) setRunnerClient(logLevel string, gRPCEp string) error { te.runnerMu.Lock() defer te.runnerMu.Unlock() @@ -211,47 +219,47 @@ func (te *TestEnvinronment) setRunnerClient(logLevel string, gRPCEp string) erro return err } -func (te *TestEnvinronment) GetRunnerClient() (cli runner_sdk.Client) { +func (te *TestEnvironment) GetRunnerClient() (cli runner_sdk.Client) { te.runnerMu.RLock() cli = te.runnerCli te.runnerMu.RUnlock() return cli } -func (te *TestEnvinronment) closeRunnerClient() (err error) { +func (te *TestEnvironment) closeRunnerClient() (err error) { te.runnerMu.Lock() err = te.runnerCli.Close() te.runnerMu.Unlock() return err } -func (te *TestEnvinronment) GetRunnerGRPCEndpoint() (ep string) { +func (te *TestEnvironment) GetRunnerGRPCEndpoint() (ep string) { te.runnerMu.RLock() ep = 
te.runnerGRPCEp te.runnerMu.RUnlock() return ep } -func (te *TestEnvinronment) setURIs(us []string) { +func (te *TestEnvironment) setURIs(us []string) { te.urisMu.Lock() te.uris = us te.urisMu.Unlock() } -func (te *TestEnvinronment) GetURIs() []string { +func (te *TestEnvironment) GetURIs() []string { te.urisMu.RLock() us := te.uris te.urisMu.RUnlock() return us } -func (te *TestEnvinronment) setTestKeys(ks []*crypto.PrivateKeySECP256K1R) { +func (te *TestEnvironment) setTestKeys(ks []*secp256k1.PrivateKey) { te.testKeysMu.Lock() te.testKeys = ks te.testKeysMu.Unlock() } -func (te *TestEnvinronment) GetTestKeys() ([]*crypto.PrivateKeySECP256K1R, []ids.ShortID, *secp256k1fx.Keychain) { +func (te *TestEnvironment) GetTestKeys() ([]*secp256k1.PrivateKey, []ids.ShortID, *secp256k1fx.Keychain) { te.testKeysMu.RLock() testKeys := te.testKeys te.testKeysMu.RUnlock() @@ -263,7 +271,7 @@ func (te *TestEnvinronment) GetTestKeys() ([]*crypto.PrivateKeySECP256K1R, []ids return testKeys, testKeyAddrs, keyChain } -func (te *TestEnvinronment) ShutdownCluster() error { +func (te *TestEnvironment) ShutdownCluster() error { if te.GetRunnerGRPCEndpoint() == "" { // we connected directly to existing cluster // nothing to shutdown @@ -272,7 +280,7 @@ func (te *TestEnvinronment) ShutdownCluster() error { runnerCli := te.GetRunnerClient() if runnerCli == nil { - return fmt.Errorf("not network-runner cli") + return errNotNetworkRunnerCLI } tests.Outf("{{red}}shutting down network-runner cluster{{/}}\n") @@ -287,7 +295,7 @@ func (te *TestEnvinronment) ShutdownCluster() error { return te.closeRunnerClient() } -func (te *TestEnvinronment) SnapInitialState() error { +func (te *TestEnvironment) SnapInitialState() error { te.snapMu.RLock() defer te.snapMu.RUnlock() @@ -305,7 +313,7 @@ func (te *TestEnvinronment) SnapInitialState() error { return nil } -func (te *TestEnvinronment) RestoreInitialState(switchOffNetworkFirst bool) error { +func (te *TestEnvironment) 
RestoreInitialState(switchOffNetworkFirst bool) error { te.snapMu.Lock() defer te.snapMu.Unlock() diff --git a/avalanchego/tests/e2e/e2e_test.go b/avalanchego/tests/e2e/e2e_test.go index 00f501d5..2530c2cc 100644 --- a/avalanchego/tests/e2e/e2e_test.go +++ b/avalanchego/tests/e2e/e2e_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package e2e_test diff --git a/avalanchego/tests/e2e/p/permissionless_subnets.go b/avalanchego/tests/e2e/p/permissionless_subnets.go index c928e8d7..bcd6ac64 100644 --- a/avalanchego/tests/e2e/p/permissionless_subnets.go +++ b/avalanchego/tests/e2e/p/permissionless_subnets.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -26,7 +26,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/status" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" @@ -195,8 +195,8 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { ginkgo.By("add permissionless validator", func() { ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) addSubnetValidatorTxID, err := pWallet.IssueAddPermissionlessValidatorTx( - &validator.SubnetValidator{ - Validator: validator.Validator{ + &txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, Start: uint64(validatorStartTime.Unix()), End: uint64(validatorStartTime.Add(5 * 
time.Second).Unix()), @@ -224,8 +224,8 @@ var _ = e2e.DescribePChain("[Permissionless Subnets]", func() { ginkgo.By("add permissionless delegator", func() { ctx, cancel := context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) addSubnetDelegatorTxID, err := pWallet.IssueAddPermissionlessDelegatorTx( - &validator.SubnetValidator{ - Validator: validator.Validator{ + &txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, Start: uint64(delegatorStartTime.Unix()), End: uint64(delegatorStartTime.Add(5 * time.Second).Unix()), diff --git a/avalanchego/tests/e2e/p/workflow.go b/avalanchego/tests/e2e/p/workflow.go index f4b40691..957124a4 100644 --- a/avalanchego/tests/e2e/p/workflow.go +++ b/avalanchego/tests/e2e/p/workflow.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -6,7 +6,6 @@ package p import ( "context" "errors" - "fmt" "time" ginkgo "github.com/onsi/ginkgo/v2" @@ -24,7 +23,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/status" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" @@ -98,7 +97,7 @@ var _ = e2e.DescribePChain("[Workflow]", func() { validatorStartTimeDiff := 30 * time.Second vdrStartTime := time.Now().Add(validatorStartTimeDiff) - vdr := &validator.Validator{ + vdr := &txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: uint64(vdrStartTime.Unix()), End: uint64(vdrStartTime.Add(72 * time.Hour).Unix()), @@ -210,7 +209,7 @@ var _ = e2e.DescribePChain("[Workflow]", func() { 
common.WithContext(ctx), ) cancel() - gomega.Expect(err).Should(gomega.BeNil(), fmt.Errorf("error timeout: %v", errors.Is(err, context.DeadlineExceeded))) + gomega.Expect(err).Should(gomega.BeNil(), "is context.DeadlineExceeded: %v", errors.Is(err, context.DeadlineExceeded)) ctx, cancel = context.WithTimeout(context.Background(), e2e.DefaultConfirmTxTimeout) txStatus, err := xChainClient.GetTxStatus(ctx, importTxID) diff --git a/avalanchego/tests/e2e/ping/suites.go b/avalanchego/tests/e2e/ping/suites.go index 5f9d6256..74d51d2a 100644 --- a/avalanchego/tests/e2e/ping/suites.go +++ b/avalanchego/tests/e2e/ping/suites.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Implements ping tests, requires network-runner cluster. diff --git a/avalanchego/tests/e2e/static-handlers/suites.go b/avalanchego/tests/e2e/static-handlers/suites.go index 61d8c9c1..18b6c367 100644 --- a/avalanchego/tests/e2e/static-handlers/suites.go +++ b/avalanchego/tests/e2e/static-handlers/suites.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
// Implements static handlers tests for avm and platformvm @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/tests/e2e" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" @@ -122,8 +122,8 @@ var _ = ginkgo.Describe("[StaticHandlers]", func() { }) ginkgo.It("can make calls to platformvm static api", func() { - keys := []*crypto.PrivateKeySECP256K1R{} - factory := crypto.FactorySECP256K1R{} + keys := []*secp256k1.PrivateKey{} + factory := secp256k1.Factory{} for _, key := range []string{ "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", "2MMvUMsxx6zsHSNXJdFD8yc5XkancvwyKPwpw4xUK3TCGDuNBY", @@ -135,7 +135,7 @@ var _ = ginkgo.Describe("[StaticHandlers]", func() { gomega.Expect(err).Should(gomega.BeNil()) pk, err := factory.ToPrivateKey(privKeyBytes) gomega.Expect(err).Should(gomega.BeNil()) - keys = append(keys, pk.(*crypto.PrivateKeySECP256K1R)) + keys = append(keys, pk) } genesisUTXOs := make([]api.UTXO, len(keys)) diff --git a/avalanchego/tests/e2e/x/transfer/virtuous.go b/avalanchego/tests/e2e/x/transfer/virtuous.go index 462d677f..f162276a 100644 --- a/avalanchego/tests/e2e/x/transfer/virtuous.go +++ b/avalanchego/tests/e2e/x/transfer/virtuous.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Implements X-chain transfer tests. 
@@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/tests" "github.com/ava-labs/avalanchego/tests/e2e" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -25,9 +26,9 @@ import ( ) const ( - metricVtxProcessing = "avalanche_X_vtx_processing" - metricVtxAccepted = "avalanche_X_vtx_accepted_count" - metricVtxRejected = "avalanche_X_vtx_rejected_count" + metricVtxProcessing = "avalanche_X_avalanche_vtx_processing" + metricVtxAccepted = "avalanche_X_avalanche_vtx_accepted_count" + metricVtxRejected = "avalanche_X_avalanche_vtx_rejected_count" ) const totalRounds = 50 @@ -59,7 +60,9 @@ var _ = e2e.DescribeXChain("[Virtuous Transfer Tx AVAX]", func() { needPermute := round > 3 if needPermute { rand.Seed(time.Now().UnixNano()) - rand.Shuffle(len(testKeys), func(i, j int) { testKeys[i], testKeys[j] = testKeys[j], testKeys[i] }) + rand.Shuffle(len(testKeys), func(i, j int) { + testKeys[i], testKeys[j] = testKeys[j], testKeys[i] + }) } keyChain := secp256k1fx.NewKeychain(testKeys...) @@ -83,7 +86,7 @@ var _ = e2e.DescribeXChain("[Virtuous Transfer Tx AVAX]", func() { wallets[i] = primary.NewWalletWithOptions( baseWallet, - common.WithCustomAddresses(ids.ShortSet{ + common.WithCustomAddresses(set.Set[ids.ShortID]{ testKeys[i].PublicKey().Address(): struct{}{}, }), ) diff --git a/avalanchego/tests/e2e/x/whitelist-vtx/suites.go b/avalanchego/tests/e2e/x/whitelist-vtx/suites.go index 72c24380..75015762 100644 --- a/avalanchego/tests/e2e/x/whitelist-vtx/suites.go +++ b/avalanchego/tests/e2e/x/whitelist-vtx/suites.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Implements X-Chain whitelist vtx (stop vertex) tests. 
@@ -12,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/tests" "github.com/ava-labs/avalanchego/tests/e2e" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -23,13 +24,13 @@ import ( ) const ( - metricVtxIssueSuccess = "avalanche_X_whitelist_vtx_issue_success" - metricVtxIssueFailure = "avalanche_X_whitelist_vtx_issue_failure" - metricTxProcessing = "avalanche_X_whitelist_tx_processing" - metricTxAccepted = "avalanche_X_whitelist_tx_accepted_count" - metricTxRejected = "avalanche_X_whitelist_tx_rejected_count" - metricTxPollsAccepted = "avalanche_X_whitelist_tx_polls_accepted_count" - metricTxPollsRejected = "avalanche_X_whitelist_tx_polls_rejected_count" + metricVtxIssueSuccess = "avalanche_X_avalanche_whitelist_vtx_issue_success" + metricVtxIssueFailure = "avalanche_X_avalanche_whitelist_vtx_issue_failure" + metricTxProcessing = "avalanche_X_avalanche_whitelist_tx_processing" + metricTxAccepted = "avalanche_X_avalanche_whitelist_tx_accepted_count" + metricTxRejected = "avalanche_X_avalanche_whitelist_tx_rejected_count" + metricTxPollsAccepted = "avalanche_X_avalanche_whitelist_tx_polls_accepted_count" + metricTxPollsRejected = "avalanche_X_avalanche_whitelist_tx_polls_rejected_count" ) var _ = e2e.DescribeXChain("[WhitelistTx]", func() { @@ -66,7 +67,7 @@ var _ = e2e.DescribeXChain("[WhitelistTx]", func() { for i := range wallets { wallets[i] = primary.NewWalletWithOptions( baseWallet, - common.WithCustomAddresses(ids.ShortSet{ + common.WithCustomAddresses(set.Set[ids.ShortID]{ testKeys[i].PublicKey().Address(): struct{}{}, }), ) @@ -223,8 +224,8 @@ var _ = e2e.DescribeXChain("[WhitelistTx]", func() { // +0 since no node should ever successfully issue another whitelist vtx gomega.Expect(mm[metricVtxIssueSuccess]).Should(gomega.Equal(prev[metricVtxIssueSuccess])) - // 
+1 since the local node engine failed the conflicting whitelist vtx issue request - gomega.Expect(mm[metricVtxIssueFailure]).Should(gomega.Equal(prev[metricVtxIssueFailure] + 1)) + // +0 since the local node engine should have dropped the conflicting whitelist vtx issue request + gomega.Expect(mm[metricVtxIssueFailure]).Should(gomega.Equal(prev[metricVtxIssueFailure])) // +0 since the local node snowstorm successfully issued the whitelist tx "before", and no whitelist tx is being processed gomega.Expect(mm[metricTxProcessing]).Should(gomega.Equal(prev[metricTxProcessing])) @@ -241,7 +242,7 @@ var _ = e2e.DescribeXChain("[WhitelistTx]", func() { } }) - ginkgo.By("issue regular, virtuous X-Chain tx, after whitelist vtx, should fail", func() { + ginkgo.By("issue regular, virtuous X-Chain tx, after whitelist vtx, should pass", func() { balances, err := wallets[0].X().Builder().GetFTBalance() gomega.Expect(err).Should(gomega.BeNil()) @@ -269,16 +270,7 @@ var _ = e2e.DescribeXChain("[WhitelistTx]", func() { common.WithContext(ctx), ) cancel() - gomega.Expect(err.Error()).Should(gomega.ContainSubstring(context.DeadlineExceeded.Error())) - - ep := uris[0] + "/ext/metrics" - mm, err := tests.GetMetricsValue(ep, allMetrics...) gomega.Expect(err).Should(gomega.BeNil()) - - // regular, virtuous transaction should not change whitelist vtx metrics - prev := curMetrics[uris[0]] - gomega.Expect(mm).Should(gomega.Equal(prev)) - curMetrics[uris[0]] = mm }) }) }) diff --git a/avalanchego/tests/http.go b/avalanchego/tests/http.go index 4af08fa8..b4cb3204 100644 --- a/avalanchego/tests/http.go +++ b/avalanchego/tests/http.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tests import ( "bufio" + "context" "fmt" "io" "net/http" @@ -47,10 +48,16 @@ func GetMetricsValue(url string, metrics ...string) (map[string]float64, error) } func getHTTPLines(url string) ([]string, error) { - resp, err := http.Get(url) + req, err := http.NewRequestWithContext(context.TODO(), "GET", url, nil) if err != nil { return nil, err } + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + rd := bufio.NewReader(resp.Body) lines := []string{} for { @@ -59,10 +66,10 @@ func getHTTPLines(url string) ([]string, error) { if err == io.EOF { break } + _ = resp.Body.Close() return nil, err } lines = append(lines, strings.TrimSpace(line)) } - resp.Body.Close() - return lines, nil + return lines, resp.Body.Close() } diff --git a/avalanchego/tests/keys.go b/avalanchego/tests/keys.go index 390238b9..9b5945bb 100644 --- a/avalanchego/tests/keys.go +++ b/avalanchego/tests/keys.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tests @@ -6,15 +6,14 @@ package tests import ( "bufio" "encoding/hex" - "fmt" "os" "strings" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) // Loads a list of secp256k1 hex-encoded private keys from the file, new-line separated. 
-func LoadHexTestKeys(filePath string) (keys []*crypto.PrivateKeySECP256K1R, err error) { +func LoadHexTestKeys(filePath string) (keys []*secp256k1.PrivateKey, err error) { f, err := os.Open(filePath) if err != nil { return nil, err @@ -37,22 +36,13 @@ func LoadHexTestKeys(filePath string) (keys []*crypto.PrivateKeySECP256K1R, err return keys, nil } -var keyFactory = new(crypto.FactorySECP256K1R) +var keyFactory = new(secp256k1.Factory) -func decodeHexPrivateKey(enc string) (*crypto.PrivateKeySECP256K1R, error) { - rawPk := strings.Replace(enc, crypto.PrivateKeyPrefix, "", 1) - var skBytes []byte +func decodeHexPrivateKey(enc string) (*secp256k1.PrivateKey, error) { + rawPk := strings.Replace(enc, secp256k1.PrivateKeyPrefix, "", 1) skBytes, err := hex.DecodeString(rawPk) if err != nil { return nil, err } - rpk, err := keyFactory.ToPrivateKey(skBytes) - if err != nil { - return nil, err - } - privKey, ok := rpk.(*crypto.PrivateKeySECP256K1R) - if !ok { - return nil, fmt.Errorf("invalid type %T", rpk) - } - return privKey, nil + return keyFactory.ToPrivateKey(skBytes) } diff --git a/avalanchego/tests/keys_test.go b/avalanchego/tests/keys_test.go index 3ad59ef3..a3a31b47 100644 --- a/avalanchego/tests/keys_test.go +++ b/avalanchego/tests/keys_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tests @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) func TestLoadTestKeys(t *testing.T) { @@ -20,6 +20,6 @@ func TestLoadTestKeys(t *testing.T) { } } -func encodeShortAddr(pk *crypto.PrivateKeySECP256K1R) string { +func encodeShortAddr(pk *secp256k1.PrivateKey) string { return pk.PublicKey().Address().String() } diff --git a/avalanchego/tests/upgrade/upgrade_test.go b/avalanchego/tests/upgrade/upgrade_test.go index 1af66894..2a14a4b1 100644 --- a/avalanchego/tests/upgrade/upgrade_test.go +++ b/avalanchego/tests/upgrade/upgrade_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Runs upgrade tests. diff --git a/avalanchego/trace/exporter.go b/avalanchego/trace/exporter.go new file mode 100644 index 00000000..25220097 --- /dev/null +++ b/avalanchego/trace/exporter.go @@ -0,0 +1,62 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package trace + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +const tracerProviderExportCreationTimeout = 5 * time.Second + +type ExporterConfig struct { + Type ExporterType `json:"type"` + + // Endpoint to send metrics to + Endpoint string `json:"endpoint"` + + // Headers to send with metrics + Headers map[string]string `json:"headers"` + + // If true, don't use TLS + Insecure bool `json:"insecure"` +} + +func newExporter(config ExporterConfig) (sdktrace.SpanExporter, error) { + var client otlptrace.Client + switch config.Type { + case GRPC: + opts := []otlptracegrpc.Option{ + otlptracegrpc.WithEndpoint(config.Endpoint), + otlptracegrpc.WithHeaders(config.Headers), + otlptracegrpc.WithTimeout(tracerExportTimeout), + } + if config.Insecure { + opts = append(opts, otlptracegrpc.WithInsecure()) + } + client = otlptracegrpc.NewClient(opts...) + case HTTP: + opts := []otlptracehttp.Option{ + otlptracehttp.WithEndpoint(config.Endpoint), + otlptracehttp.WithHeaders(config.Headers), + otlptracehttp.WithTimeout(tracerExportTimeout), + } + if config.Insecure { + opts = append(opts, otlptracehttp.WithInsecure()) + } + client = otlptracehttp.NewClient(opts...) + default: + return nil, errUnknownExporterType + } + + ctx, cancel := context.WithTimeout(context.Background(), tracerProviderExportCreationTimeout) + defer cancel() + return otlptrace.New(ctx, client) +} diff --git a/avalanchego/trace/exporter_type.go b/avalanchego/trace/exporter_type.go new file mode 100644 index 00000000..52d0124f --- /dev/null +++ b/avalanchego/trace/exporter_type.go @@ -0,0 +1,45 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package trace + +import ( + "errors" + "fmt" + "strings" +) + +const ( + GRPC ExporterType = iota + 1 + HTTP +) + +var errUnknownExporterType = errors.New("unknown exporter type") + +func ExporterTypeFromString(exporterTypeStr string) (ExporterType, error) { + switch strings.ToLower(exporterTypeStr) { + case GRPC.String(): + return GRPC, nil + case HTTP.String(): + return HTTP, nil + default: + return 0, fmt.Errorf("%w: %q", errUnknownExporterType, exporterTypeStr) + } +} + +type ExporterType byte + +func (t ExporterType) MarshalJSON() ([]byte, error) { + return []byte(`"` + t.String() + `"`), nil +} + +func (t ExporterType) String() string { + switch t { + case GRPC: + return "grpc" + case HTTP: + return "http" + default: + return "unknown" + } +} diff --git a/avalanchego/trace/noop.go b/avalanchego/trace/noop.go new file mode 100644 index 00000000..789a249b --- /dev/null +++ b/avalanchego/trace/noop.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package trace + +import ( + "context" + + "go.opentelemetry.io/otel/trace" +) + +var _ Tracer = (*noOpTracer)(nil) + +// noOpTracer is an implementation of trace.Tracer that does nothing. +type noOpTracer struct { + t trace.Tracer +} + +func (n noOpTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return n.t.Start(ctx, spanName, opts...) +} + +func (noOpTracer) Close() error { + return nil +} diff --git a/avalanchego/trace/tracer.go b/avalanchego/trace/tracer.go new file mode 100644 index 00000000..6def495a --- /dev/null +++ b/avalanchego/trace/tracer.go @@ -0,0 +1,84 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package trace + +import ( + "context" + "io" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/trace" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/version" +) + +const ( + tracerExportTimeout = 10 * time.Second + // [tracerProviderShutdownTimeout] is longer than [tracerExportTimeout] so + // in-flight exports can finish before the tracer provider shuts down. + tracerProviderShutdownTimeout = 15 * time.Second +) + +type Config struct { + ExporterConfig `json:"exporterConfig"` + + // Used to flag if tracing should be performed + Enabled bool `json:"enabled"` + + // The fraction of traces to sample. + // If >= 1 always samples. + // If <= 0 never samples. + TraceSampleRate float64 `json:"traceSampleRate"` +} + +type Tracer interface { + trace.Tracer + io.Closer +} + +type tracer struct { + trace.Tracer + + tp *sdktrace.TracerProvider +} + +func (t *tracer) Close() error { + ctx, cancel := context.WithTimeout(context.Background(), tracerProviderShutdownTimeout) + defer cancel() + return t.tp.Shutdown(ctx) +} + +func New(config Config) (Tracer, error) { + if !config.Enabled { + return &noOpTracer{ + t: trace.NewNoopTracerProvider().Tracer(constants.AppName), + }, nil + } + + exporter, err := newExporter(config.ExporterConfig) + if err != nil { + return nil, err + } + + tracerProviderOpts := []sdktrace.TracerProviderOption{ + sdktrace.WithBatcher(exporter, sdktrace.WithExportTimeout(tracerExportTimeout)), + sdktrace.WithResource(resource.NewWithAttributes(semconv.SchemaURL, + attribute.Stringer("version", version.Current), + semconv.ServiceNameKey.String(constants.AppName), + )), + sdktrace.WithSampler(sdktrace.TraceIDRatioBased(config.TraceSampleRate)), + } + + tracerProvider := sdktrace.NewTracerProvider(tracerProviderOpts...) 
+ return &tracer{ + Tracer: tracerProvider.Tracer(constants.AppName), + tp: tracerProvider, + }, nil +} diff --git a/avalanchego/utils/atomic.go b/avalanchego/utils/atomic.go new file mode 100644 index 00000000..d72dd4d4 --- /dev/null +++ b/avalanchego/utils/atomic.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +import ( + "sync" +) + +type Atomic[T any] struct { + lock sync.RWMutex + value T +} + +func (a *Atomic[T]) Get() T { + a.lock.RLock() + defer a.lock.RUnlock() + + return a.value +} + +func (a *Atomic[T]) Set(value T) { + a.lock.Lock() + defer a.lock.Unlock() + + a.value = value +} diff --git a/avalanchego/utils/atomic_bool.go b/avalanchego/utils/atomic_bool.go deleted file mode 100644 index c01008ac..00000000 --- a/avalanchego/utils/atomic_bool.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package utils - -import "sync/atomic" - -type AtomicBool struct { - value uint32 -} - -func (a *AtomicBool) GetValue() bool { - return atomic.LoadUint32(&a.value) != 0 -} - -func (a *AtomicBool) SetValue(b bool) { - var value uint32 - if b { - value = 1 - } - atomic.StoreUint32(&a.value, value) -} diff --git a/avalanchego/utils/atomic_interface.go b/avalanchego/utils/atomic_interface.go deleted file mode 100644 index d3c239aa..00000000 --- a/avalanchego/utils/atomic_interface.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package utils - -import ( - "sync" -) - -type AtomicInterface struct { - value interface{} - lock sync.RWMutex -} - -func NewAtomicInterface(v interface{}) *AtomicInterface { - mutexInterface := AtomicInterface{} - mutexInterface.SetValue(v) - return &mutexInterface -} - -func (a *AtomicInterface) GetValue() interface{} { - a.lock.RLock() - defer a.lock.RUnlock() - return a.value -} - -func (a *AtomicInterface) SetValue(v interface{}) { - a.lock.Lock() - defer a.lock.Unlock() - a.value = v -} diff --git a/avalanchego/utils/atomic_interface_test.go b/avalanchego/utils/atomic_interface_test.go deleted file mode 100644 index 2897e4dd..00000000 --- a/avalanchego/utils/atomic_interface_test.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package utils - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestAtomicInterface(t *testing.T) { - iface := NewAtomicInterface(nil) - require.Nil(t, iface.GetValue()) - iface.SetValue(nil) - require.Nil(t, iface.GetValue()) - val, ok := iface.GetValue().([]byte) - require.False(t, ok) - require.Nil(t, val) - iface.SetValue([]byte("test")) - require.Equal(t, []byte("test"), iface.GetValue().([]byte)) -} diff --git a/avalanchego/utils/atomic_test.go b/avalanchego/utils/atomic_test.go new file mode 100644 index 00000000..1af2ba49 --- /dev/null +++ b/avalanchego/utils/atomic_test.go @@ -0,0 +1,26 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAtomic(t *testing.T) { + require := require.New(t) + + var a Atomic[bool] + require.Zero(a.Get()) + + a.Set(false) + require.False(a.Get()) + + a.Set(true) + require.True(a.Get()) + + a.Set(false) + require.False(a.Get()) +} diff --git a/avalanchego/utils/bag/bag.go b/avalanchego/utils/bag/bag.go new file mode 100644 index 00000000..cf889fa5 --- /dev/null +++ b/avalanchego/utils/bag/bag.go @@ -0,0 +1,170 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bag + +import ( + "fmt" + "strings" + + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/utils/set" +) + +const minBagSize = 16 + +// Bag is a multiset. +type Bag[T comparable] struct { + counts map[T]int + size int + + threshold int + metThreshold set.Set[T] +} + +func (b *Bag[T]) init() { + if b.counts == nil { + b.counts = make(map[T]int, minBagSize) + } +} + +// SetThreshold sets the number of times an element must be added to be contained in +// the threshold set. +func (b *Bag[_]) SetThreshold(threshold int) { + if b.threshold == threshold { + return + } + + b.threshold = threshold + b.metThreshold.Clear() + for vote, count := range b.counts { + if count >= threshold { + b.metThreshold.Add(vote) + } + } +} + +// Add increases the number of times each element has been seen by one. +func (b *Bag[T]) Add(elts ...T) { + for _, elt := range elts { + b.AddCount(elt, 1) + } +} + +// AddCount increases the number of times the element has been seen by [count]. +// If [count] <= 0 this is a no-op. +func (b *Bag[T]) AddCount(elt T, count int) { + if count <= 0 { + return + } + + b.init() + + totalCount := b.counts[elt] + count + b.counts[elt] = totalCount + b.size += count + + if totalCount >= b.threshold { + b.metThreshold.Add(elt) + } +} + +// Count returns the number of [elt] in the bag. 
+func (b *Bag[T]) Count(elt T) int { + return b.counts[elt] +} + +// Len returns the number of elements in the bag. +func (b *Bag[_]) Len() int { + return b.size +} + +// List returns a list of unique elements that have been added. +// The returned list doesn't have duplicates. +func (b *Bag[T]) List() []T { + return maps.Keys(b.counts) +} + +// Equals returns true if the bags contain the same elements +func (b *Bag[T]) Equals(other Bag[T]) bool { + return b.size == other.size && maps.Equal(b.counts, other.counts) +} + +// Mode returns the most common element in the bag and the count of that element. +// If there's a tie, any of the tied element may be returned. +// TODO for Stephen: Does the above violate an assumption made by Snowball? +// If the bag is empty, the zero value and 0 are returned. +func (b *Bag[T]) Mode() (T, int) { + var ( + mode T + modeFreq int + ) + for elt, count := range b.counts { + if count > modeFreq { + mode = elt + modeFreq = count + } + } + + return mode, modeFreq +} + +// Threshold returns the elements that have been seen at least threshold times. +func (b *Bag[T]) Threshold() set.Set[T] { + return b.metThreshold +} + +// Returns a bag with the elements of this bag that return true for [filterFunc], +// along with their counts. +// For example, if X is in this bag with count 5, and filterFunc(X) returns true, +// then the returned bag contains X with count 5. +func (b *Bag[T]) Filter(filterFunc func(T) bool) Bag[T] { + newBag := Bag[T]{} + for vote, count := range b.counts { + if filterFunc(vote) { + newBag.AddCount(vote, count) + } + } + return newBag +} + +// Returns: +// 1. A bag containing the elements of this bag that return false for [splitFunc]. +// 2. A bag containing the elements of this bag that return true for [splitFunc]. +// Counts are preserved in the returned bags. +// For example, if X is in this bag with count 5, and splitFunc(X) is false, +// then the first returned bag has X in it with count 5. 
+func (b *Bag[T]) Split(splitFunc func(T) bool) [2]Bag[T] { + splitVotes := [2]Bag[T]{} + for vote, count := range b.counts { + if splitFunc(vote) { + splitVotes[1].AddCount(vote, count) + } else { + splitVotes[0].AddCount(vote, count) + } + } + return splitVotes +} + +// Remove all instances of [elt] from the bag. +func (b *Bag[T]) Remove(elt T) { + count := b.counts[elt] + delete(b.counts, elt) + b.size -= count +} + +func (b *Bag[_]) PrefixedString(prefix string) string { + sb := strings.Builder{} + + sb.WriteString(fmt.Sprintf("Bag: (Size = %d)", b.Len())) + for elt, count := range b.counts { + sb.WriteString(fmt.Sprintf("\n%s %v: %d", prefix, elt, count)) + } + + return sb.String() +} + +func (b *Bag[_]) String() string { + return b.PrefixedString("") +} diff --git a/avalanchego/ids/bag_benchmark_test.go b/avalanchego/utils/bag/bag_benchmark_test.go similarity index 57% rename from avalanchego/ids/bag_benchmark_test.go rename to avalanchego/utils/bag/bag_benchmark_test.go index 30e85120..833ce755 100644 --- a/avalanchego/ids/bag_benchmark_test.go +++ b/avalanchego/utils/bag/bag_benchmark_test.go @@ -1,22 +1,22 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package ids +package bag import ( - "crypto/rand" + "math/rand" "testing" ) +func init() { + rand.Seed(1337) // for determinism +} + func BenchmarkBagListSmall(b *testing.B) { smallLen := 5 - bag := Bag{} + bag := Bag[int]{} for i := 0; i < smallLen; i++ { - var id ID - if _, err := rand.Read(id[:]); err != nil { - b.Fatal(err) - } - bag.Add(id) + bag.Add(rand.Int()) // #nosec G404 } b.ResetTimer() for n := 0; n < b.N; n++ { @@ -26,13 +26,9 @@ func BenchmarkBagListSmall(b *testing.B) { func BenchmarkBagListMedium(b *testing.B) { mediumLen := 25 - bag := Bag{} + bag := Bag[int]{} for i := 0; i < mediumLen; i++ { - var id ID - if _, err := rand.Read(id[:]); err != nil { - b.Fatal(err) - } - bag.Add(id) + bag.Add(rand.Int()) // #nosec G404 } b.ResetTimer() @@ -43,13 +39,9 @@ func BenchmarkBagListMedium(b *testing.B) { func BenchmarkBagListLarge(b *testing.B) { largeLen := 100000 - bag := Bag{} + bag := Bag[int]{} for i := 0; i < largeLen; i++ { - var id ID - if _, err := rand.Read(id[:]); err != nil { - b.Fatal(err) - } - bag.Add(id) + bag.Add(rand.Int()) // #nosec G404 } b.ResetTimer() for n := 0; n < b.N; n++ { diff --git a/avalanchego/utils/bag/bag_test.go b/avalanchego/utils/bag/bag_test.go new file mode 100644 index 00000000..6a9dece6 --- /dev/null +++ b/avalanchego/utils/bag/bag_test.go @@ -0,0 +1,240 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bag + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestBagAdd(t *testing.T) { + require := require.New(t) + + elt0 := 0 + elt1 := 1 + + bag := Bag[int]{} + + require.Equal(0, bag.Count(elt0)) + require.Equal(0, bag.Count(elt1)) + require.Equal(0, bag.Len()) + require.Len(bag.List(), 0) + mode, freq := bag.Mode() + require.Equal(elt0, mode) + require.Equal(0, freq) + require.Len(bag.Threshold(), 0) + + bag.Add(elt0) + + require.Equal(1, bag.Count(elt0)) + require.Equal(0, bag.Count(elt1)) + require.Equal(1, bag.Len()) + require.Len(bag.List(), 1) + mode, freq = bag.Mode() + require.Equal(elt0, mode) + require.Equal(1, freq) + require.Len(bag.Threshold(), 1) + + bag.Add(elt0) + + require.Equal(2, bag.Count(elt0)) + require.Equal(0, bag.Count(elt1)) + require.Equal(2, bag.Len()) + require.Len(bag.List(), 1) + mode, freq = bag.Mode() + require.Equal(elt0, mode) + require.Equal(2, freq) + require.Len(bag.Threshold(), 1) + + bag.AddCount(elt1, 3) + + require.Equal(2, bag.Count(elt0)) + require.Equal(3, bag.Count(elt1)) + require.Equal(5, bag.Len()) + require.Len(bag.List(), 2) + mode, freq = bag.Mode() + require.Equal(elt1, mode) + require.Equal(3, freq) + require.Len(bag.Threshold(), 2) +} + +func TestBagSetThreshold(t *testing.T) { + require := require.New(t) + + elt0 := 0 + elt1 := 1 + + bag := Bag[int]{} + + bag.AddCount(elt0, 2) + bag.AddCount(elt1, 3) + + bag.SetThreshold(0) + + require.Equal(2, bag.Count(elt0)) + require.Equal(3, bag.Count(elt1)) + require.Equal(5, bag.Len()) + require.Len(bag.List(), 2) + mode, freq := bag.Mode() + require.Equal(elt1, mode) + require.Equal(3, freq) + require.Len(bag.Threshold(), 2) + + bag.SetThreshold(3) + + require.Equal(2, bag.Count(elt0)) + require.Equal(3, bag.Count(elt1)) + require.Equal(5, bag.Len()) + require.Len(bag.List(), 2) + mode, freq = bag.Mode() + require.Equal(elt1, mode) + require.Equal(3, freq) + require.Len(bag.Threshold(), 1) +} + +func TestBagFilter(t *testing.T) { + 
require := require.New(t) + + elt0 := 0 + elt1 := 1 + elt2 := 2 + + bag := Bag[int]{} + + bag.AddCount(elt0, 1) + bag.AddCount(elt1, 3) + bag.AddCount(elt2, 5) + + filterFunc := func(elt int) bool { + return elt%2 == 0 + } + even := bag.Filter(filterFunc) + + require.Equal(1, even.Count(elt0)) + require.Equal(0, even.Count(elt1)) + require.Equal(5, even.Count(elt2)) +} + +func TestBagSplit(t *testing.T) { + require := require.New(t) + + elt0 := 0 + elt1 := 1 + elt2 := 2 + + bag := Bag[int]{} + + bag.AddCount(elt0, 1) + bag.AddCount(elt1, 3) + bag.AddCount(elt2, 5) + + bags := bag.Split(func(i int) bool { + return i%2 != 0 + }) + + evens := bags[0] + odds := bags[1] + + require.Equal(1, evens.Count(elt0)) + require.Equal(0, evens.Count(elt1)) + require.Equal(5, evens.Count(elt2)) + require.Equal(0, odds.Count(elt0)) + require.Equal(3, odds.Count(elt1)) + require.Equal(0, odds.Count(elt2)) +} + +func TestBagString(t *testing.T) { + elt0 := 123 + + bag := Bag[int]{} + + bag.AddCount(elt0, 1337) + + expected := "Bag: (Size = 1337)\n" + + " 123: 1337" + + require.Equal(t, expected, bag.String()) +} + +func TestBagRemove(t *testing.T) { + require := require.New(t) + + elt0 := 0 + elt1 := 1 + elt2 := 2 + + bag := Bag[int]{} + + bag.Remove(elt0) + require.Equal(0, bag.Len()) + + bag.AddCount(elt0, 3) + bag.AddCount(elt1, 2) + bag.Add(elt2) + require.Equal(6, bag.Len()) + require.Len(bag.counts, 3) + mode, freq := bag.Mode() + require.Equal(elt0, mode) + require.Equal(3, freq) + + bag.Remove(elt0) + + require.Equal(0, bag.Count(elt0)) + require.Equal(2, bag.Count(elt1)) + require.Equal(1, bag.Count(elt2)) + require.Equal(3, bag.Len()) + require.Len(bag.counts, 2) + mode, freq = bag.Mode() + require.Equal(elt1, mode) + require.Equal(2, freq) + + bag.Remove(elt1) + require.Equal(0, bag.Count(elt0)) + require.Equal(0, bag.Count(elt1)) + require.Equal(1, bag.Count(elt2)) + require.Equal(1, bag.Len()) + require.Len(bag.counts, 1) + mode, freq = bag.Mode() + require.Equal(elt2, 
mode) + require.Equal(1, freq) +} + +func TestBagEquals(t *testing.T) { + require := require.New(t) + + bag1 := Bag[int]{} + bag2 := Bag[int]{} + + // Case: both empty + require.True(bag1.Equals(bag2)) + require.True(bag2.Equals(bag1)) + + // Case: one empty, one not + bag1.Add(0) + require.False(bag1.Equals(bag2)) + require.False(bag2.Equals(bag1)) + + bag2.Add(0) + require.True(bag1.Equals(bag2)) + require.True(bag2.Equals(bag1)) + + // Case: both non-empty, different elements + bag1.Add(1) + require.False(bag1.Equals(bag2)) + require.False(bag2.Equals(bag1)) + + bag2.Add(1) + require.True(bag1.Equals(bag2)) + require.True(bag2.Equals(bag1)) + + // Case: both non-empty, different counts + bag1.Add(0) + require.False(bag1.Equals(bag2)) + require.False(bag2.Equals(bag1)) + + bag2.Add(0) + require.True(bag1.Equals(bag2)) + require.True(bag2.Equals(bag1)) +} diff --git a/avalanchego/utils/bag/unique_bag.go b/avalanchego/utils/bag/unique_bag.go new file mode 100644 index 00000000..debad45b --- /dev/null +++ b/avalanchego/utils/bag/unique_bag.go @@ -0,0 +1,114 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bag + +import ( + "fmt" + "strings" + + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/utils/set" +) + +// Maps a key to a bitset. +type UniqueBag[T comparable] map[T]set.Bits64 + +func (b *UniqueBag[T]) init() { + if *b == nil { + *b = make(map[T]set.Bits64, minBagSize) + } +} + +// Adds [n] to the bitset associated with each key in [keys]. +func (b *UniqueBag[T]) Add(n uint, keys ...T) { + var bs set.Bits64 + bs.Add(n) + + for _, key := range keys { + b.UnionSet(key, bs) + } +} + +// Unions [set] with the bitset associated with [key]. +func (b *UniqueBag[T]) UnionSet(key T, set set.Bits64) { + b.init() + + previousSet := (*b)[key] + previousSet.Union(set) + (*b)[key] = previousSet +} + +// Removes each element of [set] from the bitset associated with [key]. 
+func (b *UniqueBag[T]) DifferenceSet(key T, set set.Bits64) { + b.init() + + previousSet := (*b)[key] + previousSet.Difference(set) + (*b)[key] = previousSet +} + +// For each key/bitset pair in [diff], removes each element of the bitset +// from the bitset associated with the key in [b]. +// Keys in [diff] that are not in [b] are ignored. +// Bitset elements in [diff] that are not in the bitset associated with +// the key in [b] are ignored. +func (b *UniqueBag[T]) Difference(diff *UniqueBag[T]) { + b.init() + + for key, previousSet := range *b { + if previousSetDiff, exists := (*diff)[key]; exists { + previousSet.Difference(previousSetDiff) + } + (*b)[key] = previousSet + } +} + +// Returns the bitset associated with [key]. +func (b *UniqueBag[T]) GetSet(key T) set.Bits64 { + return (*b)[key] +} + +// Removes the bitset associated with [key]. +func (b *UniqueBag[T]) RemoveSet(key T) { + delete(*b, key) +} + +// Returns the keys. +func (b *UniqueBag[T]) List() []T { + return maps.Keys(*b) +} + +// Returns a bag with the given [threshold] where each key is +// in the bag once for each element in the key's bitset. +func (b *UniqueBag[T]) Bag(threshold int) Bag[T] { + bag := Bag[T]{ + counts: make(map[T]int, len(*b)), + } + bag.SetThreshold(threshold) + for key, bs := range *b { + bag.AddCount(key, bs.Len()) + } + return bag +} + +func (b *UniqueBag[_]) PrefixedString(prefix string) string { + sb := strings.Builder{} + + sb.WriteString(fmt.Sprintf("UniqueBag: (Size = %d)", len(*b))) + for key, set := range *b { + sb.WriteString(fmt.Sprintf("\n%s %v: %s", prefix, key, set)) + } + + return sb.String() +} + +func (b *UniqueBag[_]) String() string { + return b.PrefixedString("") +} + +// Removes all key --> bitset pairs. 
+func (b *UniqueBag[_]) Clear() { + maps.Clear(*b) +} diff --git a/avalanchego/utils/bag/unique_bag_test.go b/avalanchego/utils/bag/unique_bag_test.go new file mode 100644 index 00000000..e1920a21 --- /dev/null +++ b/avalanchego/utils/bag/unique_bag_test.go @@ -0,0 +1,103 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bag + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/set" +) + +func TestUniqueBag(t *testing.T) { + require := require.New(t) + + var ub1 UniqueBag[int] + ub1.init() + require.NotNil(ub1) + require.Empty(ub1) + + elt1 := 1 + elt2 := 2 + + ub2 := make(UniqueBag[int]) + ub2.Add(1, elt1, elt2) + require.True(ub2.GetSet(elt1).Contains(1)) + require.True(ub2.GetSet(elt2).Contains(1)) + + var bs1 set.Bits64 + bs1.Add(2) + bs1.Add(4) + + ub3 := make(UniqueBag[int]) + + ub3.UnionSet(elt1, bs1) + + bs1.Clear() + bs1 = ub3.GetSet(elt1) + require.Equal(2, bs1.Len()) + require.True(bs1.Contains(2)) + require.True(bs1.Contains(4)) + + // Difference test + bs1.Clear() + + ub4 := make(UniqueBag[int]) + ub4.Add(1, elt1) + ub4.Add(2, elt1) + ub4.Add(5, elt2) + ub4.Add(8, elt2) + + ub5 := make(UniqueBag[int]) + ub5.Add(5, elt2) + ub5.Add(5, elt1) + require.Len(ub5.List(), 2) + + ub4.Difference(&ub5) + + ub4elt1 := ub4.GetSet(elt1) + require.Equal(2, ub4elt1.Len()) + require.True(ub4elt1.Contains(1)) + require.True(ub4elt1.Contains(2)) + + ub4elt2 := ub4.GetSet(elt2) + require.Equal(1, ub4elt2.Len()) + require.True(ub4elt2.Contains(8)) + + // DifferenceSet test + + ub6 := make(UniqueBag[int]) + ub6.Add(1, elt1) + ub6.Add(2, elt1) + ub6.Add(7, elt1) + + diffBitSet := set.Bits64(0) + diffBitSet.Add(1) + diffBitSet.Add(7) + + ub6.DifferenceSet(elt1, diffBitSet) + + ub6elt1 := ub6.GetSet(elt1) + require.Equal(1, ub6elt1.Len()) + require.True(ub6elt1.Contains(2)) +} + +func TestUniqueBagClear(t *testing.T) { + require := require.New(t) + 
+ b := UniqueBag[int]{} + elt1, elt2 := 0, 1 + b.Add(0, elt1) + b.Add(1, elt1, elt2) + + b.Clear() + require.Empty(b.List()) + + bs := b.GetSet(elt1) + require.Equal(0, bs.Len()) + + bs = b.GetSet(elt2) + require.Equal(0, bs.Len()) +} diff --git a/avalanchego/utils/beacon/beacon.go b/avalanchego/utils/beacon/beacon.go index ecaaa613..47e41032 100644 --- a/avalanchego/utils/beacon/beacon.go +++ b/avalanchego/utils/beacon/beacon.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package beacon @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/avalanchego/utils/ips" ) -var _ Beacon = &beacon{} +var _ Beacon = (*beacon)(nil) type Beacon interface { ID() ids.NodeID @@ -27,5 +27,10 @@ func New(id ids.NodeID, ip ips.IPPort) Beacon { } } -func (b *beacon) ID() ids.NodeID { return b.id } -func (b *beacon) IP() ips.IPPort { return b.ip } +func (b *beacon) ID() ids.NodeID { + return b.id +} + +func (b *beacon) IP() ips.IPPort { + return b.ip +} diff --git a/avalanchego/utils/beacon/set.go b/avalanchego/utils/beacon/set.go index 305c2d52..243f8399 100644 --- a/avalanchego/utils/beacon/set.go +++ b/avalanchego/utils/beacon/set.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package beacon @@ -12,7 +12,7 @@ import ( ) var ( - _ Set = &set{} + _ Set = (*set)(nil) errDuplicateID = errors.New("duplicated ID") errDuplicateIP = errors.New("duplicated IP") @@ -99,7 +99,9 @@ func (s *set) RemoveByIP(ip ips.IPPort) error { return s.RemoveByID(idToRemove) } -func (s *set) Len() int { return len(s.beacons) } +func (s *set) Len() int { + return len(s.beacons) +} func (s *set) IDsArg() string { sb := strings.Builder{} diff --git a/avalanchego/utils/beacon/set_test.go b/avalanchego/utils/beacon/set_test.go index c4eab3a8..4e8ada45 100644 --- a/avalanchego/utils/beacon/set_test.go +++ b/avalanchego/utils/beacon/set_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package beacon diff --git a/avalanchego/utils/bloom/bloom_filter.go b/avalanchego/utils/bloom/bloom_filter.go index 66c4039b..498c57d3 100644 --- a/avalanchego/utils/bloom/bloom_filter.go +++ b/avalanchego/utils/bloom/bloom_filter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bloom diff --git a/avalanchego/utils/bloom/bloom_filter_test.go b/avalanchego/utils/bloom/bloom_filter_test.go index a02d18ab..7e810add 100644 --- a/avalanchego/utils/bloom/bloom_filter_test.go +++ b/avalanchego/utils/bloom/bloom_filter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bloom diff --git a/avalanchego/utils/bloom/map_filter.go b/avalanchego/utils/bloom/map_filter.go index 98684a87..19046bea 100644 --- a/avalanchego/utils/bloom/map_filter.go +++ b/avalanchego/utils/bloom/map_filter.go @@ -1,21 +1,21 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bloom import ( "sync" + + "github.com/ava-labs/avalanchego/utils/set" ) type mapFilter struct { lock sync.RWMutex - values map[string]struct{} + values set.Set[string] } func NewMap() Filter { - return &mapFilter{ - values: make(map[string]struct{}), - } + return &mapFilter{} } func (m *mapFilter) Add(bl ...[]byte) { @@ -23,7 +23,7 @@ func (m *mapFilter) Add(bl ...[]byte) { defer m.lock.Unlock() for _, b := range bl { - m.values[string(b)] = struct{}{} + m.values.Add(string(b)) } } @@ -31,6 +31,5 @@ func (m *mapFilter) Check(b []byte) bool { m.lock.RLock() defer m.lock.RUnlock() - _, exists := m.values[string(b)] - return exists + return m.values.Contains(string(b)) } diff --git a/avalanchego/utils/buffer/bounded_nonblocking_queue.go b/avalanchego/utils/buffer/bounded_nonblocking_queue.go new file mode 100644 index 00000000..0b5d5f94 --- /dev/null +++ b/avalanchego/utils/buffer/bounded_nonblocking_queue.go @@ -0,0 +1,90 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package buffer + +import "errors" + +var ( + _ Queue[struct{}] = (*boundedQueue[struct{}])(nil) + + errInvalidMaxSize = errors.New("maxSize must be greater than 0") +) + +// A FIFO queue. +type Queue[T any] interface { + // Pushes [elt] onto the queue. + // If the queue is full, the oldest element is evicted to make space. + Push(T) + // Pops the oldest element from the queue. + // Returns false if the queue is empty. + Pop() (T, bool) + // Returns the oldest element without removing it. + // Returns false if the queue is empty. + Peek() (T, bool) + // Returns the element at the given index without removing it. + // Index(0) returns the oldest element. + // Index(Len() - 1) returns the newest element. + // Returns false if there is no element at that index. 
+ Index(int) (T, bool) + // Returns the number of elements in the queue. + Len() int + // Returns the queue elements from oldest to newest. + // This is an O(n) operation and should be used sparingly. + List() []T +} + +// Keeps up to [maxSize] entries in an ordered buffer +// and calls [onEvict] on any item that is evicted. +// Not safe for concurrent use. +type boundedQueue[T any] struct { + deque Deque[T] + maxSize int + onEvict func(T) +} + +// Returns a new bounded, non-blocking queue that holds up to [maxSize] elements. +// When an element is evicted, [onEvict] is called with the evicted element. +// If [onEvict] is nil, this is a no-op. +// [maxSize] must be >= 1. +// Not safe for concurrent use. +func NewBoundedQueue[T any](maxSize int, onEvict func(T)) (Queue[T], error) { + if maxSize < 1 { + return nil, errInvalidMaxSize + } + return &boundedQueue[T]{ + deque: NewUnboundedDeque[T](maxSize + 1), // +1 so we never resize + maxSize: maxSize, + onEvict: onEvict, + }, nil +} + +func (b *boundedQueue[T]) Push(elt T) { + if b.deque.Len() == b.maxSize { + evicted, _ := b.deque.PopLeft() + if b.onEvict != nil { + b.onEvict(evicted) + } + } + _ = b.deque.PushRight(elt) +} + +func (b *boundedQueue[T]) Pop() (T, bool) { + return b.deque.PopLeft() +} + +func (b *boundedQueue[T]) Peek() (T, bool) { + return b.deque.PeekLeft() +} + +func (b *boundedQueue[T]) Index(i int) (T, bool) { + return b.deque.Index(i) +} + +func (b *boundedQueue[T]) Len() int { + return b.deque.Len() +} + +func (b *boundedQueue[T]) List() []T { + return b.deque.List() +} diff --git a/avalanchego/utils/buffer/bounded_nonblocking_queue_test.go b/avalanchego/utils/buffer/bounded_nonblocking_queue_test.go new file mode 100644 index 00000000..402a089b --- /dev/null +++ b/avalanchego/utils/buffer/bounded_nonblocking_queue_test.go @@ -0,0 +1,142 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package buffer + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewBoundedQueue(t *testing.T) { + require := require.New(t) + + // Case: maxSize < 1 + _, err := NewBoundedQueue[bool](0, nil) + require.Error(err) + + // Case: maxSize == 1 and nil onEvict + b, err := NewBoundedQueue[bool](1, nil) + require.NoError(err) + + // Put 2 elements to make sure we don't panic on evict + b.Push(true) + b.Push(true) +} + +func TestBoundedQueue(t *testing.T) { + require := require.New(t) + + maxSize := 3 + evicted := []int{} + onEvict := func(elt int) { + evicted = append(evicted, elt) + } + b, err := NewBoundedQueue(maxSize, onEvict) + require.NoError(err) + + require.Equal(0, b.Len()) + + // Fill the queue + for i := 0; i < maxSize; i++ { + b.Push(i) + require.Equal(i+1, b.Len()) + got, ok := b.Peek() + require.True(ok) + require.Equal(0, got) + got, ok = b.Index(i) + require.True(ok) + require.Equal(i, got) + require.Len(b.List(), i+1) + } + require.Equal([]int{}, evicted) + require.Len(b.List(), maxSize) + // Queue is [0, 1, 2] + + // Empty the queue + for i := 0; i < maxSize; i++ { + got, ok := b.Pop() + require.True(ok) + require.Equal(i, got) + require.Equal(maxSize-i-1, b.Len()) + require.Len(b.List(), maxSize-i-1) + } + + // Queue is empty + + _, ok := b.Pop() + require.False(ok) + _, ok = b.Peek() + require.False(ok) + _, ok = b.Index(0) + require.False(ok) + require.Equal(0, b.Len()) + require.Empty(b.List()) + + // Fill the queue again + for i := 0; i < maxSize; i++ { + b.Push(i) + require.Equal(i+1, b.Len()) + } + + // Queue is [0, 1, 2] + + // Putting another element should evict the oldest. 
+ b.Push(maxSize) + + // Queue is [1, 2, 3] + + require.Equal(maxSize, b.Len()) + require.Len(b.List(), maxSize) + got, ok := b.Peek() + require.True(ok) + require.Equal(1, got) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + got, ok = b.Index(maxSize - 1) + require.True(ok) + require.Equal(maxSize, got) + require.Equal([]int{0}, evicted) + + // Put 2 more elements + b.Push(maxSize + 1) + b.Push(maxSize + 2) + + // Queue is [3, 4, 5] + + require.Equal(maxSize, b.Len()) + require.Equal([]int{0, 1, 2}, evicted) + got, ok = b.Peek() + require.True(ok) + require.Equal(3, got) + require.Equal([]int{3, 4, 5}, b.List()) + + for i := maxSize; i < 2*maxSize; i++ { + got, ok := b.Index(i - maxSize) + require.True(ok) + require.Equal(i, got) + } + + // Empty the queue + for i := 0; i < maxSize; i++ { + got, ok := b.Pop() + require.True(ok) + require.Equal(i+3, got) + require.Equal(maxSize-i-1, b.Len()) + require.Len(b.List(), maxSize-i-1) + } + + // Queue is empty + + require.Empty(b.List()) + require.Equal(0, b.Len()) + require.Equal([]int{0, 1, 2}, evicted) + _, ok = b.Pop() + require.False(ok) + _, ok = b.Peek() + require.False(ok) + _, ok = b.Index(0) + require.False(ok) +} diff --git a/avalanchego/utils/buffer/unbounded_blocking_deque.go b/avalanchego/utils/buffer/unbounded_blocking_deque.go new file mode 100644 index 00000000..078d8d90 --- /dev/null +++ b/avalanchego/utils/buffer/unbounded_blocking_deque.go @@ -0,0 +1,169 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package buffer + +import ( + "sync" + + "github.com/ava-labs/avalanchego/utils" +) + +var _ BlockingDeque[int] = (*unboundedBlockingDeque[int])(nil) + +type BlockingDeque[T any] interface { + Deque[T] + + // Close and empty the deque. + Close() +} + +// Returns a new unbounded deque with the given initial size. 
+// Note that the returned deque is always empty -- [initSize] is just +// a hint to prevent unnecessary resizing. +func NewUnboundedBlockingDeque[T any](initSize int) BlockingDeque[T] { + q := &unboundedBlockingDeque[T]{ + Deque: NewUnboundedDeque[T](initSize), + } + q.cond = sync.NewCond(&q.lock) + return q +} + +type unboundedBlockingDeque[T any] struct { + lock sync.RWMutex + cond *sync.Cond + closed bool + + Deque[T] +} + +// If the deque is closed returns false. +func (q *unboundedBlockingDeque[T]) PushRight(elt T) bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + if q.closed { + return false + } + + // Add the item to the queue + q.Deque.PushRight(elt) + + // Signal a waiting thread + q.cond.Signal() + return true +} + +// If the deque is closed returns false. +func (q *unboundedBlockingDeque[T]) PopRight() (T, bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + for { + if q.closed { + return utils.Zero[T](), false + } + if q.Deque.Len() != 0 { + return q.Deque.PopRight() + } + q.cond.Wait() + } +} + +func (q *unboundedBlockingDeque[T]) PeekRight() (T, bool) { + q.lock.RLock() + defer q.lock.RUnlock() + + if q.closed { + return utils.Zero[T](), false + } + return q.Deque.PeekRight() +} + +// If the deque is closed returns false. +func (q *unboundedBlockingDeque[T]) PushLeft(elt T) bool { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + if q.closed { + return false + } + + // Add the item to the queue + q.Deque.PushLeft(elt) + + // Signal a waiting thread + q.cond.Signal() + return true +} + +// If the deque is closed returns false. 
+func (q *unboundedBlockingDeque[T]) PopLeft() (T, bool) { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + for { + if q.closed { + return utils.Zero[T](), false + } + if q.Deque.Len() != 0 { + return q.Deque.PopLeft() + } + q.cond.Wait() + } +} + +func (q *unboundedBlockingDeque[T]) PeekLeft() (T, bool) { + q.lock.RLock() + defer q.lock.RUnlock() + + if q.closed { + return utils.Zero[T](), false + } + return q.Deque.PeekLeft() +} + +func (q *unboundedBlockingDeque[T]) Index(i int) (T, bool) { + q.lock.RLock() + defer q.lock.RUnlock() + + if q.closed { + return utils.Zero[T](), false + } + return q.Deque.Index(i) +} + +func (q *unboundedBlockingDeque[T]) Len() int { + q.lock.RLock() + defer q.lock.RUnlock() + + if q.closed { + return 0 + } + return q.Deque.Len() +} + +func (q *unboundedBlockingDeque[T]) List() []T { + q.lock.RLock() + defer q.lock.RUnlock() + + if q.closed { + return nil + } + return q.Deque.List() +} + +func (q *unboundedBlockingDeque[T]) Close() { + q.cond.L.Lock() + defer q.cond.L.Unlock() + + if q.closed { + return + } + + q.Deque = nil + + // Mark the queue as closed + q.closed = true + q.cond.Broadcast() +} diff --git a/avalanchego/utils/buffer/unbounded_blocking_deque_test.go b/avalanchego/utils/buffer/unbounded_blocking_deque_test.go new file mode 100644 index 00000000..054d3a2e --- /dev/null +++ b/avalanchego/utils/buffer/unbounded_blocking_deque_test.go @@ -0,0 +1,99 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package buffer + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUnboundedBlockingDequePush(t *testing.T) { + require := require.New(t) + + deque := NewUnboundedBlockingDeque[int](2) + require.Empty(deque.List()) + _, ok := deque.Index(0) + require.False(ok) + + ok = deque.PushRight(1) + require.True(ok) + require.Equal([]int{1}, deque.List()) + got, ok := deque.Index(0) + require.True(ok) + require.Equal(1, got) + + ok = deque.PushRight(2) + require.True(ok) + require.Equal([]int{1, 2}, deque.List()) + got, ok = deque.Index(0) + require.True(ok) + require.Equal(1, got) + got, ok = deque.Index(1) + require.True(ok) + require.Equal(2, got) + _, ok = deque.Index(2) + require.False(ok) + + ch, ok := deque.PopLeft() + require.True(ok) + require.Equal(1, ch) + require.Equal([]int{2}, deque.List()) + got, ok = deque.Index(0) + require.True(ok) + require.Equal(2, got) +} + +func TestUnboundedBlockingDequePop(t *testing.T) { + require := require.New(t) + + deque := NewUnboundedBlockingDeque[int](2) + require.Empty(deque.List()) + + ok := deque.PushRight(1) + require.True(ok) + require.Equal([]int{1}, deque.List()) + got, ok := deque.Index(0) + require.True(ok) + require.Equal(1, got) + + ch, ok := deque.PopLeft() + require.True(ok) + require.Equal(1, ch) + require.Empty(deque.List()) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + ch, ok := deque.PopLeft() + require.True(ok) + require.Equal(2, ch) + wg.Done() + }() + + ok = deque.PushRight(2) + require.True(ok) + wg.Wait() + require.Empty(deque.List()) + _, ok = deque.Index(0) + require.False(ok) +} + +func TestUnboundedBlockingDequeClose(t *testing.T) { + require := require.New(t) + + deque := NewUnboundedBlockingDeque[int](2) + + ok := deque.PushLeft(1) + require.True(ok) + + deque.Close() + + _, ok = deque.PopRight() + require.False(ok) + + ok = deque.PushLeft(1) + require.False(ok) +} diff --git a/avalanchego/utils/buffer/unbounded_deque.go 
b/avalanchego/utils/buffer/unbounded_deque.go new file mode 100644 index 00000000..336f0869 --- /dev/null +++ b/avalanchego/utils/buffer/unbounded_deque.go @@ -0,0 +1,190 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package buffer + +import "github.com/ava-labs/avalanchego/utils" + +const defaultInitSize = 32 + +// An unbounded deque (double-ended queue). +// See https://en.wikipedia.org/wiki/Double-ended_queue +// Not safe for concurrent access. +type Deque[T any] interface { + // Place an element at the leftmost end of the deque. + // Returns true if the element was placed in the deque. + PushLeft(T) bool + // Place an element at the rightmost end of the deque. + // Returns true if the element was placed in the deque. + PushRight(T) bool + // Remove and return the leftmost element of the deque. + // Returns false if the deque is empty. + PopLeft() (T, bool) + // Remove and return the rightmost element of the deque. + // Returns false if the deque is empty. + PopRight() (T, bool) + // Return the leftmost element of the deque without removing it. + // Returns false if the deque is empty. + PeekLeft() (T, bool) + // Return the rightmost element of the deque without removing it. + // Returns false if the deque is empty. + PeekRight() (T, bool) + // Returns the element at the given index. + // Returns false if the index is out of bounds. + // The leftmost element is at index 0. + Index(int) (T, bool) + // Returns the number of elements in the deque. + Len() int + // Returns the elements in the deque from left to right. + List() []T +} + +// Returns a new unbounded deque with the given initial slice size. +// Note that the returned deque is always empty -- [initSize] is just +// a hint to prevent unnecessary resizing. 
+func NewUnboundedDeque[T any](initSize int) Deque[T] { + if initSize < 2 { + initSize = defaultInitSize + } + return &unboundedSliceDeque[T]{ + // Note that [initSize] must be >= 2 to satisfy invariants (1) and (2). + data: make([]T, initSize), + right: 1, + } +} + +// Invariants after each function call and before the first call: +// (1) The next element pushed left will be placed at data[left] +// (2) The next element pushed right will be placed at data[right] +// (3) There are [size] elements in the deque. +type unboundedSliceDeque[T any] struct { + size, left, right int + data []T +} + +func (b *unboundedSliceDeque[T]) PushRight(elt T) bool { + // Invariant (2) says it's safe to place the element without resizing. + b.data[b.right] = elt + b.size++ + b.right++ + b.right %= len(b.data) + + b.resize() + return true +} + +func (b *unboundedSliceDeque[T]) PushLeft(elt T) bool { + // Invariant (1) says it's safe to place the element without resizing. + b.data[b.left] = elt + b.size++ + b.left-- + if b.left < 0 { + b.left = len(b.data) - 1 // Wrap around + } + + b.resize() + return true +} + +func (b *unboundedSliceDeque[T]) PopLeft() (T, bool) { + if b.size == 0 { + return utils.Zero[T](), false + } + idx := b.leftmostEltIdx() + elt := b.data[idx] + // Zero out to prevent memory leak. + b.data[idx] = utils.Zero[T]() + b.size-- + b.left++ + b.left %= len(b.data) + return elt, true +} + +func (b *unboundedSliceDeque[T]) PeekLeft() (T, bool) { + if b.size == 0 { + return utils.Zero[T](), false + } + idx := b.leftmostEltIdx() + return b.data[idx], true +} + +func (b *unboundedSliceDeque[T]) PopRight() (T, bool) { + if b.size == 0 { + return utils.Zero[T](), false + } + idx := b.rightmostEltIdx() + elt := b.data[idx] + // Zero out to prevent memory leak. 
+ b.data[idx] = utils.Zero[T]() + b.size-- + b.right-- + if b.right < 0 { + b.right = len(b.data) - 1 // Wrap around + } + return elt, true +} + +func (b *unboundedSliceDeque[T]) PeekRight() (T, bool) { + if b.size == 0 { + return utils.Zero[T](), false + } + idx := b.rightmostEltIdx() + return b.data[idx], true +} + +func (b *unboundedSliceDeque[T]) Index(idx int) (T, bool) { + if idx < 0 || idx >= b.size { + return utils.Zero[T](), false + } + leftmostIdx := b.leftmostEltIdx() + idx = (leftmostIdx + idx) % len(b.data) + return b.data[idx], true +} + +func (b *unboundedSliceDeque[T]) Len() int { + return b.size +} + +func (b *unboundedSliceDeque[T]) List() []T { + if b.size == 0 { + return nil + } + + list := make([]T, b.size) + leftmostIdx := b.leftmostEltIdx() + if numCopied := copy(list, b.data[leftmostIdx:]); numCopied < b.size { + // We copied all of the elements from the leftmost element index + // to the end of the underlying slice, but we still haven't copied + // all of the elements, so wrap around and copy the rest. 
+ copy(list[numCopied:], b.data[:b.right]) + } + return list +} + +func (b *unboundedSliceDeque[T]) leftmostEltIdx() int { + if b.left == len(b.data)-1 { // Wrap around case + return 0 + } + return b.left + 1 // Normal case +} + +func (b *unboundedSliceDeque[T]) rightmostEltIdx() int { + if b.right == 0 { + return len(b.data) - 1 // Wrap around case + } + return b.right - 1 // Normal case +} + +func (b *unboundedSliceDeque[T]) resize() { + if b.size != len(b.data) { + return + } + newData := make([]T, b.size*2) + leftmostIdx := b.leftmostEltIdx() + copy(newData, b.data[leftmostIdx:]) + numCopied := len(b.data) - leftmostIdx + copy(newData[numCopied:], b.data[:b.right]) + b.data = newData + b.left = len(b.data) - 1 + b.right = b.size +} diff --git a/avalanchego/utils/buffer/unbounded_deque_test.go b/avalanchego/utils/buffer/unbounded_deque_test.go new file mode 100644 index 00000000..dcbfbe1c --- /dev/null +++ b/avalanchego/utils/buffer/unbounded_deque_test.go @@ -0,0 +1,672 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package buffer + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) { + require := require.New(t) + + bIntf := NewUnboundedDeque[int](10) + b, ok := bIntf.(*unboundedSliceDeque[int]) + require.True(ok) + require.Empty(b.List()) + require.Equal(0, b.Len()) + _, ok = b.Index(0) + require.False(ok) + + b.PushLeft(1) + require.Equal(1, b.Len()) + require.Equal([]int{1}, b.List()) + got, ok := b.Index(0) + require.True(ok) + require.Equal(1, got) + _, ok = b.Index(1) + require.False(ok) + + got, ok = b.PopLeft() + require.Equal(0, b.Len()) + require.True(ok) + require.Equal(1, got) + _, ok = b.Index(0) + require.False(ok) + + b.PushLeft(1) + require.Equal(1, b.Len()) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PopRight() + require.Equal(0, b.Len()) + require.True(ok) + require.Equal(1, got) + require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) + + b.PushRight(1) + require.Equal(1, b.Len()) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PopRight() + require.Equal(0, b.Len()) + require.True(ok) + require.Equal(1, got) + require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) + + b.PushRight(1) + require.Equal(1, b.Len()) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PopLeft() + require.Equal(0, b.Len()) + require.True(ok) + require.Equal(1, got) + require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) + + b.PushLeft(1) + require.Equal(1, b.Len()) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + b.PushLeft(2) + require.Equal(2, b.Len()) + require.Equal([]int{2, 1}, b.List()) + + got, ok = b.PopLeft() + require.Equal(1, b.Len()) + require.True(ok) + require.Equal(2, got) + 
require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PopLeft() + require.Equal(0, b.Len()) + require.True(ok) + require.Equal(1, got) + require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) + + b.PushRight(1) + require.Equal(1, b.Len()) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + b.PushRight(2) + require.Equal(2, b.Len()) + require.Equal([]int{1, 2}, b.List()) + + got, ok = b.PopRight() + require.Equal(1, b.Len()) + require.True(ok) + require.Equal(2, got) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PopRight() + require.Equal(0, b.Len()) + require.True(ok) + require.Equal(1, got) + require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) + + b.PushLeft(1) + require.Equal(1, b.Len()) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + b.PushLeft(2) + require.Equal(2, b.Len()) + require.Equal([]int{2, 1}, b.List()) + + got, ok = b.PopRight() + require.Equal(1, b.Len()) + require.True(ok) + require.Equal(1, got) + require.Equal([]int{2}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(2, got) + + got, ok = b.PopLeft() + require.Equal(0, b.Len()) + require.True(ok) + require.Equal(2, got) + require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) + + b.PushRight(1) + require.Equal(1, b.Len()) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + b.PushLeft(2) + require.Equal(2, b.Len()) + require.Equal([]int{2, 1}, b.List()) + + got, ok = b.PopRight() + require.Equal(1, b.Len()) + require.True(ok) + require.Equal(1, got) + require.Equal([]int{2}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(2, got) + + got, ok = b.PopLeft() + require.Equal(0, b.Len()) + require.True(ok) + require.Equal(2, got) + 
require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) + + b.PushLeft(1) + require.Equal(1, b.Len()) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + b.PushRight(2) + require.Equal(2, b.Len()) + require.Equal([]int{1, 2}, b.List()) + + got, ok = b.PopLeft() + require.Equal(1, b.Len()) + require.True(ok) + require.Equal(1, got) + require.Equal([]int{2}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(2, got) + + got, ok = b.PopRight() + require.Equal(0, b.Len()) + require.True(ok) + require.Equal(2, got) + require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) +} + +// Cases we test: +// 1. [left] moves to the left (no wrap around). +// 2. [left] moves to the right (no wrap around). +// 3. [left] wrapping around to the left side. +// 4. [left] wrapping around to the right side. +// 5. Resize. +func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { + require := require.New(t) + + // Starts empty. 
+ bIntf := NewUnboundedDeque[int](2) + b, ok := bIntf.(*unboundedSliceDeque[int]) + require.True(ok) + require.Equal(0, bIntf.Len()) + require.Equal(2, len(b.data)) + require.Equal(0, b.left) + require.Equal(1, b.right) + require.Empty(b.List()) + // slice is [EMPTY] + + _, ok = b.PopLeft() + require.False(ok) + _, ok = b.PeekLeft() + require.False(ok) + _, ok = b.PeekRight() + require.False(ok) + + b.PushLeft(1) // slice is [1,EMPTY] + require.Equal(1, b.Len()) + require.Equal(2, len(b.data)) + require.Equal(1, b.left) + require.Equal(1, b.right) + require.Equal([]int{1}, b.List()) + + got, ok := b.PeekLeft() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(1, got) + + // This causes a resize + b.PushLeft(2) // slice is [2,1,EMPTY,EMPTY] + require.Equal(2, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(3, b.left) + require.Equal(2, b.right) + require.Equal([]int{2, 1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(2, got) + got, ok = b.Index(1) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(2, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(1, got) + + // Tests left moving left with no wrap around. + b.PushLeft(3) // slice is [2,1,EMPTY,3] + require.Equal(3, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(2, b.left) + require.Equal(2, b.right) + require.Equal([]int{3, 2, 1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(3, got) + got, ok = b.Index(1) + require.True(ok) + require.Equal(2, got) + got, ok = b.Index(2) + require.True(ok) + require.Equal(1, got) + _, ok = b.Index(3) + require.False(ok) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(3, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(1, got) + + // Tests left moving right with no wrap around. 
+ got, ok = b.PopLeft() // slice is [2,1,EMPTY,EMPTY] + require.True(ok) + require.Equal(3, got) + require.Equal(2, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(3, b.left) + require.Equal(2, b.right) + require.Equal([]int{2, 1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(2, got) + got, ok = b.Index(1) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(2, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(1, got) + + // Tests left wrapping around to the left side. + got, ok = b.PopLeft() // slice is [EMPTY,1,EMPTY,EMPTY] + require.True(ok) + require.Equal(2, got) + require.Equal(1, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(0, b.left) + require.Equal(2, b.right) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(1, got) + + // Test left wrapping around to the right side. 
+ b.PushLeft(2) // slice is [2,1,EMPTY,EMPTY] + require.Equal(2, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(3, b.left) + require.Equal(2, b.right) + require.Equal([]int{2, 1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(2, got) + got, ok = b.Index(1) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(2, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PopLeft() // slice is [EMPTY,1,EMPTY,EMPTY] + require.True(ok) + require.Equal(2, got) + require.Equal(1, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(0, b.left) + require.Equal(2, b.right) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PopLeft() // slice is [EMPTY,EMPTY,EMPTY,EMPTY] + require.True(ok) + require.Equal(1, got) + require.Equal(0, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(1, b.left) + require.Equal(2, b.right) + require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) + + _, ok = b.PopLeft() + require.False(ok) + _, ok = b.PeekLeft() + require.False(ok) + _, ok = b.PeekRight() + require.False(ok) +} + +func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { + require := require.New(t) + + // Starts empty. 
+ bIntf := NewUnboundedDeque[int](2) + b, ok := bIntf.(*unboundedSliceDeque[int]) + require.True(ok) + require.Equal(0, bIntf.Len()) + require.Equal(2, len(b.data)) + require.Equal(0, b.left) + require.Equal(1, b.right) + require.Empty(b.List()) + // slice is [EMPTY] + + _, ok = b.PopRight() + require.False(ok) + _, ok = b.PeekLeft() + require.False(ok) + _, ok = b.PeekRight() + require.False(ok) + + b.PushRight(1) // slice is [1,EMPTY] + require.Equal(1, b.Len()) + require.Equal(2, len(b.data)) + require.Equal(0, b.left) + require.Equal(0, b.right) + require.Equal([]int{1}, b.List()) + got, ok := b.Index(0) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(1, got) + + // This causes a resize + b.PushRight(2) // slice is [1,2,EMPTY,EMPTY] + require.Equal(2, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(3, b.left) + require.Equal(2, b.right) + require.Equal([]int{1, 2}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + got, ok = b.Index(1) + require.True(ok) + require.Equal(2, got) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(2, got) + + // Tests right moving right with no wrap around + b.PushRight(3) // slice is [1,2,3,EMPTY] + require.Equal(3, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(3, b.left) + require.Equal(3, b.right) + require.Equal([]int{1, 2, 3}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + got, ok = b.Index(1) + require.True(ok) + require.Equal(2, got) + got, ok = b.Index(2) + require.True(ok) + require.Equal(3, got) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(3, got) + + // Tests right moving left with no wrap around + got, ok = b.PopRight() // slice is [1,2,EMPTY,EMPTY] 
+ require.True(ok) + require.Equal(3, got) + require.Equal(2, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(3, b.left) + require.Equal(2, b.right) + require.Equal([]int{1, 2}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + got, ok = b.Index(1) + require.True(ok) + require.Equal(2, got) + _, ok = b.Index(2) + require.False(ok) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(2, got) + + got, ok = b.PopRight() // slice is [1,EMPTY,EMPTY,EMPTY] + require.True(ok) + require.Equal(2, got) + require.Equal(1, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(3, b.left) + require.Equal(1, b.right) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + _, ok = b.Index(1) + require.False(ok) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PopRight() // slice is [EMPTY,EMPTY,EMPTY,EMPTY] + require.True(ok) + require.Equal(1, got) + require.Equal(0, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(3, b.left) + require.Equal(0, b.right) + require.Empty(b.List()) + require.Equal(0, b.Len()) + _, ok = b.Index(0) + require.False(ok) + + _, ok = b.PeekLeft() + require.False(ok) + _, ok = b.PeekRight() + require.False(ok) + _, ok = b.PopRight() + require.False(ok) + + b.PushLeft(1) // slice is [EMPTY,EMPTY,EMPTY,1] + require.Equal(1, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(2, b.left) + require.Equal(0, b.right) + require.Equal([]int{1}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(1, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(1, got) + + // Test right wrapping around to the right + got, ok = b.PopRight() // slice is [EMPTY,EMPTY,EMPTY,EMPTY] + 
require.True(ok) + require.Equal(1, got) + require.Equal(0, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(2, b.left) + require.Equal(3, b.right) + require.Empty(b.List()) + require.Equal(0, b.Len()) + _, ok = b.Index(0) + require.False(ok) + + _, ok = b.PeekLeft() + require.False(ok) + + _, ok = b.PeekRight() + require.False(ok) + + // Tests right wrapping around to the left + b.PushRight(2) // slice is [EMPTY,EMPTY,EMPTY,2] + require.Equal(1, b.Len()) + require.Equal(4, len(b.data)) + require.Equal(2, b.left) + require.Equal(0, b.right) + require.Equal([]int{2}, b.List()) + got, ok = b.Index(0) + require.True(ok) + require.Equal(2, got) + + got, ok = b.PeekLeft() + require.True(ok) + require.Equal(2, got) + + got, ok = b.PeekRight() + require.True(ok) + require.Equal(2, got) + + got, ok = b.PopRight() // slice is [EMPTY,EMPTY,EMPTY,EMPTY] + require.True(ok) + require.Equal(2, got) + require.Empty(b.List()) + _, ok = b.Index(0) + require.False(ok) + + _, ok = b.PeekLeft() + require.False(ok) + _, ok = b.PeekRight() + require.False(ok) + _, ok = b.PopRight() + require.False(ok) +} + +func FuzzUnboundedSliceDeque(f *testing.F) { + f.Fuzz( + func(t *testing.T, initSize uint, input []byte) { + require := require.New(t) + b := NewUnboundedDeque[byte](int(initSize)) + for i, n := range input { + b.PushRight(n) + gotIndex, ok := b.Index(i) + require.True(ok) + require.Equal(n, gotIndex) + } + + list := b.List() + require.Equal(len(input), len(list)) + for i, n := range input { + require.Equal(n, list[i]) + } + + for i := 0; i < len(input); i++ { + _, _ = b.PopLeft() + list = b.List() + if i == len(input)-1 { + require.Empty(list) + _, ok := b.Index(0) + require.False(ok) + } else { + require.Equal(input[i+1:], list) + got, ok := b.Index(0) + require.True(ok) + require.Equal(input[i+1], got) + } + } + }, + ) +} diff --git a/avalanchego/utils/buffer/unbounded_queue.go b/avalanchego/utils/buffer/unbounded_queue.go deleted file mode 100644 index ee578cbe..00000000 
--- a/avalanchego/utils/buffer/unbounded_queue.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package buffer - -import "github.com/ava-labs/avalanchego/utils" - -const defaultInitSize = 32 - -// An unbounded queue. -// Not safe for concurrent access. -type UnboundedQueue[T any] interface { - Enqueue(T) - // Returns false if the queue is empty. - Dequeue() (T, bool) - // Returns the oldest element without removing it. - // Returns false if the queue is empty. - PeekHead() (T, bool) - // Returns the newest without removing it. - // Returns false if the queue is empty. - PeekTail() (T, bool) - Len() int -} - -// Returns a new unbounded queue with the given initial slice size. -// Note that the returned queue is always empty -- [initSize] is just -// a hint to prevent unnecessary resizing. -func NewUnboundedSliceQueue[T any](initSize int) UnboundedQueue[T] { - if initSize <= 0 { - initSize = defaultInitSize - } - return &unboundedSliceQueue[T]{ - // Note that [initSize] must be > 0 to satisfy invariant (5). - data: make([]T, initSize), - } -} - -// Invariants after each function call and before the first call: -// (1) If head == tail then the queue is empty -// (2) If head < tail then the queue is data[head:tail] -// (3) If head > tail then the queue is data[head:len(data)] + data[0:tail] -// (4) The next element to be dequeued is data[head] -// (5) The next element will be enqueued at data[tail] -// (6) There are [size] elements in the queue. -type unboundedSliceQueue[T any] struct { - size, head, tail int - data []T -} - -func (b *unboundedSliceQueue[T]) Enqueue(elt T) { - // Invariant (5) says it's safe to place the element without resizing. - b.data[b.tail] = elt - b.size++ - b.tail++ - b.tail %= len(b.data) - - if b.head != b.tail { - return - } - // Invariant (1) says if the head and the tail are equal then the queue is empty. 
- // It isn't -- we just enqueued an element -- so we need to resize to honor invariant (1). - newData := make([]T, b.size*2) - copy(newData, b.data[b.head:]) - numCopied := len(b.data) - b.head - copy(newData[numCopied:], b.data[:b.tail]) - b.data = newData - b.head = 0 - b.tail = b.size -} - -func (b *unboundedSliceQueue[T]) Dequeue() (T, bool) { - if b.size == 0 { - return utils.Zero[T](), false - } - elt := b.data[b.head] - // Zero out to prevent memory leak. - b.data[b.head] = utils.Zero[T]() - b.size-- - b.head++ - b.head %= len(b.data) - return elt, true -} - -func (b *unboundedSliceQueue[T]) PeekHead() (T, bool) { - if b.size == 0 { - return utils.Zero[T](), false - } - return b.data[b.head], true -} - -func (b *unboundedSliceQueue[T]) PeekTail() (T, bool) { - if b.size == 0 { - return utils.Zero[T](), false - } - if b.tail == 0 { - return b.data[len(b.data)-1], true - } - return b.data[b.tail-1], true -} - -func (b *unboundedSliceQueue[T]) Len() int { - return b.size -} diff --git a/avalanchego/utils/buffer/unbounded_queue_test.go b/avalanchego/utils/buffer/unbounded_queue_test.go deleted file mode 100644 index 4560b899..00000000 --- a/avalanchego/utils/buffer/unbounded_queue_test.go +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package buffer - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestUnboundedSliceQueue(t *testing.T) { - require := require.New(t) - - // Starts empty. 
- bIntf := NewUnboundedSliceQueue[int](1) - b, ok := bIntf.(*unboundedSliceQueue[int]) - require.True(ok) - require.Equal(0, bIntf.Len()) - require.Equal(1, len(b.data)) - require.Equal(0, b.head) - require.Equal(0, b.tail) - // queue is [EMPTY] - - _, ok = b.Dequeue() - require.False(ok) - _, ok = b.PeekHead() - require.False(ok) - _, ok = b.PeekTail() - require.False(ok) - - // This causes a resize - b.Enqueue(1) - require.Equal(1, b.Len()) - require.Equal(2, len(b.data)) - require.Equal(0, b.head) - require.Equal(1, b.tail) - // queue is [1,EMPTY] - - got, ok := b.PeekHead() - require.True(ok) - require.Equal(1, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(1, got) - - // This causes a resize - b.Enqueue(2) - require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(0, b.head) - require.Equal(2, b.tail) - // queue is [1,2,EMPTY,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(1, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(2, got) - - got, ok = b.Dequeue() - require.True(ok) - require.Equal(1, got) - require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(1, b.head) - require.Equal(2, b.tail) - // queue is [EMPTY,2,EMPTY,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(2, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(2, got) - - got, ok = b.Dequeue() - require.True(ok) - require.Equal(2, got) - require.Equal(0, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(2, b.head) - require.Equal(2, b.tail) - // queue is [EMPTY,EMPTY,EMPTY,EMPTY] - - _, ok = b.Dequeue() - require.False(ok) - _, ok = b.PeekHead() - require.False(ok) - _, ok = b.PeekTail() - require.False(ok) - - b.Enqueue(3) - require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(2, b.head) - require.Equal(3, b.tail) - // queue is [EMPTY,EMPTY,3,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(3, got) - - got, ok = 
b.PeekTail() - require.True(ok) - require.Equal(3, got) - - b.Enqueue(4) - require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(2, b.head) - require.Equal(0, b.tail) - // queue is [EMPTY,EMPTY,3,4] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(3, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(4, got) - - // This tests tail wrap around. - b.Enqueue(5) - require.Equal(3, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(2, b.head) - require.Equal(1, b.tail) - // queue is [5,EMPTY,3,4] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(3, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(5, got) - - got, ok = b.Dequeue() - require.True(ok) - require.Equal(3, got) - require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(3, b.head) - require.Equal(1, b.tail) - // queue is [5,EMPTY,EMPTY,4] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(4, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(5, got) - - // This tests head wrap around. 
- got, ok = b.Dequeue() - require.True(ok) - require.Equal(4, got) - require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(0, b.head) - require.Equal(1, b.tail) - // queue is [5,EMPTY,EMPTY,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(5, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(5, got) - - got, ok = b.Dequeue() - require.True(ok) - require.Equal(5, got) - require.Equal(0, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(1, b.head) - require.Equal(1, b.tail) - // queue is [EMPTY,EMPTY,EMPTY,EMPTY] - - _, ok = b.Dequeue() - require.False(ok) - _, ok = b.PeekHead() - require.False(ok) - _, ok = b.PeekTail() - require.False(ok) - - b.Enqueue(6) - require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(1, b.head) - require.Equal(2, b.tail) - // queue is [EMPTY,6,EMPTY,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(6, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(6, got) - - b.Enqueue(7) - require.Equal(2, b.Len()) - require.Equal(1, b.head) - require.Equal(3, b.tail) - // queue is [EMPTY,6,7,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(6, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(7, got) - - b.Enqueue(8) - require.Equal(3, b.Len()) - require.Equal(4, len(b.data)) - require.Equal(1, b.head) - require.Equal(0, b.tail) - // queue is [EMPTY,6,7,8] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(6, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(8, got) - - // This causes a resize - b.Enqueue(9) - require.Equal(4, b.Len()) - require.Equal(8, len(b.data)) - require.Equal(0, b.head) - require.Equal(4, b.tail) - // queue is [6,7,8,9,EMPTY,EMPTY,EMPTY,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(6, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(9, got) - - got, ok = b.Dequeue() - require.True(ok) - require.Equal(6, 
got) - // queue is [EMPTY,7,8,9,EMPTY,EMPTY,EMPTY,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(7, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(9, got) - - got, ok = b.Dequeue() - require.True(ok) - require.Equal(7, got) - // queue is [EMPTY,EMPTY,8,9,EMPTY,EMPTY,EMPTY,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(8, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(9, got) - - got, ok = b.Dequeue() - require.True(ok) - require.Equal(8, got) - // queue is [EMPTY,EMPTY,EMPTY,9,EMPTY,EMPTY,EMPTY,EMPTY] - - got, ok = b.PeekHead() - require.True(ok) - require.Equal(9, got) - - got, ok = b.PeekTail() - require.True(ok) - require.Equal(9, got) - - got, ok = b.Dequeue() - require.True(ok) - require.Equal(9, got) - require.Equal(0, b.Len()) - require.Equal(8, len(b.data)) - require.Equal(4, b.head) - require.Equal(4, b.tail) - // queue is [EMPTY,EMPTY,EMPTY,EMPTY,EMPTY,EMPTY,EMPTY,EMPTY] - - _, ok = b.PeekHead() - require.False(ok) - _, ok = b.PeekTail() - require.False(ok) -} diff --git a/avalanchego/utils/bytes.go b/avalanchego/utils/bytes.go index 17ae9686..31baeeb1 100644 --- a/avalanchego/utils/bytes.go +++ b/avalanchego/utils/bytes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils @@ -7,18 +7,6 @@ import ( "crypto/rand" ) -// CopyBytes returns a copy of the provided byte slice. If nil is provided, nil -// will be returned. 
-func CopyBytes(b []byte) []byte { - if b == nil { - return nil - } - - cb := make([]byte, len(b)) - copy(cb, b) - return cb -} - // RandomBytes returns a slice of n random bytes // Intended for use in testing func RandomBytes(n int) []byte { diff --git a/avalanchego/utils/bytes_test.go b/avalanchego/utils/bytes_test.go deleted file mode 100644 index fc516061..00000000 --- a/avalanchego/utils/bytes_test.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package utils - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestCopyBytesNil(t *testing.T) { - result := CopyBytes(nil) - require.Nil(t, result, "CopyBytes(nil) should have returned nil") -} - -func TestCopyBytes(t *testing.T) { - input := []byte{1} - result := CopyBytes(input) - require.Equal(t, input, result, "CopyBytes should have returned equal bytes") - - input[0] = 0 - require.NotEqual(t, input, result, "CopyBytes should have returned independent bytes") -} diff --git a/avalanchego/utils/cb58/cb58.go b/avalanchego/utils/cb58/cb58.go index 8be51e65..23cf9c49 100644 --- a/avalanchego/utils/cb58/cb58.go +++ b/avalanchego/utils/cb58/cb58.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package cb58 diff --git a/avalanchego/utils/cb58/cb58_test.go b/avalanchego/utils/cb58/cb58_test.go index ac0c2dca..59710b62 100644 --- a/avalanchego/utils/cb58/cb58_test.go +++ b/avalanchego/utils/cb58/cb58_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package cb58 @@ -60,3 +60,19 @@ func TestEncodeDecode(t *testing.T) { } } } + +func FuzzEncodeDecode(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + require := require.New(t) + + // Encode bytes to string + dataStr, err := Encode(data) + require.NoError(err) + + // Decode string to bytes + gotData, err := Decode(dataStr) + require.NoError(err) + + require.Equal(data, gotData) + }) +} diff --git a/avalanchego/utils/compare/compare.go b/avalanchego/utils/compare/compare.go new file mode 100644 index 00000000..13ec52f3 --- /dev/null +++ b/avalanchego/utils/compare/compare.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package compare + +// Returns true iff the slices have the same elements, regardless of order. +func UnsortedEquals[T comparable](a, b []T) bool { + if len(a) != len(b) { + return false + } + m := make(map[T]int, len(a)) + for _, v := range a { + m[v]++ + } + for _, v := range b { + switch count := m[v]; count { + case 0: + // There were more instances of [v] in [b] than [a]. + return false + case 1: + delete(m, v) + default: + m[v] = count - 1 + } + } + return len(m) == 0 +} diff --git a/avalanchego/utils/compare/compare_test.go b/avalanchego/utils/compare/compare_test.go new file mode 100644 index 00000000..e46bc838 --- /dev/null +++ b/avalanchego/utils/compare/compare_test.go @@ -0,0 +1,26 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package compare + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUnsortedEquals(t *testing.T) { + require := require.New(t) + + require.True(UnsortedEquals([]int{}, []int{})) + require.True(UnsortedEquals(nil, []int{})) + require.True(UnsortedEquals([]int{}, nil)) + require.False(UnsortedEquals([]int{1}, nil)) + require.False(UnsortedEquals(nil, []int{1})) + require.True(UnsortedEquals([]int{1}, []int{1})) + require.False(UnsortedEquals([]int{1, 2}, []int{})) + require.False(UnsortedEquals([]int{1, 2}, []int{1})) + require.False(UnsortedEquals([]int{1}, []int{1, 2})) + require.True(UnsortedEquals([]int{2, 1}, []int{1, 2})) + require.True(UnsortedEquals([]int{1, 2}, []int{2, 1})) +} diff --git a/avalanchego/utils/compression/compressor.go b/avalanchego/utils/compression/compressor.go index 5cc4d95b..f0848357 100644 --- a/avalanchego/utils/compression/compressor.go +++ b/avalanchego/utils/compression/compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package compression diff --git a/avalanchego/utils/compression/compressor_test.go b/avalanchego/utils/compression/compressor_test.go new file mode 100644 index 00000000..341fb4a2 --- /dev/null +++ b/avalanchego/utils/compression/compressor_test.go @@ -0,0 +1,248 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package compression + +import ( + "fmt" + "math" + "runtime" + "testing" + + _ "embed" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/units" +) + +const maxMessageSize = 2 * units.MiB // Max message size. Can't import due to cycle. 
+ +var ( + newCompressorFuncs = map[Type]func(maxSize int64) (Compressor, error){ + TypeNone: func(int64) (Compressor, error) { //nolint:unparam // an error is needed to be returned to compile + return NewNoCompressor(), nil + }, + TypeGzip: NewGzipCompressor, + TypeZstd: NewZstdCompressor, + } + + //go:embed gzip_zip_bomb.bin + gzipZipBomb []byte + + //go:embed zstd_zip_bomb.bin + zstdZipBomb []byte + + zipBombs = map[Type][]byte{ + TypeGzip: gzipZipBomb, + TypeZstd: zstdZipBomb, + } +) + +func TestDecompressZipBombs(t *testing.T) { + for compressionType, zipBomb := range zipBombs { + // Make sure that the hardcoded zip bomb would be a valid message. + require.Less(t, len(zipBomb), maxMessageSize) + + newCompressorFunc := newCompressorFuncs[compressionType] + + t.Run(compressionType.String(), func(t *testing.T) { + compressor, err := newCompressorFunc(maxMessageSize) + require.NoError(t, err) + + var ( + beforeDecompressionStats runtime.MemStats + afterDecompressionStats runtime.MemStats + ) + runtime.ReadMemStats(&beforeDecompressionStats) + _, err = compressor.Decompress(zipBomb) + runtime.ReadMemStats(&afterDecompressionStats) + + require.ErrorIs(t, err, ErrDecompressedMsgTooLarge) + + // Make sure that we didn't allocate significantly more memory than + // the max message size. 
+ bytesAllocatedDuringDecompression := afterDecompressionStats.TotalAlloc - beforeDecompressionStats.TotalAlloc + require.Less(t, bytesAllocatedDuringDecompression, uint64(10*maxMessageSize)) + }) + } +} + +func TestCompressDecompress(t *testing.T) { + for compressionType, newCompressorFunc := range newCompressorFuncs { + t.Run(compressionType.String(), func(t *testing.T) { + data := utils.RandomBytes(4096) + data2 := utils.RandomBytes(4096) + + compressor, err := newCompressorFunc(maxMessageSize) + require.NoError(t, err) + + dataCompressed, err := compressor.Compress(data) + require.NoError(t, err) + + data2Compressed, err := compressor.Compress(data2) + require.NoError(t, err) + + dataDecompressed, err := compressor.Decompress(dataCompressed) + require.NoError(t, err) + require.EqualValues(t, data, dataDecompressed) + + data2Decompressed, err := compressor.Decompress(data2Compressed) + require.NoError(t, err) + require.EqualValues(t, data2, data2Decompressed) + + dataDecompressed, err = compressor.Decompress(dataCompressed) + require.NoError(t, err) + require.EqualValues(t, data, dataDecompressed) + + maxMessage := utils.RandomBytes(maxMessageSize) + maxMessageCompressed, err := compressor.Compress(maxMessage) + require.NoError(t, err) + + maxMessageDecompressed, err := compressor.Decompress(maxMessageCompressed) + require.NoError(t, err) + + require.EqualValues(t, maxMessage, maxMessageDecompressed) + }) + } +} + +func TestSizeLimiting(t *testing.T) { + for compressionType, compressorFunc := range newCompressorFuncs { + if compressionType == TypeNone { + continue + } + t.Run(compressionType.String(), func(t *testing.T) { + compressor, err := compressorFunc(maxMessageSize) + require.NoError(t, err) + + data := make([]byte, maxMessageSize+1) + _, err = compressor.Compress(data) // should be too large + require.Error(t, err) + + compressor2, err := compressorFunc(2 * maxMessageSize) + require.NoError(t, err) + + dataCompressed, err := compressor2.Compress(data) + 
require.NoError(t, err) + + _, err = compressor.Decompress(dataCompressed) // should be too large + require.Error(t, err) + }) + } +} + +// Attempts to create a compressor with math.MaxInt64 +// which leads to undefined decompress behavior due to integer overflow +// in limit reader creation. +func TestNewCompressorWithInvalidLimit(t *testing.T) { + for compressionType, compressorFunc := range newCompressorFuncs { + if compressionType == TypeNone { + continue + } + t.Run(compressionType.String(), func(t *testing.T) { + require := require.New(t) + _, err := compressorFunc(math.MaxInt64) + require.ErrorIs(err, ErrInvalidMaxSizeCompressor) + }) + } +} + +func FuzzGzipCompressor(f *testing.F) { + fuzzHelper(f, TypeGzip) +} + +func FuzzZstdCompressor(f *testing.F) { + fuzzHelper(f, TypeZstd) +} + +func fuzzHelper(f *testing.F, compressionType Type) { + var ( + compressor Compressor + err error + ) + switch compressionType { + case TypeGzip: + compressor, err = NewGzipCompressor(maxMessageSize) + require.NoError(f, err) + case TypeZstd: + compressor, err = NewZstdCompressor(maxMessageSize) + require.NoError(f, err) + default: + f.Fatal("Unknown compression type") + } + + f.Fuzz(func(t *testing.T, data []byte) { + require := require.New(t) + + if len(data) > maxMessageSize { + _, err := compressor.Compress(data) + require.Error(err) + } + + compressed, err := compressor.Compress(data) + require.NoError(err) + + decompressed, err := compressor.Decompress(compressed) + require.NoError(err) + + require.Equal(data, decompressed) + }) +} + +func BenchmarkCompress(b *testing.B) { + sizes := []int{ + 0, + 256, + units.KiB, + units.MiB, + maxMessageSize, + } + for compressionType, newCompressorFunc := range newCompressorFuncs { + if compressionType == TypeNone { + continue + } + for _, size := range sizes { + b.Run(fmt.Sprintf("%s_%d", compressionType, size), func(b *testing.B) { + bytes := utils.RandomBytes(size) + compressor, err := newCompressorFunc(maxMessageSize) + 
require.NoError(b, err) + for n := 0; n < b.N; n++ { + _, err := compressor.Compress(bytes) + require.NoError(b, err) + } + }) + } + } +} + +func BenchmarkDecompress(b *testing.B) { + sizes := []int{ + 0, + 256, + units.KiB, + units.MiB, + maxMessageSize, + } + for compressionType, newCompressorFunc := range newCompressorFuncs { + if compressionType == TypeNone { + continue + } + for _, size := range sizes { + b.Run(fmt.Sprintf("%s_%d", compressionType, size), func(b *testing.B) { + bytes := utils.RandomBytes(size) + compressor, err := newCompressorFunc(maxMessageSize) + require.NoError(b, err) + + compressedBytes, err := compressor.Compress(bytes) + require.NoError(b, err) + + for n := 0; n < b.N; n++ { + _, err := compressor.Decompress(compressedBytes) + require.NoError(b, err) + } + }) + } + } +} diff --git a/avalanchego/utils/compression/gzip_compressor.go b/avalanchego/utils/compression/gzip_compressor.go index 63b806c5..a17c46f6 100644 --- a/avalanchego/utils/compression/gzip_compressor.go +++ b/avalanchego/utils/compression/gzip_compressor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package compression @@ -11,74 +11,62 @@ import ( "io" "math" "sync" - - "github.com/ava-labs/avalanchego/utils" ) var ( - _ Compressor = &gzipCompressor{} + _ Compressor = (*gzipCompressor)(nil) - ErrInvalidMaxSizeGzipCompressor = errors.New("invalid gzip compressor max size") + ErrInvalidMaxSizeCompressor = errors.New("invalid gzip compressor max size") + ErrDecompressedMsgTooLarge = errors.New("decompressed msg too large") + ErrMsgTooLarge = errors.New("msg too large to be compressed") ) type gzipCompressor struct { - maxSize int64 - - lock sync.Mutex - - writeBuffer *bytes.Buffer - gzipWriter *gzip.Writer - - bytesReader *bytes.Reader - gzipReader *gzip.Reader + maxSize int64 + gzipWriterPool sync.Pool } // Compress [msg] and returns the compressed bytes. func (g *gzipCompressor) Compress(msg []byte) ([]byte, error) { if int64(len(msg)) > g.maxSize { - return nil, fmt.Errorf("msg length (%d) > maximum msg length (%d)", len(msg), g.maxSize) + return nil, fmt.Errorf("%w: (%d) > (%d)", ErrMsgTooLarge, len(msg), g.maxSize) } - g.lock.Lock() - defer g.lock.Unlock() + var writeBuffer bytes.Buffer + gzipWriter := g.gzipWriterPool.Get().(*gzip.Writer) + gzipWriter.Reset(&writeBuffer) + defer g.gzipWriterPool.Put(gzipWriter) - g.writeBuffer.Reset() - g.gzipWriter.Reset(g.writeBuffer) - if _, err := g.gzipWriter.Write(msg); err != nil { + if _, err := gzipWriter.Write(msg); err != nil { return nil, err } - if err := g.gzipWriter.Close(); err != nil { + if err := gzipWriter.Close(); err != nil { return nil, err } - - compressed := g.writeBuffer.Bytes() - compressedCopy := utils.CopyBytes(compressed) - return compressedCopy, nil + return writeBuffer.Bytes(), nil } // Decompress decompresses [msg]. 
func (g *gzipCompressor) Decompress(msg []byte) ([]byte, error) { - g.lock.Lock() - defer g.lock.Unlock() - - g.bytesReader.Reset(msg) - if err := g.gzipReader.Reset(g.bytesReader); err != nil { + bytesReader := bytes.NewReader(msg) + gzipReader, err := gzip.NewReader(bytesReader) + if err != nil { return nil, err } // We allow [io.LimitReader] to read up to [g.maxSize + 1] bytes, so that if // the decompressed payload is greater than the maximum size, this function // will return the appropriate error instead of an incomplete byte slice. - limitedReader := io.LimitReader(g.gzipReader, g.maxSize+1) + limitedReader := io.LimitReader(gzipReader, g.maxSize+1) decompressed, err := io.ReadAll(limitedReader) if err != nil { return nil, err } if int64(len(decompressed)) > g.maxSize { - return nil, fmt.Errorf("msg length > maximum msg length (%d)", g.maxSize) + return nil, fmt.Errorf("%w: (%d) > (%d)", ErrDecompressedMsgTooLarge, len(decompressed), g.maxSize) } - return decompressed, g.gzipReader.Close() + return decompressed, gzipReader.Close() } // NewGzipCompressor returns a new gzip Compressor that compresses @@ -88,17 +76,15 @@ func NewGzipCompressor(maxSize int64) (Compressor, error) { // if the max size + 1 overflows, "io.LimitReader" reads nothing // returning 0 byte for the decompress call // require max size 8Y1zu)1%J^>|z=ny(YsgO>YI+Qdt%Y%cnL);2EnS&5IxT;{LBKQ)T&EujS zoFh&yXHJ^QAzPiY2|3>FbDzF|&NtUHh=|Mg8bAERfB)aB|A z{BNImB|v}x0RjXF5FkK+009C75*8T$uSr%a_O-)jcd_&kAV7cs0RjXF5FkK+009E2 z3+!smf4dPlecW~W9s&di5FkK+009C72oNAZAa#MO;USNA5Bsobq#i!(BS3%v0RjXF z5FkK+009C7mdkZkDHkyyKd)xJ)XRr`1PBlyK!5-N0t5&UAV7dX|E+ajkB0%jqrp@h zAwYlt0RjXF5FkK+009C7k{7uBe}5(Lk9l~xc@#$o5FkK+009C72oNAZfB=Ey1)eLl zi{da~{P%SIo_zW+kN^P!1PBlyK!5-N0t5&UxO{kR#~%s&gzswd<-mm$(IiU2@oJafB*pk1PBlyK!5;&L7>BB$*1PBlyK!5-N0t5&UAVA=)bzhH%0l%ZcR2v~cfB*pk1PBlyK!5-N0tAv5 zxIJzsu?5V-%gv)ULVy4P0t5&UAV7cs0RjXFBrot>sa+H?VEp%V{hoaKFpvNN0t5&U 
zAV7cs0RjXF5V(AJZO6lapYUBxzI+%+fB*pk1PBlyK!5-N0t5*3zb09!hym^J*;0t5&UAV7cs0RjXF5J+C&YIw+F z3)qKEBl+@SAOQjd2oNAZfB*pk1PBlyuv}-AauEaa^J>;hK7ANSfB*pk1PBlyK!5-N z0t5)WweIWjFyMDIm}(;g2oNAZfB*pk1PBlyK!8B<0=LKQB({Kgc)5AhMhFlfK!5-N z0t5&UAV7csf#e0AE47Ov28{onuHTbS9|jU2K!5-N0t5&UAV7cs0Roo~ukCmk@DskP z$(IiU2@oJafB*pk1PBlyK!5;&{?{Zc6)~V4KD!HTga82o1PBlyK!5-N0t5&UNM2x9 zYgXf7!0F?zqm2+CK!5-N0t5&UAV7cs0RqVjTn!I-YytbQX(V4h3?x8+009C72oNAZ zfB*pk1eWWpQZ8aZeqPOb$)^tk2@oJafB*pk1PBlyK!5;&x7K|<9tQl522*W>009C7 z2oNAZfB*pk1PBmFUf}k)ox~O}4=*>5+6Vyx1PBlyK!5-N0t5&UAdtMkbES4s#DMYN z)Af7u>BB$*1PBlyK!5-N0t5&UAVA>q;k6wP1AfAHHTm*kAOQjd2oNAZfB*pk1PBly z(Eplbr6LBj!)JG)jSwI}fB*pk1PBlyK!5-N0?7;PYRzgq3^;wbx0Bcc z=Hcb$Q5zvZfB*pk1PBlyK!5-N0tAv5c&^kgiWo5dd%Av4K7ANSfB*pk1PBlyK!5-N z0t5(LKD@T$VZcxLt|nhT3?x8+009C72oNAZfB*pk1o~f-tW?B+cKGZrv=IUX2oNAZ zfB*pk1PBlyKp=U6U9DM-hXJRLyN)(OfB*pk1PBlyK!5-N0t5&oFK{(Hm{E)3?x8+009C72oNAZfB*pk1m0Tr z^>`TYI~q*25ds7V5FkK+009C72oNAZAbEk?<8~5Tz&yO%JZd8Z2oNAZfB*pk1PBly zK!8B<0?(D&MG*tWe^1x%$)^tk2@oJafB*pk1PBlyK!5;&%ZJx?JPh~=-__*Hhk*nL z5FkK+009C72oNAZfI$Cil9h@W&<>y7g*HNf009C72oNAZfB*pk1PCNAu&Xtz@i5@@ zao5pC2oNAZfB*pk1PBlyK!5;&yC`D7`0wfZJ^A!u zAOQjd2oNAZfB*pk1PBlyaQX1sj)wt1;k%lA`7n?G0RjXF5FkK+009C72oUIhO|nuE z1KQ!UyU<1m5FkK+009C72oNAZfB=Ey1$MP&H68|>KJGf&2mt~F2oNAZfB*pk1PBly zki5Xv@Q}wAun(I?^5w%o0t5&UAV7cs0RjXF5FkKcxy~x(A_nB=)vT9%`Y@0H0RjXF z5FkK+009C72oQK{-Phw`!0%`<)kX*qAV7cs0RjXF5FkK+0D$8zbBtQ3?x8+009C72oNAZfB*pk1TG(5 z+wm~qCwy0vFCPXHAV7cs0RjXF5FkK+009F1uSr%aVn925b{E5ds7V5FkK+009C72oNAZAbEl3 zO6{VE0pq`?>-Xf-hk*nL5FkK+009C72oNAZfWYO$YdanW{Dkjn^5w%o0t5&UAV7cs z0RjXF5FkLH|24@t1aQe9GXd?s& z5FkK+009C72oNAZfI#vBSHnXdTfjbS8p)Rr0|^iyK!5-N0t5&UAV7csf#o`@l#3XU zpI5V9^6A4s0t5&UAV7cs0RjXF5FkL{t#x0IhXKE%!BiU|K!5-N0t5&UAV7cs0RjY) z7q~rcC$R<0!^_R1HbQ^^0RjXF5FkK+009C72qZ7?T&Z0YF<|`nbp4)u`Y@0H0RjXF z5FkK+009C72oSh@cx}hSfS>SPO}=~>NPqwV0t5&UAV7cs0RjXF^uH!qsfYpX@Y!8x zBLoN#AV7cs0RjXF5FkK+K=J~+TC*At15O`z9c_dF0RjXF5FkK+009C72oOkK;A(it zV++`aO(Xg8VITnl1PBlyK!5-N0t5&UAh29#m2wdS^7CreOFn%VNPqwV0t5&UAV7cs 
z0RjXFytVG@@i5?bG?;251PBlyK!5-N0t5&UAV7dX@&dQV?IgBb)J6ypAV7cs z0RjXF5FkK+0D;0t5&UAV7cs0RjXF5J+BN zS8G<|VZiC*uA_|*AV7cs0RjXF5FkK+009EY3tSBkd29juuxTV;J`5y4fB*pk1PBly zK!5-N0tA-ptWqvwKz?4$dda5`0|^iyK!5-N0t5&UAV7csfw$IuJst-9js{b0ga82o z1PBlyK!5-N0t5&UNM7LfxShlnFb^*`kJ<}t(wJPbH}+;y}O0t5&UAV7cs z0RjXF5FkJxd4a3pA&)I!A2yBT%ZGsk2oNAZfB*pk1PBlyK!CtH>1PBly zK!5-N0t5&UAV45_fnBXxjfVlJkGqaGLVy4P0t5&UAV7cs0RjXFBrk9^Jmj$j?8By! zeEBet009C72oNAZfB*pk1PBmVuCq$HhynR|HR~mxJ`5y4fB*pk1PBlyK!5-N0tDV# z_w{%f@H-kzwGjdY2oNAZfB*pk1PBlyKp=U6+v9c;TfjWL+&pR{1PBlyK!5-N0t5&U zAV7dX@&eD5+C>oq#(z)O@5!eR0|^iyK!5-N0t5&UAV7csfy;;2c03ID3E$P^%ZGsk z2oNAZfB*pk1PBlyK!8C1Ym$|U7|;%%-Gw$nfB*pk1PBlyK!5-N0t5&oFR-gMtMM@4 z^l{hGMhFlfK!5-N0t5&UAV7csf#e0QhKD@1fPL6Bk}n?y5+Fc;009C72oNAZfB*pk z%XL;M7cn3|uV%gE(}#fs2oNAZfB*pk1PBlyK!Cto>%JZj1Aa$?sWw7@009C72oNAZ zfB*pk1PCNAaC_WNVhfmumzzgzga82o1PBlyK!5-N0t5&UNM7K%QoAT(!1(X!`aSvd zVITnl1PBlyK!5-N0t5&UAaMEc+Kz_-KjFKYeEBet009C72oNAZfB*pk1PBo5e@(Jd z5d+%cv%Ann2oNAZfB*pk1PBlyK!5;&g1CUB4%vJ`5y4fB*pk1PBlyK!5-N0t7A} zUfc07;3s@nlP@0z5+Fc;009C72oNAZfB*pk{jW(@Dq=u8e0CSw2mt~F2oNAZfB*pk z1PBlyki5XI)~v?EfYZlaM;jqPfB*pk1PBlyK!5-N0tAv5xEdbv*aG%p(@4I27)XEs z0RjXF5FkK+009C72rSoGrCh{-{Jfg=l20E75+Fc;009C72oNAZfB*pkZ>{@!JPi09 z4W`-%0RjXF5FkK+009C72oNBUyuj^oJBck|9$s!9wGjdY2oNAZfB*pk1PBlyKp=U6 z=SuCOhymljr|b9R(}#fs2oNAZfB*pk1PBlyK!CvI!)rSp2Ku4hc z2oNAZfB*pk1PBlyK!8B<0$0OB9$UaZY#Paz4+9AhAV7cs0RjXF5FkK+0D4~)=NHp7)XEs0RjXF5FkK+ z009C72)woK>+vw)cQlx4BLoN#AV7cs0RjXF5FkK+K=J~&$L%DxfO&YidDKP-5FkK+ z009C72oNAZfB=Ey1)eLliy{V$|DLYjlTRN85+Fc;009C72oNAZfB*pkmk+P)co^^# zzN^WX4+9AhAV7cs0RjXF5FkK+0D=D3Br6p$pdCKD3vGk|0RjXF5FkK+009C72oOkK zU{`Ba<6*$*`><&wUp@>ZK!5-N0t5&U zAV7cs0RjY;>#R~PVnBXg&3ehF4+9AhAV7cs0RjXF5FkK+0D-sGeLWrq{Eh}wZG->; z0t5&UAV7cs0RjXF5J+C&_PCwI7BCMlH;>u~0RjXF5FkK+009C72oNBUyufp%c2UHD z@!!++d-CbSKmr5^5FkK+009C72oNAZ;PT`P(fZx$zs*MmJK!5-N0t5&UAV7cs0RqVj+#a`+ z*aGI^<>pZvAwYlt0RjXF5FkK+009C7k{5Wc)Gmq`F#dbGeosDq7)XEs0RjXF5FkK+ z009C72wXnAw&P*IPx!7TUp@>ZK!5-N0t5&UAV7cs0Rja2Uz4m<#DI4A>@Kts0t5&U 
zAV7cs0RjXF5FkJxd4XN6S&fGQr;odiHbQ^^0RjXF5FkK+009C72qZ6XH9X|81?;0t5&UAV7cs0RjXF5J+C&xl+3*V!-(C>H0nS z^kE;n?~~G!$1NA2oNAZfB*pk1PBlyKw!DfD&-;uXfV}A2oNAZfB*pk1PBlyK!5;& zsErUHK!5-N0t5&UAV7cs0RqVjJXdNLMGP4KJzc*ipFRvEK!5-N0t5&UAV7cs0RjXr zA70z>FyJSASCcOv1`;4ZfB*pk1PBlyK!5-N0{yQ^Rw`mZJA8H*+6Vyx1PBlyK!5-N z0t5&UAdtMkuGXx^!+_JrT}K-sK!5-N0t5&UAV7cs0RjY)7q}W8^4J3QVbe&yd>BZ8 z009C72oNAZfB*pk1PCnGS*2XWfc(6g^^#8?1`;4ZfB*pk1PBlyK!5-N0&lJRdOQsH z9Sx@12mt~F2oNAZfB*pk1PBlyki5X{aXX1EU>;s>9<>ny1PBlyK!5-N0t5&UAV45_ zf#*u?qKEBZ8009C72oNAZfB*pk1PJuMCRwS70qyYF zU1%c&2oNAZfB*pk1PBlyK!8B<0=rtX8V>_bA9o#Xga82o1PBlyK!5-N0t5&UNM7J- zc*tW5*oRFc`SM{P0RjXF5FkK+009C72oNB!TxXSX5d-q`YSv3WeHci9009C72oNAZ zfB*pk1PHvf?(6X|;CD2bY9j;)5FkK+009C72oNAZfI#vBx5w=ywt#tfxp~w^2oNAZ zfB*pk1PBlyK!5;&Eo`WjSwI}fB*pk1PBlyK!5-N0?7+p4G(#20sF9NBws!ZBtU=w0RjXF z5FkK+009C7mg}riE@D7_Ud?*Rrw;=O5FkK+009C72oNAZfB=EF)_px52KwH*%we!_P(`SM{P0RjXF5FkK+009C7 z2oNC9|C(f_A_la>XLq5E5FkK+009C72oNAZfB*pk$qVdi&1yUhIDOo8v=IUX2oNAZ zfB*pk1PBlyKp=U6tKlJ!EnpuujpWORfdmK;AV7cs0RjXF5FkK+z;c~c%0&#w&#PH4 z`Sf8R0RjXF5FkK+009C72oNCf*1E69!+_t>V5*G}AV7cs0RjXF5FkK+009EY3)~*J zlh^|0;pOI08zDe|009C72oNAZfB*pk1dur!m5Lb94xinHHbQ^^0RjXF5FkK+009C72qZ7Ct2L|f zFyQoY*U?4@5FkK+009C72oNAZfB=Ey1+IpNJhp&+*ff$a9|jU2K!5-N0t5&UAV7cs z0Rqc)Rw)-TAV05Wz2wt}fdmK;AV7cs0RjXF5FkK+z+3CS9uEV4M}w(0LVy4P0t5&U zAV7cs0RjXFBrkA#+)iQ(n1`2}M{R@v0RjXF5FkK+009C72oOkK;JH$}C}P0)@9FwI z`Sf8R0RjXF5FkK+009C72oNA}`S99~hXFs~yPACYFpvNN0t5&UAV7cs0RjXF5a@qR zvQiNP+TpXi&_)OlAV7cs0RjXF5FkK+0Dgd{>h%9|jU2K!5-N0t5&UAV7cs0RsK6NmeRiKs$VP7upB`0t5&UAV7cs z0RjXF5Fn7ez^>M;#>0Tq$6ZGoAwYlt0RjXF5FkK+009C7k{7rd9`e`%_F>aVzI+%+ zfB*pk1PBlyK!5-N0t5&w*IA`p#DM&~n)Q-T9|jU2K!5-N0t5&UAV7cs0RnHW`+7VK z_#F+V+6Vyx1PBlyK!5-N0t5&UAdtMk?QuJaEnpsAZXUG}0t5&UAV7cs0RjXF5FkJx zd4cCj?V^YQmm$(IiU2@oJafB*pk1PBlyK!5;&L7>BB$*1PBlyK!5-N0t5&UAVA=)bzhH%0l%ZcR2v~cfB*pk1PBlyK!5-N z0tAv5xIJzsu?5V-%gv)ULVy4P0t5&UAV7cs0RjXFBrot>sa+H?VEp%V{hoaKFpvNN z0t5&UAV7cs0RjXF5V(AJZO6lapYUBxzI+%+fB*pk1PBlyK!5-N0t5*3zb09!hym^J z*;0t5&UAV7cs0RjXF5J+C& 
zYIw+F3)qKEBl+@SAOQjd2oNAZfB*pk1PBlyuv}-AauEaa^J>;hK7ANSfB*pk1PBly zK!5-N0t5)WweIWjFyMDIm}(;g2oNAZfB*pk1PBlyK!8B<0=LKQB({Kgc)5AhMhFlf zK!5-N0t5&UAV7csf#e0AE47Ov28{onuHTbS9|jU2K!5-N0t5&UAV7cs0Roo~ukCmk z@DskP$(IiU2@oJafB*pk1PBlyK!5;&{?{Zc6)~V4KD!HTga82o1PBlyK!5-N0t5&U zNM2x9YgXf7!0F?zqm2+CK!5-N0t5&UAV7cs0RqVjTn!I-YytbQX(V4h3?x8+009C7 z2oNAZfB*pk1eWWpQZ8aZeqPOb$)^tk2@oJafB*pk1PBlyK!5;&x7K|<9tQl522*W> z009C72oNAZfB*pk1PBmFUf}k)ox~O}4=*>5+6Vyx1PBlyK!5-N0t5&UAdtMkbES4s z#DMYN)Af7u>BB$*1PBlyK!5-N0t5&UAVA>q;k6wP1AfAHHTm*kAOQjd2oNAZfB*pk z1PBly(Eplbr6LBj!)JG)jSwI}fB*pk1PBlyK!5-N0?7;PYRzgq3^;wb zx0Bcc=Hcb$Q5zvZfB*pk1PBlyK!5-N0tAv5c&^kgiWo5dd%Av4K7ANSfB*pk1PBly zK!5-N0t5(LKD@T$VZcxLt|nhT3?x8+009C72oNAZfB*pk1o~f-tW?B+cKGZrv=IUX z2oNAZfB*pk1PBlyKp=U6U9DM-hXJRLyN)(OfB*pk1PBlyK!5-N0t5&oFK{(Hm{E)3?x8+009C72oNAZfB*pk z1m0Tr^>`TYI~q*25ds7V5FkK+009C72oNAZAbEk?<8~5Tz&yO%JZd8Z2oNAZfB*pk z1PBlyK!8B<0?(D&MG*tWe^1x%$)^tk2@oJafB*pk1PBlyK!5;&%ZJx?JPh~=-__*H zhk*nL5FkK+009C72oNAZfI$Cil9h@W&<>y7g*HNf009C72oNAZfB*pk1PCNAu&Xtz z@i5@@ao5pC2oNAZfB*pk1PBlyK!5;&yC`D7`0wfZ zJ^A!uAOQjd2oNAZfB*pk1PBlyaQX1sj)wt1;k%lA`7n?G0RjXF5FkK+009C72oUIh zO|nuE1KQ!UyU<1m5FkK+009C72oNAZfB=Ey1$MP&H68|>KJGf&2mt~F2oNAZfB*pk z1PBlyki5Xv@Q}wAun(I?^5w%o0t5&UAV7cs0RjXF5FkKcxy~x(A_nB=)vT9%`Y@0H z0RjXF5FkK+009C72oQK{-Phw`!0%`<)kX*qAV7cs0RjXF5FkK+0D$8zbBtQ3?x8+009C72oNAZfB*pk z1TG(5+wm~qCwy0vFCPXHAV7cs0RjXF5FkK+009F1uSr%aVn925b{E5ds7V5FkK+009C72oNAZ zAbEl3O6{VE0pq`?>-Xf-hk*nL5FkK+009C72oNAZfWYO$YdanW{Dkjn^5w%o0t5&U zAV7cs0RjXF5FkLH|24@t1aQe9G zXd?s&5FkK+009C72oNAZfI#vBSHnXdTfjbS8p)Rr0|^iyK!5-N0t5&UAV7csf#o`@ zl#3XUpI5V9^6A4s0t5&UAV7cs0RjXF5FkL{t#x0IhXKE%!BiU|K!5-N0t5&UAV7cs z0RjY)7q~rcC$R<0!^_R1HbQ^^0RjXF5FkK+009C72qZ7?T&Z0YF<|`nbp4)u`Y@0H z0RjXF5FkK+009C72oSh@cx}hSfS>SPO}=~>NPqwV0t5&UAV7cs0RjXF^uH!qsfYpX z@Y!8xBLoN#AV7cs0RjXF5FkK+K=J~+TC*At15O`z9c_dF0RjXF5FkK+009C72oOkK z;A(itV++`aO(Xg8VITnl1PBlyK!5-N0t5&UAh29#m2wdS^7CreOFn%VNPqwV0t5&U zAV7cs0RjXFytVG@@i5?bG?;251PBlyK!5-N0t5&UAV7dX@&dQV?IgBb)J6yp 
zAV7cs0RjXF5FkK+0D;0t5&UAV7cs0RjXF z5J+BNS8G<|VZiC*uA_|*AV7cs0RjXF5FkK+009EY3tSBkd29juuxTV;J`5y4fB*pk z1PBlyK!5-N0tA-ptWqvwKz?4$dda5`0|^iyK!5-N0t5&UAV7csfw$IuJst-9js{b0 zga82o1PBlyK!5-N0t5&UNM7LfxShlnFb^*`kJ<}t(wJPbH}+;y}O0t5&U zAV7cs0RjXF5FkJxd4a3pA&)I!A2yBT%ZGsk2oNAZfB*pk1PBlyK!CtH> z1PBlyK!5-N0t5&UAV45_fnBXxjfVlJkGqaGLVy4P0t5&UAV7cs0RjXFBrk9^Jmj$j z?8By!eEBet009C72oNAZfB*pk1PBmVuCq$HhynR|HR~mxJ`5y4fB*pk1PBlyK!5-N z0tDV#_w{%f@H-kzwGjdY2oNAZfB*pk1PBlyKp=U6+v9c;TfjWL+&pR{1PBlyK!5-N z0t5&UAV7dX@&eD5+C>oq#(z)O@5!eR0|^iyK!5-N0t5&UAV7csfy;;2c03ID3E$P^ z%ZGsk2oNAZfB*pk1PBlyK!8C1Ym$|U7|;%%-Gw$nfB*pk1PBlyK!5-N0t5&oFR-gM ztMM@4^l{hGMhFlfK!5-N0t5&UAV7csf#e0QhKD@1fPL6Bk}n?y5+Fc;009C72oNAZ zfB*pk%XL;M7cn3|uV%gE(}#fs2oNAZfB*pk1PBlyK!Cto>%JZj1Aa$?sWw7@009C7 z2oNAZfB*pk1PCNAaC_WNVhfmumzzgzga82o1PBlyK!5-N0t5&UNM7K%QoAT(!1(X! z`aSvdVITnl1PBlyK!5-N0t5&UAaMEc+Kz_-KjFKYeEBet009C72oNAZfB*pk1PBo5 ze@(Jd5d+%cv%Ann2oNAZfB*pk1PBlyK!5;&g1CUB4%vJ`5y4fB*pk1PBlyK!5-N z0t7A}Ufc07;3s@nlP@0z5+Fc;009C72oNAZfB*pk{jW(@Dq=u8e0CSw2mt~F2oNAZ zfB*pk1PBlyki5XI)~v?EfYZlaM;jqPfB*pk1PBlyK!5-N0tAv5xEdbv*aG%p(@4I2 z7)XEs0RjXF5FkK+009C72rSoGrCh{-{Jfg=l20E75+Fc;009C72oNAZfB*pkZ>{@! 
zJPi094W`-%0RjXF5FkK+009C72oNBUyuj^oJBck|9$s!9wGjdY2oNAZfB*pk1PBly zKp=U6=SuCOhymljr|b9R(}#fs2oNAZfB*pk1PBlyK!CvI!)rSp2Ku4hc2oNAZfB*pk1PBlyK!8B<0$0OB9$UaZY#Paz4+9AhAV7cs0RjXF5FkK+0D4~)=NHp7)XEs0RjXF z5FkK+009C72)woK>+vw)cQlx4BLoN#AV7cs0RjXF5FkK+K=J~&$L%DxfO&YidDKP- z5FkK+009C72oNAZfB=Ey1)eLliy{V$|DLYjlTRN85+Fc;009C72oNAZfB*pkmk+P) zco^^#zN^WX4+9AhAV7cs0RjXF5FkK+0D=D3Br6p$pdCKD3vGk|0RjXF5FkK+009C7 z2oOkKU{`Ba<6*$*`><&wUp@>ZK!5-N z0t5&UAV7cs0RjY;>#R~PVnBXg&3ehF4+9AhAV7cs0RjXF5FkK+0D-sGeLWrq{Eh}w zZG->;0t5&UAV7cs0RjXF5J+C&_PCwI7BCMlH;>u~0RjXF5FkK+009C72oNBUyufp% zc2UHD@!!++d-CbSKmr5^5FkK+009C72oNAZ;PT`P(fZx$zs*MmJK!5-N0t5&UAV7cs0RqVj z+#a`+*aGI^<>pZvAwYlt0RjXF5FkK+009C7k{5Wc)Gmq`F#dbGeosDq7)XEs0RjXF z5FkK+009C72wXnAw&P*IPx!7TUp@>ZK!5-N0t5&UAV7cs0Rja2Uz4m<#DI4A>@Kts z0t5&UAV7cs0RjXF5FkJxd4XN6S&fGQr;odiHbQ^^0RjXF5FkK+009C72qZ6XH9X|8 z1?;0t5&UAV7cs0RjXF5J+C&xl+3*V!-(C z>H0nS^kE;n?~~G!$1NA2oNAZfB*pk1PBlyKw!DfD&-;uXfV}A2oNAZfB*pk1PBlyK!5;&sErUHK!5-N0t5&UAV7cs0RqVjJXdNLMGP4KJzc*ipFRvEK!5-N0t5&UAV7cs z0RjXrA70z>FyJSASCcOv1`;4ZfB*pk1PBlyK!5-N0{yQ^Rw`mZJA8H*+6Vyx1PBly zK!5-N0t5&UAdtMkuGXx^!+_JrT}K-sK!5-N0t5&UAV7cs0RjY)7q}W8^4J3QVbe&y zd>BZ8009C72oNAZfB*pk1PCnGS*2XWfc(6g^^#8?1`;4ZfB*pk1PBlyK!5-N0&lJR zdOQsH9Sx@12mt~F2oNAZfB*pk1PBlyki5X{aXX1EU>;s>9<>ny1PBlyK!5-N0t5&U zAV45_f#*u?qKEBZ8009C72oNAZfB*pk1PJuMCRwS7 z0qyYFU1%c&2oNAZfB*pk1PBlyK!8B<0=rtX8V>_bA9o#Xga82o1PBlyK!5-N0t5&U zNM7J-c*tW5*oRFc`SM{P0RjXF5FkK+009C72oNB!TxXSX5d-q`YSv3WeHci9009C7 z2oNAZfB*pk1PHvf?(6X|;CD2bY9j;)5FkK+009C72oNAZfI#vBx5w=ywt#tfxp~w^ z2oNAZfB*pk1PBlyK!5;&Eo`WjSwI}fB*pk1PBlyK!5-N0?7+p4G(#20sF9NBws!ZBtU=w z0RjXF5FkK+009C7mg}riE@D7_Ud?*Rrw;=O5FkK+009C72oNAZfB=EF)_px52KwH*%we!_P(`SM{P0RjXF5FkK+ z009C72oNC9|C(f_A_la>XLq5E5FkK+009C72oNAZfB*pk$qVdi&1yUhIDOo8v=IUX z2oNAZfB*pk1PBlyKp=U6tKlJ!EnpuujpWORfdmK;AV7cs0RjXF5FkK+z;c~c%0&#w z&#PH4`Sf8R0RjXF5FkK+009C72oNCf*1E69!+_t>V5*G}AV7cs0RjXF5FkK+009EY z3)~*Jlh^|0;pOI08zDe|009C72oNAZfB*pk1dur!m5Lb94xinHHbQ^^0RjXF5FkK+009C72qZ7C 
zt2L|fFyQoY*U?4@5FkK+009C72oNAZfB=Ey1+IpNJhp&+*ff$a9|jU2K!5-N0t5&U zAV7cs0Rqc)Rw)-TAV05Wz2wt}fdmK;AV7cs0RjXF5FkK+z+3CS9uEV4M}w(0LVy4P z0t5&UAV7cs0RjXFBrkA#+)iQ(n1`2}M{R@v0RjXF5FkK+009C72oOkK;JH$}C}P0) z@9FwI`Sf8R0RjXF5FkK+009C72oNA}`S99~hXFs~yPACYFpvNN0t5&UAV7cs0RjXF z5a@qRvQiNP+TpXi&_)OlAV7cs0RjXF5FkK+0D z.maxSize { + return nil, fmt.Errorf("%w: (%d) > (%d)", ErrMsgTooLarge, len(msg), z.maxSize) + } + return zstd.Compress(nil, msg) +} + +func (z *zstdCompressor) Decompress(msg []byte) ([]byte, error) { + reader := zstd.NewReader(bytes.NewReader(msg)) + defer reader.Close() + + // We allow [io.LimitReader] to read up to [z.maxSize + 1] bytes, so that if + // the decompressed payload is greater than the maximum size, this function + // will return the appropriate error instead of an incomplete byte slice. + limitReader := io.LimitReader(reader, z.maxSize+1) + decompressed, err := io.ReadAll(limitReader) + if err != nil { + return nil, err + } + if int64(len(decompressed)) > z.maxSize { + return nil, fmt.Errorf("%w: (%d) > (%d)", ErrDecompressedMsgTooLarge, len(decompressed), z.maxSize) + } + return decompressed, nil +} diff --git a/avalanchego/utils/compression/zstd_zip_bomb.bin b/avalanchego/utils/compression/zstd_zip_bomb.bin new file mode 100644 index 0000000000000000000000000000000000000000..6669e181c6bc2e9b62338cda23b5efa4aa9ebdc5 GIT binary patch literal 2097138 zcmeIwu?>JA07TJ)2^@pYnmsIB)SkVJ0hgeA#U;PKy!M=5?khQxl6Uq|>)+-B1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ 
z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg 
zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz 
z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ 
z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= 
zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ 
z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg 
zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ z0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz z7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|Xg zfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_ z1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;= zV1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~0R|XgfB^;=V1NMz7+`<_1{h#~ V0R|XgfB^;=V1NMz7}#Z?Bnugj4T}H( literal 0 HcmV?d00001 diff --git a/avalanchego/utils/constants/aliases.go b/avalanchego/utils/constants/aliases.go index 8a76d111..dd94bd36 100644 --- a/avalanchego/utils/constants/aliases.go +++ b/avalanchego/utils/constants/aliases.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants diff --git a/avalanchego/utils/constants/application.go b/avalanchego/utils/constants/application.go index a32145a0..117b85d9 100644 --- a/avalanchego/utils/constants/application.go +++ b/avalanchego/utils/constants/application.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package constants diff --git a/avalanchego/utils/constants/network_ids.go b/avalanchego/utils/constants/network_ids.go index ee9650a4..4b5ef1cd 100644 --- a/avalanchego/utils/constants/network_ids.go +++ b/avalanchego/utils/constants/network_ids.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants diff --git a/avalanchego/utils/constants/network_ids_test.go b/avalanchego/utils/constants/network_ids_test.go index fda9f71b..13eb5bdd 100644 --- a/avalanchego/utils/constants/network_ids_test.go +++ b/avalanchego/utils/constants/network_ids_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants diff --git a/avalanchego/utils/constants/networking.go b/avalanchego/utils/constants/networking.go index 7046efd6..7290718c 100644 --- a/avalanchego/utils/constants/networking.go +++ b/avalanchego/utils/constants/networking.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants @@ -7,6 +7,7 @@ import ( "math" "time" + "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/units" ) @@ -25,4 +26,99 @@ const ( DefaultByteSliceCap = 128 MaxContainersLen = int(4 * DefaultMaxMessageSize / 5) + + // MinConnectedStakeBuffer is the safety buffer for calculation of MinConnectedStake. + // This increases the required stake percentage above alpha/k. 
Must be [0-1] + // 0 means MinConnectedStake = alpha/k, 1 means MinConnectedStake = 1 (fully connected) + MinConnectedStakeBuffer = .2 + + DefaultNetworkPeerListNumValidatorIPs = 15 + DefaultNetworkPeerListValidatorGossipSize = 20 + DefaultNetworkPeerListNonValidatorGossipSize = 0 + DefaultNetworkPeerListPeersGossipSize = 10 + DefaultNetworkPeerListGossipFreq = time.Minute + + // Inbound Connection Throttling + DefaultInboundConnUpgradeThrottlerCooldown = 10 * time.Second + DefaultInboundThrottlerMaxConnsPerSec = 256 + + // Outbound Connection Throttling + DefaultOutboundConnectionThrottlingRps = 50 + DefaultOutboundConnectionTimeout = 30 * time.Second + + // Timeouts + DefaultNetworkInitialTimeout = 5 * time.Second + DefaultNetworkMinimumTimeout = 2 * time.Second + DefaultNetworkMaximumTimeout = 10 * time.Second + DefaultNetworkMaximumInboundTimeout = 10 * time.Second + DefaultNetworkTimeoutHalflife = 5 * time.Minute + DefaultNetworkTimeoutCoefficient = 2 + DefaultNetworkReadHandshakeTimeout = 15 * time.Second + + DefaultNetworkCompressionEnabled = true // TODO remove when NetworkCompressionEnabledKey is removed + DefaultNetworkCompressionType = compression.TypeGzip + DefaultNetworkMaxClockDifference = time.Minute + DefaultNetworkAllowPrivateIPs = true + DefaultNetworkRequireValidatorToConnect = false + DefaultNetworkPeerReadBufferSize = 8 * units.KiB + DefaultNetworkPeerWriteBufferSize = 8 * units.KiB + + DefaultNetworkTCPProxyEnabled = false + + // The PROXY protocol specification recommends setting this value to be at + // least 3 seconds to cover a TCP retransmit. + // Ref: https://www.haproxy.org/download/2.3/doc/proxy-protocol.txt + // Specifying a timeout of 0 will actually result in a timeout of 200ms, but + // a timeout of 0 should generally not be provided. 
+ DefaultNetworkTCPProxyReadTimeout = 3 * time.Second + + // Benchlist + DefaultBenchlistFailThreshold = 10 + DefaultBenchlistDuration = 15 * time.Minute + DefaultBenchlistMinFailingDuration = 2*time.Minute + 30*time.Second + + // Router + DefaultConsensusGossipFrequency = 10 * time.Second + DefaultConsensusAppConcurrency = 2 + DefaultConsensusShutdownTimeout = 30 * time.Second + DefaultConsensusGossipAcceptedFrontierValidatorSize = 0 + DefaultConsensusGossipAcceptedFrontierNonValidatorSize = 0 + DefaultConsensusGossipAcceptedFrontierPeerSize = 15 + DefaultConsensusGossipOnAcceptValidatorSize = 0 + DefaultConsensusGossipOnAcceptNonValidatorSize = 0 + DefaultConsensusGossipOnAcceptPeerSize = 10 + DefaultAppGossipValidatorSize = 10 + DefaultAppGossipNonValidatorSize = 0 + DefaultAppGossipPeerSize = 0 + + // Inbound Throttling + DefaultInboundThrottlerAtLargeAllocSize = 6 * units.MiB + DefaultInboundThrottlerVdrAllocSize = 32 * units.MiB + DefaultInboundThrottlerNodeMaxAtLargeBytes = DefaultMaxMessageSize + DefaultInboundThrottlerMaxProcessingMsgsPerNode = 1024 + DefaultInboundThrottlerBandwidthRefillRate = 512 * units.KiB + DefaultInboundThrottlerBandwidthMaxBurstSize = DefaultMaxMessageSize + DefaultInboundThrottlerCPUMaxRecheckDelay = 5 * time.Second + DefaultInboundThrottlerDiskMaxRecheckDelay = 5 * time.Second + + // Outbound Throttling + DefaultOutboundThrottlerAtLargeAllocSize = 32 * units.MiB + DefaultOutboundThrottlerVdrAllocSize = 32 * units.MiB + DefaultOutboundThrottlerNodeMaxAtLargeBytes = DefaultMaxMessageSize + + // Network Health + DefaultHealthCheckAveragerHalflife = 10 * time.Second + + DefaultNetworkHealthMaxTimeSinceMsgSent = time.Minute + DefaultNetworkHealthMaxTimeSinceMsgReceived = time.Minute + DefaultNetworkHealthMaxPortionSendQueueFill = 0.9 + DefaultNetworkHealthMinPeers = 1 + DefaultNetworkHealthMaxSendFailRate = .9 + + // Metrics + DefaultUptimeMetricFreq = 30 * time.Second + + // Delays + DefaultNetworkInitialReconnectDelay = time.Second 
+ DefaultNetworkMaxReconnectDelay = time.Minute ) diff --git a/avalanchego/utils/constants/vm_ids.go b/avalanchego/utils/constants/vm_ids.go index 9955ba90..4fb887c4 100644 --- a/avalanchego/utils/constants/vm_ids.go +++ b/avalanchego/utils/constants/vm_ids.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package constants diff --git a/avalanchego/utils/context.go b/avalanchego/utils/context.go new file mode 100644 index 00000000..9ff30018 --- /dev/null +++ b/avalanchego/utils/context.go @@ -0,0 +1,35 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +import ( + "context" + "time" +) + +type detachedContext struct { + ctx context.Context +} + +func Detach(ctx context.Context) context.Context { + return &detachedContext{ + ctx: ctx, + } +} + +func (*detachedContext) Deadline() (time.Time, bool) { + return time.Time{}, false +} + +func (*detachedContext) Done() <-chan struct{} { + return nil +} + +func (*detachedContext) Err() error { + return nil +} + +func (c *detachedContext) Value(key any) any { + return c.ctx.Value(key) +} diff --git a/avalanchego/utils/crypto/bls/bls_benchmark_test.go b/avalanchego/utils/crypto/bls/bls_benchmark_test.go new file mode 100644 index 00000000..a4503260 --- /dev/null +++ b/avalanchego/utils/crypto/bls/bls_benchmark_test.go @@ -0,0 +1,88 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package bls + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils" +) + +var sizes = []int{ + 2, + 4, + 8, + 16, + 32, + 64, + 128, + 256, + 512, + 1024, + 2048, + 4096, +} + +func BenchmarkSign(b *testing.B) { + require := require.New(b) + + privateKey, err := NewSecretKey() + require.NoError(err) + for _, messageSize := range sizes { + b.Run(fmt.Sprintf("%d", messageSize), func(b *testing.B) { + message := utils.RandomBytes(messageSize) + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _ = Sign(privateKey, message) + } + }) + } +} + +func BenchmarkVerify(b *testing.B) { + require := require.New(b) + + privateKey, err := NewSecretKey() + require.NoError(err) + publicKey := PublicFromSecretKey(privateKey) + + for _, messageSize := range sizes { + b.Run(fmt.Sprintf("%d", messageSize), func(b *testing.B) { + message := utils.RandomBytes(messageSize) + signature := Sign(privateKey, message) + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + require.True(Verify(publicKey, signature, message)) + } + }) + } +} + +func BenchmarkAggregatePublicKeys(b *testing.B) { + keys := make([]*PublicKey, 4096) + for i := range keys { + privateKey, err := NewSecretKey() + require.NoError(b, err) + + keys[i] = PublicFromSecretKey(privateKey) + } + + for _, size := range sizes { + b.Run(fmt.Sprintf("%d", size), func(b *testing.B) { + require := require.New(b) + + for n := 0; n < b.N; n++ { + _, err := AggregatePublicKeys(keys[:size]) + require.NoError(err) + } + }) + } +} diff --git a/avalanchego/utils/crypto/bls/bls_test.go b/avalanchego/utils/crypto/bls/bls_test.go index 6c2f5a69..f3bb0500 100644 --- a/avalanchego/utils/crypto/bls/bls_test.go +++ b/avalanchego/utils/crypto/bls/bls_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bls @@ -232,7 +232,7 @@ func TestAggregation(t *testing.T) { return nil, sigs, msg }, - expectedPubKeyAggError: errNoPublicKeys, + expectedPubKeyAggError: ErrNoPublicKeys, expectedValid: false, }, { diff --git a/avalanchego/utils/crypto/bls/public.go b/avalanchego/utils/crypto/bls/public.go index 88ed660f..f17d6127 100644 --- a/avalanchego/utils/crypto/bls/public.go +++ b/avalanchego/utils/crypto/bls/public.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls @@ -12,9 +12,9 @@ import ( const PublicKeyLen = blst.BLST_P1_COMPRESS_BYTES var ( + ErrNoPublicKeys = errors.New("no public keys") errFailedPublicKeyDecompress = errors.New("couldn't decompress public key") errInvalidPublicKey = errors.New("invalid public key") - errNoPublicKeys = errors.New("no public keys") errFailedPublicKeyAggregation = errors.New("couldn't aggregate public keys") ) @@ -46,7 +46,7 @@ func PublicKeyFromBytes(pkBytes []byte) (*PublicKey, error) { // Invariant: all [pks] have been validated. func AggregatePublicKeys(pks []*PublicKey) (*PublicKey, error) { if len(pks) == 0 { - return nil, errNoPublicKeys + return nil, ErrNoPublicKeys } var agg AggregatePublicKey diff --git a/avalanchego/utils/crypto/bls/public_test.go b/avalanchego/utils/crypto/bls/public_test.go index c4b0b54c..02300295 100644 --- a/avalanchego/utils/crypto/bls/public_test.go +++ b/avalanchego/utils/crypto/bls/public_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package bls diff --git a/avalanchego/utils/crypto/bls/secret.go b/avalanchego/utils/crypto/bls/secret.go index 382f043b..a37961eb 100644 --- a/avalanchego/utils/crypto/bls/secret.go +++ b/avalanchego/utils/crypto/bls/secret.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls @@ -6,6 +6,7 @@ package bls import ( "crypto/rand" "errors" + "runtime" blst "github.com/supranational/blst/bindings/go" ) @@ -29,7 +30,12 @@ type SecretKey = blst.SecretKey func NewSecretKey() (*SecretKey, error) { var ikm [32]byte _, err := rand.Read(ikm[:]) - return blst.KeyGen(ikm[:]), err + if err != nil { + return nil, err + } + sk := blst.KeyGen(ikm[:]) + ikm = [32]byte{} // zero out the ikm + return sk, nil } // SecretKeyToBytes returns the big-endian format of the secret key. @@ -44,6 +50,9 @@ func SecretKeyFromBytes(skBytes []byte) (*SecretKey, error) { if sk == nil { return nil, errFailedSecretKeyDeserialize } + runtime.SetFinalizer(sk, func(sk *SecretKey) { + sk.Zeroize() + }) return sk, nil } diff --git a/avalanchego/utils/crypto/bls/secret_test.go b/avalanchego/utils/crypto/bls/secret_test.go index 0968f6b1..c01540ac 100644 --- a/avalanchego/utils/crypto/bls/secret_test.go +++ b/avalanchego/utils/crypto/bls/secret_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/avalanchego/utils/crypto/bls/signature.go b/avalanchego/utils/crypto/bls/signature.go index 42f1b500..7ee03aef 100644 --- a/avalanchego/utils/crypto/bls/signature.go +++ b/avalanchego/utils/crypto/bls/signature.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package bls diff --git a/avalanchego/utils/crypto/bls/signature_test.go b/avalanchego/utils/crypto/bls/signature_test.go index 97ad9837..caf613fc 100644 --- a/avalanchego/utils/crypto/bls/signature_test.go +++ b/avalanchego/utils/crypto/bls/signature_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package bls diff --git a/avalanchego/utils/crypto/crypto.go b/avalanchego/utils/crypto/crypto.go deleted file mode 100644 index a5713c5a..00000000 --- a/avalanchego/utils/crypto/crypto.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package crypto - -import ( - "github.com/ava-labs/avalanchego/ids" -) - -type Factory interface { - NewPrivateKey() (PrivateKey, error) - - ToPublicKey([]byte) (PublicKey, error) - ToPrivateKey([]byte) (PrivateKey, error) -} - -type RecoverableFactory interface { - Factory - - RecoverPublicKey(message, signature []byte) (PublicKey, error) - RecoverHashPublicKey(hash, signature []byte) (PublicKey, error) -} - -type PublicKey interface { - Verify(message, signature []byte) bool - VerifyHash(hash, signature []byte) bool - - Address() ids.ShortID - Bytes() []byte -} - -type PrivateKey interface { - PublicKey() PublicKey - - Sign(message []byte) ([]byte, error) - SignHash(hash []byte) ([]byte, error) - - Bytes() []byte -} diff --git a/avalanchego/utils/crypto/crypto_benchmark_test.go b/avalanchego/utils/crypto/crypto_benchmark_test.go deleted file mode 100644 index dd2d6376..00000000 --- a/avalanchego/utils/crypto/crypto_benchmark_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package crypto - -import ( - "testing" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/hashing" -) - -// NumVerifies is the number of verifications to run per operation -const NumVerifies = 1 - -// The different signature schemes -const ( - RSA = iota - RSAPSS - ED25519 - SECP256K1 -) - -var ( - hashes [][]byte - - keys [][]PublicKey - sigs [][][]byte -) - -func init() { - // Setup hashes: - bytes := ids.ID{} - for i := uint64(0); i < NumVerifies; i++ { - bytes[i%32]++ - hash := hashing.ComputeHash256(bytes[:]) - hashes = append(hashes, hash) - } - - // Setup signatures: - factories := []Factory{ - RSA: &FactoryRSA{}, - RSAPSS: &FactoryRSAPSS{}, - ED25519: &FactoryED25519{}, - SECP256K1: &FactorySECP256K1R{}, - } - for _, f := range factories { - fKeys := []PublicKey{} - fSigs := [][]byte{} - for i := uint64(0); i < NumVerifies; i++ { - privateKey, err := f.NewPrivateKey() - if err != nil { - panic(err) - } - - publicKey := privateKey.PublicKey() - sig, err := privateKey.SignHash(hashes[i]) - if err != nil { - panic(err) - } - - fKeys = append(fKeys, publicKey) - fSigs = append(fSigs, sig) - } - keys = append(keys, fKeys) - sigs = append(sigs, fSigs) - } -} - -func verify(algo int) { - for i := 0; i < NumVerifies; i++ { - if !keys[algo][i].VerifyHash(hashes[i], sigs[algo][i]) { - panic("Verification failed") - } - } -} - -// BenchmarkRSAVerify runs the benchmark with RSA keys -func BenchmarkRSAVerify(b *testing.B) { - for n := 0; n < b.N; n++ { - verify(RSA) - } -} - -// BenchmarkRSAPSSVerify runs the benchmark with RSAPSS keys -func BenchmarkRSAPSSVerify(b *testing.B) { - for n := 0; n < b.N; n++ { - verify(RSAPSS) - } -} - -// BenchmarkED25519Verify runs the benchmark with ED25519 keys -func BenchmarkED25519Verify(b *testing.B) { - for n := 0; n < b.N; n++ { - verify(ED25519) - } -} - -// BenchmarkSECP256k1Verify runs the benchmark with SECP256K1 keys -func BenchmarkSECP256k1Verify(b *testing.B) { - for n := 0; n < b.N; n++ { 
- verify(SECP256K1) - } -} diff --git a/avalanchego/utils/crypto/ed25519.go b/avalanchego/utils/crypto/ed25519.go deleted file mode 100644 index 5b3458d7..00000000 --- a/avalanchego/utils/crypto/ed25519.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package crypto - -import ( - "errors" - - "golang.org/x/crypto/ed25519" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/hashing" -) - -var ( - errWrongPublicKeySize = errors.New("wrong public key size") - errWrongPrivateKeySize = errors.New("wrong private key size") -) - -var ( - _ Factory = &FactoryED25519{} - _ PublicKey = &PublicKeyED25519{} - _ PrivateKey = &PrivateKeyED25519{} -) - -type FactoryED25519 struct{} - -func (*FactoryED25519) NewPrivateKey() (PrivateKey, error) { - _, k, err := ed25519.GenerateKey(nil) - return &PrivateKeyED25519{sk: k}, err -} - -func (*FactoryED25519) ToPublicKey(b []byte) (PublicKey, error) { - if len(b) != ed25519.PublicKeySize { - return nil, errWrongPublicKeySize - } - return &PublicKeyED25519{pk: b}, nil -} - -func (*FactoryED25519) ToPrivateKey(b []byte) (PrivateKey, error) { - if len(b) != ed25519.PrivateKeySize { - return nil, errWrongPrivateKeySize - } - return &PrivateKeyED25519{sk: b}, nil -} - -type PublicKeyED25519 struct { - pk ed25519.PublicKey - addr ids.ShortID -} - -func (k *PublicKeyED25519) Verify(msg, sig []byte) bool { - return ed25519.Verify(k.pk, msg, sig) -} - -func (k *PublicKeyED25519) VerifyHash(hash, sig []byte) bool { - return k.Verify(hash, sig) -} - -func (k *PublicKeyED25519) Address() ids.ShortID { - if k.addr == ids.ShortEmpty { - addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) - if err != nil { - panic(err) - } - k.addr = addr - } - return k.addr -} - -func (k *PublicKeyED25519) Bytes() []byte { return k.pk } - -type PrivateKeyED25519 struct { - sk ed25519.PrivateKey - pk *PublicKeyED25519 -} - 
-func (k *PrivateKeyED25519) PublicKey() PublicKey { - if k.pk == nil { - k.pk = &PublicKeyED25519{ - pk: k.sk.Public().(ed25519.PublicKey), - } - } - return k.pk -} - -func (k *PrivateKeyED25519) Sign(msg []byte) ([]byte, error) { - return ed25519.Sign(k.sk, msg), nil -} - -func (k PrivateKeyED25519) SignHash(hash []byte) ([]byte, error) { - return k.Sign(hash) -} - -func (k PrivateKeyED25519) Bytes() []byte { return k.sk } diff --git a/avalanchego/utils/crypto/errors.go b/avalanchego/utils/crypto/errors.go deleted file mode 100644 index ad9b5ebe..00000000 --- a/avalanchego/utils/crypto/errors.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package crypto - -import ( - "errors" -) - -var ( - errInvalidSigLen = errors.New("invalid signature length") - errMutatedSig = errors.New("signature was mutated from its original format") -) diff --git a/avalanchego/utils/crypto/keychain/keychain.go b/avalanchego/utils/crypto/keychain/keychain.go new file mode 100644 index 00000000..bd8a4c79 --- /dev/null +++ b/avalanchego/utils/crypto/keychain/keychain.go @@ -0,0 +1,166 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package keychain + +import ( + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +var ( + _ Keychain = (*ledgerKeychain)(nil) + _ Signer = (*ledgerSigner)(nil) + + ErrInvalidIndicesLength = errors.New("number of indices should be greater than 0") + ErrInvalidNumAddrsToDerive = errors.New("number of addresses to derive should be greater than 0") + ErrInvalidNumAddrsDerived = errors.New("incorrect number of ledger derived addresses") + ErrInvalidNumSignatures = errors.New("incorrect number of signatures") +) + +// Signer implements functions for a keychain to return its main address and +// to sign a hash +type Signer interface { + SignHash([]byte) ([]byte, error) + Sign([]byte) ([]byte, error) + Address() ids.ShortID +} + +// Keychain maintains a set of addresses together with their corresponding +// signers +type Keychain interface { + // The returned Signer can provide a signature for [addr] + Get(addr ids.ShortID) (Signer, bool) + // Returns the set of addresses for which the accessor keeps an associated + // signer + Addresses() set.Set[ids.ShortID] +} + +// ledgerKeychain is an abstraction of the underlying ledger hardware device, +// to be able to get a signer from a finite set of derived signers +type ledgerKeychain struct { + ledger Ledger + addrs set.Set[ids.ShortID] + addrToIdx map[ids.ShortID]uint32 +} + +// ledgerSigner is an abstraction of the underlying ledger hardware device, +// to be able sign for a specific address +type ledgerSigner struct { + ledger Ledger + idx uint32 + addr ids.ShortID +} + +// NewLedgerKeychain creates a new Ledger with [numToDerive] addresses. 
+func NewLedgerKeychain(l Ledger, numToDerive int) (Keychain, error) { + if numToDerive < 1 { + return nil, ErrInvalidNumAddrsToDerive + } + + indices := make([]uint32, numToDerive) + for i := range indices { + indices[i] = uint32(i) + } + + return NewLedgerKeychainFromIndices(l, indices) +} + +// NewLedgerKeychainFromIndices creates a new Ledger with addresses taken from the given [indices]. +func NewLedgerKeychainFromIndices(l Ledger, indices []uint32) (Keychain, error) { + if len(indices) == 0 { + return nil, ErrInvalidIndicesLength + } + + addrs, err := l.Addresses(indices) + if err != nil { + return nil, err + } + + if len(addrs) != len(indices) { + return nil, fmt.Errorf( + "%w. expected %d, got %d", + ErrInvalidNumAddrsDerived, + len(indices), + len(addrs), + ) + } + + addrsSet := set.NewSet[ids.ShortID](len(addrs)) + addrsSet.Add(addrs...) + + addrToIdx := map[ids.ShortID]uint32{} + for i := range indices { + addrToIdx[addrs[i]] = indices[i] + } + + return &ledgerKeychain{ + ledger: l, + addrs: addrsSet, + addrToIdx: addrToIdx, + }, nil +} + +func (l *ledgerKeychain) Addresses() set.Set[ids.ShortID] { + return l.addrs +} + +func (l *ledgerKeychain) Get(addr ids.ShortID) (Signer, bool) { + idx, ok := l.addrToIdx[addr] + if !ok { + return nil, false + } + + return &ledgerSigner{ + ledger: l.ledger, + idx: idx, + addr: addr, + }, true +} + +// expects to receive a hash of the unsigned tx bytes +func (l *ledgerSigner) SignHash(b []byte) ([]byte, error) { + // Sign using the address with index l.idx on the ledger device. The number + // of returned signatures should be the same length as the provided indices. + sigs, err := l.ledger.SignHash(b, []uint32{l.idx}) + if err != nil { + return nil, err + } + + if sigsLen := len(sigs); sigsLen != 1 { + return nil, fmt.Errorf( + "%w. 
expected 1, got %d", + ErrInvalidNumSignatures, + sigsLen, + ) + } + + return sigs[0], err +} + +// expects to receive the unsigned tx bytes +func (l *ledgerSigner) Sign(b []byte) ([]byte, error) { + // Sign using the address with index l.idx on the ledger device. The number + // of returned signatures should be the same length as the provided indices. + sigs, err := l.ledger.Sign(b, []uint32{l.idx}) + if err != nil { + return nil, err + } + + if sigsLen := len(sigs); sigsLen != 1 { + return nil, fmt.Errorf( + "%w. expected 1, got %d", + ErrInvalidNumSignatures, + sigsLen, + ) + } + + return sigs[0], err +} + +func (l *ledgerSigner) Address() ids.ShortID { + return l.addr +} diff --git a/avalanchego/utils/crypto/keychain/keychain_test.go b/avalanchego/utils/crypto/keychain/keychain_test.go new file mode 100644 index 00000000..e260e87c --- /dev/null +++ b/avalanchego/utils/crypto/keychain/keychain_test.go @@ -0,0 +1,432 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package keychain + +import ( + "errors" + "testing" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +var errTest = errors.New("test") + +func TestNewLedgerKeychain(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + addr := ids.GenerateTestShortID() + + // user request invalid number of addresses to derive + ledger := NewMockLedger(ctrl) + _, err := NewLedgerKeychain(ledger, 0) + require.Equal(err, ErrInvalidNumAddrsToDerive) + + // ledger does not return expected number of derived addresses + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{}, nil).Times(1) + _, err = NewLedgerKeychain(ledger, 1) + require.ErrorIs(err, ErrInvalidNumAddrsDerived) + + // ledger return error when asked for derived addresses + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr}, errTest).Times(1) + _, err = NewLedgerKeychain(ledger, 1) + require.Equal(err, errTest) + + // good path + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr}, nil).Times(1) + _, err = NewLedgerKeychain(ledger, 1) + require.NoError(err) +} + +func TestLedgerKeychain_Addresses(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + addr1 := ids.GenerateTestShortID() + addr2 := ids.GenerateTestShortID() + addr3 := ids.GenerateTestShortID() + + // 1 addr + ledger := NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + kc, err := NewLedgerKeychain(ledger, 1) + require.NoError(err) + + addrs := kc.Addresses() + require.Len(addrs, 1) + require.True(addrs.Contains(addr1)) + + // multiple addresses + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0, 1, 2}).Return([]ids.ShortID{addr1, addr2, addr3}, nil).Times(1) + kc, err = 
NewLedgerKeychain(ledger, 3) + require.NoError(err) + + addrs = kc.Addresses() + require.Len(addrs, 3) + require.Contains(addrs, addr1) + require.Contains(addrs, addr2) + require.Contains(addrs, addr3) +} + +func TestLedgerKeychain_Get(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + addr1 := ids.GenerateTestShortID() + addr2 := ids.GenerateTestShortID() + addr3 := ids.GenerateTestShortID() + + // 1 addr + ledger := NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + kc, err := NewLedgerKeychain(ledger, 1) + require.NoError(err) + + _, b := kc.Get(ids.GenerateTestShortID()) + require.False(b) + + s, b := kc.Get(addr1) + require.Equal(s.Address(), addr1) + require.True(b) + + // multiple addresses + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0, 1, 2}).Return([]ids.ShortID{addr1, addr2, addr3}, nil).Times(1) + kc, err = NewLedgerKeychain(ledger, 3) + require.NoError(err) + + _, b = kc.Get(ids.GenerateTestShortID()) + require.False(b) + + s, b = kc.Get(addr1) + require.True(b) + require.Equal(s.Address(), addr1) + + s, b = kc.Get(addr2) + require.True(b) + require.Equal(s.Address(), addr2) + + s, b = kc.Get(addr3) + require.True(b) + require.Equal(s.Address(), addr3) +} + +func TestLedgerSigner_SignHash(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + addr1 := ids.GenerateTestShortID() + addr2 := ids.GenerateTestShortID() + addr3 := ids.GenerateTestShortID() + toSign := []byte{1, 2, 3, 4, 5} + expectedSignature1 := []byte{1, 1, 1} + expectedSignature2 := []byte{2, 2, 2} + expectedSignature3 := []byte{3, 3, 3} + + // ledger returns an incorrect number of signatures + ledger := NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{0}).Return([][]byte{}, nil).Times(1) + kc, err := 
NewLedgerKeychain(ledger, 1) + require.NoError(err) + + s, b := kc.Get(addr1) + require.True(b) + + _, err = s.SignHash(toSign) + require.ErrorIs(err, ErrInvalidNumSignatures) + + // ledger returns an error when asked for signature + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{0}).Return([][]byte{expectedSignature1}, errTest).Times(1) + kc, err = NewLedgerKeychain(ledger, 1) + require.NoError(err) + + s, b = kc.Get(addr1) + require.True(b) + + _, err = s.SignHash(toSign) + require.Equal(err, errTest) + + // good path 1 addr + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{0}).Return([][]byte{expectedSignature1}, nil).Times(1) + kc, err = NewLedgerKeychain(ledger, 1) + require.NoError(err) + + s, b = kc.Get(addr1) + require.True(b) + + signature, err := s.SignHash(toSign) + require.NoError(err) + require.Equal(expectedSignature1, signature) + + // good path 3 addr + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0, 1, 2}).Return([]ids.ShortID{addr1, addr2, addr3}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{0}).Return([][]byte{expectedSignature1}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{1}).Return([][]byte{expectedSignature2}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{2}).Return([][]byte{expectedSignature3}, nil).Times(1) + kc, err = NewLedgerKeychain(ledger, 3) + require.NoError(err) + + s, b = kc.Get(addr1) + require.True(b) + + signature, err = s.SignHash(toSign) + require.NoError(err) + require.Equal(expectedSignature1, signature) + + s, b = kc.Get(addr2) + require.True(b) + + signature, err = s.SignHash(toSign) + require.NoError(err) + require.Equal(expectedSignature2, signature) + + s, b = kc.Get(addr3) + require.True(b) + + signature, err = s.SignHash(toSign) + 
require.NoError(err) + require.Equal(expectedSignature3, signature) +} + +func TestNewLedgerKeychainFromIndices(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + addr := ids.GenerateTestShortID() + _ = addr + + // user request invalid number of indices + ledger := NewMockLedger(ctrl) + _, err := NewLedgerKeychainFromIndices(ledger, []uint32{}) + require.Equal(err, ErrInvalidIndicesLength) + + // ledger does not return expected number of derived addresses + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{}, nil).Times(1) + _, err = NewLedgerKeychainFromIndices(ledger, []uint32{0}) + require.ErrorIs(err, ErrInvalidNumAddrsDerived) + + // ledger return error when asked for derived addresses + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr}, errTest).Times(1) + _, err = NewLedgerKeychainFromIndices(ledger, []uint32{0}) + require.Equal(err, errTest) + + // good path + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr}, nil).Times(1) + _, err = NewLedgerKeychainFromIndices(ledger, []uint32{0}) + require.NoError(err) +} + +func TestLedgerKeychainFromIndices_Addresses(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + addr1 := ids.GenerateTestShortID() + addr2 := ids.GenerateTestShortID() + addr3 := ids.GenerateTestShortID() + + // 1 addr + ledger := NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + kc, err := NewLedgerKeychainFromIndices(ledger, []uint32{0}) + require.NoError(err) + + addrs := kc.Addresses() + require.Len(addrs, 1) + require.True(addrs.Contains(addr1)) + + // first 3 addresses + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0, 1, 2}).Return([]ids.ShortID{addr1, addr2, addr3}, nil).Times(1) + kc, err = NewLedgerKeychainFromIndices(ledger, 
[]uint32{0, 1, 2}) + require.NoError(err) + + addrs = kc.Addresses() + require.Len(addrs, 3) + require.Contains(addrs, addr1) + require.Contains(addrs, addr2) + require.Contains(addrs, addr3) + + // some 3 addresses + indices := []uint32{3, 7, 1} + addresses := []ids.ShortID{addr1, addr2, addr3} + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses(indices).Return(addresses, nil).Times(1) + kc, err = NewLedgerKeychainFromIndices(ledger, indices) + require.NoError(err) + + addrs = kc.Addresses() + require.Len(addrs, len(indices)) + require.Contains(addrs, addr1) + require.Contains(addrs, addr2) + require.Contains(addrs, addr3) + + // repeated addresses + indices = []uint32{3, 7, 1, 3, 1, 7} + addresses = []ids.ShortID{addr1, addr2, addr3, addr1, addr2, addr3} + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses(indices).Return(addresses, nil).Times(1) + kc, err = NewLedgerKeychainFromIndices(ledger, indices) + require.NoError(err) + + addrs = kc.Addresses() + require.Len(addrs, 3) + require.Contains(addrs, addr1) + require.Contains(addrs, addr2) + require.Contains(addrs, addr3) +} + +func TestLedgerKeychainFromIndices_Get(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + addr1 := ids.GenerateTestShortID() + addr2 := ids.GenerateTestShortID() + addr3 := ids.GenerateTestShortID() + + // 1 addr + ledger := NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + kc, err := NewLedgerKeychainFromIndices(ledger, []uint32{0}) + require.NoError(err) + + _, b := kc.Get(ids.GenerateTestShortID()) + require.False(b) + + s, b := kc.Get(addr1) + require.Equal(s.Address(), addr1) + require.True(b) + + // some 3 addresses + indices := []uint32{3, 7, 1} + addresses := []ids.ShortID{addr1, addr2, addr3} + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses(indices).Return(addresses, nil).Times(1) + kc, err = NewLedgerKeychainFromIndices(ledger, indices) + 
require.NoError(err) + + _, b = kc.Get(ids.GenerateTestShortID()) + require.False(b) + + s, b = kc.Get(addr1) + require.True(b) + require.Equal(s.Address(), addr1) + + s, b = kc.Get(addr2) + require.True(b) + require.Equal(s.Address(), addr2) + + s, b = kc.Get(addr3) + require.True(b) + require.Equal(s.Address(), addr3) +} + +func TestLedgerSignerFromIndices_SignHash(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + addr1 := ids.GenerateTestShortID() + addr2 := ids.GenerateTestShortID() + addr3 := ids.GenerateTestShortID() + toSign := []byte{1, 2, 3, 4, 5} + expectedSignature1 := []byte{1, 1, 1} + expectedSignature2 := []byte{2, 2, 2} + expectedSignature3 := []byte{3, 3, 3} + + // ledger returns an incorrect number of signatures + ledger := NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{0}).Return([][]byte{}, nil).Times(1) + kc, err := NewLedgerKeychainFromIndices(ledger, []uint32{0}) + require.NoError(err) + + s, b := kc.Get(addr1) + require.True(b) + + _, err = s.SignHash(toSign) + require.ErrorIs(err, ErrInvalidNumSignatures) + + // ledger returns an error when asked for signature + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{0}).Return([][]byte{expectedSignature1}, errTest).Times(1) + kc, err = NewLedgerKeychainFromIndices(ledger, []uint32{0}) + require.NoError(err) + + s, b = kc.Get(addr1) + require.True(b) + + _, err = s.SignHash(toSign) + require.Equal(err, errTest) + + // good path 1 addr + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses([]uint32{0}).Return([]ids.ShortID{addr1}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{0}).Return([][]byte{expectedSignature1}, nil).Times(1) + kc, err = NewLedgerKeychainFromIndices(ledger, []uint32{0}) + require.NoError(err) + + s, b = 
kc.Get(addr1) + require.True(b) + + signature, err := s.SignHash(toSign) + require.NoError(err) + require.Equal(expectedSignature1, signature) + + // good path some 3 addresses + indices := []uint32{3, 7, 1} + addresses := []ids.ShortID{addr1, addr2, addr3} + ledger = NewMockLedger(ctrl) + ledger.EXPECT().Addresses(indices).Return(addresses, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{indices[0]}).Return([][]byte{expectedSignature1}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{indices[1]}).Return([][]byte{expectedSignature2}, nil).Times(1) + ledger.EXPECT().SignHash(toSign, []uint32{indices[2]}).Return([][]byte{expectedSignature3}, nil).Times(1) + kc, err = NewLedgerKeychainFromIndices(ledger, indices) + require.NoError(err) + + s, b = kc.Get(addr1) + require.True(b) + + signature, err = s.SignHash(toSign) + require.NoError(err) + require.Equal(expectedSignature1, signature) + + s, b = kc.Get(addr2) + require.True(b) + + signature, err = s.SignHash(toSign) + require.NoError(err) + require.Equal(expectedSignature2, signature) + + s, b = kc.Get(addr3) + require.True(b) + + signature, err = s.SignHash(toSign) + require.NoError(err) + require.Equal(expectedSignature3, signature) +} diff --git a/avalanchego/utils/crypto/keychain/ledger.go b/avalanchego/utils/crypto/keychain/ledger.go new file mode 100644 index 00000000..d709ed19 --- /dev/null +++ b/avalanchego/utils/crypto/keychain/ledger.go @@ -0,0 +1,19 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package keychain + +import ( + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/version" +) + +// Ledger interface for the ledger wrapper +type Ledger interface { + Version() (v *version.Semantic, err error) + Address(displayHRP string, addressIndex uint32) (ids.ShortID, error) + Addresses(addressIndices []uint32) ([]ids.ShortID, error) + SignHash(hash []byte, addressIndices []uint32) ([][]byte, error) + Sign(unsignedTxBytes []byte, addressIndices []uint32) ([][]byte, error) + Disconnect() error +} diff --git a/avalanchego/utils/crypto/keychain/mock_ledger.go b/avalanchego/utils/crypto/keychain/mock_ledger.go new file mode 100644 index 00000000..c6d0ead8 --- /dev/null +++ b/avalanchego/utils/crypto/keychain/mock_ledger.go @@ -0,0 +1,128 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/utils/crypto/keychain (interfaces: Ledger) + +// Package keychain is a generated GoMock package. +package keychain + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + version "github.com/ava-labs/avalanchego/version" + gomock "github.com/golang/mock/gomock" +) + +// MockLedger is a mock of Ledger interface. +type MockLedger struct { + ctrl *gomock.Controller + recorder *MockLedgerMockRecorder +} + +// MockLedgerMockRecorder is the mock recorder for MockLedger. +type MockLedgerMockRecorder struct { + mock *MockLedger +} + +// NewMockLedger creates a new mock instance. +func NewMockLedger(ctrl *gomock.Controller) *MockLedger { + mock := &MockLedger{ctrl: ctrl} + mock.recorder = &MockLedgerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLedger) EXPECT() *MockLedgerMockRecorder { + return m.recorder +} + +// Address mocks base method. 
+func (m *MockLedger) Address(arg0 string, arg1 uint32) (ids.ShortID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Address", arg0, arg1) + ret0, _ := ret[0].(ids.ShortID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Address indicates an expected call of Address. +func (mr *MockLedgerMockRecorder) Address(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Address", reflect.TypeOf((*MockLedger)(nil).Address), arg0, arg1) +} + +// Addresses mocks base method. +func (m *MockLedger) Addresses(arg0 []uint32) ([]ids.ShortID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Addresses", arg0) + ret0, _ := ret[0].([]ids.ShortID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Addresses indicates an expected call of Addresses. +func (mr *MockLedgerMockRecorder) Addresses(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Addresses", reflect.TypeOf((*MockLedger)(nil).Addresses), arg0) +} + +// Disconnect mocks base method. +func (m *MockLedger) Disconnect() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Disconnect") + ret0, _ := ret[0].(error) + return ret0 +} + +// Disconnect indicates an expected call of Disconnect. +func (mr *MockLedgerMockRecorder) Disconnect() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Disconnect", reflect.TypeOf((*MockLedger)(nil).Disconnect)) +} + +// Sign mocks base method. +func (m *MockLedger) Sign(arg0 []byte, arg1 []uint32) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Sign", arg0, arg1) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Sign indicates an expected call of Sign. 
+func (mr *MockLedgerMockRecorder) Sign(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Sign", reflect.TypeOf((*MockLedger)(nil).Sign), arg0, arg1) +} + +// SignHash mocks base method. +func (m *MockLedger) SignHash(arg0 []byte, arg1 []uint32) ([][]byte, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SignHash", arg0, arg1) + ret0, _ := ret[0].([][]byte) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SignHash indicates an expected call of SignHash. +func (mr *MockLedgerMockRecorder) SignHash(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SignHash", reflect.TypeOf((*MockLedger)(nil).SignHash), arg0, arg1) +} + +// Version mocks base method. +func (m *MockLedger) Version() (*version.Semantic, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Version") + ret0, _ := ret[0].(*version.Semantic) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Version indicates an expected call of Version. +func (mr *MockLedgerMockRecorder) Version() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Version", reflect.TypeOf((*MockLedger)(nil).Version)) +} diff --git a/avalanchego/utils/crypto/ledger/ledger.go b/avalanchego/utils/crypto/ledger/ledger.go new file mode 100644 index 00000000..37de44fe --- /dev/null +++ b/avalanchego/utils/crypto/ledger/ledger.go @@ -0,0 +1,130 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ledger + +import ( + "fmt" + + ledger "github.com/ava-labs/ledger-avalanche/go" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/keychain" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/version" +) + +const ( + rootPath = "m/44'/9000'/0'" + ledgerBufferLimit = 8192 + ledgerPathSize = 9 +) + +var _ keychain.Ledger = (*Ledger)(nil) + +// Ledger is a wrapper around the low-level Ledger Device interface that +// provides Avalanche-specific access. +type Ledger struct { + device *ledger.LedgerAvalanche +} + +func New() (keychain.Ledger, error) { + device, err := ledger.FindLedgerAvalancheApp() + return &Ledger{ + device: device, + }, err +} + +func addressPath(index uint32) string { + return fmt.Sprintf("%s/0/%d", rootPath, index) +} + +func (l *Ledger) Address(hrp string, addressIndex uint32) (ids.ShortID, error) { + _, hash, err := l.device.GetPubKey(addressPath(addressIndex), true, hrp, "") + if err != nil { + return ids.ShortEmpty, err + } + return ids.ToShortID(hash) +} + +func (l *Ledger) Addresses(addressIndices []uint32) ([]ids.ShortID, error) { + addresses := make([]ids.ShortID, len(addressIndices)) + for i, v := range addressIndices { + _, hash, err := l.device.GetPubKey(addressPath(v), false, "", "") + if err != nil { + return nil, err + } + copy(addresses[i][:], hash) + } + return addresses, nil +} + +func convertToSigningPaths(input []uint32) []string { + output := make([]string, len(input)) + for i, v := range input { + output[i] = fmt.Sprintf("0/%d", v) + } + return output +} + +func (l *Ledger) SignHash(hash []byte, addressIndices []uint32) ([][]byte, error) { + strIndices := convertToSigningPaths(addressIndices) + response, err := l.device.SignHash(rootPath, strIndices, hash) + if err != nil { + return nil, fmt.Errorf("%w: unable to sign hash", err) + } + responses := make([][]byte, len(addressIndices)) + for i, index := range strIndices { + sig, ok := 
response.Signature[index] + if !ok { + return nil, fmt.Errorf("missing signature %s", index) + } + responses[i] = sig + } + return responses, nil +} + +func (l *Ledger) Sign(txBytes []byte, addressIndices []uint32) ([][]byte, error) { + // will pass to the ledger addressIndices both as signing paths and change paths + numSigningPaths := len(addressIndices) + numChangePaths := len(addressIndices) + if len(txBytes)+(numSigningPaths+numChangePaths)*ledgerPathSize > ledgerBufferLimit { + // There is a limit on the tx length that can be parsed by the ledger + // app. When the tx that is being signed is too large, we sign with hash + // instead. + // + // Ref: https://github.com/ava-labs/avalanche-wallet-sdk/blob/9a71f05e424e06b94eaccf21fd32d7983ed1b040/src/Wallet/Ledger/provider/ZondaxProvider.ts#L68 + unsignedHash := hashing.ComputeHash256(txBytes) + return l.SignHash(unsignedHash, addressIndices) + } + strIndices := convertToSigningPaths(addressIndices) + response, err := l.device.Sign(rootPath, strIndices, txBytes, strIndices) + if err != nil { + return nil, fmt.Errorf("%w: unable to sign transaction", err) + } + responses := make([][]byte, len(strIndices)) + for i, index := range strIndices { + sig, ok := response.Signature[index] + if !ok { + return nil, fmt.Errorf("missing signature %s", index) + } + responses[i] = sig + } + return responses, nil +} + +func (l *Ledger) Version() (*version.Semantic, error) { + resp, err := l.device.GetVersion() + if err != nil { + return nil, err + } + return &version.Semantic{ + Major: int(resp.Major), + Minor: int(resp.Minor), + Patch: int(resp.Patch), + }, nil +} + +func (l *Ledger) Disconnect() error { + return l.device.Close() +} diff --git a/avalanchego/utils/crypto/ledger/ledger_test.go b/avalanchego/utils/crypto/ledger/ledger_test.go new file mode 100644 index 00000000..1ab163c9 --- /dev/null +++ b/avalanchego/utils/crypto/ledger/ledger_test.go @@ -0,0 +1,76 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package ledger + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +const ( + chainAlias = "P" + hrp = "fuji" +) + +var factory secp256k1.Factory + +// TestLedger will be skipped if a ledger is not connected. +func TestLedger(t *testing.T) { + require := require.New(t) + + // Initialize Ledger + device, err := New() + if err != nil { + t.Skip("ledger not detected") + } + + // Get version + version, err := device.Version() + require.NoError(err) + t.Logf("version: %s\n", version) + + // Get Fuji Address + addr, err := device.Address(hrp, 0) + require.NoError(err) + paddr, err := address.Format(chainAlias, hrp, addr[:]) + require.NoError(err) + t.Logf("address: %s shortID: %s\n", paddr, addr) + + // Get Extended Addresses + addresses, err := device.Addresses([]uint32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}) + require.NoError(err) + for i, taddr := range addresses { + paddr, err := address.Format(chainAlias, hrp, taddr[:]) + require.NoError(err) + t.Logf("address(%d): %s shortID: %s\n", i, paddr, taddr) + + // Ensure first derived address matches directly requested address + if i == 0 { + require.Equal(addr, taddr, "address mismatch at index 0") + } + } + + // Sign Hash + rawHash := hashing.ComputeHash256([]byte{0x1, 0x2, 0x3, 0x4}) + indices := []uint32{1, 3} + sigs, err := device.SignHash(rawHash, indices) + require.NoError(err) + require.Len(sigs, 2) + + for i, addrIndex := range indices { + sig := sigs[i] + + pk, err := factory.RecoverHashPublicKey(rawHash, sig) + require.NoError(err) + require.Equal(addresses[addrIndex], pk.Address()) + } + + // Disconnect + require.NoError(device.Disconnect()) +} diff --git a/avalanchego/utils/crypto/rsa.go b/avalanchego/utils/crypto/rsa.go deleted file mode 100644 index 
cd085fcb..00000000 --- a/avalanchego/utils/crypto/rsa.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package crypto - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "errors" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/hashing" -) - -var ( - errWrongKeyType = errors.New("wrong key type") - - _ Factory = &FactoryRSA{} - _ PublicKey = &PublicKeyRSA{} - _ PrivateKey = &PrivateKeyRSA{} -) - -const rsaSize = 3072 - -type FactoryRSA struct{} - -func (*FactoryRSA) NewPrivateKey() (PrivateKey, error) { - k, err := rsa.GenerateKey(rand.Reader, rsaSize) - if err != nil { - return nil, err - } - return &PrivateKeyRSA{sk: k}, nil -} - -func (*FactoryRSA) ToPublicKey(b []byte) (PublicKey, error) { - key, err := x509.ParsePKIXPublicKey(b) - if err != nil { - return nil, err - } - switch key := key.(type) { - case *rsa.PublicKey: - return &PublicKeyRSA{ - pk: key, - bytes: b, - }, nil - default: - return nil, errWrongKeyType - } -} - -func (*FactoryRSA) ToPrivateKey(b []byte) (PrivateKey, error) { - key, err := x509.ParsePKCS1PrivateKey(b) - if err != nil { - return nil, err - } - return &PrivateKeyRSA{ - sk: key, - bytes: b, - }, nil -} - -type PublicKeyRSA struct { - pk *rsa.PublicKey - addr ids.ShortID - bytes []byte -} - -func (k *PublicKeyRSA) Verify(msg, sig []byte) bool { - return k.VerifyHash(hashing.ComputeHash256(msg), sig) -} - -func (k *PublicKeyRSA) VerifyHash(hash, sig []byte) bool { - return rsa.VerifyPKCS1v15(k.pk, crypto.SHA256, hash, sig) == nil -} - -func (k *PublicKeyRSA) Address() ids.ShortID { - if k.addr == ids.ShortEmpty { - addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) - if err != nil { - panic(err) - } - k.addr = addr - } - return k.addr -} - -func (k *PublicKeyRSA) Bytes() []byte { - if k.bytes == nil { - b, err := x509.MarshalPKIXPublicKey(k.pk) - if err != nil { - 
panic(err) - } - k.bytes = b - } - return k.bytes -} - -type PrivateKeyRSA struct { - sk *rsa.PrivateKey - pk *PublicKeyRSA - bytes []byte -} - -func (k *PrivateKeyRSA) PublicKey() PublicKey { - if k.pk == nil { - k.pk = &PublicKeyRSA{pk: &k.sk.PublicKey} - } - return k.pk -} - -func (k *PrivateKeyRSA) Sign(msg []byte) ([]byte, error) { - return k.SignHash(hashing.ComputeHash256(msg)) -} - -func (k *PrivateKeyRSA) SignHash(hash []byte) ([]byte, error) { - return rsa.SignPKCS1v15(rand.Reader, k.sk, crypto.SHA256, hash) -} - -func (k *PrivateKeyRSA) Bytes() []byte { - if k.bytes == nil { - k.bytes = x509.MarshalPKCS1PrivateKey(k.sk) - } - return k.bytes -} diff --git a/avalanchego/utils/crypto/rsapss.go b/avalanchego/utils/crypto/rsapss.go deleted file mode 100644 index d0ae67be..00000000 --- a/avalanchego/utils/crypto/rsapss.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package crypto - -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/hashing" -) - -var ( - _ Factory = &FactoryRSAPSS{} - _ PublicKey = &PublicKeyRSAPSS{} - _ PrivateKey = &PrivateKeyRSAPSS{} -) - -const rsaPSSSize = 3072 - -type FactoryRSAPSS struct{} - -func (*FactoryRSAPSS) NewPrivateKey() (PrivateKey, error) { - k, err := rsa.GenerateKey(rand.Reader, rsaPSSSize) - if err != nil { - return nil, err - } - return &PrivateKeyRSAPSS{sk: k}, nil -} - -func (*FactoryRSAPSS) ToPublicKey(b []byte) (PublicKey, error) { - key, err := x509.ParsePKIXPublicKey(b) - if err != nil { - return nil, err - } - switch key := key.(type) { - case *rsa.PublicKey: - return &PublicKeyRSAPSS{ - pk: key, - bytes: b, - }, nil - default: - return nil, errWrongKeyType - } -} - -func (*FactoryRSAPSS) ToPrivateKey(b []byte) (PrivateKey, error) { - key, err := x509.ParsePKCS1PrivateKey(b) - if err != nil { - return nil, err - } - 
return &PrivateKeyRSAPSS{ - sk: key, - bytes: b, - }, nil -} - -type PublicKeyRSAPSS struct { - pk *rsa.PublicKey - addr ids.ShortID - bytes []byte -} - -func (k *PublicKeyRSAPSS) Verify(msg, sig []byte) bool { - return k.VerifyHash(hashing.ComputeHash256(msg), sig) -} - -func (k *PublicKeyRSAPSS) VerifyHash(hash, sig []byte) bool { - return rsa.VerifyPSS(k.pk, crypto.SHA256, hash, sig, nil) == nil -} - -func (k *PublicKeyRSAPSS) Address() ids.ShortID { - if k.addr == ids.ShortEmpty { - addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) - if err != nil { - panic(err) - } - k.addr = addr - } - return k.addr -} - -func (k *PublicKeyRSAPSS) Bytes() []byte { - if k.bytes == nil { - b, err := x509.MarshalPKIXPublicKey(k.pk) - if err != nil { - panic(err) - } - k.bytes = b - } - return k.bytes -} - -type PrivateKeyRSAPSS struct { - sk *rsa.PrivateKey - pk *PublicKeyRSAPSS - bytes []byte -} - -func (k *PrivateKeyRSAPSS) PublicKey() PublicKey { - if k.pk == nil { - k.pk = &PublicKeyRSAPSS{pk: &k.sk.PublicKey} - } - return k.pk -} - -func (k *PrivateKeyRSAPSS) Sign(msg []byte) ([]byte, error) { - return k.SignHash(hashing.ComputeHash256(msg)) -} - -func (k *PrivateKeyRSAPSS) SignHash(hash []byte) ([]byte, error) { - return rsa.SignPSS(rand.Reader, k.sk, crypto.SHA256, hash, nil) -} - -func (k *PrivateKeyRSAPSS) Bytes() []byte { - if k.bytes == nil { - k.bytes = x509.MarshalPKCS1PrivateKey(k.sk) - } - return k.bytes -} diff --git a/avalanchego/utils/crypto/secp256k1/rfc6979_test.go b/avalanchego/utils/crypto/secp256k1/rfc6979_test.go new file mode 100644 index 00000000..d4c0a9c4 --- /dev/null +++ b/avalanchego/utils/crypto/secp256k1/rfc6979_test.go @@ -0,0 +1,85 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package secp256k1 + +import ( + "encoding/hex" + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +// See: https://bitcointalk.org/index.php?topic=285142.msg3300992#msg3300992 as +// the source of these test vectors. +var rfc6979Tests = []test{ + { + skHex: "0000000000000000000000000000000000000000000000000000000000000001", + msg: "Everything should be made as simple as possible, but not simpler.", + rsHex: "33a69cd2065432a30f3d1ce4eb0d59b8ab58c74f27c41a7fdb5696ad4e6108c96f807982866f785d3f6418d24163ddae117b7db4d5fdf0071de069fa54342262", + }, + { + skHex: "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + msg: "Equations are more important to me, because politics is for the present, but an equation is something for eternity.", + rsHex: "54c4a33c6423d689378f160a7ff8b61330444abb58fb470f96ea16d99d4a2fed07082304410efa6b2943111b6a4e0aaa7b7db55a07e9861d1fb3cb1f421044a5", + }, + { + skHex: "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364140", + msg: "Not only is the Universe stranger than we think, it is stranger than we can think.", + rsHex: "ff466a9f1b7b273e2f4c3ffe032eb2e814121ed18ef84665d0f515360dab3dd06fc95f5132e5ecfdc8e5e6e616cc77151455d46ed48f5589b7db7771a332b283", + }, + { + skHex: "0000000000000000000000000000000000000000000000000000000000000001", + msg: "How wonderful that we have met with a paradox. 
Now we have some hope of making progress.", + rsHex: "c0dafec8251f1d5010289d210232220b03202cba34ec11fec58b3e93a85b91d375afdc06b7d6322a590955bf264e7aaa155847f614d80078a90292fe205064d3", + }, + { + skHex: "69ec59eaa1f4f2e36b639716b7c30ca86d9a5375c7b38d8918bd9c0ebc80ba64", + msg: "Computer science is no more about computers than astronomy is about telescopes.", + rsHex: "7186363571d65e084e7f02b0b77c3ec44fb1b257dee26274c38c928986fea45d0de0b38e06807e46bda1f1e293f4f6323e854c86d58abdd00c46c16441085df6", + }, + { + skHex: "00000000000000000000000000007246174ab1e92e9149c6e446fe194d072637", + msg: "...if you aren't, at any given time, scandalized by code you wrote five or even three years ago, you're not learning anywhere near enough", + rsHex: "fbfe5076a15860ba8ed00e75e9bd22e05d230f02a936b653eb55b61c99dda4870e68880ebb0050fe4312b1b1eb0899e1b82da89baa5b895f612619edf34cbd37", + }, + { + skHex: "000000000000000000000000000000000000000000056916d0f9b31dc9b637f3", + msg: "The question of whether computers can think is like the question of whether submarines can swim.", + rsHex: "cde1302d83f8dd835d89aef803c74a119f561fbaef3eb9129e45f30de86abbf906ce643f5049ee1f27890467b77a6a8e11ec4661cc38cd8badf90115fbd03cef", + }, +} + +type test struct { + skHex string + msg string + rsHex string +} + +func TestRFC6979Compliance(t *testing.T) { + f := Factory{} + for i, tt := range rfc6979Tests { + t.Run(fmt.Sprintf("test %d", i), func(t *testing.T) { + require := require.New(t) + + skBytes, err := hex.DecodeString(tt.skHex) + require.NoError(err) + + sk, err := f.ToPrivateKey(skBytes) + require.NoError(err) + + msgBytes := []byte(tt.msg) + sigBytes, err := sk.Sign(msgBytes) + require.NoError(err) + + expectedRSBytes, err := hex.DecodeString(tt.rsHex) + require.NoError(err) + + // sigBytes is returned in [R || S || V] format, so we drop last + // byte to get [R || S] + rsBytes := sigBytes[:len(sigBytes)-1] + require.Equal(expectedRSBytes, rsBytes) + }) + } +} diff --git 
a/avalanchego/utils/crypto/secp256k1r.go b/avalanchego/utils/crypto/secp256k1/secp256k1.go similarity index 56% rename from avalanchego/utils/crypto/secp256k1r.go rename to avalanchego/utils/crypto/secp256k1/secp256k1.go index ad0cd33f..80de06c0 100644 --- a/avalanchego/utils/crypto/secp256k1r.go +++ b/avalanchego/utils/crypto/secp256k1/secp256k1.go @@ -1,40 +1,35 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package crypto +package secp256k1 import ( - "bytes" "errors" "fmt" - "sort" "strings" stdecdsa "crypto/ecdsa" - "github.com/decred/dcrd/dcrec/secp256k1/v3/ecdsa" + "github.com/decred/dcrd/dcrec/secp256k1/v4/ecdsa" - secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v3" + secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/hashing" ) const ( - // SECP256K1RSigLen is the number of bytes in a secp2561k recoverable - // signature - SECP256K1RSigLen = 65 + // SignatureLen is the number of bytes in a secp2561k recoverable signature + SignatureLen = 65 - // SECP256K1RSKLen is the number of bytes in a secp2561k recoverable private + // PrivateKeyLen is the number of bytes in a secp2561k recoverable private // key - SECP256K1RSKLen = 32 + PrivateKeyLen = 32 - // SECP256K1RPKLen is the number of bytes in a secp2561k recoverable public - // key - SECP256K1RPKLen = 33 + // PublicKeyLen is the number of bytes in a secp2561k recoverable public key + PublicKeyLen = 33 // from the decred library: // compactSigMagicOffset is a value used when creating the compact signature @@ -52,49 +47,54 @@ var ( errCompressed = errors.New("wasn't expecting a compressed key") errMissingQuotes = errors.New("first and last characters should be quotes") 
errMissingKeyPrefix = fmt.Errorf("private key missing %s prefix", PrivateKeyPrefix) - errInvalidPrivateKeyLength = fmt.Errorf("private key has unexpected length, expected %d", SECP256K1RSKLen) - - _ RecoverableFactory = &FactorySECP256K1R{} - _ PublicKey = &PublicKeySECP256K1R{} - _ PrivateKey = &PrivateKeySECP256K1R{} + errInvalidPrivateKeyLength = fmt.Errorf("private key has unexpected length, expected %d", PrivateKeyLen) + errInvalidPublicKeyLength = fmt.Errorf("public key has unexpected length, expected %d", PublicKeyLen) + errInvalidSigLen = errors.New("invalid signature length") + errMutatedSig = errors.New("signature was mutated from its original format") ) -type FactorySECP256K1R struct{ Cache cache.LRU } +type Factory struct { + Cache cache.LRU[ids.ID, *PublicKey] +} -func (*FactorySECP256K1R) NewPrivateKey() (PrivateKey, error) { +func (*Factory) NewPrivateKey() (*PrivateKey, error) { k, err := secp256k1.GeneratePrivateKey() - return &PrivateKeySECP256K1R{sk: k}, err + return &PrivateKey{sk: k}, err } -func (*FactorySECP256K1R) ToPublicKey(b []byte) (PublicKey, error) { +func (*Factory) ToPublicKey(b []byte) (*PublicKey, error) { + if len(b) != PublicKeyLen { + return nil, errInvalidPublicKeyLength + } + key, err := secp256k1.ParsePubKey(b) - return &PublicKeySECP256K1R{ + return &PublicKey{ pk: key, bytes: b, }, err } -func (*FactorySECP256K1R) ToPrivateKey(b []byte) (PrivateKey, error) { - if len(b) != SECP256K1RSKLen { +func (*Factory) ToPrivateKey(b []byte) (*PrivateKey, error) { + if len(b) != PrivateKeyLen { return nil, errInvalidPrivateKeyLength } - return &PrivateKeySECP256K1R{ + return &PrivateKey{ sk: secp256k1.PrivKeyFromBytes(b), bytes: b, }, nil } -func (f *FactorySECP256K1R) RecoverPublicKey(msg, sig []byte) (PublicKey, error) { +func (f *Factory) RecoverPublicKey(msg, sig []byte) (*PublicKey, error) { return f.RecoverHashPublicKey(hashing.ComputeHash256(msg), sig) } -func (f *FactorySECP256K1R) RecoverHashPublicKey(hash, sig []byte) 
(PublicKey, error) { +func (f *Factory) RecoverHashPublicKey(hash, sig []byte) (*PublicKey, error) { cacheBytes := make([]byte, len(hash)+len(sig)) copy(cacheBytes, hash) copy(cacheBytes[len(hash):], sig) id := hashing.ComputeHash256Array(cacheBytes) if cachedPublicKey, ok := f.Cache.Get(id); ok { - return cachedPublicKey.(*PublicKeySECP256K1R), nil + return cachedPublicKey, nil } if err := verifySECP256K1RSignatureFormat(sig); err != nil { @@ -115,23 +115,23 @@ func (f *FactorySECP256K1R) RecoverHashPublicKey(hash, sig []byte) (PublicKey, e return nil, errCompressed } - pubkey := &PublicKeySECP256K1R{pk: rawPubkey} + pubkey := &PublicKey{pk: rawPubkey} f.Cache.Put(id, pubkey) return pubkey, nil } -type PublicKeySECP256K1R struct { +type PublicKey struct { pk *secp256k1.PublicKey addr ids.ShortID bytes []byte } -func (k *PublicKeySECP256K1R) Verify(msg, sig []byte) bool { +func (k *PublicKey) Verify(msg, sig []byte) bool { return k.VerifyHash(hashing.ComputeHash256(msg), sig) } -func (k *PublicKeySECP256K1R) VerifyHash(hash, sig []byte) bool { - factory := FactorySECP256K1R{} +func (k *PublicKey) VerifyHash(hash, sig []byte) bool { + factory := Factory{} pk, err := factory.RecoverHashPublicKey(hash, sig) if err != nil { return false @@ -140,11 +140,11 @@ func (k *PublicKeySECP256K1R) VerifyHash(hash, sig []byte) bool { } // ToECDSA returns the ecdsa representation of this public key -func (k *PublicKeySECP256K1R) ToECDSA() *stdecdsa.PublicKey { +func (k *PublicKey) ToECDSA() *stdecdsa.PublicKey { return k.pk.ToECDSA() } -func (k *PublicKeySECP256K1R) Address() ids.ShortID { +func (k *PublicKey) Address() ids.ShortID { if k.addr == ids.ShortEmpty { addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) if err != nil { @@ -155,63 +155,67 @@ func (k *PublicKeySECP256K1R) Address() ids.ShortID { return k.addr } -func (k *PublicKeySECP256K1R) Bytes() []byte { +func (k *PublicKey) Bytes() []byte { if k.bytes == nil { k.bytes = k.pk.SerializeCompressed() } 
return k.bytes } -type PrivateKeySECP256K1R struct { +type PrivateKey struct { sk *secp256k1.PrivateKey - pk *PublicKeySECP256K1R + pk *PublicKey bytes []byte } -func (k *PrivateKeySECP256K1R) PublicKey() PublicKey { +func (k *PrivateKey) PublicKey() *PublicKey { if k.pk == nil { - k.pk = &PublicKeySECP256K1R{pk: k.sk.PubKey()} + k.pk = &PublicKey{pk: k.sk.PubKey()} } return k.pk } -func (k *PrivateKeySECP256K1R) Sign(msg []byte) ([]byte, error) { +func (k *PrivateKey) Address() ids.ShortID { + return k.PublicKey().Address() +} + +func (k *PrivateKey) Sign(msg []byte) ([]byte, error) { return k.SignHash(hashing.ComputeHash256(msg)) } -func (k *PrivateKeySECP256K1R) SignHash(hash []byte) ([]byte, error) { +func (k *PrivateKey) SignHash(hash []byte) ([]byte, error) { sig := ecdsa.SignCompact(k.sk, hash, false) // returns [v || r || s] return rawSigToSig(sig) } // ToECDSA returns the ecdsa representation of this private key -func (k *PrivateKeySECP256K1R) ToECDSA() *stdecdsa.PrivateKey { +func (k *PrivateKey) ToECDSA() *stdecdsa.PrivateKey { return k.sk.ToECDSA() } -func (k *PrivateKeySECP256K1R) Bytes() []byte { +func (k *PrivateKey) Bytes() []byte { if k.bytes == nil { k.bytes = k.sk.Serialize() } return k.bytes } -func (k *PrivateKeySECP256K1R) String() string { +func (k *PrivateKey) String() string { // We assume that the maximum size of a byte slice that // can be stringified is at least the length of a SECP256K1 private key keyStr, _ := cb58.Encode(k.Bytes()) return PrivateKeyPrefix + keyStr } -func (k *PrivateKeySECP256K1R) MarshalJSON() ([]byte, error) { +func (k *PrivateKey) MarshalJSON() ([]byte, error) { return []byte("\"" + k.String() + "\""), nil } -func (k *PrivateKeySECP256K1R) MarshalText() ([]byte, error) { +func (k *PrivateKey) MarshalText() ([]byte, error) { return []byte(k.String()), nil } -func (k *PrivateKeySECP256K1R) UnmarshalJSON(b []byte) error { +func (k *PrivateKey) UnmarshalJSON(b []byte) error { str := string(b) if str == nullStr { // If 
"null", do nothing return nil @@ -234,46 +238,46 @@ func (k *PrivateKeySECP256K1R) UnmarshalJSON(b []byte) error { if err != nil { return err } - if len(keyBytes) != SECP256K1RSKLen { + if len(keyBytes) != PrivateKeyLen { return errInvalidPrivateKeyLength } - *k = PrivateKeySECP256K1R{ + *k = PrivateKey{ sk: secp256k1.PrivKeyFromBytes(keyBytes), bytes: keyBytes, } return nil } -func (k *PrivateKeySECP256K1R) UnmarshalText(text []byte) error { +func (k *PrivateKey) UnmarshalText(text []byte) error { return k.UnmarshalJSON(text) } // raw sig has format [v || r || s] whereas the sig has format [r || s || v] func rawSigToSig(sig []byte) ([]byte, error) { - if len(sig) != SECP256K1RSigLen { + if len(sig) != SignatureLen { return nil, errInvalidSigLen } recCode := sig[0] copy(sig, sig[1:]) - sig[SECP256K1RSigLen-1] = recCode - compactSigMagicOffset + sig[SignatureLen-1] = recCode - compactSigMagicOffset return sig, nil } // sig has format [r || s || v] whereas the raw sig has format [v || r || s] func sigToRawSig(sig []byte) ([]byte, error) { - if len(sig) != SECP256K1RSigLen { + if len(sig) != SignatureLen { return nil, errInvalidSigLen } - newSig := make([]byte, SECP256K1RSigLen) - newSig[0] = sig[SECP256K1RSigLen-1] + compactSigMagicOffset + newSig := make([]byte, SignatureLen) + newSig[0] = sig[SignatureLen-1] + compactSigMagicOffset copy(newSig[1:], sig) return newSig, nil } // verifies the signature format in format [r || s || v] func verifySECP256K1RSignatureFormat(sig []byte) error { - if len(sig) != SECP256K1RSigLen { + if len(sig) != SignatureLen { return errInvalidSigLen } @@ -284,17 +288,3 @@ func verifySECP256K1RSignatureFormat(sig []byte) error { } return nil } - -type innerSortSECP2561RSigs [][SECP256K1RSigLen]byte - -func (lst innerSortSECP2561RSigs) Less(i, j int) bool { return bytes.Compare(lst[i][:], lst[j][:]) < 0 } -func (lst innerSortSECP2561RSigs) Len() int { return len(lst) } -func (lst innerSortSECP2561RSigs) Swap(i, j int) { lst[j], lst[i] = 
lst[i], lst[j] } - -// SortSECP2561RSigs sorts a slice of SECP2561R signatures -func SortSECP2561RSigs(lst [][SECP256K1RSigLen]byte) { sort.Sort(innerSortSECP2561RSigs(lst)) } - -// IsSortedAndUniqueSECP2561RSigs returns true if [sigs] is sorted -func IsSortedAndUniqueSECP2561RSigs(sigs [][SECP256K1RSigLen]byte) bool { - return utils.IsSortedAndUnique(innerSortSECP2561RSigs(sigs)) -} diff --git a/avalanchego/utils/crypto/secp256k1/secp256k1_benchmark_test.go b/avalanchego/utils/crypto/secp256k1/secp256k1_benchmark_test.go new file mode 100644 index 00000000..b7f105b0 --- /dev/null +++ b/avalanchego/utils/crypto/secp256k1/secp256k1_benchmark_test.go @@ -0,0 +1,35 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1 + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +func BenchmarkVerify(b *testing.B) { + require := require.New(b) + + f := &Factory{} + + privateKey, err := f.NewPrivateKey() + require.NoError(err) + + message := utils.RandomBytes(512) + hash := hashing.ComputeHash256(message) + + publicKey := privateKey.PublicKey() + signature, err := privateKey.SignHash(hash) + require.NoError(err) + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + require.True(publicKey.VerifyHash(hash, signature)) + } +} diff --git a/avalanchego/utils/crypto/secp256k1r_test.go b/avalanchego/utils/crypto/secp256k1/secp256k1_test.go similarity index 75% rename from avalanchego/utils/crypto/secp256k1r_test.go rename to avalanchego/utils/crypto/secp256k1/secp256k1_test.go index 0c4fc435..e8abab31 100644 --- a/avalanchego/utils/crypto/secp256k1r_test.go +++ b/avalanchego/utils/crypto/secp256k1/secp256k1_test.go @@ -1,24 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. -package crypto +package secp256k1 import ( - "bytes" "testing" "github.com/stretchr/testify/require" - secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v3" + secp256k1 "github.com/decred/dcrd/dcrec/secp256k1/v4" "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/hashing" ) func TestRecover(t *testing.T) { require := require.New(t) - f := FactorySECP256K1R{} + f := Factory{} key, err := f.NewPrivateKey() require.NoError(err) @@ -30,15 +30,13 @@ func TestRecover(t *testing.T) { pubRec, err := f.RecoverPublicKey(msg, sig) require.NoError(err) - if !bytes.Equal(pub.Bytes(), pubRec.Bytes()) { - t.Fatalf("Should have been equal") - } + require.Equal(pub, pubRec) } func TestCachedRecover(t *testing.T) { require := require.New(t) - f := FactorySECP256K1R{Cache: cache.LRU{Size: 1}} + f := Factory{Cache: cache.LRU[ids.ID, *PublicKey]{Size: 1}} key, err := f.NewPrivateKey() require.NoError(err) @@ -51,53 +49,49 @@ func TestCachedRecover(t *testing.T) { pub2, err := f.RecoverPublicKey(msg, sig) require.NoError(err) - if pub1 != pub2 { - t.Fatalf("Should have returned the same public key") - } + require.Equal(pub1, pub2) } func TestExtensive(t *testing.T) { - f := FactorySECP256K1R{} + require := require.New(t) + f := Factory{} hash := hashing.ComputeHash256([]byte{1, 2, 3}) for i := 0; i < 1000; i++ { - if key, err := f.NewPrivateKey(); err != nil { - t.Fatalf("Generated bad private key") - } else if _, err := key.SignHash(hash); err != nil { - t.Fatalf("Failed signing with:\n0x%x", key.Bytes()) - } + key, err := f.NewPrivateKey() + require.NoError(err) + + _, err = key.SignHash(hash) + require.NoError(err) } } func TestGenRecreate(t *testing.T) { - f := FactorySECP256K1R{} + require := require.New(t) + f := Factory{} for i := 0; i < 1000; i++ { sk, err := f.NewPrivateKey() - if err != nil { - t.Fatal(err) - } + require.NoError(err) + skBytes := sk.Bytes() 
recoveredSk, err := f.ToPrivateKey(skBytes) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(sk.PublicKey().Address().Bytes(), recoveredSk.PublicKey().Address().Bytes()) { - t.Fatalf("Wrong public key") - } + require.NoError(err) + + require.Equal(sk.PublicKey(), recoveredSk.PublicKey()) } } func TestVerifyMutatedSignature(t *testing.T) { - factory := FactorySECP256K1R{} + require := require.New(t) - sk, err := factory.NewPrivateKey() - require.NoError(t, err) + f := Factory{} + sk, err := f.NewPrivateKey() + require.NoError(err) msg := []byte{'h', 'e', 'l', 'l', 'o'} - sig, err := sk.Sign(msg) - require.NoError(t, err) + require.NoError(err) var s secp256k1.ModNScalar s.SetByteSlice(sig[32:64]) @@ -105,25 +99,24 @@ func TestVerifyMutatedSignature(t *testing.T) { newSBytes := s.Bytes() copy(sig[32:], newSBytes[:]) - _, err = factory.RecoverPublicKey(msg, sig) - require.Error(t, err) + _, err = f.RecoverPublicKey(msg, sig) + require.Error(err) } func TestPrivateKeySECP256K1RUnmarshalJSON(t *testing.T) { require := require.New(t) + f := Factory{} - f := FactorySECP256K1R{} - keyIntf, err := f.NewPrivateKey() + key, err := f.NewPrivateKey() require.NoError(err) - key := keyIntf.(*PrivateKeySECP256K1R) keyJSON, err := key.MarshalJSON() require.NoError(err) - key2 := PrivateKeySECP256K1R{} + key2 := PrivateKey{} err = key2.UnmarshalJSON(keyJSON) require.NoError(err) - require.Equal(key.PublicKey().Address(), key2.PublicKey().Address()) + require.Equal(key.PublicKey(), key2.PublicKey()) } func TestPrivateKeySECP256K1RUnmarshalJSONError(t *testing.T) { @@ -160,7 +153,7 @@ func TestPrivateKeySECP256K1RUnmarshalJSONError(t *testing.T) { t.Run(tt.label, func(t *testing.T) { require := require.New(t) - foo := PrivateKeySECP256K1R{} + foo := PrivateKey{} err := foo.UnmarshalJSON(tt.in) require.Error(err) }) @@ -216,7 +209,7 @@ func TestSigning(t *testing.T) { }, } - key := BuildTestKeys()[0] + key := TestKeys()[0] for _, tt := range tests { t.Run(string(tt.msg), func(t 
*testing.T) { diff --git a/avalanchego/utils/crypto/test_keys.go b/avalanchego/utils/crypto/secp256k1/test_keys.go similarity index 66% rename from avalanchego/utils/crypto/test_keys.go rename to avalanchego/utils/crypto/secp256k1/test_keys.go index 359779a4..ccd85522 100644 --- a/avalanchego/utils/crypto/test_keys.go +++ b/avalanchego/utils/crypto/secp256k1/test_keys.go @@ -1,13 +1,13 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package crypto +package secp256k1 import ( "github.com/ava-labs/avalanchego/utils/cb58" ) -func BuildTestKeys() []*PrivateKeySECP256K1R { +func TestKeys() []*PrivateKey { var ( keyStrings = []string{ "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", @@ -16,8 +16,8 @@ func BuildTestKeys() []*PrivateKeySECP256K1R { "ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN", "2RWLv6YVEXDiWLpaCbXhhqxtLbnFaKQsWPSSMSPhpWo47uJAeV", } - keys = make([]*PrivateKeySECP256K1R, len(keyStrings)) - factory = FactorySECP256K1R{} + keys = make([]*PrivateKey, len(keyStrings)) + factory = Factory{} ) for i, key := range keyStrings { @@ -26,12 +26,10 @@ func BuildTestKeys() []*PrivateKeySECP256K1R { panic(err) } - pk, err := factory.ToPrivateKey(privKeyBytes) + keys[i], err = factory.ToPrivateKey(privKeyBytes) if err != nil { panic(err) } - - keys[i] = pk.(*PrivateKeySECP256K1R) } return keys } diff --git a/avalanchego/utils/dynamicip/ifconfig_resolver.go b/avalanchego/utils/dynamicip/ifconfig_resolver.go index 6e2e929d..24423814 100644 --- a/avalanchego/utils/dynamicip/ifconfig_resolver.go +++ b/avalanchego/utils/dynamicip/ifconfig_resolver.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package dynamicip import ( + "context" "fmt" "io" "net" @@ -11,15 +12,20 @@ import ( "strings" ) -var _ Resolver = &ifConfigResolver{} +var _ Resolver = (*ifConfigResolver)(nil) // ifConfigResolver resolves our public IP using ifconfig's format. type ifConfigResolver struct { url string } -func (r *ifConfigResolver) Resolve() (net.IP, error) { - resp, err := http.Get(r.url) +func (r *ifConfigResolver) Resolve(ctx context.Context) (net.IP, error) { + req, err := http.NewRequestWithContext(ctx, "GET", r.url, nil) + if err != nil { + return nil, err + } + + resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } diff --git a/avalanchego/utils/dynamicip/no_updater.go b/avalanchego/utils/dynamicip/no_updater.go index 40f1266d..5c9e38bd 100644 --- a/avalanchego/utils/dynamicip/no_updater.go +++ b/avalanchego/utils/dynamicip/no_updater.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/avalanchego/utils/dynamicip/opendns_resolver.go b/avalanchego/utils/dynamicip/opendns_resolver.go index 3ba0d9b8..3bda76c4 100644 --- a/avalanchego/utils/dynamicip/opendns_resolver.go +++ b/avalanchego/utils/dynamicip/opendns_resolver.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package dynamicip @@ -7,18 +7,14 @@ import ( "context" "errors" "net" - "time" ) -const ( - ipResolutionTimeout = 10 * time.Second - openDNSUrl = "resolver1.opendns.com:53" -) +const openDNSUrl = "resolver1.opendns.com:53" var ( errOpenDNSNoIP = errors.New("openDNS returned no ip") - _ Resolver = &openDNSResolver{} + _ Resolver = (*openDNSResolver)(nil) ) // IFConfigResolves resolves our public IP using openDNS @@ -31,17 +27,15 @@ func newOpenDNSResolver() Resolver { resolver: &net.Resolver{ PreferGo: true, Dial: func(ctx context.Context, _, _ string) (net.Conn, error) { - d := net.Dialer{ - Timeout: ipResolutionTimeout, - } + d := net.Dialer{} return d.DialContext(ctx, "udp", openDNSUrl) }, }, } } -func (r *openDNSResolver) Resolve() (net.IP, error) { - ips, err := r.resolver.LookupIP(context.TODO(), "ip", "myip.opendns.com") +func (r *openDNSResolver) Resolve(ctx context.Context) (net.IP, error) { + ips, err := r.resolver.LookupIP(ctx, "ip", "myip.opendns.com") if err != nil { return nil, err } diff --git a/avalanchego/utils/dynamicip/resolver.go b/avalanchego/utils/dynamicip/resolver.go index fac20945..df797e20 100644 --- a/avalanchego/utils/dynamicip/resolver.go +++ b/avalanchego/utils/dynamicip/resolver.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip import ( + "context" "fmt" "net" "strings" @@ -25,7 +26,7 @@ const ( // Resolver resolves our public IP type Resolver interface { // Resolve and return our public IP. 
- Resolve() (net.IP, error) + Resolve(context.Context) (net.IP, error) } // Returns a new Resolver that uses the given service diff --git a/avalanchego/utils/dynamicip/resolver_test.go b/avalanchego/utils/dynamicip/resolver_test.go index d62f13be..7606bdd8 100644 --- a/avalanchego/utils/dynamicip/resolver_test.go +++ b/avalanchego/utils/dynamicip/resolver_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip diff --git a/avalanchego/utils/dynamicip/updater.go b/avalanchego/utils/dynamicip/updater.go index 67aa7479..87c99e6d 100644 --- a/avalanchego/utils/dynamicip/updater.go +++ b/avalanchego/utils/dynamicip/updater.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package dynamicip import ( + "context" "time" "go.uber.org/zap" @@ -12,7 +13,9 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" ) -var _ Updater = &updater{} +const ipResolutionTimeout = 10 * time.Second + +var _ Updater = (*updater)(nil) // Updater periodically updates this node's public IP. // Dispatch() and Stop() should only be called once. @@ -30,8 +33,12 @@ type updater struct { dynamicIP ips.DynamicIPPort // Used to find out what our public IP is. resolver Resolver - // Closing causes Dispatch() to return. - stopChan chan struct{} + // The parent of all contexts passed into resolver.Resolve(). + // Cancelling causes Dispatch() to eventually return. + rootCtx context.Context + // Cancelling causes Dispatch() to eventually return. + // All in-flight calls to resolver.Resolve() will be cancelled. + rootCtxCancel context.CancelFunc // Closed when Dispatch() has returned. doneChan chan struct{} // How often we update the public IP. 
@@ -46,12 +53,14 @@ func NewUpdater( resolver Resolver, updateFreq time.Duration, ) Updater { + ctx, cancel := context.WithCancel(context.Background()) return &updater{ - dynamicIP: dynamicIP, - resolver: resolver, - stopChan: make(chan struct{}), - doneChan: make(chan struct{}), - updateFreq: updateFreq, + dynamicIP: dynamicIP, + resolver: resolver, + rootCtx: ctx, + rootCtxCancel: cancel, + doneChan: make(chan struct{}), + updateFreq: updateFreq, } } @@ -69,7 +78,9 @@ func (u *updater) Dispatch(log logging.Logger) { case <-ticker.C: oldIP := u.dynamicIP.IPPort().IP - newIP, err := u.resolver.Resolve() + ctx, cancel := context.WithTimeout(u.rootCtx, ipResolutionTimeout) + newIP, err := u.resolver.Resolve(ctx) + cancel() if err != nil { log.Warn("couldn't resolve public IP. If this machine's IP recently changed, it may be sharing the wrong public IP with peers", zap.Error(err), @@ -83,14 +94,16 @@ func (u *updater) Dispatch(log logging.Logger) { zap.Stringer("newIP", newIP), ) } - case <-u.stopChan: + case <-u.rootCtx.Done(): return } } } func (u *updater) Stop() { - close(u.stopChan) + // Cause Dispatch() to return and cancel all + // in-flight calls to resolver.Resolve(). + u.rootCtxCancel() // Wait until Dispatch() has returned. <-u.doneChan } diff --git a/avalanchego/utils/dynamicip/updater_test.go b/avalanchego/utils/dynamicip/updater_test.go index 2f39947b..c31031f9 100644 --- a/avalanchego/utils/dynamicip/updater_test.go +++ b/avalanchego/utils/dynamicip/updater_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package dynamicip @@ -15,14 +15,14 @@ import ( "github.com/ava-labs/avalanchego/utils/logging" ) -var _ Resolver = &mockResolver{} +var _ Resolver = (*mockResolver)(nil) type mockResolver struct { - onResolve func() (net.IP, error) + onResolve func(context.Context) (net.IP, error) } -func (r *mockResolver) Resolve() (net.IP, error) { - return r.onResolve() +func (r *mockResolver) Resolve(ctx context.Context) (net.IP, error) { + return r.onResolve(ctx) } func TestNewUpdater(t *testing.T) { @@ -32,7 +32,7 @@ func TestNewUpdater(t *testing.T) { dynamicIP := ips.NewDynamicIPPort(originalIP, uint16(originalPort)) newIP := net.IPv4(1, 2, 3, 4) resolver := &mockResolver{ - onResolve: func() (net.IP, error) { + onResolve: func(context.Context) (net.IP, error) { return newIP, nil }, } @@ -44,14 +44,14 @@ func TestNewUpdater(t *testing.T) { ) // Assert NewUpdater returns expected type - require.IsType(&updater{}, updaterIntf) - - updater := updaterIntf.(*updater) + updater, ok := updaterIntf.(*updater) + require.True(ok) // Assert fields set require.Equal(dynamicIP, updater.dynamicIP) require.Equal(resolver, updater.resolver) - require.NotNil(updater.stopChan) + require.NotNil(updater.rootCtx) + require.NotNil(updater.rootCtxCancel) require.NotNil(updater.doneChan) require.Equal(updateFreq, updater.updateFreq) @@ -64,7 +64,9 @@ func TestNewUpdater(t *testing.T) { Port: uint16(originalPort), } require.Eventually( - func() bool { return expectedIP.Equal(dynamicIP.IPPort()) }, + func() bool { + return expectedIP.Equal(dynamicIP.IPPort()) + }, 5*time.Second, updateFreq, ) @@ -76,10 +78,9 @@ func TestNewUpdater(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), stopTimeout) defer cancel() select { - case _, open := <-updater.stopChan: - require.False(open) + case <-updater.rootCtx.Done(): case <-ctx.Done(): - require.FailNow("timeout waiting for stopChan to close") + require.FailNow("timeout waiting for root context cancellation") } select { case _, open := 
<-updater.doneChan: diff --git a/avalanchego/utils/filesystem/io.go b/avalanchego/utils/filesystem/io.go index 659c30d0..939e635a 100644 --- a/avalanchego/utils/filesystem/io.go +++ b/avalanchego/utils/filesystem/io.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package filesystem diff --git a/avalanchego/utils/filesystem/mock_file.go b/avalanchego/utils/filesystem/mock_file.go index a9295ec0..6ebc1e68 100644 --- a/avalanchego/utils/filesystem/mock_file.go +++ b/avalanchego/utils/filesystem/mock_file.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package filesystem diff --git a/avalanchego/utils/filesystem/mock_io.go b/avalanchego/utils/filesystem/mock_io.go index 0f3595b0..aade6418 100644 --- a/avalanchego/utils/filesystem/mock_io.go +++ b/avalanchego/utils/filesystem/mock_io.go @@ -1,6 +1,10 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: utils/filesystem/io.go +// Source: github.com/ava-labs/avalanchego/utils/filesystem (interfaces: Reader) +// Package filesystem is a generated GoMock package. package filesystem import ( diff --git a/avalanchego/utils/filesystem/rename.go b/avalanchego/utils/filesystem/rename.go index 401237ee..3ab7c147 100644 --- a/avalanchego/utils/filesystem/rename.go +++ b/avalanchego/utils/filesystem/rename.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package filesystem diff --git a/avalanchego/utils/filesystem/rename_test.go b/avalanchego/utils/filesystem/rename_test.go index f0dcb358..12de4a67 100644 --- a/avalanchego/utils/filesystem/rename_test.go +++ b/avalanchego/utils/filesystem/rename_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package filesystem @@ -14,12 +14,12 @@ func TestRenameIfExists(t *testing.T) { t.Parallel() f, err := os.CreateTemp(os.TempDir(), "test-rename") - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + a := f.Name() b := a + ".2" - f.Close() + + require.NoError(t, f.Close()) // rename "a" to "b" renamed, err := RenameIfExists(a, b) diff --git a/avalanchego/utils/formatting/address/address.go b/avalanchego/utils/formatting/address/address.go index eaebd510..c0c6cc24 100644 --- a/avalanchego/utils/formatting/address/address.go +++ b/avalanchego/utils/formatting/address/address.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package address diff --git a/avalanchego/utils/formatting/address/converter.go b/avalanchego/utils/formatting/address/converter.go index 5be67e31..63f96dd4 100644 --- a/avalanchego/utils/formatting/address/converter.go +++ b/avalanchego/utils/formatting/address/converter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package address diff --git a/avalanchego/utils/formatting/address/tools.go b/avalanchego/utils/formatting/address/tools.go deleted file mode 100644 index 48c6b0ca..00000000 --- a/avalanchego/utils/formatting/address/tools.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - -//go:build tools -// +build tools - -package address - -// TODO: remove this import and delete this file when a new version for -// btcsuite/btcutil is available. -import _ "github.com/btcsuite/btcd" diff --git a/avalanchego/utils/formatting/encoding.go b/avalanchego/utils/formatting/encoding.go index 65bf643e..20ab4df3 100644 --- a/avalanchego/utils/formatting/encoding.go +++ b/avalanchego/utils/formatting/encoding.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/avalanchego/utils/formatting/encoding_benchmark_test.go b/avalanchego/utils/formatting/encoding_benchmark_test.go index 09cd2f95..83d9d9c9 100644 --- a/avalanchego/utils/formatting/encoding_benchmark_test.go +++ b/avalanchego/utils/formatting/encoding_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/avalanchego/utils/formatting/encoding_test.go b/avalanchego/utils/formatting/encoding_test.go index c6382ab8..72793477 100644 --- a/avalanchego/utils/formatting/encoding_test.go +++ b/avalanchego/utils/formatting/encoding_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package formatting @@ -121,3 +121,17 @@ func TestDecodeNil(t *testing.T) { t.Fatal("decoding the empty string should return an empty byte slice") } } + +func FuzzEncodeDecode(f *testing.F) { + f.Fuzz(func(t *testing.T, bytes []byte) { + require := require.New(t) + + str, err := Encode(Hex, bytes) + require.NoError(err) + + decoded, err := Decode(Hex, str) + require.NoError(err) + + require.Equal(bytes, decoded) + }) +} diff --git a/avalanchego/utils/formatting/int_format.go b/avalanchego/utils/formatting/int_format.go index c927f149..6cd8c870 100644 --- a/avalanchego/utils/formatting/int_format.go +++ b/avalanchego/utils/formatting/int_format.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/avalanchego/utils/formatting/int_format_test.go b/avalanchego/utils/formatting/int_format_test.go index 596ec9ac..8860df16 100644 --- a/avalanchego/utils/formatting/int_format_test.go +++ b/avalanchego/utils/formatting/int_format_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package formatting diff --git a/avalanchego/utils/formatting/prefixed_stringer.go b/avalanchego/utils/formatting/prefixed_stringer.go index b1b0287c..458c7bce 100644 --- a/avalanchego/utils/formatting/prefixed_stringer.go +++ b/avalanchego/utils/formatting/prefixed_stringer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package formatting diff --git a/avalanchego/utils/hashing/consistent/hashable.go b/avalanchego/utils/hashing/consistent/hashable.go index bb119b35..df4a08d0 100644 --- a/avalanchego/utils/hashing/consistent/hashable.go +++ b/avalanchego/utils/hashing/consistent/hashable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package consistent diff --git a/avalanchego/utils/hashing/consistent/ring.go b/avalanchego/utils/hashing/consistent/ring.go index 8f5e32e5..df25bd3d 100644 --- a/avalanchego/utils/hashing/consistent/ring.go +++ b/avalanchego/utils/hashing/consistent/ring.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package consistent @@ -12,8 +12,8 @@ import ( ) var ( - _ Ring = &hashRing{} - _ btree.Item = &ringItem{} + _ Ring = (*hashRing)(nil) + _ btree.LessFunc[ringItem] = ringItem.Less errEmptyRing = errors.New("ring doesn't have any members") ) @@ -45,66 +45,66 @@ var ( // // As an example, assume we have a ring that supports hashes from 1-12. // -// 12 -// 11 1 +// 12 +// 11 1 // -// 10 2 +// 10 2 // -// 9 3 +// 9 3 // -// 8 4 +// 8 4 // -// 7 5 -// 6 +// 7 5 +// 6 // // Add node 1 (n1). Let h(n1) = 12. // First, we compute the hash the node, and insert it into its corresponding // location on the ring. // -// 12 (n1) -// 11 1 +// 12 (n1) +// 11 1 // -// 10 2 +// 10 2 // -// 9 3 +// 9 3 // -// 8 4 +// 8 4 // -// 7 5 -// 6 +// 7 5 +// 6 // // Now, to see which node a key (k1) should map to, we hash the key and search // for its closest clockwise neighbor. // Let h(k1) = 3. Here, we see that since n1 is the closest neighbor, as there // are no other nodes in the ring. 
// -// 12 (n1) -// 11 1 +// 12 (n1) +// 11 1 // -// 10 2 +// 10 2 // -// 9 3 (k1) +// 9 3 (k1) // -// 8 4 +// 8 4 // -// 7 5 -// 6 +// 7 5 +// 6 // // Now, let's insert another node (n2), such that h(n2) = 6. // Here we observe that k1 has shuffled to n2, as n2 is the closest clockwise // neighbor to k1. // -// 12 (n1) -// 11 1 +// 12 (n1) +// 11 1 // -// 10 2 +// 10 2 // -// 9 3 (k1) +// 9 3 (k1) // -// 8 4 +// 8 4 // -// 7 5 -// 6 (n2) +// 7 5 +// 6 (n2) // // Other optimizations can be made to help reduce blast radius of failures and // the variance in keys (hot shards). One such optimization is introducing @@ -154,7 +154,7 @@ type hashRing struct { virtualNodes int lock sync.RWMutex - ring *btree.BTree + ring *btree.BTreeG[ringItem] } // RingConfig configures settings for a Ring. @@ -172,7 +172,7 @@ func NewHashRing(config RingConfig) Ring { return &hashRing{ hasher: config.Hasher, virtualNodes: config.VirtualNodes, - ring: btree.New(config.Degree), + ring: btree.NewG(config.Degree, ringItem.Less), } } @@ -200,8 +200,7 @@ func (h *hashRing) get(key Hashable) (Hashable, error) { hash: hash, value: key, }, - func(itemIntf btree.Item) bool { - item := itemIntf.(ringItem) + func(item ringItem) bool { if hash < item.hash { result = item.value return false @@ -213,7 +212,8 @@ func (h *hashRing) get(key Hashable) (Hashable, error) { // If found nothing ascending the tree, we need to wrap around the ring to // the left-most (min) node. 
if result == nil { - result = h.ring.Min().(ringItem).value + min, _ := h.ring.Min() + result = min.value } return result, nil } @@ -260,9 +260,7 @@ func (h *hashRing) remove(key Hashable) bool { item := ringItem{ hash: virtualNodeHash, } - if h.ring.Delete(item) != nil { - removed = true - } + _, removed = h.ring.Delete(item) } return removed } @@ -278,4 +276,6 @@ type ringItem struct { value Hashable } -func (r ringItem) Less(than btree.Item) bool { return r.hash < than.(ringItem).hash } +func (r ringItem) Less(than ringItem) bool { + return r.hash < than.hash +} diff --git a/avalanchego/utils/hashing/consistent/ring_test.go b/avalanchego/utils/hashing/consistent/ring_test.go index af84680a..d5b59c58 100644 --- a/avalanchego/utils/hashing/consistent/ring_test.go +++ b/avalanchego/utils/hashing/consistent/ring_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package consistent @@ -14,7 +14,7 @@ import ( ) var ( - _ Hashable = &testKey{} + _ Hashable = (*testKey)(nil) // nodes node1 = testKey{key: "node-1", hash: 1} @@ -30,7 +30,9 @@ type testKey struct { hash uint64 } -func (t testKey) ConsistentHashKey() []byte { return []byte(t.key) } +func (t testKey) ConsistentHashKey() []byte { + return []byte(t.key) +} // Tests that a key routes to its closest clockwise node. // Test cases are described in greater detail below; see diagrams for Ring. diff --git a/avalanchego/utils/hashing/hasher.go b/avalanchego/utils/hashing/hasher.go index 348daa10..7519dfbb 100644 --- a/avalanchego/utils/hashing/hasher.go +++ b/avalanchego/utils/hashing/hasher.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package hashing diff --git a/avalanchego/utils/hashing/hashing.go b/avalanchego/utils/hashing/hashing.go index fdb9a0b3..a74ef8fe 100644 --- a/avalanchego/utils/hashing/hashing.go +++ b/avalanchego/utils/hashing/hashing.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package hashing @@ -22,24 +22,24 @@ type Hash256 = [HashLen]byte // Hash160 A 160 bit long hash value. type Hash160 = [ripemd160.Size]byte -// ComputeHash256Array Compute a cryptographically strong 256 bit hash of the -// input byte slice. +// ComputeHash256Array computes a cryptographically strong 256 bit hash of the +// input byte slice. func ComputeHash256Array(buf []byte) Hash256 { return sha256.Sum256(buf) } -// ComputeHash256 Compute a cryptographically strong 256 bit hash of the input -// byte slice. +// ComputeHash256 computes a cryptographically strong 256 bit hash of the input +// byte slice. func ComputeHash256(buf []byte) []byte { arr := ComputeHash256Array(buf) return arr[:] } -// ComputeHash256Ranges Compute a cryptographically strong 256 bit hash of the input -// byte slice in the ranges specified. -// Example: ComputeHash256Ranges({1, 2, 4, 8, 16}, {{1, 2}, -// {3, 5}}) -// is equivalent to ComputeHash256({2, 8, 16}). +// ComputeHash256Ranges computes a cryptographically strong 256 bit hash of the input +// byte slice in the ranges specified. +// Example: +// ComputeHash256Ranges({1, 2, 4, 8, 16}, {{1, 2}, {3, 5}}) is equivalent to +// ComputeHash256({2, 8, 16}). func ComputeHash256Ranges(buf []byte, ranges [][2]int) []byte { hashBuilder := sha256.New() for _, r := range ranges { @@ -51,8 +51,8 @@ func ComputeHash256Ranges(buf []byte, ranges [][2]int) []byte { return hashBuilder.Sum(nil) } -// ComputeHash160Array Compute a cryptographically strong 160 bit hash of the -// input byte slice. 
+// ComputeHash160Array computes a cryptographically strong 160 bit hash of the +// input byte slice. func ComputeHash160Array(buf []byte) Hash160 { h, err := ToHash160(ComputeHash160(buf)) if err != nil { @@ -61,8 +61,8 @@ func ComputeHash160Array(buf []byte) Hash160 { return h } -// ComputeHash160 Compute a cryptographically strong 160 bit hash of the input -// byte slice. +// ComputeHash160 computes a cryptographically strong 160 bit hash of the input +// byte slice. func ComputeHash160(buf []byte) []byte { ripe := ripemd160.New() _, err := io.Writer(ripe).Write(buf) @@ -72,9 +72,11 @@ func ComputeHash160(buf []byte) []byte { return ripe.Sum(nil) } -// Checksum Create checksum of [length] bytes from the 256 bit hash of the byte slice. -// Returns the lower [length] bytes of the hash -// Errors if length > 32. +// Checksum creates a checksum of [length] bytes from the 256 bit hash of the +// byte slice. +// +// Returns: the lower [length] bytes of the hash +// Panics if length > 32. func Checksum(bytes []byte, length int) []byte { hash := ComputeHash256Array(bytes) return hash[len(hash)-length:] diff --git a/avalanchego/utils/hashing/mock_hasher.go b/avalanchego/utils/hashing/mock_hasher.go index 979e5fe3..b903ba8f 100644 --- a/avalanchego/utils/hashing/mock_hasher.go +++ b/avalanchego/utils/hashing/mock_hasher.go @@ -1,7 +1,10 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: sharding/hasher.go +// Source: github.com/ava-labs/avalanchego/utils/hashing (interfaces: Hasher) -// Package sharding is a generated GoMock package. +// Package hashing is a generated GoMock package. 
package hashing import ( diff --git a/avalanchego/utils/ips/claimed_ip_port.go b/avalanchego/utils/ips/claimed_ip_port.go index 2d6c055c..94a5f693 100644 --- a/avalanchego/utils/ips/claimed_ip_port.go +++ b/avalanchego/utils/ips/claimed_ip_port.go @@ -1,10 +1,12 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips import ( "crypto/x509" + + "github.com/ava-labs/avalanchego/ids" ) // Can't import these from wrappers package due to circular import. @@ -12,8 +14,9 @@ const ( intLen = 4 longLen = 8 ipLen = 18 - // Certificate length, signature length, IP, timestamp - baseIPCertDescLen = 2*intLen + ipLen + longLen + idLen = 32 + // Certificate length, signature length, IP, timestamp, tx ID + baseIPCertDescLen = 2*intLen + ipLen + longLen + idLen ) // A self contained proof that a peer is claiming ownership of an IPPort at a @@ -30,6 +33,8 @@ type ClaimedIPPort struct { // actually claimed by the peer in question, and not by a malicious peer // trying to get us to dial bogus IPPorts. Signature []byte + // The txID that added this peer into the validator set + TxID ids.ID } // Returns the length of the byte representation of this ClaimedIPPort. diff --git a/avalanchego/utils/ips/dynamic_ip_port.go b/avalanchego/utils/ips/dynamic_ip_port.go index 6b3a4760..3f30dc0a 100644 --- a/avalanchego/utils/ips/dynamic_ip_port.go +++ b/avalanchego/utils/ips/dynamic_ip_port.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips @@ -9,7 +9,7 @@ import ( "sync" ) -var _ DynamicIPPort = &dynamicIPPort{} +var _ DynamicIPPort = (*dynamicIPPort)(nil) // An IPPort that can change. // Safe for use by multiple goroutines. 
diff --git a/avalanchego/utils/ips/ip_port.go b/avalanchego/utils/ips/ip_port.go index 43b5f80a..ba0e74af 100644 --- a/avalanchego/utils/ips/ip_port.go +++ b/avalanchego/utils/ips/ip_port.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips @@ -8,6 +8,8 @@ import ( "fmt" "net" "strconv" + + "github.com/ava-labs/avalanchego/utils/wrappers" ) var errBadIP = errors.New("bad ip format") @@ -54,3 +56,9 @@ func ToIPPort(str string) (IPPort, error) { Port: uint16(port), }, nil } + +// PackIP packs an ip port pair to the byte array +func PackIP(p *wrappers.Packer, ip IPPort) { + p.PackFixedBytes(ip.IP.To16()) + p.PackShort(ip.Port) +} diff --git a/avalanchego/utils/ips/ip_test.go b/avalanchego/utils/ips/ip_test.go index 58fb6478..14853ea7 100644 --- a/avalanchego/utils/ips/ip_test.go +++ b/avalanchego/utils/ips/ip_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ips diff --git a/avalanchego/utils/ips/lookup.go b/avalanchego/utils/ips/lookup.go new file mode 100644 index 00000000..8ae3de47 --- /dev/null +++ b/avalanchego/utils/ips/lookup.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ips + +import ( + "errors" + "net" +) + +var errNoIPsFound = errors.New("no IPs found") + +// Lookup attempts to resolve a hostname to a single IP. If multiple IPs are +// found, then lookup will attempt to return an IPv4 address, otherwise it will +// pick any of the IPs. +// +// Note: IPv4 is preferred because `net.Listen` prefers IPv4. 
+func Lookup(hostname string) (net.IP, error) { + ips, err := net.LookupIP(hostname) + if err != nil { + return nil, err + } + if len(ips) == 0 { + return nil, errNoIPsFound + } + + for _, ip := range ips { + ipv4 := ip.To4() + if ipv4 != nil { + return ipv4, nil + } + } + return ips[0], nil +} diff --git a/avalanchego/utils/ips/lookup_test.go b/avalanchego/utils/ips/lookup_test.go new file mode 100644 index 00000000..52c0e5ed --- /dev/null +++ b/avalanchego/utils/ips/lookup_test.go @@ -0,0 +1,44 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ips + +import ( + "net" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLookup(t *testing.T) { + tests := []struct { + host string + ip net.IP + }{ + { + host: "127.0.0.1", + ip: net.ParseIP("127.0.0.1").To4(), + }, + { + host: "localhost", + ip: net.ParseIP("127.0.0.1").To4(), + }, + { + host: "::", + ip: net.IPv6zero, + }, + { + host: "0.0.0.0", + ip: net.ParseIP("0.0.0.0").To4(), + }, + } + for _, tt := range tests { + t.Run(tt.host, func(t *testing.T) { + require := require.New(t) + + ip, err := Lookup(tt.host) + require.NoError(err) + require.Equal(tt.ip, ip) + }) + } +} diff --git a/avalanchego/utils/json/codec.go b/avalanchego/utils/json/codec.go index 420efade..5871d67f 100644 --- a/avalanchego/utils/json/codec.go +++ b/avalanchego/utils/json/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/avalanchego/utils/json/float32.go b/avalanchego/utils/json/float32.go index ea583646..70fcc7a0 100644 --- a/avalanchego/utils/json/float32.go +++ b/avalanchego/utils/json/float32.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package json diff --git a/avalanchego/utils/json/float32_test.go b/avalanchego/utils/json/float32_test.go index 45cc05ad..a3a4fdc4 100644 --- a/avalanchego/utils/json/float32_test.go +++ b/avalanchego/utils/json/float32_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/avalanchego/utils/json/float64.go b/avalanchego/utils/json/float64.go index cdee2622..25a9adec 100644 --- a/avalanchego/utils/json/float64.go +++ b/avalanchego/utils/json/float64.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/avalanchego/utils/json/uint16.go b/avalanchego/utils/json/uint16.go index a697dc5e..b7b36e9a 100644 --- a/avalanchego/utils/json/uint16.go +++ b/avalanchego/utils/json/uint16.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/avalanchego/utils/json/uint32.go b/avalanchego/utils/json/uint32.go index b1991aa2..95267659 100644 --- a/avalanchego/utils/json/uint32.go +++ b/avalanchego/utils/json/uint32.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/avalanchego/utils/json/uint64.go b/avalanchego/utils/json/uint64.go index b2d94969..ba318903 100644 --- a/avalanchego/utils/json/uint64.go +++ b/avalanchego/utils/json/uint64.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/avalanchego/utils/json/uint8.go b/avalanchego/utils/json/uint8.go index e72ac2ae..d0571a2c 100644 --- a/avalanchego/utils/json/uint8.go +++ b/avalanchego/utils/json/uint8.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package json diff --git a/avalanchego/utils/linkedhashmap/iterator.go b/avalanchego/utils/linkedhashmap/iterator.go index 661e236a..27c4427b 100644 --- a/avalanchego/utils/linkedhashmap/iterator.go +++ b/avalanchego/utils/linkedhashmap/iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inte. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inte. All rights reserved. // See the file LICENSE for licensing terms. package linkedhashmap @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/utils" ) -var _ Iter[int, struct{}] = &iterator[int, struct{}]{} +var _ Iter[int, struct{}] = (*iterator[int, struct{}])(nil) // Iterates over the keys and values in a LinkedHashmap // from oldest to newest elements. @@ -66,5 +66,10 @@ func (it *iterator[K, V]) Next() bool { return true } -func (it *iterator[K, V]) Key() K { return it.key } -func (it *iterator[K, V]) Value() V { return it.value } +func (it *iterator[K, V]) Key() K { + return it.key +} + +func (it *iterator[K, V]) Value() V { + return it.value +} diff --git a/avalanchego/utils/linkedhashmap/linkedhashmap.go b/avalanchego/utils/linkedhashmap/linkedhashmap.go index 213d61d5..e4c1b3f4 100644 --- a/avalanchego/utils/linkedhashmap/linkedhashmap.go +++ b/avalanchego/utils/linkedhashmap/linkedhashmap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inte. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inte. All rights reserved. // See the file LICENSE for licensing terms. 
package linkedhashmap @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/utils" ) -var _ LinkedHashmap[int, struct{}] = &linkedHashmap[int, struct{}]{} +var _ LinkedHashmap[int, struct{}] = (*linkedHashmap[int, struct{}])(nil) // Hashmap provides an O(1) mapping from a comparable key to any value. // Comparable is defined by https://golang.org/ref/spec#Comparison_operators. @@ -120,7 +120,9 @@ func (lh *linkedHashmap[K, V]) delete(key K) { } } -func (lh *linkedHashmap[K, V]) len() int { return len(lh.entryMap) } +func (lh *linkedHashmap[K, V]) len() int { + return len(lh.entryMap) +} func (lh *linkedHashmap[K, V]) oldest() (K, V, bool) { if val := lh.entryList.Front(); val != nil { diff --git a/avalanchego/utils/linkedhashmap/linkedhashmap_test.go b/avalanchego/utils/linkedhashmap/linkedhashmap_test.go index 977e0a91..4b251a0a 100644 --- a/avalanchego/utils/linkedhashmap/linkedhashmap_test.go +++ b/avalanchego/utils/linkedhashmap/linkedhashmap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package linkedhashmap diff --git a/avalanchego/utils/logging/color.go b/avalanchego/utils/logging/color.go index 072f85b2..323d1d13 100644 --- a/avalanchego/utils/logging/color.go +++ b/avalanchego/utils/logging/color.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/avalanchego/utils/logging/config.go b/avalanchego/utils/logging/config.go index afc2b480..baeb666d 100644 --- a/avalanchego/utils/logging/config.go +++ b/avalanchego/utils/logging/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package logging diff --git a/avalanchego/utils/logging/factory.go b/avalanchego/utils/logging/factory.go index f4421239..b3426257 100644 --- a/avalanchego/utils/logging/factory.go +++ b/avalanchego/utils/logging/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging @@ -12,10 +12,12 @@ import ( "go.uber.org/zap" "go.uber.org/zap/zapcore" + "golang.org/x/exp/maps" + "gopkg.in/natefinch/lumberjack.v2" ) -var _ Factory = &factory{} +var _ Factory = (*factory)(nil) // Factory creates new instances of different types of Logger type Factory interface { @@ -167,11 +169,7 @@ func (f *factory) GetLoggerNames() []string { f.lock.RLock() defer f.lock.RUnlock() - names := make([]string, 0, len(f.loggers)) - for name := range f.loggers { - names = append(names, name) - } - return names + return maps.Keys(f.loggers) } func (f *factory) Close() { diff --git a/avalanchego/utils/logging/format.go b/avalanchego/utils/logging/format.go index 628e0792..1e979f74 100644 --- a/avalanchego/utils/logging/format.go +++ b/avalanchego/utils/logging/format.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/avalanchego/utils/logging/level.go b/avalanchego/utils/logging/level.go index b2e58191..a7951c03 100644 --- a/avalanchego/utils/logging/level.go +++ b/avalanchego/utils/logging/level.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package logging diff --git a/avalanchego/utils/logging/log.go b/avalanchego/utils/logging/log.go index 8975cd13..bff8934e 100644 --- a/avalanchego/utils/logging/log.go +++ b/avalanchego/utils/logging/log.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging @@ -10,7 +10,7 @@ import ( "go.uber.org/zap/zapcore" ) -var _ Logger = &log{} +var _ Logger = (*log)(nil) type log struct { wrappedCores []WrappedCore @@ -53,6 +53,7 @@ func NewLogger(prefix string, wrappedCores ...WrappedCore) Logger { } } +// TODO: return errors here func (l *log) Write(p []byte) (int, error) { for _, wc := range l.wrappedCores { if wc.WriterDisabled { @@ -63,9 +64,10 @@ func (l *log) Write(p []byte) (int, error) { return len(p), nil } +// TODO: return errors here func (l *log) Stop() { for _, wc := range l.wrappedCores { - wc.Writer.Close() + _ = wc.Writer.Close() } } @@ -104,6 +106,12 @@ func (l *log) Verbo(msg string, fields ...zap.Field) { l.log(Verbo, msg, fields...) 
} +func (l *log) SetLevel(level Level) { + for _, core := range l.wrappedCores { + core.AtomicLevel.SetLevel(zapcore.Level(level)) + } +} + func (l *log) StopOnPanic() { if r := recover(); r != nil { l.Fatal("panicking", zap.Any("reason", r), zap.Stack("from")) @@ -112,7 +120,10 @@ func (l *log) StopOnPanic() { } } -func (l *log) RecoverAndPanic(f func()) { defer l.StopOnPanic(); f() } +func (l *log) RecoverAndPanic(f func()) { + defer l.StopOnPanic() + f() +} func (l *log) stopAndExit(exit func()) { if r := recover(); r != nil { @@ -122,4 +133,7 @@ func (l *log) stopAndExit(exit func()) { } } -func (l *log) RecoverAndExit(f, exit func()) { defer l.stopAndExit(exit); f() } +func (l *log) RecoverAndExit(f, exit func()) { + defer l.stopAndExit(exit) + f() +} diff --git a/avalanchego/utils/logging/log_test.go b/avalanchego/utils/logging/log_test.go index be8cd607..4242ecab 100644 --- a/avalanchego/utils/logging/log_test.go +++ b/avalanchego/utils/logging/log_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/avalanchego/utils/logging/logger.go b/avalanchego/utils/logging/logger.go index 8bccc27f..79acb683 100644 --- a/avalanchego/utils/logging/logger.go +++ b/avalanchego/utils/logging/logger.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging @@ -36,6 +36,9 @@ type Logger interface { // aspect of the program Verbo(msg string, fields ...zap.Field) + // SetLevel that this logger should log to + SetLevel(level Level) + // Recovers a panic, logs the error, and rethrows the panic. 
StopOnPanic() // If a function panics, this will log that panic and then re-panic ensuring diff --git a/avalanchego/utils/logging/mock_logger.go b/avalanchego/utils/logging/mock_logger.go index fa93ca1f..19ade121 100644 --- a/avalanchego/utils/logging/mock_logger.go +++ b/avalanchego/utils/logging/mock_logger.go @@ -1,5 +1,8 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: utils/logging/logger.go +// Source: github.com/ava-labs/avalanchego/utils/logging (interfaces: Logger) // Package logging is a generated GoMock package. package logging @@ -8,7 +11,7 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - zap "go.uber.org/zap" + zapcore "go.uber.org/zap/zapcore" ) // MockLogger is a mock of Logger interface. @@ -35,95 +38,107 @@ func (m *MockLogger) EXPECT() *MockLoggerMockRecorder { } // Debug mocks base method. -func (m *MockLogger) Debug(format string, args ...zap.Field) { +func (m *MockLogger) Debug(arg0 string, arg1 ...zapcore.Field) { m.ctrl.T.Helper() - varargs := []interface{}{format} - for _, a := range args { + varargs := []interface{}{arg0} + for _, a := range arg1 { varargs = append(varargs, a) } m.ctrl.Call(m, "Debug", varargs...) } // Debug indicates an expected call of Debug. -func (mr *MockLoggerMockRecorder) Debug(format interface{}, args ...interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) Debug(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{format}, args...) + varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Debug", reflect.TypeOf((*MockLogger)(nil).Debug), varargs...) } // Error mocks base method. 
-func (m *MockLogger) Error(format string, args ...zap.Field) { +func (m *MockLogger) Error(arg0 string, arg1 ...zapcore.Field) { m.ctrl.T.Helper() - varargs := []interface{}{format} - for _, a := range args { + varargs := []interface{}{arg0} + for _, a := range arg1 { varargs = append(varargs, a) } m.ctrl.Call(m, "Error", varargs...) } // Error indicates an expected call of Error. -func (mr *MockLoggerMockRecorder) Error(format interface{}, args ...interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) Error(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{format}, args...) + varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Error", reflect.TypeOf((*MockLogger)(nil).Error), varargs...) } // Fatal mocks base method. -func (m *MockLogger) Fatal(format string, args ...zap.Field) { +func (m *MockLogger) Fatal(arg0 string, arg1 ...zapcore.Field) { m.ctrl.T.Helper() - varargs := []interface{}{format} - for _, a := range args { + varargs := []interface{}{arg0} + for _, a := range arg1 { varargs = append(varargs, a) } m.ctrl.Call(m, "Fatal", varargs...) } // Fatal indicates an expected call of Fatal. -func (mr *MockLoggerMockRecorder) Fatal(format interface{}, args ...interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) Fatal(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{format}, args...) + varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Fatal", reflect.TypeOf((*MockLogger)(nil).Fatal), varargs...) } // Info mocks base method. 
-func (m *MockLogger) Info(format string, args ...zap.Field) { +func (m *MockLogger) Info(arg0 string, arg1 ...zapcore.Field) { m.ctrl.T.Helper() - varargs := []interface{}{format} - for _, a := range args { + varargs := []interface{}{arg0} + for _, a := range arg1 { varargs = append(varargs, a) } m.ctrl.Call(m, "Info", varargs...) } // Info indicates an expected call of Info. -func (mr *MockLoggerMockRecorder) Info(format interface{}, args ...interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) Info(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{format}, args...) + varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Info", reflect.TypeOf((*MockLogger)(nil).Info), varargs...) } // RecoverAndExit mocks base method. -func (m *MockLogger) RecoverAndExit(f, exit func()) { +func (m *MockLogger) RecoverAndExit(arg0, arg1 func()) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RecoverAndExit", f, exit) + m.ctrl.Call(m, "RecoverAndExit", arg0, arg1) } // RecoverAndExit indicates an expected call of RecoverAndExit. -func (mr *MockLoggerMockRecorder) RecoverAndExit(f, exit interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) RecoverAndExit(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecoverAndExit", reflect.TypeOf((*MockLogger)(nil).RecoverAndExit), f, exit) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecoverAndExit", reflect.TypeOf((*MockLogger)(nil).RecoverAndExit), arg0, arg1) } // RecoverAndPanic mocks base method. -func (m *MockLogger) RecoverAndPanic(f func()) { +func (m *MockLogger) RecoverAndPanic(arg0 func()) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RecoverAndPanic", f) + m.ctrl.Call(m, "RecoverAndPanic", arg0) } // RecoverAndPanic indicates an expected call of RecoverAndPanic. 
-func (mr *MockLoggerMockRecorder) RecoverAndPanic(f interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) RecoverAndPanic(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecoverAndPanic", reflect.TypeOf((*MockLogger)(nil).RecoverAndPanic), arg0) +} + +// SetLevel mocks base method. +func (m *MockLogger) SetLevel(arg0 Level) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLevel", arg0) +} + +// SetLevel indicates an expected call of SetLevel. +func (mr *MockLoggerMockRecorder) SetLevel(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RecoverAndPanic", reflect.TypeOf((*MockLogger)(nil).RecoverAndPanic), f) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLevel", reflect.TypeOf((*MockLogger)(nil).SetLevel), arg0) } // Stop mocks base method. @@ -151,67 +166,67 @@ func (mr *MockLoggerMockRecorder) StopOnPanic() *gomock.Call { } // Trace mocks base method. -func (m *MockLogger) Trace(format string, args ...zap.Field) { +func (m *MockLogger) Trace(arg0 string, arg1 ...zapcore.Field) { m.ctrl.T.Helper() - varargs := []interface{}{format} - for _, a := range args { + varargs := []interface{}{arg0} + for _, a := range arg1 { varargs = append(varargs, a) } m.ctrl.Call(m, "Trace", varargs...) } // Trace indicates an expected call of Trace. -func (mr *MockLoggerMockRecorder) Trace(format interface{}, args ...interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) Trace(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{format}, args...) + varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Trace", reflect.TypeOf((*MockLogger)(nil).Trace), varargs...) } // Verbo mocks base method. 
-func (m *MockLogger) Verbo(format string, args ...zap.Field) { +func (m *MockLogger) Verbo(arg0 string, arg1 ...zapcore.Field) { m.ctrl.T.Helper() - varargs := []interface{}{format} - for _, a := range args { + varargs := []interface{}{arg0} + for _, a := range arg1 { varargs = append(varargs, a) } m.ctrl.Call(m, "Verbo", varargs...) } // Verbo indicates an expected call of Verbo. -func (mr *MockLoggerMockRecorder) Verbo(format interface{}, args ...interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) Verbo(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{format}, args...) + varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verbo", reflect.TypeOf((*MockLogger)(nil).Verbo), varargs...) } // Warn mocks base method. -func (m *MockLogger) Warn(format string, args ...zap.Field) { +func (m *MockLogger) Warn(arg0 string, arg1 ...zapcore.Field) { m.ctrl.T.Helper() - varargs := []interface{}{format} - for _, a := range args { + varargs := []interface{}{arg0} + for _, a := range arg1 { varargs = append(varargs, a) } m.ctrl.Call(m, "Warn", varargs...) } // Warn indicates an expected call of Warn. -func (mr *MockLoggerMockRecorder) Warn(format interface{}, args ...interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) Warn(arg0 interface{}, arg1 ...interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - varargs := append([]interface{}{format}, args...) + varargs := append([]interface{}{arg0}, arg1...) return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Warn", reflect.TypeOf((*MockLogger)(nil).Warn), varargs...) } // Write mocks base method. 
-func (m *MockLogger) Write(p []byte) (int, error) { +func (m *MockLogger) Write(arg0 []byte) (int, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Write", p) + ret := m.ctrl.Call(m, "Write", arg0) ret0, _ := ret[0].(int) ret1, _ := ret[1].(error) return ret0, ret1 } // Write indicates an expected call of Write. -func (mr *MockLoggerMockRecorder) Write(p interface{}) *gomock.Call { +func (mr *MockLoggerMockRecorder) Write(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockLogger)(nil).Write), p) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockLogger)(nil).Write), arg0) } diff --git a/avalanchego/utils/logging/sanitize.go b/avalanchego/utils/logging/sanitize.go index ab65127e..05b24ff9 100644 --- a/avalanchego/utils/logging/sanitize.go +++ b/avalanchego/utils/logging/sanitize.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package logging diff --git a/avalanchego/utils/logging/test_log.go b/avalanchego/utils/logging/test_log.go index 469a47fe..126128f5 100644 --- a/avalanchego/utils/logging/test_log.go +++ b/avalanchego/utils/logging/test_log.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package logging import ( - "errors" "io" "go.uber.org/zap" @@ -14,14 +13,14 @@ var ( // Discard is a mock WriterCloser that drops all writes and close requests Discard io.WriteCloser = discard{} - errNoLoggerWrite = errors.New("NoLogger can't write") - _ Logger = NoLog{} ) type NoLog struct{} -func (NoLog) Write([]byte) (int, error) { return 0, errNoLoggerWrite } +func (NoLog) Write(b []byte) (int, error) { + return len(b), nil +} func (NoLog) Fatal(string, ...zap.Field) {} @@ -37,23 +36,41 @@ func (NoLog) Debug(string, ...zap.Field) {} func (NoLog) Verbo(string, ...zap.Field) {} +func (NoLog) SetLevel(Level) {} + func (NoLog) StopOnPanic() {} -func (NoLog) RecoverAndPanic(f func()) { f() } +func (NoLog) RecoverAndPanic(f func()) { + f() +} -func (NoLog) RecoverAndExit(f, exit func()) { defer exit(); f() } +func (NoLog) RecoverAndExit(f, exit func()) { + defer exit() + f() +} func (NoLog) Stop() {} type NoWarn struct{ NoLog } -func (NoWarn) Fatal(string, ...zap.Field) { panic("unexpected Fatal") } +func (NoWarn) Fatal(string, ...zap.Field) { + panic("unexpected Fatal") +} -func (NoWarn) Error(string, ...zap.Field) { panic("unexpected Error") } +func (NoWarn) Error(string, ...zap.Field) { + panic("unexpected Error") +} -func (NoWarn) Warn(string, ...zap.Field) { panic("unexpected Warn") } +func (NoWarn) Warn(string, ...zap.Field) { + panic("unexpected Warn") +} type discard struct{} -func (discard) Write(p []byte) (int, error) { return len(p), nil } -func (discard) Close() error { return nil } +func (discard) Write(p []byte) (int, error) { + return len(p), nil +} + +func (discard) Close() error { + return nil +} diff --git a/avalanchego/utils/math/averager.go b/avalanchego/utils/math/averager.go index cdbb4216..a926aba0 100644 --- a/avalanchego/utils/math/averager.go +++ b/avalanchego/utils/math/averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package math diff --git a/avalanchego/utils/math/averager_heap.go b/avalanchego/utils/math/averager_heap.go index d90936ea..b09393b4 100644 --- a/avalanchego/utils/math/averager_heap.go +++ b/avalanchego/utils/math/averager_heap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math @@ -11,7 +11,7 @@ import ( var ( _ AveragerHeap = averagerHeap{} - _ heap.Interface = &averagerHeapBackend{} + _ heap.Interface = (*averagerHeapBackend)(nil) ) // AveragerHeap maintains a heap of the averagers. @@ -111,7 +111,9 @@ func (h averagerHeap) Len() int { return len(h.b.entries) } -func (h *averagerHeapBackend) Len() int { return len(h.entries) } +func (h *averagerHeapBackend) Len() int { + return len(h.entries) +} func (h *averagerHeapBackend) Less(i, j int) bool { if h.isMaxHeap { diff --git a/avalanchego/utils/math/averager_heap_test.go b/avalanchego/utils/math/averager_heap_test.go index 414f2d39..a9796129 100644 --- a/avalanchego/utils/math/averager_heap_test.go +++ b/avalanchego/utils/math/averager_heap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/avalanchego/utils/math/continuous_averager.go b/avalanchego/utils/math/continuous_averager.go index 43e14a48..e60832f2 100644 --- a/avalanchego/utils/math/continuous_averager.go +++ b/avalanchego/utils/math/continuous_averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package math diff --git a/avalanchego/utils/math/continuous_averager_benchmark_test.go b/avalanchego/utils/math/continuous_averager_benchmark_test.go index ccfbd765..7a8d30a3 100644 --- a/avalanchego/utils/math/continuous_averager_benchmark_test.go +++ b/avalanchego/utils/math/continuous_averager_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/avalanchego/utils/math/continuous_averager_test.go b/avalanchego/utils/math/continuous_averager_test.go index a6cffbdb..e7595537 100644 --- a/avalanchego/utils/math/continuous_averager_test.go +++ b/avalanchego/utils/math/continuous_averager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/avalanchego/utils/math/meter/continuous_meter.go b/avalanchego/utils/math/meter/continuous_meter.go index 1c60ea73..4bd3f000 100644 --- a/avalanchego/utils/math/meter/continuous_meter.go +++ b/avalanchego/utils/math/meter/continuous_meter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package meter @@ -11,8 +11,8 @@ import ( var ( convertEToBase2 = math.Log(2) - _ Factory = &ContinuousFactory{} - _ Meter = &continuousMeter{} + _ Factory = (*ContinuousFactory)(nil) + _ Meter = (*continuousMeter)(nil) ) // ContinuousFactory implements the Factory interface by returning a continuous diff --git a/avalanchego/utils/math/meter/factory.go b/avalanchego/utils/math/meter/factory.go index ffddc268..49e4859e 100644 --- a/avalanchego/utils/math/meter/factory.go +++ b/avalanchego/utils/math/meter/factory.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter diff --git a/avalanchego/utils/math/meter/meter.go b/avalanchego/utils/math/meter/meter.go index e185fb16..cfc5fcd8 100644 --- a/avalanchego/utils/math/meter/meter.go +++ b/avalanchego/utils/math/meter/meter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter diff --git a/avalanchego/utils/math/meter/meter_benchmark_test.go b/avalanchego/utils/math/meter/meter_benchmark_test.go index 7c0c6435..65f3dcfa 100644 --- a/avalanchego/utils/math/meter/meter_benchmark_test.go +++ b/avalanchego/utils/math/meter/meter_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package meter diff --git a/avalanchego/utils/math/meter/meter_test.go b/avalanchego/utils/math/meter/meter_test.go index e8dc0c50..2bf29185 100644 --- a/avalanchego/utils/math/meter/meter_test.go +++ b/avalanchego/utils/math/meter/meter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package meter diff --git a/avalanchego/utils/math/safe_math.go b/avalanchego/utils/math/safe_math.go index dca38f6b..83454758 100644 --- a/avalanchego/utils/math/safe_math.go +++ b/avalanchego/utils/math/safe_math.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math @@ -6,12 +6,18 @@ package math import ( "errors" "math" + + "golang.org/x/exp/constraints" + + "github.com/ava-labs/avalanchego/utils" ) -var errOverflow = errors.New("overflow occurred") +var ( + ErrOverflow = errors.New("overflow") + ErrUnderflow = errors.New("underflow") +) -// Max64 returns the maximum of the values provided -func Max64(max uint64, nums ...uint64) uint64 { +func Max[T constraints.Ordered](max T, nums ...T) T { for _, num := range nums { if num > max { max = num @@ -20,18 +26,7 @@ func Max64(max uint64, nums ...uint64) uint64 { return max } -// Min returns the minimum of the values provided -func Min(min int, nums ...int) int { - for _, num := range nums { - if num < min { - min = num - } - } - return min -} - -// Min64 returns the minimum of the values provided -func Min64(min uint64, nums ...uint64) uint64 { +func Min[T constraints.Ordered](min T, nums ...T) T { for _, num := range nums { if num < min { min = num @@ -43,19 +38,23 @@ func Min64(min uint64, nums ...uint64) uint64 { // Add64 returns: // 1) a + b // 2) If there is overflow, an error +// +// Note that we don't have a generic Add function because checking for +// an overflow requires knowing the max size of a given type, which we +// don't know if we're adding generic types. 
func Add64(a, b uint64) (uint64, error) { if a > math.MaxUint64-b { - return 0, errOverflow + return 0, ErrOverflow } return a + b, nil } -// Sub64 returns: +// Sub returns: // 1) a - b // 2) If there is underflow, an error -func Sub64(a, b uint64) (uint64, error) { +func Sub[T constraints.Unsigned](a, b T) (T, error) { if a < b { - return 0, errOverflow + return utils.Zero[T](), ErrUnderflow } return a - b, nil } @@ -63,13 +62,17 @@ func Sub64(a, b uint64) (uint64, error) { // Mul64 returns: // 1) a * b // 2) If there is overflow, an error +// +// Note that we don't have a generic Mul function because checking for +// an overflow requires knowing the max size of a given type, which we +// don't know if we're adding generic types. func Mul64(a, b uint64) (uint64, error) { if b != 0 && a > math.MaxUint64/b { - return 0, errOverflow + return 0, ErrOverflow } return a * b, nil } -func Diff64(a, b uint64) uint64 { - return Max64(a, b) - Min64(a, b) +func AbsDiff[T constraints.Unsigned](a, b T) T { + return Max(a, b) - Min(a, b) } diff --git a/avalanchego/utils/math/safe_math_test.go b/avalanchego/utils/math/safe_math_test.go index 4eb6944a..5d9bb702 100644 --- a/avalanchego/utils/math/safe_math_test.go +++ b/avalanchego/utils/math/safe_math_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package math @@ -6,113 +6,124 @@ package math import ( "math" "testing" + + "github.com/stretchr/testify/require" ) const maxUint64 uint64 = math.MaxUint64 -func TestMax64(t *testing.T) { - actual := Max64(0, maxUint64) - if actual != maxUint64 { - t.Fatalf("Expected %d, got %d", maxUint64, actual) - } - actual = Max64(maxUint64, 0) - if actual != maxUint64 { - t.Fatalf("Expected %d, got %d", maxUint64, actual) - } +func TestMax(t *testing.T) { + require := require.New(t) + + require.Equal(maxUint64, Max(0, maxUint64)) + require.Equal(maxUint64, Max(maxUint64, 0)) + require.Equal(1, Max(1, 0)) + require.Equal(1, Max(0, 1)) + require.Equal(0, Max(0, 0)) + require.Equal(2, Max(2, 2)) } -func TestMin64(t *testing.T) { - actual := Min64(0, maxUint64) - if actual != 0 { - t.Fatalf("Expected %d, got %d", 0, actual) - } - actual = Min64(maxUint64, 0) - if actual != 0 { - t.Fatalf("Expected %d, got %d", 0, actual) - } +func TestMin(t *testing.T) { + require := require.New(t) + + require.Equal(uint64(0), Min(uint64(0), maxUint64)) + require.Equal(uint64(0), Min(maxUint64, uint64(0))) + require.Equal(0, Min(1, 0)) + require.Equal(0, Min(0, 1)) + require.Equal(0, Min(0, 0)) + require.Equal(2, Min(2, 2)) + require.Equal(1, Min(1, 2)) } func TestAdd64(t *testing.T) { + require := require.New(t) + sum, err := Add64(0, maxUint64) - if err != nil { - t.Fatalf("Add64 failed unexpectedly") - } - if sum != maxUint64 { - t.Fatalf("Expected %d, got %d", maxUint64, sum) - } + require.NoError(err) + require.Equal(maxUint64, sum) sum, err = Add64(maxUint64, 0) - if err != nil { - t.Fatalf("Add64 failed unexpectedly") - } - if sum != math.MaxUint64 { - t.Fatalf("Expected %d, got %d", maxUint64, sum) - } - - sum, err = Add64(1<<62, 1<<62) - if err != nil { - t.Fatalf("Add64 failed unexpectedly") - } - if sum != uint64(1<<63) { - t.Fatalf("Expected %d, got %d", uint64(1<<63), sum) - } + require.NoError(err) + require.Equal(maxUint64, sum) + + sum, err = Add64(uint64(1<<62), uint64(1<<62)) + 
require.NoError(err) + require.Equal(uint64(1<<63), sum) _, err = Add64(1, maxUint64) - if err == nil { - t.Fatalf("Add64 succeeded unexpectedly") - } + require.ErrorIs(err, ErrOverflow) _, err = Add64(maxUint64, 1) - if err == nil { - t.Fatalf("Add64 succeeded unexpectedly") - } + require.ErrorIs(err, ErrOverflow) _, err = Add64(maxUint64, maxUint64) - if err == nil { - t.Fatalf("Add64 succeeded unexpectedly") - } + require.ErrorIs(err, ErrOverflow) } -func TestSub64(t *testing.T) { - actual, err := Sub64(2, 1) - if err != nil { - t.Fatalf("Sub64 failed unexpectedly") - } else if actual != 1 { - t.Fatalf("Expected %d, got %d", 1, actual) - } - - _, err = Sub64(1, 2) - if err == nil { - t.Fatalf("Sub64 did not fail in the manner expected") - } +func TestSub(t *testing.T) { + require := require.New(t) + + got, err := Sub(uint64(2), uint64(1)) + require.NoError(err) + require.Equal(uint64(1), got) + + got, err = Sub(uint64(2), uint64(2)) + require.NoError(err) + require.Equal(uint64(0), got) + + got, err = Sub(maxUint64, maxUint64) + require.NoError(err) + require.Equal(uint64(0), got) + + got, err = Sub(uint64(3), uint64(2)) + require.NoError(err) + require.Equal(uint64(1), got) + + _, err = Sub(uint64(1), uint64(2)) + require.ErrorIs(err, ErrUnderflow) + + _, err = Sub(maxUint64-1, maxUint64) + require.ErrorIs(err, ErrUnderflow) } func TestMul64(t *testing.T) { - if prod, err := Mul64(maxUint64, 0); err != nil { - t.Fatalf("Mul64 failed unexpectedly") - } else if prod != 0 { - t.Fatalf("Mul64 returned wrong value") - } - - if prod, err := Mul64(maxUint64, 1); err != nil { - t.Fatalf("Mul64 failed unexpectedly") - } else if prod != maxUint64 { - t.Fatalf("Mul64 returned wrong value") - } - - if _, err := Mul64(maxUint64-1, 2); err == nil { - t.Fatalf("Mul64 overflowed") - } + require := require.New(t) + + got, err := Mul64(0, maxUint64) + require.NoError(err) + require.Equal(uint64(0), got) + + got, err = Mul64(maxUint64, 0) + require.NoError(err) + 
require.Equal(uint64(0), got) + + got, err = Mul64(uint64(1), uint64(3)) + require.NoError(err) + require.Equal(uint64(3), got) + + got, err = Mul64(uint64(3), uint64(1)) + require.NoError(err) + require.Equal(uint64(3), got) + + got, err = Mul64(uint64(2), uint64(3)) + require.NoError(err) + require.Equal(uint64(6), got) + + got, err = Mul64(maxUint64, 0) + require.NoError(err) + require.Equal(uint64(0), got) + + _, err = Mul64(maxUint64-1, 2) + require.ErrorIs(err, ErrOverflow) } -func TestDiff64(t *testing.T) { - actual := Diff64(0, maxUint64) - if actual != maxUint64 { - t.Fatalf("Expected %d, got %d", maxUint64, actual) - } +func TestAbsDiff(t *testing.T) { + require := require.New(t) - actual = Diff64(maxUint64, 0) - if actual != maxUint64 { - t.Fatalf("Expected %d, got %d", maxUint64, actual) - } + require.Equal(maxUint64, AbsDiff(0, maxUint64)) + require.Equal(maxUint64, AbsDiff(maxUint64, 0)) + require.Equal(uint64(2), AbsDiff(uint64(3), uint64(1))) + require.Equal(uint64(2), AbsDiff(uint64(1), uint64(3))) + require.Equal(uint64(0), AbsDiff(uint64(1), uint64(1))) + require.Equal(uint64(0), AbsDiff(uint64(0), uint64(0))) } diff --git a/avalanchego/utils/math/sync_averager.go b/avalanchego/utils/math/sync_averager.go index 6858ba76..cbe8ba10 100644 --- a/avalanchego/utils/math/sync_averager.go +++ b/avalanchego/utils/math/sync_averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package math diff --git a/avalanchego/utils/metric/api_interceptor.go b/avalanchego/utils/metric/api_interceptor.go index 21e7f7f2..57810fce 100644 --- a/avalanchego/utils/metric/api_interceptor.go +++ b/avalanchego/utils/metric/api_interceptor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metric @@ -68,7 +68,7 @@ func NewAPIInterceptor(namespace string, registerer prometheus.Registerer) (APII }, errs.Err } -func (apr *apiInterceptor) InterceptRequest(i *rpc.RequestInfo) *http.Request { +func (*apiInterceptor) InterceptRequest(i *rpc.RequestInfo) *http.Request { ctx := i.Request.Context() ctx = context.WithValue(ctx, requestTimestampKey, time.Now()) return i.Request.WithContext(ctx) diff --git a/avalanchego/utils/metric/averager.go b/avalanchego/utils/metric/averager.go index dec17d69..9cc4588d 100644 --- a/avalanchego/utils/metric/averager.go +++ b/avalanchego/utils/metric/averager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metric @@ -31,7 +31,7 @@ func NewAveragerWithErrs(namespace, name, desc string, reg prometheus.Registerer count: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: fmt.Sprintf("%s_count", name), - Help: fmt.Sprintf("# of observations of %s", desc), + Help: fmt.Sprintf("Total # of observations of %s", desc), }), sum: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, diff --git a/avalanchego/utils/password/hash.go b/avalanchego/utils/password/hash.go index 5ebbc9c3..19c6f731 100644 --- a/avalanchego/utils/password/hash.go +++ b/avalanchego/utils/password/hash.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package password diff --git a/avalanchego/utils/password/hash_test.go b/avalanchego/utils/password/hash_test.go index 652f0955..b7d90e6b 100644 --- a/avalanchego/utils/password/hash_test.go +++ b/avalanchego/utils/password/hash_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package password diff --git a/avalanchego/utils/password/password.go b/avalanchego/utils/password/password.go index e94c339e..af66b0c7 100644 --- a/avalanchego/utils/password/password.go +++ b/avalanchego/utils/password/password.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package password diff --git a/avalanchego/utils/password/password_test.go b/avalanchego/utils/password/password_test.go index 65c11419..26efb783 100644 --- a/avalanchego/utils/password/password_test.go +++ b/avalanchego/utils/password/password_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package password diff --git a/avalanchego/utils/perms/chmod.go b/avalanchego/utils/perms/chmod.go index 6407fad7..5b4ff4a3 100644 --- a/avalanchego/utils/perms/chmod.go +++ b/avalanchego/utils/perms/chmod.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/avalanchego/utils/perms/create.go b/avalanchego/utils/perms/create.go index 81110453..8d91baea 100644 --- a/avalanchego/utils/perms/create.go +++ b/avalanchego/utils/perms/create.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/avalanchego/utils/perms/perms.go b/avalanchego/utils/perms/perms.go index 388b2dc7..e89dcc94 100644 --- a/avalanchego/utils/perms/perms.go +++ b/avalanchego/utils/perms/perms.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/avalanchego/utils/perms/write_file.go b/avalanchego/utils/perms/write_file.go index a1678acf..9ce7f6bf 100644 --- a/avalanchego/utils/perms/write_file.go +++ b/avalanchego/utils/perms/write_file.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package perms diff --git a/avalanchego/utils/profiler/continuous.go b/avalanchego/utils/profiler/continuous.go index 05937d52..548e8877 100644 --- a/avalanchego/utils/profiler/continuous.go +++ b/avalanchego/utils/profiler/continuous.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package profiler @@ -82,9 +82,15 @@ func (p *continuousProfiler) stop() error { func (p *continuousProfiler) rotate() error { g := errgroup.Group{} - g.Go(func() error { return rotate(p.profiler.cpuProfileName, p.maxNumFiles) }) - g.Go(func() error { return rotate(p.profiler.memProfileName, p.maxNumFiles) }) - g.Go(func() error { return rotate(p.profiler.lockProfileName, p.maxNumFiles) }) + g.Go(func() error { + return rotate(p.profiler.cpuProfileName, p.maxNumFiles) + }) + g.Go(func() error { + return rotate(p.profiler.memProfileName, p.maxNumFiles) + }) + g.Go(func() error { + return rotate(p.profiler.lockProfileName, p.maxNumFiles) + }) return g.Wait() } diff --git a/avalanchego/utils/profiler/profiler.go b/avalanchego/utils/profiler/profiler.go index a4b010c5..c35606e7 100644 --- a/avalanchego/utils/profiler/profiler.go +++ b/avalanchego/utils/profiler/profiler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package profiler @@ -52,7 +52,9 @@ type profiler struct { cpuProfileFile *os.File } -func New(dir string) Profiler { return new(dir) } +func New(dir string) Profiler { + return new(dir) +} func new(dir string) *profiler { return &profiler{ diff --git a/avalanchego/utils/profiler/profiler_test.go b/avalanchego/utils/profiler/profiler_test.go index d0f948b9..c2c89143 100644 --- a/avalanchego/utils/profiler/profiler_test.go +++ b/avalanchego/utils/profiler/profiler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package profiler diff --git a/avalanchego/utils/resource/mock_user.go b/avalanchego/utils/resource/mock_user.go index 96c35d09..9b344ca1 100644 --- a/avalanchego/utils/resource/mock_user.go +++ b/avalanchego/utils/resource/mock_user.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/utils/resource (interfaces: User) @@ -10,30 +13,44 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockUser is a mock of User interface +// MockUser is a mock of User interface. type MockUser struct { ctrl *gomock.Controller recorder *MockUserMockRecorder } -// MockUserMockRecorder is the mock recorder for MockUser +// MockUserMockRecorder is the mock recorder for MockUser. type MockUserMockRecorder struct { mock *MockUser } -// NewMockUser creates a new mock instance +// NewMockUser creates a new mock instance. func NewMockUser(ctrl *gomock.Controller) *MockUser { mock := &MockUser{ctrl: ctrl} mock.recorder = &MockUserMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. 
func (m *MockUser) EXPECT() *MockUserMockRecorder { return m.recorder } -// CPUUsage mocks base method +// AvailableDiskBytes mocks base method. +func (m *MockUser) AvailableDiskBytes() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AvailableDiskBytes") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// AvailableDiskBytes indicates an expected call of AvailableDiskBytes. +func (mr *MockUserMockRecorder) AvailableDiskBytes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AvailableDiskBytes", reflect.TypeOf((*MockUser)(nil).AvailableDiskBytes)) +} + +// CPUUsage mocks base method. func (m *MockUser) CPUUsage() float64 { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CPUUsage") @@ -41,13 +58,13 @@ func (m *MockUser) CPUUsage() float64 { return ret0 } -// CPUUsage indicates an expected call of CPUUsage +// CPUUsage indicates an expected call of CPUUsage. func (mr *MockUserMockRecorder) CPUUsage() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CPUUsage", reflect.TypeOf((*MockUser)(nil).CPUUsage)) } -// DiskUsage mocks base method +// DiskUsage mocks base method. func (m *MockUser) DiskUsage() (float64, float64) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "DiskUsage") @@ -56,22 +73,8 @@ func (m *MockUser) DiskUsage() (float64, float64) { return ret0, ret1 } -// DiskUsage indicates an expected call of DiskUsage +// DiskUsage indicates an expected call of DiskUsage. 
func (mr *MockUserMockRecorder) DiskUsage() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DiskUsage", reflect.TypeOf((*MockUser)(nil).DiskUsage)) } - -// AvailableDiskBytes mocks base method -func (m *MockUser) AvailableDiskBytes() uint64 { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AvailableDiskBytes") - ret0, _ := ret[0].(uint64) - return ret0 -} - -// AvailableDiskBytes indicates an expected call of AvailableDiskBytes -func (mr *MockUserMockRecorder) AvailableDiskBytes() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AvailableDiskBytes", reflect.TypeOf((*MockUser)(nil).AvailableDiskBytes)) -} diff --git a/avalanchego/utils/resource/no_usage.go b/avalanchego/utils/resource/no_usage.go index e1a74ee8..8a10d11c 100644 --- a/avalanchego/utils/resource/no_usage.go +++ b/avalanchego/utils/resource/no_usage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package resource @@ -10,8 +10,14 @@ var NoUsage User = noUsage{} type noUsage struct{} -func (noUsage) CPUUsage() float64 { return 0 } +func (noUsage) CPUUsage() float64 { + return 0 +} -func (noUsage) DiskUsage() (float64, float64) { return 0, 0 } +func (noUsage) DiskUsage() (float64, float64) { + return 0, 0 +} -func (noUsage) AvailableDiskBytes() uint64 { return math.MaxUint64 } +func (noUsage) AvailableDiskBytes() uint64 { + return math.MaxUint64 +} diff --git a/avalanchego/utils/resource/usage.go b/avalanchego/utils/resource/usage.go index d4806b90..2c83aa0d 100644 --- a/avalanchego/utils/resource/usage.go +++ b/avalanchego/utils/resource/usage.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package resource @@ -16,7 +16,7 @@ import ( var ( lnHalf = math.Log(.5) - _ Manager = &manager{} + _ Manager = (*manager)(nil) ) type CPUUser interface { diff --git a/avalanchego/utils/resource/usage_test.go b/avalanchego/utils/resource/usage_test.go index 5c81d62a..5c1df781 100644 --- a/avalanchego/utils/resource/usage_test.go +++ b/avalanchego/utils/resource/usage_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package resource diff --git a/avalanchego/utils/rpc/json.go b/avalanchego/utils/rpc/json.go index c87766dd..cf8819e7 100644 --- a/avalanchego/utils/rpc/json.go +++ b/avalanchego/utils/rpc/json.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpc diff --git a/avalanchego/utils/rpc/options.go b/avalanchego/utils/rpc/options.go index a01254fe..ce79bc25 100644 --- a/avalanchego/utils/rpc/options.go +++ b/avalanchego/utils/rpc/options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpc diff --git a/avalanchego/utils/rpc/requester.go b/avalanchego/utils/rpc/requester.go index e5671c6d..49f3ffa0 100644 --- a/avalanchego/utils/rpc/requester.go +++ b/avalanchego/utils/rpc/requester.go @@ -1,28 +1,26 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpc import ( "context" - "fmt" "net/url" ) -var _ EndpointRequester = &avalancheEndpointRequester{} +var _ EndpointRequester = (*avalancheEndpointRequester)(nil) type EndpointRequester interface { SendRequest(ctx context.Context, method string, params interface{}, reply interface{}, options ...Option) error } type avalancheEndpointRequester struct { - uri, base string + uri string } -func NewEndpointRequester(uri, base string) EndpointRequester { +func NewEndpointRequester(uri string) EndpointRequester { return &avalancheEndpointRequester{ - uri: uri, - base: base, + uri: uri, } } @@ -37,10 +35,11 @@ func (e *avalancheEndpointRequester) SendRequest( if err != nil { return err } + return SendJSONRequest( ctx, uri, - fmt.Sprintf("%s.%s", e.base, method), + method, params, reply, options..., diff --git a/avalanchego/utils/sampler/rand.go b/avalanchego/utils/sampler/rand.go index 15bb01d7..476d8475 100644 --- a/avalanchego/utils/sampler/rand.go +++ b/avalanchego/utils/sampler/rand.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/avalanchego/utils/sampler/uniform.go b/avalanchego/utils/sampler/uniform.go index 316e4c9d..03f9068c 100644 --- a/avalanchego/utils/sampler/uniform.go +++ b/avalanchego/utils/sampler/uniform.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -16,4 +16,6 @@ type Uniform interface { } // NewUniform returns a new sampler -func NewUniform() Uniform { return &uniformReplacer{} } +func NewUniform() Uniform { + return &uniformReplacer{} +} diff --git a/avalanchego/utils/sampler/uniform_benchmark_test.go b/avalanchego/utils/sampler/uniform_benchmark_test.go index 9b229bd2..d0fa5650 100644 --- a/avalanchego/utils/sampler/uniform_benchmark_test.go +++ b/avalanchego/utils/sampler/uniform_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/avalanchego/utils/sampler/uniform_best.go b/avalanchego/utils/sampler/uniform_best.go index 0492610c..e7ac3df4 100644 --- a/avalanchego/utils/sampler/uniform_best.go +++ b/avalanchego/utils/sampler/uniform_best.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -14,7 +14,7 @@ import ( var ( errNoValidUniformSamplers = errors.New("no valid uniform samplers found") - _ Uniform = &uniformBest{} + _ Uniform = (*uniformBest)(nil) ) // Sampling is performed by using another implementation of the Uniform diff --git a/avalanchego/utils/sampler/uniform_replacer.go b/avalanchego/utils/sampler/uniform_replacer.go index 45657440..5be2c383 100644 --- a/avalanchego/utils/sampler/uniform_replacer.go +++ b/avalanchego/utils/sampler/uniform_replacer.go @@ -1,10 +1,12 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler import ( "math" + + "golang.org/x/exp/maps" ) type defaultMap map[uint64]uint64 @@ -71,9 +73,7 @@ func (s *uniformReplacer) ClearSeed() { } func (s *uniformReplacer) Reset() { - for k := range s.drawn { - delete(s.drawn, k) - } + maps.Clear(s.drawn) s.drawsCount = 0 } diff --git a/avalanchego/utils/sampler/uniform_resample.go b/avalanchego/utils/sampler/uniform_resample.go index ecf3a956..d6404f67 100644 --- a/avalanchego/utils/sampler/uniform_resample.go +++ b/avalanchego/utils/sampler/uniform_resample.go @@ -1,10 +1,12 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler import ( "math" + + "github.com/ava-labs/avalanchego/utils/set" ) // uniformResample allows for sampling over a uniform distribution without @@ -20,7 +22,7 @@ type uniformResample struct { rng rng seededRNG rng length uint64 - drawn map[uint64]struct{} + drawn set.Set[uint64] } func (s *uniformResample) Initialize(length uint64) error { @@ -30,7 +32,7 @@ func (s *uniformResample) Initialize(length uint64) error { s.rng = globalRNG s.seededRNG = newRNG() s.length = length - s.drawn = make(map[uint64]struct{}) + s.drawn.Clear() return nil } @@ -58,9 +60,7 @@ func (s *uniformResample) ClearSeed() { } func (s *uniformResample) Reset() { - for k := range s.drawn { - delete(s.drawn, k) - } + s.drawn.Clear() } func (s *uniformResample) Next() (uint64, error) { @@ -71,10 +71,10 @@ func (s *uniformResample) Next() (uint64, error) { for { draw := uint64(s.rng.Int63n(int64(s.length))) - if _, ok := s.drawn[draw]; ok { + if s.drawn.Contains(draw) { continue } - s.drawn[draw] = struct{}{} + s.drawn.Add(draw) return draw, nil } } diff --git a/avalanchego/utils/sampler/uniform_test.go b/avalanchego/utils/sampler/uniform_test.go index 02d3fe81..d431d0ee 100644 --- a/avalanchego/utils/sampler/uniform_test.go +++ b/avalanchego/utils/sampler/uniform_test.go @@ 
-1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/utils" + "golang.org/x/exp/slices" ) var ( @@ -114,7 +114,7 @@ func UniformDistributionTest(t *testing.T, s Uniform) { val, err := s.Sample(3) require.NoError(t, err) - utils.SortUint64(val) + slices.Sort(val) require.Equal( t, []uint64{0, 1, 2}, diff --git a/avalanchego/utils/sampler/weighted.go b/avalanchego/utils/sampler/weighted.go index 24454c32..58998d47 100644 --- a/avalanchego/utils/sampler/weighted.go +++ b/avalanchego/utils/sampler/weighted.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/avalanchego/utils/sampler/weighted_array.go b/avalanchego/utils/sampler/weighted_array.go index e6c59d26..c6b411b8 100644 --- a/avalanchego/utils/sampler/weighted_array.go +++ b/avalanchego/utils/sampler/weighted_array.go @@ -1,21 +1,28 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler import ( - "sort" - - safemath "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/math" ) -var _ Weighted = &weightedArray{} +var ( + _ Weighted = (*weightedArray)(nil) + _ utils.Sortable[weightedArrayElement] = weightedArrayElement{} +) type weightedArrayElement struct { cumulativeWeight uint64 index int } +// Note that this sorts in order of decreasing weight. 
+func (e weightedArrayElement) Less(other weightedArrayElement) bool { + return e.cumulativeWeight > other.cumulativeWeight +} + // Sampling is performed by executing a modified binary search over the provided // elements. Rather than cutting the remaining dataset in half, the algorithm // attempt to just in to where it think the value will be assuming a linear @@ -45,7 +52,7 @@ func (s *weightedArray) Initialize(weights []uint64) error { } // Optimize so that the array is closer to the uniform distribution - sortWeightedArray(s.arr) + utils.Sort(s.arr) maxIndex := len(s.arr) - 1 oneIfOdd := 1 & maxIndex @@ -58,7 +65,7 @@ func (s *weightedArray) Initialize(weights []uint64) error { cumulativeWeight := uint64(0) for i := 0; i < len(s.arr); i++ { - newWeight, err := safemath.Add64( + newWeight, err := math.Add64( cumulativeWeight, s.arr[i].cumulativeWeight, ) @@ -114,21 +121,3 @@ func (s *weightedArray) Sample(value uint64) (int, error) { index = int(lookupMass/float64(valueRange)) + minIndex } } - -type innerSortWeightedArray []weightedArrayElement - -func (lst innerSortWeightedArray) Less(i, j int) bool { - return lst[i].cumulativeWeight > lst[j].cumulativeWeight -} - -func (lst innerSortWeightedArray) Len() int { - return len(lst) -} - -func (lst innerSortWeightedArray) Swap(i, j int) { - lst[j], lst[i] = lst[i], lst[j] -} - -func sortWeightedArray(lst []weightedArrayElement) { - sort.Sort(innerSortWeightedArray(lst)) -} diff --git a/avalanchego/utils/sampler/weighted_array_test.go b/avalanchego/utils/sampler/weighted_array_test.go new file mode 100644 index 00000000..e1058363 --- /dev/null +++ b/avalanchego/utils/sampler/weighted_array_test.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package sampler + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestWeightedArrayElementLess(t *testing.T) { + require := require.New(t) + + var elt1, elt2 weightedArrayElement + require.False(elt1.Less(elt2)) + require.False(elt2.Less(elt1)) + + elt1 = weightedArrayElement{ + cumulativeWeight: 1, + } + elt2 = weightedArrayElement{ + cumulativeWeight: 2, + } + require.False(elt1.Less(elt2)) + require.True(elt2.Less(elt1)) +} diff --git a/avalanchego/utils/sampler/weighted_benchmark_test.go b/avalanchego/utils/sampler/weighted_benchmark_test.go index 2d963503..f0bfb4e7 100644 --- a/avalanchego/utils/sampler/weighted_benchmark_test.go +++ b/avalanchego/utils/sampler/weighted_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -12,6 +12,8 @@ import ( safemath "github.com/ava-labs/avalanchego/utils/math" ) +var errOverflow = errors.New("overflow error") + // BenchmarkAllWeightedSampling func BenchmarkAllWeightedSampling(b *testing.B) { pows := []float64{ @@ -96,7 +98,7 @@ func CalcWeightedPoW(exponent float64, size int) (uint64, []uint64, error) { totalWeight = newWeight } if totalWeight > math.MaxInt64 { - return 0, nil, errors.New("overflow error") + return 0, nil, errOverflow } return totalWeight, weights, nil } diff --git a/avalanchego/utils/sampler/weighted_best.go b/avalanchego/utils/sampler/weighted_best.go index b7ba016e..473ded5f 100644 --- a/avalanchego/utils/sampler/weighted_best.go +++ b/avalanchego/utils/sampler/weighted_best.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -16,7 +16,7 @@ import ( var ( errNoValidWeightedSamplers = errors.New("no valid weighted samplers found") - _ Weighted = &weightedBest{} + _ Weighted = (*weightedBest)(nil) ) // Sampling is performed by using another implementation of the Weighted diff --git a/avalanchego/utils/sampler/weighted_heap.go b/avalanchego/utils/sampler/weighted_heap.go index 7154eef3..1457d665 100644 --- a/avalanchego/utils/sampler/weighted_heap.go +++ b/avalanchego/utils/sampler/weighted_heap.go @@ -1,15 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler import ( - "sort" - - safemath "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/math" ) -var _ Weighted = &weightedHeap{} +var ( + _ Weighted = (*weightedHeap)(nil) + _ utils.Sortable[weightedHeapElement] = weightedHeapElement{} +) type weightedHeapElement struct { weight uint64 @@ -17,6 +19,19 @@ type weightedHeapElement struct { index int } +func (e weightedHeapElement) Less(other weightedHeapElement) bool { + // By accounting for the initial index of the weights, this results in a + // stable sort. We do this rather than using `sort.Stable` because of the + // reported change in performance of the sort used. + if e.weight > other.weight { + return true + } + if e.weight < other.weight { + return false + } + return e.index < other.index +} + // Sampling is performed by executing a search over a tree of elements in the // order of their probabilistic occurrence. 
// @@ -44,14 +59,14 @@ func (s *weightedHeap) Initialize(weights []uint64) error { } // Optimize so that the most probable values are at the top of the heap - sortWeightedHeap(s.heap) + utils.Sort(s.heap) // Initialize the heap for i := len(s.heap) - 1; i > 0; i-- { // Explicitly performing a shift here allows the compiler to avoid // checking for negative numbers, which saves a couple cycles parentIndex := (i - 1) >> 1 - newWeight, err := safemath.Add64( + newWeight, err := math.Add64( s.heap[parentIndex].cumulativeWeight, s.heap[i].cumulativeWeight, ) @@ -89,30 +104,3 @@ func (s *weightedHeap) Sample(value uint64) (int, error) { } } } - -type innerSortWeightedHeap []weightedHeapElement - -func (lst innerSortWeightedHeap) Less(i, j int) bool { - // By accounting for the initial index of the weights, this results in a - // stable sort. We do this rather than using `sort.Stable` because of the - // reported change in performance of the sort used. - if lst[i].weight > lst[j].weight { - return true - } - if lst[i].weight < lst[j].weight { - return false - } - return lst[i].index < lst[j].index -} - -func (lst innerSortWeightedHeap) Len() int { - return len(lst) -} - -func (lst innerSortWeightedHeap) Swap(i, j int) { - lst[j], lst[i] = lst[i], lst[j] -} - -func sortWeightedHeap(heap []weightedHeapElement) { - sort.Sort(innerSortWeightedHeap(heap)) -} diff --git a/avalanchego/utils/sampler/weighted_heap_test.go b/avalanchego/utils/sampler/weighted_heap_test.go index ff7f0048..098f431b 100644 --- a/avalanchego/utils/sampler/weighted_heap_test.go +++ b/avalanchego/utils/sampler/weighted_heap_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -23,3 +23,59 @@ func TestWeightedHeapInitialize(t *testing.T) { require.Equal(expected, elem.index) } } + +func TestWeightedHeapElementLess(t *testing.T) { + type test struct { + name string + elt1 weightedHeapElement + elt2 weightedHeapElement + expected bool + } + tests := []test{ + { + name: "all same", + elt1: weightedHeapElement{}, + elt2: weightedHeapElement{}, + expected: false, + }, + { + name: "first lower weight", + elt1: weightedHeapElement{}, + elt2: weightedHeapElement{ + weight: 1, + }, + expected: false, + }, + { + name: "first higher weight", + elt1: weightedHeapElement{ + weight: 1, + }, + elt2: weightedHeapElement{}, + expected: true, + }, + { + name: "first higher index", + elt1: weightedHeapElement{ + index: 1, + }, + elt2: weightedHeapElement{}, + expected: false, + }, + { + name: "second higher index", + elt1: weightedHeapElement{}, + elt2: weightedHeapElement{ + index: 1, + }, + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + require.Equal(tt.expected, tt.elt1.Less(tt.elt2)) + }) + } +} diff --git a/avalanchego/utils/sampler/weighted_linear.go b/avalanchego/utils/sampler/weighted_linear.go index 7e5221a6..f58fa22a 100644 --- a/avalanchego/utils/sampler/weighted_linear.go +++ b/avalanchego/utils/sampler/weighted_linear.go @@ -1,21 +1,28 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler import ( - "sort" - - safemath "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/math" ) -var _ Weighted = &weightedLinear{} +var ( + _ Weighted = (*weightedLinear)(nil) + _ utils.Sortable[weightedLinearElement] = weightedLinearElement{} +) type weightedLinearElement struct { cumulativeWeight uint64 index int } +// Note that this sorts in order of decreasing cumulative weight. +func (e weightedLinearElement) Less(other weightedLinearElement) bool { + return e.cumulativeWeight > other.cumulativeWeight +} + // Sampling is performed by executing a linear search over the provided elements // in the order of their probabilistic occurrence. // @@ -43,10 +50,10 @@ func (s *weightedLinear) Initialize(weights []uint64) error { } // Optimize so that the most probable values are at the front of the array - sortWeightedLinear(s.arr) + utils.Sort(s.arr) for i := 1; i < len(s.arr); i++ { - newWeight, err := safemath.Add64( + newWeight, err := math.Add64( s.arr[i-1].cumulativeWeight, s.arr[i].cumulativeWeight, ) @@ -72,21 +79,3 @@ func (s *weightedLinear) Sample(value uint64) (int, error) { index++ } } - -type innerSortWeightedLinear []weightedLinearElement - -func (lst innerSortWeightedLinear) Less(i, j int) bool { - return lst[i].cumulativeWeight > lst[j].cumulativeWeight -} - -func (lst innerSortWeightedLinear) Len() int { - return len(lst) -} - -func (lst innerSortWeightedLinear) Swap(i, j int) { - lst[j], lst[i] = lst[i], lst[j] -} - -func sortWeightedLinear(lst []weightedLinearElement) { - sort.Sort(innerSortWeightedLinear(lst)) -} diff --git a/avalanchego/utils/sampler/weighted_linear_test.go b/avalanchego/utils/sampler/weighted_linear_test.go new file mode 100644 index 00000000..b3403501 --- /dev/null +++ b/avalanchego/utils/sampler/weighted_linear_test.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package sampler + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestWeightedLinearElementLess(t *testing.T) { + require := require.New(t) + + var elt1, elt2 weightedLinearElement + require.False(elt1.Less(elt2)) + require.False(elt2.Less(elt1)) + + elt1 = weightedLinearElement{ + cumulativeWeight: 1, + } + elt2 = weightedLinearElement{ + cumulativeWeight: 2, + } + require.False(elt1.Less(elt2)) + require.True(elt2.Less(elt1)) +} diff --git a/avalanchego/utils/sampler/weighted_test.go b/avalanchego/utils/sampler/weighted_test.go index a8e6ec0c..cb22b431 100644 --- a/avalanchego/utils/sampler/weighted_test.go +++ b/avalanchego/utils/sampler/weighted_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/avalanchego/utils/sampler/weighted_uniform.go b/avalanchego/utils/sampler/weighted_uniform.go index 0fcfc3c8..14620475 100644 --- a/avalanchego/utils/sampler/weighted_uniform.go +++ b/avalanchego/utils/sampler/weighted_uniform.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler @@ -13,7 +13,7 @@ import ( var ( errWeightsTooLarge = errors.New("total weight is too large") - _ Weighted = &weightedUniform{} + _ Weighted = (*weightedUniform)(nil) ) // Sampling is performed by indexing into the array to find the correct index. diff --git a/avalanchego/utils/sampler/weighted_without_replacement.go b/avalanchego/utils/sampler/weighted_without_replacement.go index e162f906..a6039a65 100644 --- a/avalanchego/utils/sampler/weighted_without_replacement.go +++ b/avalanchego/utils/sampler/weighted_without_replacement.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/avalanchego/utils/sampler/weighted_without_replacement_benchmark_test.go b/avalanchego/utils/sampler/weighted_without_replacement_benchmark_test.go index 01405aa8..3d9b0085 100644 --- a/avalanchego/utils/sampler/weighted_without_replacement_benchmark_test.go +++ b/avalanchego/utils/sampler/weighted_without_replacement_benchmark_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/avalanchego/utils/sampler/weighted_without_replacement_generic.go b/avalanchego/utils/sampler/weighted_without_replacement_generic.go index 38eeb76b..08731baf 100644 --- a/avalanchego/utils/sampler/weighted_without_replacement_generic.go +++ b/avalanchego/utils/sampler/weighted_without_replacement_generic.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package sampler diff --git a/avalanchego/utils/sampler/weighted_without_replacement_test.go b/avalanchego/utils/sampler/weighted_without_replacement_test.go index 8b1bd16b..42b5ac2a 100644 --- a/avalanchego/utils/sampler/weighted_without_replacement_test.go +++ b/avalanchego/utils/sampler/weighted_without_replacement_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package sampler @@ -6,10 +6,11 @@ package sampler import ( "fmt" "math" - "sort" "testing" "github.com/stretchr/testify/require" + + "golang.org/x/exp/slices" ) var ( @@ -161,7 +162,7 @@ func WeightedWithoutReplacementDistributionTest( indices, err := s.Sample(4) require.NoError(t, err) - sort.Ints(indices) + slices.Sort(indices) require.Equal( t, []int{0, 1, 2, 2}, diff --git a/avalanchego/utils/set/bits.go b/avalanchego/utils/set/bits.go new file mode 100644 index 00000000..bf7f5f7b --- /dev/null +++ b/avalanchego/utils/set/bits.go @@ -0,0 +1,102 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package set + +import ( + "fmt" + "math/big" + "math/bits" +) + +// Bits is a bit-set backed by a big.Int +// Holds values ranging from [0, INT_MAX] (arch-dependent) +// Trying to use negative values will result in a panic. +// This implementation is NOT thread-safe. +type Bits struct { + bits *big.Int +} + +// NewBits returns a new instance of Bits with [bits] set to 1. +// +// Invariants: +// 1. Negative bits will cause a panic. +// 2. Duplicate bits are allowed but will cause a no-op. +func NewBits(bits ...int) Bits { + b := Bits{new(big.Int)} + for _, bit := range bits { + b.Add(bit) + } + return b +} + +// Add sets the [i]'th bit to 1 +func (b Bits) Add(i int) { + b.bits.SetBit(b.bits, i, 1) +} + +// Union performs the set union with another set. 
+// This adds all elements in [other] to [b] +func (b Bits) Union(other Bits) { + b.bits.Or(b.bits, other.bits) +} + +// Intersection performs the set intersection with another set +// This sets [b] to include only elements in both [b] and [other] +func (b Bits) Intersection(other Bits) { + b.bits.And(b.bits, other.bits) +} + +// Difference removes all the elements in [other] from this set +func (b Bits) Difference(other Bits) { + b.bits.AndNot(b.bits, other.bits) +} + +// Remove sets the [i]'th bit to 0 +func (b Bits) Remove(i int) { + b.bits.SetBit(b.bits, i, 0) +} + +// Clear empties out the bitset +func (b Bits) Clear() { + b.bits.SetUint64(0) +} + +// Contains returns true if the [i]'th bit is 1, and false otherwise +func (b Bits) Contains(i int) bool { + return b.bits.Bit(i) == 1 +} + +// BitLen returns the bit length of this bitset +func (b Bits) BitLen() int { + return b.bits.BitLen() +} + +// Len returns the amount of 1's in the bitset +// +// This is typically referred to as the "Hamming Weight" +// of a set of bits. +func (b Bits) Len() int { + result := 0 + for _, word := range b.bits.Bits() { + result += bits.OnesCount(uint(word)) + } + return result +} + +// Returns the byte representation of this bitset +func (b Bits) Bytes() []byte { + return b.bits.Bytes() +} + +// Inverse of Bits.Bytes() +func BitsFromBytes(bytes []byte) Bits { + return Bits{ + bits: new(big.Int).SetBytes(bytes), + } +} + +// String returns the hex representation of this bitset +func (b Bits) String() string { + return fmt.Sprintf("%x", b.bits.Bytes()) +} diff --git a/avalanchego/utils/set/bits_64.go b/avalanchego/utils/set/bits_64.go new file mode 100644 index 00000000..eed00afd --- /dev/null +++ b/avalanchego/utils/set/bits_64.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package set + +import ( + "fmt" + "math/bits" +) + +// Bits64 is a set that can contain uints in the range [0, 64). 
All functions +// are O(1). The zero value is the empty set. +type Bits64 uint64 + +// Add [i] to the set of ints +func (b *Bits64) Add(i uint) { + *b |= 1 << i +} + +// Union adds all the elements in [s] to this set +func (b *Bits64) Union(s Bits64) { + *b |= s +} + +// Intersection takes the intersection of [s] with this set +func (b *Bits64) Intersection(s Bits64) { + *b &= s +} + +// Difference removes all the elements in [s] from this set +func (b *Bits64) Difference(s Bits64) { + *b &^= s +} + +// Remove [i] from the set of ints +func (b *Bits64) Remove(i uint) { + *b &^= 1 << i +} + +// Clear removes all elements from this set +func (b *Bits64) Clear() { + *b = 0 +} + +// Contains returns true if [i] was previously added to this set +func (b Bits64) Contains(i uint) bool { + return b&(1< 63", + elts: []int{1337}, + }, + { + name: "multiple", + elts: []int{1, 2, 3}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := NewBits(tt.elts...) + bytes := b.Bytes() + fromBytes := BitsFromBytes(bytes) + + require.Equal(len(tt.elts), fromBytes.Len()) + for _, elt := range tt.elts { + require.True(fromBytes.Contains(elt)) + } + }) + } +} diff --git a/avalanchego/utils/set/set.go b/avalanchego/utils/set/set.go new file mode 100644 index 00000000..76b65e77 --- /dev/null +++ b/avalanchego/utils/set/set.go @@ -0,0 +1,207 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package set + +import ( + "bytes" + + stdjson "encoding/json" + + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +// The minimum capacity of a set +const minSetSize = 16 + +var _ stdjson.Marshaler = (*Set[int])(nil) + +// Set is a set of elements. +type Set[T comparable] map[T]struct{} + +// Return a new set with initial capacity [size]. 
+// More or less than [size] elements can be added to this set. +// Using NewSet() rather than Set[T]{} is just an optimization that can +// be used if you know how many elements will be put in this set. +func NewSet[T comparable](size int) Set[T] { + if size < 0 { + return Set[T]{} + } + return make(map[T]struct{}, size) +} + +func (s *Set[T]) resize(size int) { + if *s == nil { + if minSetSize > size { + size = minSetSize + } + *s = make(map[T]struct{}, size) + } +} + +// Add all the elements to this set. +// If the element is already in the set, nothing happens. +func (s *Set[T]) Add(elts ...T) { + s.resize(2 * len(elts)) + for _, elt := range elts { + (*s)[elt] = struct{}{} + } +} + +// Union adds all the elements from the provided set to this set. +func (s *Set[T]) Union(set Set[T]) { + s.resize(2 * set.Len()) + for elt := range set { + (*s)[elt] = struct{}{} + } +} + +// Difference removes all the elements in [set] from [s]. +func (s *Set[T]) Difference(set Set[T]) { + for elt := range set { + delete(*s, elt) + } +} + +// Contains returns true iff the set contains this element. +func (s *Set[T]) Contains(elt T) bool { + _, contains := (*s)[elt] + return contains +} + +// Overlaps returns true if the intersection of the set is non-empty +func (s *Set[T]) Overlaps(big Set[T]) bool { + small := *s + if small.Len() > big.Len() { + small, big = big, small + } + + for elt := range small { + if _, ok := big[elt]; ok { + return true + } + } + return false +} + +// Len returns the number of elements in this set. +func (s Set[_]) Len() int { + return len(s) +} + +// Remove all the given elements from this set. +// If an element isn't in the set, it's ignored. 
+func (s *Set[T]) Remove(elts ...T) { + for _, elt := range elts { + delete(*s, elt) + } +} + +// Clear empties this set +func (s *Set[_]) Clear() { + maps.Clear(*s) +} + +// List converts this set into a list +func (s Set[T]) List() []T { + return maps.Keys(s) +} + +// CappedList returns a list of length at most [size]. +// Size should be >= 0. If size < 0, returns nil. +func (s Set[T]) CappedList(size int) []T { + if size < 0 { + return nil + } + if l := s.Len(); l < size { + size = l + } + i := 0 + elts := make([]T, size) + for elt := range s { + if i >= size { + break + } + elts[i] = elt + i++ + } + return elts +} + +// Equals returns true if the sets contain the same elements +func (s Set[T]) Equals(other Set[T]) bool { + return maps.Equal(s, other) +} + +// Removes and returns an element. +// If the set is empty, does nothing and returns false. +func (s *Set[T]) Pop() (T, bool) { + for elt := range *s { + delete(*s, elt) + return elt, true + } + return utils.Zero[T](), false +} + +func (s *Set[T]) UnmarshalJSON(b []byte) error { + str := string(b) + if str == json.Null { + return nil + } + var elts []T + if err := stdjson.Unmarshal(b, &elts); err != nil { + return err + } + s.Clear() + s.Add(elts...) + return nil +} + +func (s *Set[_]) MarshalJSON() ([]byte, error) { + var ( + eltBytes = make([][]byte, len(*s)) + i int + err error + ) + for elt := range *s { + eltBytes[i], err = stdjson.Marshal(elt) + if err != nil { + return nil, err + } + i++ + } + // Sort for determinism + utils.SortBytes(eltBytes) + + // Build the JSON + var ( + jsonBuf = bytes.Buffer{} + errs = wrappers.Errs{} + ) + _, err = jsonBuf.WriteString("[") + errs.Add(err) + for i, elt := range eltBytes { + _, err := jsonBuf.Write(elt) + errs.Add(err) + if i != len(eltBytes)-1 { + _, err := jsonBuf.WriteString(",") + errs.Add(err) + } + } + _, err = jsonBuf.WriteString("]") + errs.Add(err) + + return jsonBuf.Bytes(), errs.Err +} + +// Returns an element. 
If the set is empty, returns false +func (s *Set[T]) Peek() (T, bool) { + for elt := range *s { + return elt, true + } + return utils.Zero[T](), false +} diff --git a/avalanchego/utils/set/set_benchmark_test.go b/avalanchego/utils/set/set_benchmark_test.go new file mode 100644 index 00000000..c762b72c --- /dev/null +++ b/avalanchego/utils/set/set_benchmark_test.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package set + +import ( + "strconv" + "testing" +) + +func BenchmarkSetList(b *testing.B) { + sizes := []int{5, 25, 100, 100_000} // Test with various sizes + for size := range sizes { + b.Run(strconv.Itoa(size), func(b *testing.B) { + set := Set[int]{} + for i := 0; i < size; i++ { + set.Add(i) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + set.List() + } + }) + } +} + +func BenchmarkSetClear(b *testing.B) { + for _, numElts := range []int{10, 25, 50, 100, 250, 500, 1000} { + b.Run(strconv.Itoa(numElts), func(b *testing.B) { + set := NewSet[int](numElts) + for n := 0; n < b.N; n++ { + for i := 0; i < numElts; i++ { + set.Add(i) + } + set.Clear() + } + }) + } +} diff --git a/avalanchego/utils/set/set_test.go b/avalanchego/utils/set/set_test.go new file mode 100644 index 00000000..341d4661 --- /dev/null +++ b/avalanchego/utils/set/set_test.go @@ -0,0 +1,194 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package set + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSet(t *testing.T) { + require := require.New(t) + id1 := 1 + + s := Set[int]{id1: struct{}{}} + + s.Add(id1) + require.True(s.Contains(id1)) + + s.Remove(id1) + require.False(s.Contains(id1)) + + s.Add(id1) + require.True(s.Contains(id1)) + require.Len(s.List(), 1) + require.Equal(len(s.List()), 1) + require.Equal(id1, s.List()[0]) + + s.Clear() + require.False(s.Contains(id1)) + + s.Add(id1) + + s2 := Set[int]{} + + require.False(s.Overlaps(s2)) + + s2.Union(s) + require.True(s2.Contains(id1)) + require.True(s.Overlaps(s2)) + + s2.Difference(s) + require.False(s2.Contains(id1)) + require.False(s.Overlaps(s2)) +} + +func TestSetCappedList(t *testing.T) { + require := require.New(t) + s := Set[int]{} + + id := 0 + + require.Len(s.CappedList(0), 0) + + s.Add(id) + + require.Len(s.CappedList(0), 0) + require.Len(s.CappedList(1), 1) + require.Equal(s.CappedList(1)[0], id) + require.Len(s.CappedList(2), 1) + require.Equal(s.CappedList(2)[0], id) + + id2 := 1 + s.Add(id2) + + require.Len(s.CappedList(0), 0) + require.Len(s.CappedList(1), 1) + require.Len(s.CappedList(2), 2) + require.Len(s.CappedList(3), 2) + gotList := s.CappedList(2) + require.Contains(gotList, id) + require.Contains(gotList, id2) + require.NotEqual(gotList[0], gotList[1]) +} + +func TestSetClear(t *testing.T) { + set := Set[int]{} + for i := 0; i < 25; i++ { + set.Add(i) + } + set.Clear() + require.Len(t, set, 0) + set.Add(1337) + require.Len(t, set, 1) +} + +func TestSetPop(t *testing.T) { + var s Set[int] + _, ok := s.Pop() + require.False(t, ok) + + s = make(Set[int]) + _, ok = s.Pop() + require.False(t, ok) + + id1, id2 := 0, 1 + s.Add(id1, id2) + + got, ok := s.Pop() + require.True(t, ok) + require.True(t, got == id1 || got == id2) + require.EqualValues(t, 1, s.Len()) + + got, ok = s.Pop() + require.True(t, ok) + require.True(t, got == id1 || got == id2) + 
require.EqualValues(t, 0, s.Len()) + + _, ok = s.Pop() + require.False(t, ok) +} + +func TestSetMarshalJSON(t *testing.T) { + require := require.New(t) + set := Set[int]{} + { + asJSON, err := set.MarshalJSON() + require.NoError(err) + require.Equal("[]", string(asJSON)) + } + id1, id2 := 1, 2 + id1JSON, err := json.Marshal(id1) + require.NoError(err) + id2JSON, err := json.Marshal(id2) + require.NoError(err) + set.Add(id1) + { + asJSON, err := set.MarshalJSON() + require.NoError(err) + require.Equal(fmt.Sprintf("[%s]", string(id1JSON)), string(asJSON)) + } + set.Add(id2) + { + asJSON, err := set.MarshalJSON() + require.NoError(err) + require.Equal(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON)), string(asJSON)) + } +} + +func TestSetUnmarshalJSON(t *testing.T) { + require := require.New(t) + set := Set[int]{} + { + err := set.UnmarshalJSON([]byte("[]")) + require.NoError(err) + require.Empty(set) + } + id1, id2 := 1, 2 + id1JSON, err := json.Marshal(id1) + require.NoError(err) + id2JSON, err := json.Marshal(id2) + require.NoError(err) + { + err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%s]", string(id1JSON)))) + require.NoError(err) + require.Len(set, 1) + require.Contains(set, id1) + } + { + err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON)))) + require.NoError(err) + require.Len(set, 2) + require.Contains(set, id1) + require.Contains(set, id2) + } + { + err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d]", 3, 4, 5))) + require.NoError(err) + require.Len(set, 3) + require.Contains(set, 3) + require.Contains(set, 4) + require.Contains(set, 5) + } + { + err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d, %d]", 3, 4, 5, 3))) + require.NoError(err) + require.Len(set, 3) + require.Contains(set, 3) + require.Contains(set, 4) + require.Contains(set, 5) + } + { + set1 := Set[int]{} + set2 := Set[int]{} + err := set1.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON)))) + require.NoError(err) 
+ err = set2.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id2JSON), string(id1JSON)))) + require.NoError(err) + require.Equal(set1, set2) + } +} diff --git a/avalanchego/utils/sorting.go b/avalanchego/utils/sorting.go index 4b1e3e53..6c3911f1 100644 --- a/avalanchego/utils/sorting.go +++ b/avalanchego/utils/sorting.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils @@ -6,61 +6,103 @@ package utils import ( "bytes" "sort" + + "golang.org/x/exp/constraints" + "golang.org/x/exp/slices" + + "github.com/ava-labs/avalanchego/utils/hashing" ) -// IsSortedAndUnique returns true if the elements in the data are unique and sorted. -func IsSortedAndUnique(data sort.Interface) bool { - for i := data.Len() - 2; i >= 0; i-- { - if !data.Less(i, i+1) { - return false - } - } - return true +// TODO can we handle sorting where the Less function relies on a codec? + +type Sortable[T any] interface { + Less(T) bool } -type innerSortUint32 []uint32 +// Sorts the elements of [s]. +func Sort[T Sortable[T]](s []T) { + slices.SortFunc(s, func(i, j T) bool { + return i.Less(j) + }) +} -func (su32 innerSortUint32) Less(i, j int) bool { return su32[i] < su32[j] } -func (su32 innerSortUint32) Len() int { return len(su32) } -func (su32 innerSortUint32) Swap(i, j int) { su32[j], su32[i] = su32[i], su32[j] } +// Sorts the elements of [s] based on their hashes. +func SortByHash[T ~[]byte](s []T) { + slices.SortFunc(s, func(i, j T) bool { + iHash := hashing.ComputeHash256(i) + jHash := hashing.ComputeHash256(j) + return bytes.Compare(iHash, jHash) == -1 + }) +} -// SortUint32 sorts an uint32 array -func SortUint32(u32 []uint32) { sort.Sort(innerSortUint32(u32)) } +// Sorts a 2D byte slice. +// Each byte slice is not sorted internally; the byte slices are sorted relative +// to one another. 
+func SortBytes[T ~[]byte](arr []T) { + slices.SortFunc(arr, func(i, j T) bool { + return bytes.Compare(i, j) == -1 + }) +} -// IsSortedAndUniqueUint32 returns true if the array of uint32s are sorted and unique -func IsSortedAndUniqueUint32(arr []uint32) bool { - for i := 0; i < len(arr)-1; i++ { - if arr[i] >= arr[i+1] { +// Returns true iff the elements in [s] are unique and sorted. +func IsSortedAndUniqueSortable[T Sortable[T]](s []T) bool { + for i := 0; i < len(s)-1; i++ { + if !s[i].Less(s[i+1]) { return false } } return true } -type innerSortUint64 []uint64 - -func (su64 innerSortUint64) Less(i, j int) bool { return su64[i] < su64[j] } -func (su64 innerSortUint64) Len() int { return len(su64) } -func (su64 innerSortUint64) Swap(i, j int) { su64[j], su64[i] = su64[i], su64[j] } - -// SortUint64 sorts an uint64 array -func SortUint64(u64 []uint64) { sort.Sort(innerSortUint64(u64)) } - -// IsSortedAndUniqueUint64 returns true if the array of uint64s are sorted and unique -func IsSortedAndUniqueUint64(u64 []uint64) bool { return IsSortedAndUnique(innerSortUint64(u64)) } - -type innerSortBytes [][]byte - -func (arr innerSortBytes) Less(i, j int) bool { - return bytes.Compare(arr[i], arr[j]) == -1 +// Returns true iff the elements in [s] are unique and sorted. +func IsSortedAndUniqueOrdered[T constraints.Ordered](s []T) bool { + for i := 0; i < len(s)-1; i++ { + if s[i] >= s[i+1] { + return false + } + } + return true } -func (arr innerSortBytes) Len() int { return len(arr) } -func (arr innerSortBytes) Swap(i, j int) { arr[j], arr[i] = arr[i], arr[j] } +// Returns true iff the elements in [s] are unique and sorted +// based by their hashes. 
+func IsSortedAndUniqueByHash[T ~[]byte](s []T) bool { + if len(s) <= 1 { + return true + } + rightHash := hashing.ComputeHash256(s[0]) + for i := 1; i < len(s); i++ { + leftHash := rightHash + rightHash = hashing.ComputeHash256(s[i]) + if bytes.Compare(leftHash, rightHash) != -1 { + return false + } + } + return true +} -// Sort2DBytes sorts a 2D byte array -// Each byte array is not sorted internally; the byte arrays are sorted relative to another. -func Sort2DBytes(arr [][]byte) { sort.Sort(innerSortBytes(arr)) } +// Returns true iff the elements in [s] are unique. +func IsUnique[T comparable](elts []T) bool { + // Can't use set.Set because it'd be a circular import. + asMap := make(map[T]struct{}, len(elts)) + for _, elt := range elts { + if _, ok := asMap[elt]; ok { + return false + } + asMap[elt] = struct{}{} + } + return true +} -// IsSorted2DBytes returns true iff [arr] is sorted -func IsSorted2DBytes(arr [][]byte) bool { return sort.IsSorted(innerSortBytes(arr)) } +// IsSortedAndUnique returns true if the elements in the data are unique and +// sorted. +// +// Deprecated: Use one of the other [IsSortedAndUnique...] functions instead. +func IsSortedAndUnique(data sort.Interface) bool { + for i := 0; i < data.Len()-1; i++ { + if !data.Less(i, i+1) { + return false + } + } + return true +} diff --git a/avalanchego/utils/sorting_test.go b/avalanchego/utils/sorting_test.go index 85943ae2..9de3dffd 100644 --- a/avalanchego/utils/sorting_test.go +++ b/avalanchego/utils/sorting_test.go @@ -1,120 +1,137 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package utils import ( - "math/rand" "testing" + + "github.com/stretchr/testify/require" ) -func TestSort2dByteArray(t *testing.T) { - numSubArrs := 20 - maxLength := 100 - - // Create a random 2D array - arr := [][]byte{} - for i := 0; i < numSubArrs; i++ { - subArrLen := rand.Intn(maxLength) // #nosec G404 - subArr := make([]byte, subArrLen) - _, err := rand.Read(subArr) // #nosec G404 - if err != nil { - t.Fatal(err) - } - arr = append(arr, subArr) - } +var _ Sortable[sortable] = sortable(0) - // In the unlikely event the random array is sorted, unsort it - if IsSorted2DBytes(arr) { - arr[0], arr[len(arr)-1] = arr[len(arr)-1], arr[0] - } - Sort2DBytes(arr) // sort it - if !IsSorted2DBytes(arr) { - t.Fatal("should be sorted") - } +type sortable int + +func (s sortable) Less(other sortable) bool { + return s < other } -func TestSortUint32Array(t *testing.T) { - tests := []struct { - name string - arr []uint32 - isSorted bool - }{ - { - name: "nil", - arr: nil, - isSorted: true, - }, - { - name: "[]", - arr: []uint32{}, - isSorted: true, - }, - { - name: "[0]", - arr: []uint32{0}, - isSorted: true, - }, - { - name: "[0,0]", - arr: []uint32{0, 0}, - isSorted: false, - }, - { - name: "[0,1]", - arr: []uint32{0, 1}, - isSorted: true, - }, - { - name: "[1,0]", - arr: []uint32{1, 0}, - isSorted: false, - }, - { - name: "[0,1,2]", - arr: []uint32{0, 1, 2}, - isSorted: true, - }, - { - name: "[0,0,1]", - arr: []uint32{0, 0, 1}, - isSorted: false, - }, - { - name: "[0,1,1]", - arr: []uint32{0, 1, 1}, - isSorted: false, - }, - { - name: "[2,1,2]", - arr: []uint32{2, 1, 2}, - isSorted: false, - }, - { - name: "[2,1,3]", - arr: []uint32{2, 1, 3}, - isSorted: false, - }, - { - name: "[0,10,20]", - arr: []uint32{0, 10, 20}, - isSorted: true, - }, - { - name: "[10,20,25]", - arr: []uint32{10, 20, 25}, - isSorted: true, - }, - } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if test.isSorted { - if !IsSortedAndUniqueUint32(test.arr) { - t.Fatal("should 
have been marked as sorted and unique") - } - } else if IsSortedAndUniqueUint32(test.arr) { - t.Fatal("shouldn't have been marked as sorted and unique") - } - }) +func TestSortSliceSortable(t *testing.T) { + require := require.New(t) + + var s []sortable + Sort(s) + require.True(IsSortedAndUniqueSortable(s)) + require.Equal(0, len(s)) + + s = []sortable{1} + Sort(s) + require.True(IsSortedAndUniqueSortable(s)) + require.Equal([]sortable{1}, s) + + s = []sortable{1, 1} + Sort(s) + require.Equal([]sortable{1, 1}, s) + + s = []sortable{1, 2} + Sort(s) + require.True(IsSortedAndUniqueSortable(s)) + require.Equal([]sortable{1, 2}, s) + + s = []sortable{2, 1} + Sort(s) + require.True(IsSortedAndUniqueSortable(s)) + require.Equal([]sortable{1, 2}, s) + + s = []sortable{1, 2, 1} + Sort(s) + require.Equal([]sortable{1, 1, 2}, s) + + s = []sortable{2, 1, 2} + Sort(s) + require.Equal([]sortable{1, 2, 2}, s) + + s = []sortable{3, 1, 2} + Sort(s) + require.Equal([]sortable{1, 2, 3}, s) +} + +func TestIsSortedAndUniqueSortable(t *testing.T) { + require := require.New(t) + + var s []sortable + require.True(IsSortedAndUniqueSortable(s)) + + s = []sortable{} + require.True(IsSortedAndUniqueSortable(s)) + + s = []sortable{1} + require.True(IsSortedAndUniqueSortable(s)) + + s = []sortable{1, 2} + require.True(IsSortedAndUniqueSortable(s)) + + s = []sortable{1, 1} + require.False(IsSortedAndUniqueSortable(s)) + + s = []sortable{2, 1} + require.False(IsSortedAndUniqueSortable(s)) + + s = []sortable{1, 2, 1} + require.False(IsSortedAndUniqueSortable(s)) + + s = []sortable{1, 2, 0} + require.False(IsSortedAndUniqueSortable(s)) +} + +func TestIsUnique(t *testing.T) { + require := require.New(t) + + var s []int + require.True(IsUnique(s)) + + s = []int{} + require.True(IsUnique(s)) + + s = []int{1} + require.True(IsUnique(s)) + + s = []int{1, 2} + require.True(IsUnique(s)) + + s = []int{1, 1} + require.False(IsUnique(s)) + + s = []int{2, 1} + require.True(IsUnique(s)) + + s = []int{1, 2, 
1} + require.False(IsUnique(s)) +} + +func TestSortByHash(t *testing.T) { + require := require.New(t) + + s := [][]byte{} + SortByHash(s) + require.Len(s, 0) + + s = [][]byte{{1}} + SortByHash(s) + require.Len(s, 1) + require.Equal([]byte{1}, s[0]) + + s = [][]byte{{1}, {2}} + SortByHash(s) + require.Len(s, 2) + require.Equal([]byte{1}, s[0]) + require.Equal([]byte{2}, s[1]) + + for i := byte(0); i < 100; i++ { + s = [][]byte{{i}, {i + 1}, {i + 2}} + SortByHash(s) + require.Len(s, 3) + require.True(IsSortedAndUniqueByHash(s)) } } diff --git a/avalanchego/utils/stacktrace.go b/avalanchego/utils/stacktrace.go index 37a570df..d68ee4ea 100644 --- a/avalanchego/utils/stacktrace.go +++ b/avalanchego/utils/stacktrace.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils diff --git a/avalanchego/utils/storage/storage_common.go b/avalanchego/utils/storage/storage_common.go index 7979ebfa..cf1fbd3b 100644 --- a/avalanchego/utils/storage/storage_common.go +++ b/avalanchego/utils/storage/storage_common.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package storage diff --git a/avalanchego/utils/storage/storage_unix.go b/avalanchego/utils/storage/storage_unix.go index 97a24de3..a5995b1d 100644 --- a/avalanchego/utils/storage/storage_unix.go +++ b/avalanchego/utils/storage/storage_unix.go @@ -1,7 +1,7 @@ //go:build !windows // +build !windows -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package storage diff --git a/avalanchego/utils/storage/storage_windows.go b/avalanchego/utils/storage/storage_windows.go index ab70fd96..7879fd65 100644 --- a/avalanchego/utils/storage/storage_windows.go +++ b/avalanchego/utils/storage/storage_windows.go @@ -1,7 +1,7 @@ //go:build windows // +build windows -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package storage diff --git a/avalanchego/utils/subprocess/linux_new.go b/avalanchego/utils/subprocess/linux_new.go deleted file mode 100644 index 6b54386b..00000000 --- a/avalanchego/utils/subprocess/linux_new.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build linux -// +build linux - -// ^ syscall.SysProcAttr only has field Pdeathsig on Linux - -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package subprocess - -import ( - "os/exec" - "syscall" -) - -func New(path string, args ...string) *exec.Cmd { - cmd := exec.Command(path, args...) - cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGTERM} - return cmd -} diff --git a/avalanchego/utils/subprocess/non_linux_new.go b/avalanchego/utils/subprocess/non_linux_new.go deleted file mode 100644 index fabc137c..00000000 --- a/avalanchego/utils/subprocess/non_linux_new.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !linux -// +build !linux - -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package subprocess - -import "os/exec" - -func New(path string, args ...string) *exec.Cmd { - return exec.Command(path, args...) 
-} diff --git a/avalanchego/utils/timer/adaptive_timeout_manager.go b/avalanchego/utils/timer/adaptive_timeout_manager.go index 45567659..8bfab057 100644 --- a/avalanchego/utils/timer/adaptive_timeout_manager.go +++ b/avalanchego/utils/timer/adaptive_timeout_manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer @@ -13,7 +13,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/message" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -22,23 +21,29 @@ import ( var ( errNonPositiveHalflife = errors.New("timeout halflife must be positive") - _ heap.Interface = &timeoutQueue{} - _ AdaptiveTimeoutManager = &adaptiveTimeoutManager{} + _ heap.Interface = (*timeoutQueue)(nil) + _ AdaptiveTimeoutManager = (*adaptiveTimeoutManager)(nil) ) type adaptiveTimeout struct { - index int // Index in the wait queue - id ids.ID // Unique ID of this timeout - handler func() // Function to execute if timed out - duration time.Duration // How long this timeout was set for - deadline time.Time // When this timeout should be fired - op message.Op // Type of this outstanding request + index int // Index in the wait queue + id ids.RequestID // Unique ID of this timeout + handler func() // Function to execute if timed out + duration time.Duration // How long this timeout was set for + deadline time.Time // When this timeout should be fired + measureLatency bool // Whether this request should impact latency } type timeoutQueue []*adaptiveTimeout -func (tq timeoutQueue) Len() int { return len(tq) } -func (tq timeoutQueue) Less(i, j int) bool { return tq[i].deadline.Before(tq[j].deadline) } +func (tq timeoutQueue) Len() int { + return len(tq) +} + +func 
(tq timeoutQueue) Less(i, j int) bool { + return tq[i].deadline.Before(tq[j].deadline) +} + func (tq timeoutQueue) Swap(i, j int) { tq[i], tq[j] = tq[j], tq[i] tq[i].index = i @@ -87,10 +92,10 @@ type AdaptiveTimeoutManager interface { TimeoutDuration() time.Duration // Registers a timeout for the item with the given [id]. // If the timeout occurs before the item is Removed, [timeoutHandler] is called. - Put(id ids.ID, op message.Op, timeoutHandler func()) + Put(id ids.RequestID, measureLatency bool, timeoutHandler func()) // Remove the timeout associated with [id]. // Its timeout handler will not be called. - Remove(id ids.ID) + Remove(id ids.RequestID) // ObserveLatency manually registers a response latency. // We use this to pretend that it a query to a benched validator // timed out when actually, we never even sent them a request. @@ -112,7 +117,7 @@ type adaptiveTimeoutManager struct { minimumTimeout time.Duration maximumTimeout time.Duration currentTimeout time.Duration // Amount of time before a timeout - timeoutMap map[ids.ID]*adaptiveTimeout + timeoutMap map[ids.RequestID]*adaptiveTimeout timeoutQueue timeoutQueue timer *Timer // Timer that will fire to clear the timeouts } @@ -158,7 +163,7 @@ func NewAdaptiveTimeoutManager( maximumTimeout: config.MaximumTimeout, currentTimeout: config.InitialTimeout, timeoutCoefficient: config.TimeoutCoefficient, - timeoutMap: make(map[ids.ID]*adaptiveTimeout), + timeoutMap: make(map[ids.RequestID]*adaptiveTimeout), } tm.timer = NewTimer(tm.timeout) tm.averager = math.NewAverager(float64(config.InitialTimeout), config.TimeoutHalflife, tm.clock.Time()) @@ -180,28 +185,32 @@ func (tm *adaptiveTimeoutManager) TimeoutDuration() time.Duration { return tm.currentTimeout } -func (tm *adaptiveTimeoutManager) Dispatch() { tm.timer.Dispatch() } +func (tm *adaptiveTimeoutManager) Dispatch() { + tm.timer.Dispatch() +} -func (tm *adaptiveTimeoutManager) Stop() { tm.timer.Stop() } +func (tm *adaptiveTimeoutManager) Stop() { + 
tm.timer.Stop() +} -func (tm *adaptiveTimeoutManager) Put(id ids.ID, op message.Op, timeoutHandler func()) { +func (tm *adaptiveTimeoutManager) Put(id ids.RequestID, measureLatency bool, timeoutHandler func()) { tm.lock.Lock() defer tm.lock.Unlock() - tm.put(id, op, timeoutHandler) + tm.put(id, measureLatency, timeoutHandler) } // Assumes [tm.lock] is held -func (tm *adaptiveTimeoutManager) put(id ids.ID, op message.Op, handler func()) { +func (tm *adaptiveTimeoutManager) put(id ids.RequestID, measureLatency bool, handler func()) { now := tm.clock.Time() tm.remove(id, now) timeout := &adaptiveTimeout{ - id: id, - handler: handler, - duration: tm.currentTimeout, - deadline: now.Add(tm.currentTimeout), - op: op, + id: id, + handler: handler, + duration: tm.currentTimeout, + deadline: now.Add(tm.currentTimeout), + measureLatency: measureLatency, } tm.timeoutMap[id] = timeout tm.numPendingTimeouts.Set(float64(len(tm.timeoutMap))) @@ -210,7 +219,7 @@ func (tm *adaptiveTimeoutManager) put(id ids.ID, op message.Op, handler func()) tm.setNextTimeoutTime() } -func (tm *adaptiveTimeoutManager) Remove(id ids.ID) { +func (tm *adaptiveTimeoutManager) Remove(id ids.RequestID) { tm.lock.Lock() defer tm.lock.Unlock() @@ -218,17 +227,14 @@ func (tm *adaptiveTimeoutManager) Remove(id ids.ID) { } // Assumes [tm.lock] is held -func (tm *adaptiveTimeoutManager) remove(id ids.ID, now time.Time) { +func (tm *adaptiveTimeoutManager) remove(id ids.RequestID, now time.Time) { timeout, exists := tm.timeoutMap[id] if !exists { return } // Observe the response time to update average network response time. - // Don't include Get requests in calculation, since an adversary - // can cause you to issue a Get request and then cause it to timeout, - // increasing your timeout. 
- if timeout.op != message.Get { + if timeout.measureLatency { timeoutRegisteredAt := timeout.deadline.Add(-1 * timeout.duration) latency := now.Sub(timeoutRegisteredAt) tm.observeLatencyAndUpdateTimeout(latency, now) diff --git a/avalanchego/utils/timer/adaptive_timeout_manager_test.go b/avalanchego/utils/timer/adaptive_timeout_manager_test.go index 3c8d2a80..72f1e0c2 100644 --- a/avalanchego/utils/timer/adaptive_timeout_manager_test.go +++ b/avalanchego/utils/timer/adaptive_timeout_manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer @@ -13,7 +13,6 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/message" ) // Test that Initialize works @@ -125,14 +124,14 @@ func TestAdaptiveTimeoutManager(t *testing.T) { numSuccessful-- if numSuccessful > 0 { - tm.Put(ids.ID{byte(numSuccessful)}, message.PullQuery, *callback) + tm.Put(ids.RequestID{Op: byte(numSuccessful)}, true, *callback) } if numSuccessful >= 0 { wg.Done() } if numSuccessful%2 == 0 { - tm.Remove(ids.ID{byte(numSuccessful)}) - tm.Put(ids.ID{byte(numSuccessful)}, message.PullQuery, *callback) + tm.Remove(ids.RequestID{Op: byte(numSuccessful)}) + tm.Put(ids.RequestID{Op: byte(numSuccessful)}, true, *callback) } } (*callback)() diff --git a/avalanchego/utils/timer/eta.go b/avalanchego/utils/timer/eta.go index d45949a6..fe3b6add 100644 --- a/avalanchego/utils/timer/eta.go +++ b/avalanchego/utils/timer/eta.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package timer diff --git a/avalanchego/utils/timer/meter.go b/avalanchego/utils/timer/meter.go index 4462fe58..c78376e1 100644 --- a/avalanchego/utils/timer/meter.go +++ b/avalanchego/utils/timer/meter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/avalanchego/utils/timer/mockable/clock.go b/avalanchego/utils/timer/mockable/clock.go index 4c7cd138..c331ff78 100644 --- a/avalanchego/utils/timer/mockable/clock.go +++ b/avalanchego/utils/timer/mockable/clock.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package mockable diff --git a/avalanchego/utils/timer/mockable/clock_test.go b/avalanchego/utils/timer/mockable/clock_test.go index a4ff8f75..b43da19c 100644 --- a/avalanchego/utils/timer/mockable/clock_test.go +++ b/avalanchego/utils/timer/mockable/clock_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package mockable diff --git a/avalanchego/utils/timer/staged_timer.go b/avalanchego/utils/timer/staged_timer.go index 5a05e670..eec885ee 100644 --- a/avalanchego/utils/timer/staged_timer.go +++ b/avalanchego/utils/timer/staged_timer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package timer diff --git a/avalanchego/utils/timer/staged_timer_test.go b/avalanchego/utils/timer/staged_timer_test.go index 98b0160f..bd83ef20 100644 --- a/avalanchego/utils/timer/staged_timer_test.go +++ b/avalanchego/utils/timer/staged_timer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/avalanchego/utils/timer/timeout_manager.go b/avalanchego/utils/timer/timeout_manager.go deleted file mode 100644 index 4f60932d..00000000 --- a/avalanchego/utils/timer/timeout_manager.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package timer - -import ( - "container/list" - "sync" - "time" - - "github.com/ava-labs/avalanchego/ids" -) - -type timeout struct { - id ids.ID - handler func() - timer time.Time -} - -// TimeoutManager is a manager for timeouts. -type TimeoutManager struct { - lock sync.Mutex - duration time.Duration // Amount of time before a timeout - timeoutMap map[ids.ID]*list.Element - timeoutList *list.List - timer *Timer // Timer that will fire to clear the timeouts -} - -// Initialize is a constructor b/c Golang, in its wisdom, doesn't ... have them? -func (tm *TimeoutManager) Initialize(duration time.Duration) { - tm.duration = duration - tm.timeoutMap = make(map[ids.ID]*list.Element) - tm.timeoutList = list.New() - tm.timer = NewTimer(tm.Timeout) -} - -func (tm *TimeoutManager) Dispatch() { tm.timer.Dispatch() } - -// Stop executing timeouts -func (tm *TimeoutManager) Stop() { tm.timer.Stop() } - -// Put puts hash into the hash map -func (tm *TimeoutManager) Put(id ids.ID, handler func()) { - tm.lock.Lock() - defer tm.lock.Unlock() - - tm.put(id, handler) -} - -// Remove the item that no longer needs to be there. 
-func (tm *TimeoutManager) Remove(id ids.ID) { - tm.lock.Lock() - defer tm.lock.Unlock() - - tm.remove(id) -} - -// Timeout registers a timeout -func (tm *TimeoutManager) Timeout() { - tm.lock.Lock() - defer tm.lock.Unlock() - - tm.timeout() -} - -func (tm *TimeoutManager) timeout() { - timeBound := time.Now().Add(-tm.duration) - // removeExpiredHead returns false once there is nothing left to remove - for { - timeout := tm.removeExpiredHead(timeBound) - if timeout == nil { - break - } - - // Don't execute a callback with a lock held - tm.lock.Unlock() - timeout() - tm.lock.Lock() - } - tm.registerTimeout() -} - -func (tm *TimeoutManager) put(id ids.ID, handler func()) { - tm.remove(id) - - tm.timeoutMap[id] = tm.timeoutList.PushBack(timeout{ - id: id, - handler: handler, - timer: time.Now(), - }) - - if tm.timeoutList.Len() == 1 { - tm.registerTimeout() - } -} - -func (tm *TimeoutManager) remove(id ids.ID) { - e, exists := tm.timeoutMap[id] - if !exists { - return - } - delete(tm.timeoutMap, id) - tm.timeoutList.Remove(e) -} - -// Returns true if the head was removed, false otherwise -func (tm *TimeoutManager) removeExpiredHead(t time.Time) func() { - if tm.timeoutList.Len() == 0 { - return nil - } - - e := tm.timeoutList.Front() - head := e.Value.(timeout) - - headTime := head.timer - if headTime.Before(t) { - tm.remove(head.id) - return head.handler - } - return nil -} - -func (tm *TimeoutManager) registerTimeout() { - if tm.timeoutList.Len() == 0 { - // There are no pending timeouts - tm.timer.Cancel() - return - } - - e := tm.timeoutList.Front() - head := e.Value.(timeout) - - timeBound := time.Now().Add(-tm.duration) - headTime := head.timer - duration := headTime.Sub(timeBound) - - tm.timer.SetTimeoutIn(duration) -} diff --git a/avalanchego/utils/timer/timeout_manager_test.go b/avalanchego/utils/timer/timeout_manager_test.go deleted file mode 100644 index 5a11c917..00000000 --- a/avalanchego/utils/timer/timeout_manager_test.go +++ /dev/null @@ -1,25 +0,0 @@ 
-// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package timer - -import ( - "sync" - "testing" - "time" - - "github.com/ava-labs/avalanchego/ids" -) - -func TestTimeoutManager(t *testing.T) { - wg := sync.WaitGroup{} - wg.Add(2) - defer wg.Wait() - - tm := TimeoutManager{} - tm.Initialize(time.Millisecond) - go tm.Dispatch() - - tm.Put(ids.ID{}, wg.Done) - tm.Put(ids.ID{1}, wg.Done) -} diff --git a/avalanchego/utils/timer/timer.go b/avalanchego/utils/timer/timer.go index 546980cc..1b5914fe 100644 --- a/avalanchego/utils/timer/timer.go +++ b/avalanchego/utils/timer/timer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer diff --git a/avalanchego/utils/timer/timer_test.go b/avalanchego/utils/timer/timer_test.go index d3e049e2..228b19f2 100644 --- a/avalanchego/utils/timer/timer_test.go +++ b/avalanchego/utils/timer/timer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package timer @@ -9,7 +9,7 @@ import ( "time" ) -func TestTimer(t *testing.T) { +func TestTimer(*testing.T) { wg := sync.WaitGroup{} wg.Add(1) defer wg.Wait() diff --git a/avalanchego/utils/ulimit/ulimit_bsd.go b/avalanchego/utils/ulimit/ulimit_bsd.go index c30bfdfb..191b7882 100644 --- a/avalanchego/utils/ulimit/ulimit_bsd.go +++ b/avalanchego/utils/ulimit/ulimit_bsd.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
//go:build freebsd diff --git a/avalanchego/utils/ulimit/ulimit_darwin.go b/avalanchego/utils/ulimit/ulimit_darwin.go index 77f2594a..9eaab72b 100644 --- a/avalanchego/utils/ulimit/ulimit_darwin.go +++ b/avalanchego/utils/ulimit/ulimit_darwin.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build darwin diff --git a/avalanchego/utils/ulimit/ulimit_test.go b/avalanchego/utils/ulimit/ulimit_test.go deleted file mode 100644 index 0ffe3aa1..00000000 --- a/avalanchego/utils/ulimit/ulimit_test.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package ulimit - -import ( - "testing" - - "github.com/ava-labs/avalanchego/utils/logging" -) - -// Test_SetDefault performs sanity checks for the os default. -func Test_SetDefault(t *testing.T) { - err := Set(DefaultFDLimit, logging.NoLog{}) - if err != nil { - t.Skipf("default fd-limit failed %v", err) - } -} diff --git a/avalanchego/utils/ulimit/ulimit_unix.go b/avalanchego/utils/ulimit/ulimit_unix.go index 12d85e11..898b361c 100644 --- a/avalanchego/utils/ulimit/ulimit_unix.go +++ b/avalanchego/utils/ulimit/ulimit_unix.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. //go:build linux || netbsd || openbsd diff --git a/avalanchego/utils/ulimit/ulimit_windows.go b/avalanchego/utils/ulimit/ulimit_windows.go index 693682af..7646d6f1 100644 --- a/avalanchego/utils/ulimit/ulimit_windows.go +++ b/avalanchego/utils/ulimit/ulimit_windows.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
//go:build windows diff --git a/avalanchego/utils/units/avax.go b/avalanchego/utils/units/avax.go index c9eecc7e..341fd8be 100644 --- a/avalanchego/utils/units/avax.go +++ b/avalanchego/utils/units/avax.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package units diff --git a/avalanchego/utils/units/bytes.go b/avalanchego/utils/units/bytes.go index 3b2fa5e0..93678e95 100644 --- a/avalanchego/utils/units/bytes.go +++ b/avalanchego/utils/units/bytes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package units diff --git a/avalanchego/utils/window/window.go b/avalanchego/utils/window/window.go index dc9f03d5..da14e060 100644 --- a/avalanchego/utils/window/window.go +++ b/avalanchego/utils/window/window.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package window @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/utils/timer/mockable" ) -var _ Window[struct{}] = &window[struct{}]{} +var _ Window[struct{}] = (*window[struct{}])(nil) // Window is an interface which represents a sliding window of elements. 
type Window[T any] interface { @@ -32,7 +32,7 @@ type window[T any] struct { // mutex for synchronization lock sync.Mutex // elements in the window - elements buffer.UnboundedQueue[node[T]] + elements buffer.Deque[node[T]] } // Config exposes parameters for Window @@ -48,7 +48,7 @@ func New[T any](config Config) Window[T] { clock: config.Clock, ttl: config.TTL, maxSize: config.MaxSize, - elements: buffer.NewUnboundedSliceQueue[node[T]](config.MaxSize + 1), + elements: buffer.NewUnboundedDeque[node[T]](config.MaxSize + 1), } } @@ -60,11 +60,11 @@ func (w *window[T]) Add(value T) { w.removeStaleNodes() if w.elements.Len() >= w.maxSize { - _, _ = w.elements.Dequeue() + _, _ = w.elements.PopLeft() } // add the new block id - w.elements.Enqueue(node[T]{ + w.elements.PushRight(node[T]{ value: value, entryTime: w.clock.Time(), }) @@ -77,7 +77,7 @@ func (w *window[T]) Oldest() (T, bool) { w.removeStaleNodes() - oldest, ok := w.elements.PeekHead() + oldest, ok := w.elements.PeekLeft() if !ok { return utils.Zero[T](), false } @@ -99,11 +99,11 @@ func (w *window[T]) removeStaleNodes() { // window. Nodes are guaranteed to be strictly increasing in entry time, // so we can break this loop once we find the first non-stale one. for { - oldest, ok := w.elements.PeekHead() + oldest, ok := w.elements.PeekLeft() if !ok || w.clock.Time().Sub(oldest.entryTime) <= w.ttl { return } - _, _ = w.elements.Dequeue() + _, _ = w.elements.PopLeft() } } diff --git a/avalanchego/utils/window/window_test.go b/avalanchego/utils/window/window_test.go index 0c3acdbc..8ca71567 100644 --- a/avalanchego/utils/window/window_test.go +++ b/avalanchego/utils/window/window_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package window diff --git a/avalanchego/utils/wrappers/closers.go b/avalanchego/utils/wrappers/closers.go index 834c64e2..d366e928 100644 --- a/avalanchego/utils/wrappers/closers.go +++ b/avalanchego/utils/wrappers/closers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package wrappers @@ -24,7 +24,7 @@ func (c *Closer) Add(closer io.Closer) { } // Close closes each of the closers add to [c] and returns the first error -// that occurs or nil if no error occurs. +// that occurs or nil if no error occurs. func (c *Closer) Close() error { c.lock.Lock() closers := c.closers diff --git a/avalanchego/utils/wrappers/errors.go b/avalanchego/utils/wrappers/errors.go index 2484f55d..dab20705 100644 --- a/avalanchego/utils/wrappers/errors.go +++ b/avalanchego/utils/wrappers/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package wrappers @@ -7,11 +7,13 @@ import ( "strings" ) -var _ error = &aggregate{} +var _ error = (*aggregate)(nil) type Errs struct{ Err error } -func (errs *Errs) Errored() bool { return errs.Err != nil } +func (errs *Errs) Errored() bool { + return errs.Err != nil +} func (errs *Errs) Add(errors ...error) { if errs.Err == nil { diff --git a/avalanchego/utils/wrappers/packing.go b/avalanchego/utils/wrappers/packing.go index 081b9231..e6869b1b 100644 --- a/avalanchego/utils/wrappers/packing.go +++ b/avalanchego/utils/wrappers/packing.go @@ -1,16 +1,12 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package wrappers import ( - "crypto/x509" "encoding/binary" "errors" "math" - - "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/ips" ) const ( @@ -30,12 +26,18 @@ const ( IPLen = 16 + ShortLen ) +func StringLen(str string) int { + // note: there is a max length for string ([MaxStringLen]) + // we defer to PackString checking whether str is within limits + return ShortLen + len(str) +} + var ( errBadLength = errors.New("packer has insufficient length for input") errNegativeOffset = errors.New("negative offset") errInvalidInput = errors.New("input does not match expected format") - errBadType = errors.New("wrong type passed") errBadBool = errors.New("unexpected value when unpacking bool") + errOversized = errors.New("size is larger than limit") ) // Packer packs and unpacks a byte array from/to standard values @@ -50,42 +52,9 @@ type Packer struct { Offset int } -// CheckSpace requires that there is at least [bytes] of write space left in the -// byte array. If this is not true, an error is added to the packer -func (p *Packer) CheckSpace(bytes int) { - switch { - case p.Offset < 0: - p.Add(errNegativeOffset) - case bytes < 0: - p.Add(errInvalidInput) - case len(p.Bytes)-p.Offset < bytes: - p.Add(errBadLength) - } -} - -// Expand ensures that there is [bytes] bytes left of space in the byte slice. -// If this is not allowed due to the maximum size, an error is added to the packer -// In order to understand this code, its important to understand the difference -// between a slice's length and its capacity. 
-func (p *Packer) Expand(bytes int) { - neededSize := bytes + p.Offset // Need byte slice's length to be at least [neededSize] - switch { - case neededSize <= len(p.Bytes): // Byte slice has sufficient length already - return - case neededSize > p.MaxSize: // Lengthening the byte slice would cause it to grow too large - p.Err = errBadLength - return - case neededSize <= cap(p.Bytes): // Byte slice has sufficient capacity to lengthen it without mem alloc - p.Bytes = p.Bytes[:neededSize] - return - default: // Add capacity/length to byte slice - p.Bytes = append(p.Bytes[:cap(p.Bytes)], make([]byte, neededSize-cap(p.Bytes))...) - } -} - // PackByte append a byte to the byte array func (p *Packer) PackByte(val byte) { - p.Expand(ByteLen) + p.expand(ByteLen) if p.Errored() { return } @@ -96,19 +65,19 @@ func (p *Packer) PackByte(val byte) { // UnpackByte unpack a byte from the byte array func (p *Packer) UnpackByte() byte { - p.CheckSpace(ByteLen) + p.checkSpace(ByteLen) if p.Errored() { return 0 } val := p.Bytes[p.Offset] - p.Offset++ + p.Offset += ByteLen return val } // PackShort append a short to the byte array func (p *Packer) PackShort(val uint16) { - p.Expand(ShortLen) + p.expand(ShortLen) if p.Errored() { return } @@ -119,7 +88,7 @@ func (p *Packer) PackShort(val uint16) { // UnpackShort unpack a short from the byte array func (p *Packer) UnpackShort() uint16 { - p.CheckSpace(ShortLen) + p.checkSpace(ShortLen) if p.Errored() { return 0 } @@ -131,7 +100,7 @@ func (p *Packer) UnpackShort() uint16 { // PackInt append an int to the byte array func (p *Packer) PackInt(val uint32) { - p.Expand(IntLen) + p.expand(IntLen) if p.Errored() { return } @@ -142,7 +111,7 @@ func (p *Packer) PackInt(val uint32) { // UnpackInt unpack an int from the byte array func (p *Packer) UnpackInt() uint32 { - p.CheckSpace(IntLen) + p.checkSpace(IntLen) if p.Errored() { return 0 } @@ -154,7 +123,7 @@ func (p *Packer) UnpackInt() uint32 { // PackLong append a long to the byte array func (p 
*Packer) PackLong(val uint64) { - p.Expand(LongLen) + p.expand(LongLen) if p.Errored() { return } @@ -165,7 +134,7 @@ func (p *Packer) PackLong(val uint64) { // UnpackLong unpack a long from the byte array func (p *Packer) UnpackLong() uint64 { - p.CheckSpace(LongLen) + p.checkSpace(LongLen) if p.Errored() { return 0 } @@ -201,7 +170,7 @@ func (p *Packer) UnpackBool() bool { // PackFixedBytes append a byte slice, with no length descriptor to the byte // array func (p *Packer) PackFixedBytes(bytes []byte) { - p.Expand(len(bytes)) + p.expand(len(bytes)) if p.Errored() { return } @@ -213,7 +182,7 @@ func (p *Packer) PackFixedBytes(bytes []byte) { // UnpackFixedBytes unpack a byte slice, with no length descriptor from the byte // array func (p *Packer) UnpackFixedBytes(size int) []byte { - p.CheckSpace(size) + p.checkSpace(size) if p.Errored() { return nil } @@ -235,42 +204,15 @@ func (p *Packer) UnpackBytes() []byte { return p.UnpackFixedBytes(int(size)) } -// PackFixedByteSlices append a byte slice slice to the byte array -func (p *Packer) PackFixedByteSlices(byteSlices [][]byte) { - p.PackInt(uint32(len(byteSlices))) - for _, bytes := range byteSlices { - p.PackFixedBytes(bytes) - } -} - -// UnpackFixedByteSlices returns a byte slice slice from the byte array. -// Each byte slice has the specified size. The number of byte slices is -// read from the byte array. -func (p *Packer) UnpackFixedByteSlices(size int) [][]byte { - sliceSize := p.UnpackInt() - bytes := [][]byte(nil) - for i := uint32(0); i < sliceSize && !p.Errored(); i++ { - bytes = append(bytes, p.UnpackFixedBytes(size)) - } - return bytes -} - -// Pack2DByteSlice append a 2D byte slice to the byte array -func (p *Packer) Pack2DByteSlice(byteSlices [][]byte) { - p.PackInt(uint32(len(byteSlices))) - for _, bytes := range byteSlices { - p.PackBytes(bytes) - } -} - -// Unpack2DByteSlice returns a 2D byte slice from the byte array. 
-func (p *Packer) Unpack2DByteSlice() [][]byte { - sliceSize := p.UnpackInt() - bytes := [][]byte(nil) - for i := uint32(0); i < sliceSize && !p.Errored(); i++ { - bytes = append(bytes, p.UnpackBytes()) +// UnpackLimitedBytes unpacks a byte slice. If the size of the slice is greater +// than [limit], adds [errOversized] to the packer and returns nil. +func (p *Packer) UnpackLimitedBytes(limit uint32) []byte { + size := p.UnpackInt() + if size > limit { + p.Add(errOversized) + return nil } - return bytes + return p.UnpackFixedBytes(int(size)) } // PackStr append a string to the byte array @@ -290,233 +232,46 @@ func (p *Packer) UnpackStr() string { return string(p.UnpackFixedBytes(int(strSize))) } -// PackIP packs an ip port pair to the byte array -func (p *Packer) PackIP(ip ips.IPPort) { - p.PackFixedBytes(ip.IP.To16()) - p.PackShort(ip.Port) -} - -// UnpackIP unpacks an ip port pair from the byte array -func (p *Packer) UnpackIP() ips.IPPort { - ip := p.UnpackFixedBytes(16) - port := p.UnpackShort() - return ips.IPPort{ - IP: ip, - Port: port, - } -} - -// PackIPs unpacks an ip port pair slice from the byte array -func (p *Packer) PackIPs(ips []ips.IPPort) { - p.PackInt(uint32(len(ips))) - for i := 0; i < len(ips) && !p.Errored(); i++ { - p.PackIP(ips[i]) - } -} - -// UnpackIPs unpacks an ip port pair slice from the byte array -func (p *Packer) UnpackIPs() []ips.IPPort { - sliceSize := p.UnpackInt() - ips := []ips.IPPort(nil) - for i := uint32(0); i < sliceSize && !p.Errored(); i++ { - ips = append(ips, p.UnpackIP()) - } - return ips -} - -// TryPackByte attempts to pack the value as a byte -func TryPackByte(packer *Packer, valIntf interface{}) { - if val, ok := valIntf.(uint8); ok { - packer.PackByte(val) - } else { - packer.Add(errBadType) - } -} - -// TryUnpackByte attempts to unpack a value as a byte -func TryUnpackByte(packer *Packer) interface{} { - return packer.UnpackByte() -} - -// TryPackInt attempts to pack the value as an int -func TryPackInt(packer 
*Packer, valIntf interface{}) { - if val, ok := valIntf.(uint32); ok { - packer.PackInt(val) - } else { - packer.Add(errBadType) - } -} - -// TryUnpackInt attempts to unpack a value as an int -func TryUnpackInt(packer *Packer) interface{} { - return packer.UnpackInt() -} - -// TryPackLong attempts to pack the value as a long -func TryPackLong(packer *Packer, valIntf interface{}) { - if val, ok := valIntf.(uint64); ok { - packer.PackLong(val) - } else { - packer.Add(errBadType) - } -} - -// TryUnpackLong attempts to unpack a value as a long -func TryUnpackLong(packer *Packer) interface{} { - return packer.UnpackLong() -} - -// TryPackHash attempts to pack the value as a 32-byte sequence -func TryPackHash(packer *Packer, valIntf interface{}) { - if val, ok := valIntf.([]byte); ok { - packer.PackFixedBytes(val) - } else { - packer.Add(errBadType) - } -} - -// TryUnpackHash attempts to unpack the value as a 32-byte sequence -func TryUnpackHash(packer *Packer) interface{} { - return packer.UnpackFixedBytes(hashing.HashLen) -} - -// TryPackHashes attempts to pack the value as a list of 32-byte sequences -func TryPackHashes(packer *Packer, valIntf interface{}) { - if val, ok := valIntf.([][]byte); ok { - packer.PackFixedByteSlices(val) - } else { - packer.Add(errBadType) - } -} - -// TryUnpackHashes attempts to unpack the value as a list of 32-byte sequences -func TryUnpackHashes(packer *Packer) interface{} { - return packer.UnpackFixedByteSlices(hashing.HashLen) -} - -// TryPackBytes attempts to pack the value as a list of bytes -func TryPackBytes(packer *Packer, valIntf interface{}) { - if val, ok := valIntf.([]byte); ok { - packer.PackBytes(val) - } else { - packer.Add(errBadType) - } -} - -// TryUnpackBytes attempts to unpack the value as a list of bytes -func TryUnpackBytes(packer *Packer) interface{} { - return packer.UnpackBytes() -} - -// TryPack2DBytes attempts to pack the value as a 2D byte slice -func TryPack2DBytes(packer *Packer, valIntf interface{}) { - if 
val, ok := valIntf.([][]byte); ok { - packer.Pack2DByteSlice(val) - } else { - packer.Add(errBadType) - } -} - -// TryUnpack2DBytes attempts to unpack the value as a 2D byte slice -func TryUnpack2DBytes(packer *Packer) interface{} { - return packer.Unpack2DByteSlice() -} - -// TryPackStr attempts to pack the value as a string -func TryPackStr(packer *Packer, valIntf interface{}) { - if val, ok := valIntf.(string); ok { - packer.PackStr(val) - } else { - packer.Add(errBadType) - } -} - -// TryUnpackStr attempts to unpack the value as a string -func TryUnpackStr(packer *Packer) interface{} { - return packer.UnpackStr() -} - -// TryPackIP attempts to pack the value as an ip port pair -func TryPackIP(packer *Packer, valIntf interface{}) { - if val, ok := valIntf.(ips.IPPort); ok { - packer.PackIP(val) - } else { - packer.Add(errBadType) - } -} - -// TryUnpackIP attempts to unpack the value as an ip port pair -func TryUnpackIP(packer *Packer) interface{} { - return packer.UnpackIP() -} - -func (p *Packer) PackX509Certificate(cert *x509.Certificate) { - p.PackBytes(cert.Raw) -} - -func (p *Packer) UnpackX509Certificate() *x509.Certificate { - b := p.UnpackBytes() - cert, err := x509.ParseCertificate(b) - if err != nil { - p.Add(err) - return nil - } - return cert -} - -func (p *Packer) PackClaimedIPPort(ipCert ips.ClaimedIPPort) { - p.PackX509Certificate(ipCert.Cert) - p.PackIP(ipCert.IPPort) - p.PackLong(ipCert.Timestamp) - p.PackBytes(ipCert.Signature) -} - -func (p *Packer) UnpackClaimedIPPort() ips.ClaimedIPPort { - var ipCert ips.ClaimedIPPort - ipCert.Cert = p.UnpackX509Certificate() - ipCert.IPPort = p.UnpackIP() - ipCert.Timestamp = p.UnpackLong() - ipCert.Signature = p.UnpackBytes() - return ipCert -} - -func TryPackClaimedIPPortList(packer *Packer, valIntf interface{}) { - if ipCertList, ok := valIntf.([]ips.ClaimedIPPort); ok { - packer.PackInt(uint32(len(ipCertList))) - for _, ipc := range ipCertList { - packer.PackClaimedIPPort(ipc) - } - } else { - 
packer.Add(errBadType) +// UnpackLimitedStr unpacks a string. If the size of the string is greater than +// [limit], adds [errOversized] to the packer and returns the empty string. +func (p *Packer) UnpackLimitedStr(limit uint16) string { + strSize := p.UnpackShort() + if strSize > limit { + p.Add(errOversized) + return "" } + return string(p.UnpackFixedBytes(int(strSize))) } -func TryUnpackClaimedIPPortList(packer *Packer) interface{} { - sliceSize := packer.UnpackInt() - ips := []ips.ClaimedIPPort(nil) - for i := uint32(0); i < sliceSize && !packer.Errored(); i++ { - ips = append(ips, packer.UnpackClaimedIPPort()) +// checkSpace requires that there is at least [bytes] of write space left in the +// byte array. If this is not true, an error is added to the packer +func (p *Packer) checkSpace(bytes int) { + switch { + case p.Offset < 0: + p.Add(errNegativeOffset) + case bytes < 0: + p.Add(errInvalidInput) + case len(p.Bytes)-p.Offset < bytes: + p.Add(errBadLength) } - return ips } -func TryPackUint64Slice(p *Packer, valIntf interface{}) { - longList, ok := valIntf.([]uint64) - if !ok { - p.Add(errBadType) +// expand ensures that there is [bytes] bytes left of space in the byte slice. +// If this is not allowed due to the maximum size, an error is added to the packer +// In order to understand this code, its important to understand the difference +// between a slice's length and its capacity. 
+func (p *Packer) expand(bytes int) { + neededSize := bytes + p.Offset // Need byte slice's length to be at least [neededSize] + switch { + case neededSize <= len(p.Bytes): // Byte slice has sufficient length already return + case neededSize > p.MaxSize: // Lengthening the byte slice would cause it to grow too large + p.Err = errBadLength + return + case neededSize <= cap(p.Bytes): // Byte slice has sufficient capacity to lengthen it without mem alloc + p.Bytes = p.Bytes[:neededSize] + return + default: // Add capacity/length to byte slice + p.Bytes = append(p.Bytes[:cap(p.Bytes)], make([]byte, neededSize-cap(p.Bytes))...) } - p.PackInt(uint32(len(longList))) - for _, val := range longList { - p.PackLong(val) - } -} - -func TryUnpackUint64Slice(p *Packer) interface{} { - sliceSize := p.UnpackInt() - res := []uint64(nil) - for i := uint32(0); i < sliceSize && !p.Errored(); i++ { - res = append(res, p.UnpackLong()) - } - return res } diff --git a/avalanchego/utils/wrappers/packing_test.go b/avalanchego/utils/wrappers/packing_test.go index 7b45dc0e..1d674020 100644 --- a/avalanchego/utils/wrappers/packing_test.go +++ b/avalanchego/utils/wrappers/packing_test.go @@ -1,651 +1,364 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package wrappers import ( - "bytes" - "net" - "reflect" "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/staking" - "github.com/ava-labs/avalanchego/utils/ips" ) const ( - ByteSentinal = 0 - ShortSentinal = 0 - IntSentinal = 0 - LongSentinal = 0 - BoolSentinal = false + ByteSentinel = 0 + ShortSentinel = 0 + IntSentinel = 0 + LongSentinel = 0 + BoolSentinel = false ) func TestPackerCheckSpace(t *testing.T) { + require := require.New(t) + p := Packer{Offset: -1} - p.CheckSpace(1) - if !p.Errored() { - t.Fatal("Expected errNegativeOffset") - } + p.checkSpace(1) + require.True(p.Errored()) + require.ErrorIs(p.Err, errNegativeOffset) p = Packer{} - p.CheckSpace(-1) - if !p.Errored() { - t.Fatal("Expected errInvalidInput") - } + p.checkSpace(-1) + require.True(p.Errored()) + require.ErrorIs(p.Err, errInvalidInput) p = Packer{Bytes: []byte{0x01}, Offset: 1} - p.CheckSpace(1) - if !p.Errored() { - t.Fatal("Expected errBadLength") - } + p.checkSpace(1) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) p = Packer{Bytes: []byte{0x01}, Offset: 2} - p.CheckSpace(0) - if !p.Errored() { - t.Fatal("Expected errBadLength, due to out of bounds offset") - } + p.checkSpace(0) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerExpand(t *testing.T) { + require := require.New(t) + p := Packer{Bytes: []byte{0x01}, Offset: 2} - p.Expand(1) - if !p.Errored() { - t.Fatal("packer.Expand didn't notice packer had out of bounds offset") - } + p.expand(1) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) p = Packer{Bytes: []byte{0x01, 0x02, 0x03}, Offset: 0} - p.Expand(1) - if p.Errored() { - t.Fatalf("packer.Expand unexpectedly had error %s", p.Err) - } else if len(p.Bytes) != 3 { - t.Fatalf("packer.Expand modified byte array, when it didn't need to") - } + p.expand(1) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal([]byte{0x01, 0x02, 0x03}, p.Bytes) } func 
TestPackerPackByte(t *testing.T) { - p := Packer{MaxSize: 1} + require := require.New(t) + p := Packer{MaxSize: 1} p.PackByte(0x01) - - if p.Errored() { - t.Fatal(p.Err) - } - - if size := len(p.Bytes); size != 1 { - t.Fatalf("Packer.PackByte wrote %d byte(s) but expected %d byte(s)", size, 1) - } - - expected := []byte{0x01} - if !bytes.Equal(p.Bytes, expected) { - t.Fatalf("Packer.PackByte wrote:\n%v\nExpected:\n%v", p.Bytes, expected) - } + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal([]byte{0x01}, p.Bytes) p.PackByte(0x02) - if !p.Errored() { - t.Fatal("Packer.PackByte did not fail when attempt was beyond p.MaxSize") - } + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerUnpackByte(t *testing.T) { - var ( - p = Packer{Bytes: []byte{0x01}, Offset: 0} - actual = p.UnpackByte() - expected byte = 1 - expectedLen = ByteLen - ) - switch { - case p.Errored(): - t.Fatalf("Packer.UnpackByte unexpectedly raised %s", p.Err) - case actual != expected: - t.Fatalf("Packer.UnpackByte returned %d, but expected %d", actual, expected) - case p.Offset != expectedLen: - t.Fatalf("Packer.UnpackByte left Offset %d, expected %d", p.Offset, expectedLen) - } - - actual = p.UnpackByte() - if !p.Errored() { - t.Fatalf("Packer.UnpackByte should have set error, due to attempted out of bounds read") - } else if actual != ByteSentinal { - t.Fatalf("Packer.UnpackByte returned %d, expected sentinal value %d", actual, ByteSentinal) - } + require := require.New(t) + + p := Packer{Bytes: []byte{0x01}, Offset: 0} + require.Equal(uint8(1), p.UnpackByte()) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(ByteLen, p.Offset) + + require.Equal(uint8(ByteSentinel), p.UnpackByte()) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerPackShort(t *testing.T) { - p := Packer{MaxSize: 2} + require := require.New(t) + p := Packer{MaxSize: 2} p.PackShort(0x0102) - - if p.Errored() { - t.Fatal(p.Err) - } 
- - if size := len(p.Bytes); size != 2 { - t.Fatalf("Packer.PackShort wrote %d byte(s) but expected %d byte(s)", size, 2) - } - - expected := []byte{0x01, 0x02} - if !bytes.Equal(p.Bytes, expected) { - t.Fatalf("Packer.PackShort wrote:\n%v\nExpected:\n%v", p.Bytes, expected) - } + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal([]byte{0x01, 0x02}, p.Bytes) } func TestPackerUnpackShort(t *testing.T) { - var ( - p = Packer{Bytes: []byte{0x01, 0x02}, Offset: 0} - actual = p.UnpackShort() - expected uint16 = 0x0102 - expectedLen = ShortLen - ) - - switch { - case p.Errored(): - t.Fatalf("Packer.UnpackShort unexpectedly raised %s", p.Err) - case actual != expected: - t.Fatalf("Packer.UnpackShort returned %d, but expected %d", actual, expected) - case p.Offset != expectedLen: - t.Fatalf("Packer.UnpackShort left Offset %d, expected %d", p.Offset, expectedLen) - } - - actual = p.UnpackShort() - if !p.Errored() { - t.Fatalf("Packer.UnpackShort should have set error, due to attempted out of bounds read") - } else if actual != ShortSentinal { - t.Fatalf("Packer.UnpackShort returned %d, expected sentinal value %d", actual, ShortSentinal) - } + require := require.New(t) + + p := Packer{Bytes: []byte{0x01, 0x02}, Offset: 0} + require.Equal(uint16(0x0102), p.UnpackShort()) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(ShortLen, p.Offset) + + require.Equal(uint16(ShortSentinel), p.UnpackShort()) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerPackInt(t *testing.T) { - p := Packer{MaxSize: 4} + require := require.New(t) + p := Packer{MaxSize: 4} p.PackInt(0x01020304) - - if p.Errored() { - t.Fatal(p.Err) - } - - if size := len(p.Bytes); size != 4 { - t.Fatalf("Packer.PackInt wrote %d byte(s) but expected %d byte(s)", size, 4) - } - - expected := []byte{0x01, 0x02, 0x03, 0x04} - if !bytes.Equal(p.Bytes, expected) { - t.Fatalf("Packer.PackInt wrote:\n%v\nExpected:\n%v", p.Bytes, expected) - } + 
require.False(p.Errored()) + require.NoError(p.Err) + require.Equal([]byte{0x01, 0x02, 0x03, 0x04}, p.Bytes) p.PackInt(0x05060708) - if !p.Errored() { - t.Fatal("Packer.PackInt did not fail when attempt was beyond p.MaxSize") - } + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerUnpackInt(t *testing.T) { - var ( - p = Packer{Bytes: []byte{0x01, 0x02, 0x03, 0x04}, Offset: 0} - actual = p.UnpackInt() - expected uint32 = 0x01020304 - expectedLen = IntLen - ) - - switch { - case p.Errored(): - t.Fatalf("Packer.UnpackInt unexpectedly raised %s", p.Err) - case actual != expected: - t.Fatalf("Packer.UnpackInt returned %d, but expected %d", actual, expected) - case p.Offset != expectedLen: - t.Fatalf("Packer.UnpackInt left Offset %d, expected %d", p.Offset, expectedLen) - } - - actual = p.UnpackInt() - if !p.Errored() { - t.Fatalf("Packer.UnpackInt should have set error, due to attempted out of bounds read") - } else if actual != IntSentinal { - t.Fatalf("Packer.UnpackInt returned %d, expected sentinal value %d", actual, IntSentinal) - } + require := require.New(t) + + p := Packer{Bytes: []byte{0x01, 0x02, 0x03, 0x04}, Offset: 0} + require.Equal(uint32(0x01020304), p.UnpackInt()) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(IntLen, p.Offset) + + require.Equal(uint32(IntSentinel), p.UnpackInt()) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerPackLong(t *testing.T) { - maxSize := 8 - p := Packer{MaxSize: maxSize} + require := require.New(t) + p := Packer{MaxSize: 8} p.PackLong(0x0102030405060708) - - if p.Errored() { - t.Fatal(p.Err) - } - - if size := len(p.Bytes); size != maxSize { - t.Fatalf("Packer.PackLong wrote %d byte(s) but expected %d byte(s)", size, maxSize) - } - - expected := []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08} - if !bytes.Equal(p.Bytes, expected) { - t.Fatalf("Packer.PackLong wrote:\n%v\nExpected:\n%v", p.Bytes, expected) - } + 
require.False(p.Errored()) + require.NoError(p.Err) + require.Equal([]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, p.Bytes) p.PackLong(0x090a0b0c0d0e0f00) - if !p.Errored() { - t.Fatal("Packer.PackLong did not fail when attempt was beyond p.MaxSize") - } + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerUnpackLong(t *testing.T) { - var ( - p = Packer{Bytes: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, Offset: 0} - actual = p.UnpackLong() - expected uint64 = 0x0102030405060708 - expectedLen = LongLen - ) - - switch { - case p.Errored(): - t.Fatalf("Packer.UnpackLong unexpectedly raised %s", p.Err) - case actual != expected: - t.Fatalf("Packer.UnpackLong returned %d, but expected %d", actual, expected) - case p.Offset != expectedLen: - t.Fatalf("Packer.UnpackLong left Offset %d, expected %d", p.Offset, expectedLen) - } - - actual = p.UnpackLong() - if !p.Errored() { - t.Fatalf("Packer.UnpackLong should have set error, due to attempted out of bounds read") - } else if actual != LongSentinal { - t.Fatalf("Packer.UnpackLong returned %d, expected sentinal value %d", actual, LongSentinal) - } + require := require.New(t) + + p := Packer{Bytes: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, Offset: 0} + require.Equal(uint64(0x0102030405060708), p.UnpackLong()) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(LongLen, p.Offset) + + require.Equal(uint64(LongSentinel), p.UnpackLong()) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerPackFixedBytes(t *testing.T) { - p := Packer{MaxSize: 4} + require := require.New(t) + p := Packer{MaxSize: 4} p.PackFixedBytes([]byte("Avax")) - - if p.Errored() { - t.Fatal(p.Err) - } - - if size := len(p.Bytes); size != 4 { - t.Fatalf("Packer.PackFixedBytes wrote %d byte(s) but expected %d byte(s)", size, 4) - } - - expected := []byte("Avax") - if !bytes.Equal(p.Bytes, expected) { - t.Fatalf("Packer.PackFixedBytes 
wrote:\n%v\nExpected:\n%v", p.Bytes, expected) - } + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal([]byte("Avax"), p.Bytes) p.PackFixedBytes([]byte("Avax")) - if !p.Errored() { - t.Fatal("Packer.PackFixedBytes did not fail when attempt was beyond p.MaxSize") - } + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerUnpackFixedBytes(t *testing.T) { - var ( - p = Packer{Bytes: []byte("Avax")} - actual = p.UnpackFixedBytes(4) - expected = []byte("Avax") - expectedLen = 4 - ) - - switch { - case p.Errored(): - t.Fatalf("Packer.UnpackFixedBytes unexpectedly raised %s", p.Err) - case !bytes.Equal(actual, expected): - t.Fatalf("Packer.UnpackFixedBytes returned %d, but expected %d", actual, expected) - case p.Offset != expectedLen: - t.Fatalf("Packer.UnpackFixedBytes left Offset %d, expected %d", p.Offset, expectedLen) - } - - actual = p.UnpackFixedBytes(4) - if !p.Errored() { - t.Fatalf("Packer.UnpackFixedBytes should have set error, due to attempted out of bounds read") - } else if actual != nil { - t.Fatalf("Packer.UnpackFixedBytes returned %v, expected sentinal value %v", actual, nil) - } + require := require.New(t) + + p := Packer{Bytes: []byte("Avax")} + require.Equal([]byte("Avax"), p.UnpackFixedBytes(4)) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(4, p.Offset) + + require.Nil(p.UnpackFixedBytes(4)) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerPackBytes(t *testing.T) { - p := Packer{MaxSize: 8} + require := require.New(t) + p := Packer{MaxSize: 8} p.PackBytes([]byte("Avax")) - - if p.Errored() { - t.Fatal(p.Err) - } - - if size := len(p.Bytes); size != 8 { - t.Fatalf("Packer.PackBytes wrote %d byte(s) but expected %d byte(s)", size, 8) - } - - expected := []byte("\x00\x00\x00\x04Avax") - if !bytes.Equal(p.Bytes, expected) { - t.Fatalf("Packer.PackBytes wrote:\n%v\nExpected:\n%v", p.Bytes, expected) - } + require.False(p.Errored()) + 
require.NoError(p.Err) + require.Equal([]byte("\x00\x00\x00\x04Avax"), p.Bytes) p.PackBytes([]byte("Avax")) - if !p.Errored() { - t.Fatal("Packer.PackBytes did not fail when attempt was beyond p.MaxSize") - } + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } func TestPackerUnpackBytes(t *testing.T) { - var ( - p = Packer{Bytes: []byte("\x00\x00\x00\x04Avax")} - actual = p.UnpackBytes() - expected = []byte("Avax") - expectedLen = 8 - ) - - switch { - case p.Errored(): - t.Fatalf("Packer.UnpackBytes unexpectedly raised %s", p.Err) - case !bytes.Equal(actual, expected): - t.Fatalf("Packer.UnpackBytes returned %d, but expected %d", actual, expected) - case p.Offset != expectedLen: - t.Fatalf("Packer.UnpackBytes left Offset %d, expected %d", p.Offset, expectedLen) - } - - actual = p.UnpackBytes() - if !p.Errored() { - t.Fatalf("Packer.UnpackBytes should have set error, due to attempted out of bounds read") - } else if actual != nil { - t.Fatalf("Packer.UnpackBytes returned %v, expected sentinal value %v", actual, nil) - } -} - -func TestPackerPackFixedByteSlices(t *testing.T) { - p := Packer{MaxSize: 12} + require := require.New(t) - p.PackFixedByteSlices([][]byte{[]byte("Avax"), []byte("Evax")}) + p := Packer{Bytes: []byte("\x00\x00\x00\x04Avax")} + require.Equal([]byte("Avax"), p.UnpackBytes()) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(8, p.Offset) - if p.Errored() { - t.Fatal(p.Err) - } + require.Nil(p.UnpackBytes()) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) +} - if size := len(p.Bytes); size != 12 { - t.Fatalf("Packer.PackFixedByteSlices wrote %d byte(s) but expected %d byte(s)", size, 12) - } +func TestPackerUnpackLimitedBytes(t *testing.T) { + require := require.New(t) - expected := []byte("\x00\x00\x00\x02AvaxEvax") - if !bytes.Equal(p.Bytes, expected) { - t.Fatalf("Packer.PackPackFixedByteSlicesBytes wrote:\n%v\nExpected:\n%v", p.Bytes, expected) - } + p := Packer{Bytes: 
[]byte("\x00\x00\x00\x04Avax")} + require.Equal([]byte("Avax"), p.UnpackLimitedBytes(10)) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(8, p.Offset) - p.PackFixedByteSlices([][]byte{[]byte("Avax"), []byte("Evax")}) - if !p.Errored() { - t.Fatal("Packer.PackFixedByteSlices did not fail when attempt was beyond p.MaxSize") - } -} + require.Nil(p.UnpackLimitedBytes(10)) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) -func TestPackerUnpackFixedByteSlices(t *testing.T) { - var ( - p = Packer{Bytes: []byte("\x00\x00\x00\x02AvaxEvax")} - actual = p.UnpackFixedByteSlices(4) - expected = [][]byte{[]byte("Avax"), []byte("Evax")} - expectedLen = 12 - ) - - switch { - case p.Errored(): - t.Fatalf("Packer.UnpackFixedByteSlices unexpectedly raised %s", p.Err) - case !reflect.DeepEqual(actual, expected): - t.Fatalf("Packer.UnpackFixedByteSlices returned %d, but expected %d", actual, expected) - case p.Offset != expectedLen: - t.Fatalf("Packer.UnpackFixedByteSlices left Offset %d, expected %d", p.Offset, expectedLen) - } - - actual = p.UnpackFixedByteSlices(4) - if !p.Errored() { - t.Fatalf("Packer.UnpackFixedByteSlices should have set error, due to attempted out of bounds read") - } else if actual != nil { - t.Fatalf("Packer.UnpackFixedByteSlices returned %v, expected sentinal value %v", actual, nil) - } + // Reset and don't allow enough bytes + p = Packer{Bytes: p.Bytes} + require.Nil(p.UnpackLimitedBytes(2)) + require.True(p.Errored()) + require.ErrorIs(p.Err, errOversized) } func TestPackerString(t *testing.T) { + require := require.New(t) + p := Packer{MaxSize: 6} p.PackStr("Avax") + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal([]byte{0x00, 0x04, 0x41, 0x76, 0x61, 0x78}, p.Bytes) +} - if p.Errored() { - t.Fatal(p.Err) - } +func TestPackerUnpackString(t *testing.T) { + require := require.New(t) - if size := len(p.Bytes); size != 6 { - t.Fatalf("Packer.PackStr wrote %d byte(s) but expected %d byte(s)", size, 5) - 
} + p := Packer{Bytes: []byte("\x00\x04Avax")} - expected := []byte{0x00, 0x04, 0x41, 0x76, 0x61, 0x78} - if !bytes.Equal(p.Bytes, expected) { - t.Fatalf("Packer.PackStr wrote:\n%v\nExpected:\n%v", p.Bytes, expected) - } -} + require.Equal("Avax", p.UnpackStr()) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(6, p.Offset) -func TestPacker(t *testing.T) { - packer := Packer{ - MaxSize: 3, - } - - if packer.Errored() { - t.Fatalf("Packer has error %s", packer.Err) - } - - packer.PackShort(17) - if len(packer.Bytes) != 2 { - t.Fatalf("Wrong byte length") - } - - packer.PackShort(1) - if !packer.Errored() { - t.Fatalf("Packer should have error") - } - - newPacker := Packer{ - Bytes: packer.Bytes, - } - - if newPacker.UnpackShort() != 17 { - t.Fatalf("Unpacked wrong value") - } + require.Equal("", p.UnpackStr()) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } -func TestPackBool(t *testing.T) { - p := Packer{MaxSize: 3} - p.PackBool(false) - p.PackBool(true) - p.PackBool(false) - if p.Errored() { - t.Fatal("should have been able to pack 3 bools") - } +func TestPackerUnpackLimitedString(t *testing.T) { + require := require.New(t) - p2 := Packer{Bytes: p.Bytes} - bool1, bool2, bool3 := p2.UnpackBool(), p2.UnpackBool(), p2.UnpackBool() + p := Packer{Bytes: []byte("\x00\x04Avax")} + require.Equal("Avax", p.UnpackLimitedStr(10)) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(6, p.Offset) - if p.Errored() { - t.Fatalf("errors while unpacking bools: %v", p.Errs) - } + require.Equal("", p.UnpackLimitedStr(10)) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) - if bool1 || !bool2 || bool3 { - t.Fatal("got back wrong values") - } + // Reset and don't allow enough bytes + p = Packer{Bytes: p.Bytes} + require.Equal("", p.UnpackLimitedStr(2)) + require.True(p.Errored()) + require.ErrorIs(p.Err, errOversized) } -func TestPackerPackBool(t *testing.T) { - p := Packer{MaxSize: 1} +func TestPacker(t 
*testing.T) { + require := require.New(t) - p.PackBool(true) + p := Packer{MaxSize: 3} - if p.Errored() { - t.Fatal(p.Err) - } + require.False(p.Errored()) + require.NoError(p.Err) - if size := len(p.Bytes); size != 1 { - t.Fatalf("Packer.PackBool wrote %d byte(s) but expected %d byte(s)", size, 1) - } + p.PackShort(17) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal([]byte{0x0, 0x11}, p.Bytes) - expected := []byte{0x01} - if !bytes.Equal(p.Bytes, expected) { - t.Fatalf("Packer.PackBool wrote:\n%v\nExpected:\n%v", p.Bytes, expected) - } + p.PackShort(1) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) - p.PackBool(false) - if !p.Errored() { - t.Fatal("Packer.PackLong did not fail when attempt was beyond p.MaxSize") - } + p = Packer{Bytes: p.Bytes} + require.Equal(uint16(17), p.UnpackShort()) + require.False(p.Errored()) + require.NoError(p.Err) } -func TestPackerUnpackBool(t *testing.T) { - var ( - p = Packer{Bytes: []byte{0x01}, Offset: 0} - actual = p.UnpackBool() - expected = true - expectedLen = BoolLen - ) - - switch { - case p.Errored(): - t.Fatalf("Packer.UnpackBool unexpectedly raised %s", p.Err) - case actual != expected: - t.Fatalf("Packer.UnpackBool returned %t, but expected %t", actual, expected) - case p.Offset != expectedLen: - t.Fatalf("Packer.UnpackBool left Offset %d, expected %d", p.Offset, expectedLen) - } - - actual = p.UnpackBool() - if !p.Errored() { - t.Fatalf("Packer.UnpackBool should have set error, due to attempted out of bounds read") - } else if actual != BoolSentinal { - t.Fatalf("Packer.UnpackBool returned %t, expected sentinal value %t", actual, BoolSentinal) - } - - p = Packer{Bytes: []byte{0x42}, Offset: 0} - expected = false - actual = p.UnpackBool() - if !p.Errored() { - t.Fatalf("Packer.UnpackBool id not raise error for invalid boolean value %v", p.Bytes) - } else if actual != expected { - t.Fatalf("Packer.UnpackBool returned %t, expected sentinal value %t", actual, BoolSentinal) - } -} 
+func TestPackBool(t *testing.T) { + require := require.New(t) -func TestPacker2DByteSlice(t *testing.T) { - // Case: empty array - p := Packer{MaxSize: 1024} - arr := [][]byte{} - p.Pack2DByteSlice(arr) - if p.Errored() { - t.Fatal(p.Err) - } - arrUnpacked := p.Unpack2DByteSlice() - if len(arrUnpacked) != 0 { - t.Fatal("should be empty") - } - - // Case: Array has one element - p = Packer{MaxSize: 1024} - arr = [][]byte{ - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, - } - p.Pack2DByteSlice(arr) - if p.Errored() { - t.Fatal(p.Err) - } - p = Packer{MaxSize: 1024, Bytes: p.Bytes} - arrUnpacked = p.Unpack2DByteSlice() - if p.Errored() { - t.Fatal(p.Err) - } - if l := len(arrUnpacked); l != 1 { - t.Fatalf("should be length 1 but is length %d", l) - } - if !bytes.Equal(arrUnpacked[0], arr[0]) { - t.Fatal("should match") - } - - // Case: Array has multiple elements - p = Packer{MaxSize: 1024} - arr = [][]byte{ - {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, - {11, 12, 3, 4, 5, 6, 7, 8, 9, 10}, - } - p.Pack2DByteSlice(arr) - if p.Errored() { - t.Fatal(p.Err) - } - p = Packer{MaxSize: 1024, Bytes: p.Bytes} - arrUnpacked = p.Unpack2DByteSlice() - if p.Errored() { - t.Fatal(p.Err) - } - if l := len(arrUnpacked); l != 2 { - t.Fatalf("should be length 1 but is length %d", l) - } - if !bytes.Equal(arrUnpacked[0], arr[0]) { - t.Fatal("should match") - } - if !bytes.Equal(arrUnpacked[1], arr[1]) { - t.Fatal("should match") - } + p := Packer{MaxSize: 3} + p.PackBool(false) + p.PackBool(true) + p.PackBool(false) + require.False(p.Errored()) + require.NoError(p.Err) + + p = Packer{Bytes: p.Bytes} + bool1, bool2, bool3 := p.UnpackBool(), p.UnpackBool(), p.UnpackBool() + require.False(p.Errored()) + require.NoError(p.Err) + require.False(bool1) + require.True(bool2) + require.False(bool3) } -func TestPackX509Certificate(t *testing.T) { - cert, err := staking.NewTLSCert() - require.NoError(t, err) +func TestPackerPackBool(t *testing.T) { + require := require.New(t) - p := Packer{MaxSize: 10000} - 
p.PackX509Certificate(cert.Leaf) - require.NoError(t, p.Err) + p := Packer{MaxSize: 1} - p.Offset = 0 - unpackedCert := p.UnpackX509Certificate() + p.PackBool(true) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal([]byte{0x01}, p.Bytes) - require.Equal(t, cert.Leaf.Raw, unpackedCert.Raw) + p.PackBool(false) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) } -func TestPackClaimedIPPort(t *testing.T) { - cert, err := staking.NewTLSCert() - require.NoError(t, err) - - ip := ips.ClaimedIPPort{ - IPPort: ips.IPPort{IP: net.IPv4(1, 2, 3, 4), Port: 5}, - Cert: cert.Leaf, - Signature: []byte("signature"), - } +func TestPackerUnpackBool(t *testing.T) { + require := require.New(t) - p := Packer{MaxSize: 10000} - p.PackClaimedIPPort(ip) - require.NoError(t, p.Err) + p := Packer{Bytes: []byte{0x01}, Offset: 0} - p.Offset = 0 - unpackedIPCert := p.UnpackClaimedIPPort() + require.Equal(true, p.UnpackBool()) + require.False(p.Errored()) + require.NoError(p.Err) + require.Equal(BoolLen, p.Offset) - require.Equal(t, ip.IPPort, unpackedIPCert.IPPort) - require.Equal(t, ip.Cert.Raw, unpackedIPCert.Cert.Raw) - require.Equal(t, ip.Signature, unpackedIPCert.Signature) -} + require.Equal(BoolSentinel, p.UnpackBool()) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadLength) -func TestPackClaimedIPPortList(t *testing.T) { - cert, err := staking.NewTLSCert() - require.NoError(t, err) - - ip := ips.ClaimedIPPort{ - IPPort: ips.IPPort{IP: net.IPv4(1, 2, 3, 4), Port: 5}, - Cert: cert.Leaf, - Signature: []byte("signature"), - Timestamp: 2, - } - - p := Packer{MaxSize: 10000} - TryPackClaimedIPPortList(&p, []ips.ClaimedIPPort{ip}) - require.NoError(t, p.Err) - - p.Offset = 0 - unpackedIPCertList := TryUnpackClaimedIPPortList(&p) - resolvedUnpackedIPCertList := unpackedIPCertList.([]ips.ClaimedIPPort) - require.NotEmpty(t, resolvedUnpackedIPCertList) - require.Equal(t, ip.IPPort, resolvedUnpackedIPCertList[0].IPPort) - require.Equal(t, ip.Cert.Raw, 
resolvedUnpackedIPCertList[0].Cert.Raw) - require.Equal(t, ip.Signature, resolvedUnpackedIPCertList[0].Signature) - require.Equal(t, ip.Timestamp, resolvedUnpackedIPCertList[0].Timestamp) + p = Packer{Bytes: []byte{0x42}, Offset: 0} + require.Equal(false, p.UnpackBool()) + require.True(p.Errored()) + require.ErrorIs(p.Err, errBadBool) } diff --git a/avalanchego/utils/zero.go b/avalanchego/utils/zero.go index 98120681..c5ca3b9c 100644 --- a/avalanchego/utils/zero.go +++ b/avalanchego/utils/zero.go @@ -1,9 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utils // Returns a new instance of a T. func Zero[T any]() T { - return *new(T) //nolint:gocritic + return *new(T) } diff --git a/avalanchego/version/application.go b/avalanchego/version/application.go index f1c20206..83613055 100644 --- a/avalanchego/version/application.go +++ b/avalanchego/version/application.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -12,7 +12,7 @@ import ( var ( errDifferentMajor = errors.New("different major version") - _ fmt.Stringer = &Semantic{} + _ fmt.Stringer = (*Semantic)(nil) ) type Application struct { diff --git a/avalanchego/version/application_test.go b/avalanchego/version/application_test.go index ac78b355..95757f30 100644 --- a/avalanchego/version/application_test.go +++ b/avalanchego/version/application_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package version diff --git a/avalanchego/version/compatibility.go b/avalanchego/version/compatibility.go index b1b86e74..6c3cae20 100644 --- a/avalanchego/version/compatibility.go +++ b/avalanchego/version/compatibility.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -13,7 +13,7 @@ import ( var ( errIncompatible = errors.New("peers version is incompatible") - _ Compatibility = &compatibility{} + _ Compatibility = (*compatibility)(nil) ) // Compatibility a utility for checking the compatibility of peer versions @@ -52,7 +52,9 @@ func NewCompatibility( } } -func (c *compatibility) Version() *Application { return c.version } +func (c *compatibility) Version() *Application { + return c.version +} func (c *compatibility) Compatible(peer *Application) error { if err := c.version.Compatible(peer); err != nil { diff --git a/avalanchego/version/compatibility.json b/avalanchego/version/compatibility.json new file mode 100644 index 00000000..e11fdd35 --- /dev/null +++ b/avalanchego/version/compatibility.json @@ -0,0 +1,46 @@ +{ + "25": [ + "v1.10.0" + ], + "24": [ + "v1.9.10", + "v1.9.11", + "v1.9.12", + "v1.9.14", + "v1.9.15", + "v1.9.16" + ], + "23": [ + "v1.9.9" + ], + "22": [ + "v1.9.6", + "v1.9.7", + "v1.9.8" + ], + "21": [ + "v1.9.5" + ], + "20": [ + "v1.9.4" + ], + "19": [ + "v1.9.2", + "v1.9.3" + ], + "18": [ + "v1.9.1" + ], + "17": [ + "v1.9.0" + ], + "16": [ + "v1.8.0", + "v1.8.1", + "v1.8.2", + "v1.8.3", + "v1.8.4", + "v1.8.5", + "v1.8.6" + ] +} diff --git a/avalanchego/version/compatibility_test.go b/avalanchego/version/compatibility_test.go index 4e675876..014f28ad 100644 --- a/avalanchego/version/compatibility_test.go +++ b/avalanchego/version/compatibility_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/avalanchego/version/constants.go b/avalanchego/version/constants.go index e6f5d08e..ebb758a1 100644 --- a/avalanchego/version/constants.go +++ b/avalanchego/version/constants.go @@ -1,20 +1,27 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version import ( + "encoding/json" "time" + _ "embed" + "github.com/ava-labs/avalanchego/utils/constants" ) +// RPCChainVMProtocol should be bumped anytime changes are made which require +// the plugin vm to upgrade to latest avalanchego release to be compatible. +const RPCChainVMProtocol uint = 25 + // These are globals that describe network upgrades and node versions var ( Current = &Semantic{ Major: 1, - Minor: 9, - Patch: 1, + Minor: 10, + Patch: 0, } CurrentApp = &Application{ Major: Current.Major, @@ -23,19 +30,19 @@ var ( } MinimumCompatibleVersion = &Application{ Major: 1, - Minor: 9, + Minor: 10, Patch: 0, } PrevMinimumCompatibleVersion = &Application{ Major: 1, - Minor: 7, - Patch: 1806, + Minor: 9, + Patch: 0, } CurrentSgb = &Semantic{ Major: 0, - Minor: 7, - Patch: 1, + Minor: 8, + Patch: 0, } CurrentSgbApp = &Application{ Major: CurrentSgb.Major, @@ -44,13 +51,13 @@ var ( } MinimumCompatibleSgbVersion = &Application{ Major: 0, - Minor: 7, + Minor: 8, Patch: 0, } PrevMinimumCompatibleSgbVersion = &Application{ Major: 0, - Minor: 6, - Patch: 6, + Minor: 7, + Patch: 0, } CurrentDatabase = DatabaseVersion1_4_5 @@ -67,6 +74,13 @@ var ( Patch: 0, } + //go:embed compatibility.json + rpcChainVMProtocolCompatibilityBytes []byte + // RPCChainVMProtocolCompatibility maps RPCChainVMProtocol versions to the + // set of avalanchego versions that supported that version. This is not used + // by avalanchego, but is useful for downstream libraries. 
+ RPCChainVMProtocolCompatibility map[uint][]*Semantic + ApricotPhase3Times = map[uint32]time.Time{ constants.MainnetID: time.Date(2021, time.August, 24, 14, 0, 0, 0, time.UTC), constants.FlareID: time.Date(2022, time.June, 1, 0, 0, 0, 0, time.UTC), @@ -116,7 +130,6 @@ var ( } ApricotPhase6DefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) - // FIXME: update this before release BanffTimes = map[uint32]time.Time{ constants.MainnetID: time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC), constants.FlareID: time.Date(2024, time.December, 17, 15, 0, 0, 0, time.UTC), @@ -129,8 +142,7 @@ var ( } BanffDefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) - // FIXME: update this before release - XChainMigrationTimes = map[uint32]time.Time{ + CortinaTimes = map[uint32]time.Time{ constants.MainnetID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), constants.FlareID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), constants.CostwoID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), @@ -140,9 +152,30 @@ var ( constants.SongbirdID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), constants.LocalID: time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC), } - XChainMigrationDefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) + CortinaDefaultTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) ) +func init() { + var parsedRPCChainVMCompatibility map[uint][]string + err := json.Unmarshal(rpcChainVMProtocolCompatibilityBytes, &parsedRPCChainVMCompatibility) + if err != nil { + panic(err) + } + + RPCChainVMProtocolCompatibility = make(map[uint][]*Semantic) + for rpcChainVMProtocol, versionStrings := range parsedRPCChainVMCompatibility { + versions := make([]*Semantic, len(versionStrings)) + for i, versionString := range versionStrings { + version, err := Parse(versionString) + if err != nil { + panic(err) + } + versions[i] = version + } + 
RPCChainVMProtocolCompatibility[rpcChainVMProtocol] = versions + } +} + func GetApricotPhase3Time(networkID uint32) time.Time { if upgradeTime, exists := ApricotPhase3Times[networkID]; exists { return upgradeTime @@ -185,11 +218,11 @@ func GetBanffTime(networkID uint32) time.Time { return BanffDefaultTime } -func GetXChainMigrationTime(networkID uint32) time.Time { - if upgradeTime, exists := XChainMigrationTimes[networkID]; exists { +func GetCortinaTime(networkID uint32) time.Time { + if upgradeTime, exists := CortinaTimes[networkID]; exists { return upgradeTime } - return XChainMigrationDefaultTime + return CortinaDefaultTime } func GetCompatibility(networkID uint32) Compatibility { @@ -197,14 +230,14 @@ func GetCompatibility(networkID uint32) Compatibility { return NewCompatibility( CurrentSgbApp, MinimumCompatibleSgbVersion, - GetBanffTime(networkID), + GetCortinaTime(networkID), PrevMinimumCompatibleSgbVersion, ) } return NewCompatibility( CurrentApp, MinimumCompatibleVersion, - GetBanffTime(networkID), + GetCortinaTime(networkID), PrevMinimumCompatibleVersion, ) } diff --git a/avalanchego/version/constants_test.go b/avalanchego/version/constants_test.go new file mode 100644 index 00000000..5e409dd9 --- /dev/null +++ b/avalanchego/version/constants_test.go @@ -0,0 +1,15 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package version + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCurrentRPCChainVMCompatible(t *testing.T) { + compatibleVersions := RPCChainVMProtocolCompatibility[RPCChainVMProtocol] + require.Contains(t, compatibleVersions, Current) +} diff --git a/avalanchego/version/parser.go b/avalanchego/version/parser.go index f39c0ee5..6520ca9a 100644 --- a/avalanchego/version/parser.go +++ b/avalanchego/version/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/avalanchego/version/parser_test.go b/avalanchego/version/parser_test.go index 22d0d3ab..16c435ac 100644 --- a/avalanchego/version/parser_test.go +++ b/avalanchego/version/parser_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/avalanchego/version/string.go b/avalanchego/version/string.go index 2aab52fe..5978181b 100644 --- a/avalanchego/version/string.go +++ b/avalanchego/version/string.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version -import "fmt" +import ( + "fmt" +) var ( // String is displayed when CLI arg --version is used @@ -14,10 +16,11 @@ var ( ) func init() { - format := "%s [database=%s" + format := "%s [database=%s, rpcchainvm=%d" args := []interface{}{ CurrentApp, CurrentDatabase, + RPCChainVMProtocol, } if GitCommit != "" { format += ", commit=%s" diff --git a/avalanchego/version/version.go b/avalanchego/version/version.go index 68d4b677..81acdc42 100644 --- a/avalanchego/version/version.go +++ b/avalanchego/version/version.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version @@ -16,7 +16,7 @@ var ( Patch: 0, } - _ fmt.Stringer = &Semantic{} + _ fmt.Stringer = (*Semantic)(nil) ) type Semantic struct { diff --git a/avalanchego/version/version_test.go b/avalanchego/version/version_test.go index ee07eb6b..d66c1212 100644 --- a/avalanchego/version/version_test.go +++ b/avalanchego/version/version_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package version diff --git a/avalanchego/vms/avm/blocks/block.go b/avalanchego/vms/avm/blocks/block.go new file mode 100644 index 00000000..6ab2af6b --- /dev/null +++ b/avalanchego/vms/avm/blocks/block.go @@ -0,0 +1,33 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package blocks + +import ( + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +// Block defines the common stateless interface for all blocks +type Block interface { + snow.ContextInitializable + + ID() ids.ID + Parent() ids.ID + Height() uint64 + // Timestamp that this block was created at + Timestamp() time.Time + MerkleRoot() ids.ID + Bytes() []byte + + // Txs returns the transactions contained in the block + Txs() []*txs.Tx + + // note: initialize does not assume that the transactions are initialized, + // and initializes them itself. + initialize(bytes []byte, cm codec.Manager) error +} diff --git a/avalanchego/vms/avm/blocks/block_test.go b/avalanchego/vms/avm/blocks/block_test.go new file mode 100644 index 00000000..0d0d7768 --- /dev/null +++ b/avalanchego/vms/avm/blocks/block_test.go @@ -0,0 +1,104 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package blocks + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var ( + chainID = ids.GenerateTestID() + keys = secp256k1.TestKeys() + assetID = ids.GenerateTestID() +) + +func TestStandardBlocks(t *testing.T) { + // check standard block can be built and parsed + require := require.New(t) + + parser, err := NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + }) + require.NoError(err) + + blkTimestamp := time.Now() + parentID := ids.GenerateTestID() + height := uint64(2022) + cm := parser.Codec() + txs, err := createTestTxs(cm) + require.NoError(err) + + standardBlk, err := NewStandardBlock(parentID, height, blkTimestamp, txs, cm) + require.NoError(err) + + // parse block + parsed, err := parser.ParseBlock(standardBlk.Bytes()) + require.NoError(err) + + // compare content + require.Equal(standardBlk.ID(), parsed.ID()) + require.Equal(standardBlk.Parent(), parsed.Parent()) + require.Equal(standardBlk.Height(), parsed.Height()) + require.Equal(standardBlk.Bytes(), parsed.Bytes()) + require.Equal(standardBlk.Timestamp(), parsed.Timestamp()) + + parsedStandardBlk, ok := parsed.(*StandardBlock) + require.True(ok) + + require.Equal(txs, parsedStandardBlk.Txs()) + require.Equal(parsed.Txs(), parsedStandardBlk.Txs()) +} + +func createTestTxs(cm codec.Manager) ([]*txs.Tx, error) { + countTxs := 1 + testTxs := make([]*txs.Tx, 0, countTxs) + for i := 0; i < countTxs; i++ { + // Create the tx + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + Outs: []*avax.TransferableOutput{{ + Asset: 
avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: uint64(12345), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }}, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.ID{'t', 'x', 'I', 'D'}, + OutputIndex: 1, + }, + Asset: avax.Asset{ID: assetID}, + In: &secp256k1fx.TransferInput{ + Amt: uint64(54321), + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }}, + Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + }}} + if err := tx.SignSECP256K1Fx(cm, [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { + return nil, err + } + testTxs = append(testTxs, tx) + } + return testTxs, nil +} diff --git a/avalanchego/vms/avm/blocks/builder/builder.go b/avalanchego/vms/avm/blocks/builder/builder.go new file mode 100644 index 00000000..77b7d258 --- /dev/null +++ b/avalanchego/vms/avm/blocks/builder/builder.go @@ -0,0 +1,182 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package builder + +import ( + "context" + "errors" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" + + blockexecutor "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + txexecutor "github.com/ava-labs/avalanchego/vms/avm/txs/executor" +) + +// targetBlockSize is the max block size we aim to produce +const targetBlockSize = 128 * units.KiB + +var ( + _ Builder = (*builder)(nil) + + ErrNoTransactions = errors.New("no transactions") +) + +type Builder interface { + // BuildBlock can be called to attempt to create a new block + BuildBlock(context.Context) (snowman.Block, error) +} + +// builder implements a simple builder to convert txs into valid blocks +type builder struct { + backend *txexecutor.Backend + manager blockexecutor.Manager + clk *mockable.Clock + + // Pool of all txs that may be able to be added + mempool mempool.Mempool +} + +func New( + backend *txexecutor.Backend, + manager blockexecutor.Manager, + clk *mockable.Clock, + mempool mempool.Mempool, +) Builder { + return &builder{ + backend: backend, + manager: manager, + clk: clk, + mempool: mempool, + } +} + +// BuildBlock builds a block to be added to consensus. +func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { + defer b.mempool.RequestBuildBlock() + + ctx := b.backend.Ctx + ctx.Log.Debug("starting to attempt to build a block") + + // Get the block to build on top of and retrieve the new block's context. 
+ preferredID := b.manager.Preferred() + preferred, err := b.manager.GetStatelessBlock(preferredID) + if err != nil { + return nil, err + } + + preferredHeight := preferred.Height() + preferredTimestamp := preferred.Timestamp() + + nextHeight := preferredHeight + 1 + nextTimestamp := b.clk.Time() // [timestamp] = max(now, parentTime) + if preferredTimestamp.After(nextTimestamp) { + nextTimestamp = preferredTimestamp + } + + stateDiff, err := states.NewDiff(preferredID, b.manager) + if err != nil { + return nil, err + } + + var ( + blockTxs []*txs.Tx + inputs set.Set[ids.ID] + remainingSize = targetBlockSize + ) + for { + tx := b.mempool.Peek(remainingSize) + if tx == nil { + break + } + b.mempool.Remove([]*txs.Tx{tx}) + + // Invariant: [tx] has already been syntactically verified. + + txDiff, err := wrapState(stateDiff) + if err != nil { + return nil, err + } + + err = tx.Unsigned.Visit(&txexecutor.SemanticVerifier{ + Backend: b.backend, + State: txDiff, + Tx: tx, + }) + if err != nil { + txID := tx.ID() + b.mempool.MarkDropped(txID, err) + continue + } + + executor := &txexecutor.Executor{ + Codec: b.backend.Codec, + State: txDiff, + Tx: tx, + } + err = tx.Unsigned.Visit(executor) + if err != nil { + txID := tx.ID() + b.mempool.MarkDropped(txID, err) + continue + } + + if inputs.Overlaps(executor.Inputs) { + txID := tx.ID() + b.mempool.MarkDropped(txID, blockexecutor.ErrConflictingBlockTxs) + continue + } + err = b.manager.VerifyUniqueInputs(preferredID, inputs) + if err != nil { + txID := tx.ID() + b.mempool.MarkDropped(txID, err) + continue + } + inputs.Union(executor.Inputs) + + txDiff.AddTx(tx) + txDiff.Apply(stateDiff) + + remainingSize -= len(tx.Bytes()) + blockTxs = append(blockTxs, tx) + } + + if len(blockTxs) == 0 { + return nil, ErrNoTransactions + } + + statelessBlk, err := blocks.NewStandardBlock( + preferredID, + nextHeight, + nextTimestamp, + blockTxs, + b.backend.Codec, + ) + if err != nil { + return nil, err + } + + return 
b.manager.NewBlock(statelessBlk), nil +} + +type stateGetter struct { + state states.Chain +} + +func (s stateGetter) GetState(ids.ID) (states.Chain, bool) { + return s.state, true +} + +func wrapState(parentState states.Chain) (states.Diff, error) { + return states.NewDiff(ids.Empty, stateGetter{ + state: parentState, + }) +} diff --git a/avalanchego/vms/avm/blocks/builder/builder_test.go b/avalanchego/vms/avm/blocks/builder/builder_test.go new file mode 100644 index 00000000..dbeaed63 --- /dev/null +++ b/avalanchego/vms/avm/blocks/builder/builder_test.go @@ -0,0 +1,642 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package builder + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/metrics" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + blkexecutor "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + 
txexecutor "github.com/ava-labs/avalanchego/vms/avm/txs/executor" +) + +var ( + errTest = errors.New("test error") + chainID = ids.GenerateTestID() + keys = secp256k1.TestKeys() +) + +func TestBuilderBuildBlock(t *testing.T) { + type test struct { + name string + builderFunc func(*gomock.Controller) Builder + expectedErr error + } + + tests := []test{ + { + name: "can't get stateless block", + builderFunc: func(ctrl *gomock.Controller) Builder { + preferredID := ids.GenerateTestID() + manager := blkexecutor.NewMockManager(ctrl) + manager.EXPECT().Preferred().Return(preferredID) + manager.EXPECT().GetStatelessBlock(preferredID).Return(nil, errTest) + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().RequestBuildBlock() + + return New( + &txexecutor.Backend{ + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + manager, + &mockable.Clock{}, + mempool, + ) + }, + expectedErr: errTest, + }, + { + name: "can't get preferred diff", + builderFunc: func(ctrl *gomock.Controller) Builder { + preferredID := ids.GenerateTestID() + preferredHeight := uint64(1337) + preferredTimestamp := time.Now() + preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock.EXPECT().Height().Return(preferredHeight) + preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) + + manager := blkexecutor.NewMockManager(ctrl) + manager.EXPECT().Preferred().Return(preferredID) + manager.EXPECT().GetStatelessBlock(preferredID).Return(preferredBlock, nil) + manager.EXPECT().GetState(preferredID).Return(nil, false) + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().RequestBuildBlock() + + return New( + &txexecutor.Backend{ + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + manager, + &mockable.Clock{}, + mempool, + ) + }, + expectedErr: states.ErrMissingParentState, + }, + { + name: "tx fails semantic verification", + builderFunc: func(ctrl *gomock.Controller) Builder { + preferredID := ids.GenerateTestID() + preferredHeight := uint64(1337) + preferredTimestamp 
:= time.Now() + preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock.EXPECT().Height().Return(preferredHeight) + preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) + + preferredState := states.NewMockChain(ctrl) + preferredState.EXPECT().GetLastAccepted().Return(preferredID) + preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) + + manager := blkexecutor.NewMockManager(ctrl) + manager.EXPECT().Preferred().Return(preferredID) + manager.EXPECT().GetStatelessBlock(preferredID).Return(preferredBlock, nil) + manager.EXPECT().GetState(preferredID).Return(preferredState, true) + + unsignedTx := txs.NewMockUnsignedTx(ctrl) + unsignedTx.EXPECT().Visit(gomock.Any()).Return(errTest) // Fail semantic verification + tx := &txs.Tx{Unsigned: unsignedTx} + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Remove([]*txs.Tx{tx}) + mempool.EXPECT().MarkDropped(tx.ID(), errTest) + // Second loop iteration + mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().RequestBuildBlock() + + return New( + &txexecutor.Backend{ + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + manager, + &mockable.Clock{}, + mempool, + ) + }, + expectedErr: ErrNoTransactions, // The only tx was invalid + }, + { + name: "tx fails execution", + builderFunc: func(ctrl *gomock.Controller) Builder { + preferredID := ids.GenerateTestID() + preferredHeight := uint64(1337) + preferredTimestamp := time.Now() + preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock.EXPECT().Height().Return(preferredHeight) + preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) + + preferredState := states.NewMockChain(ctrl) + preferredState.EXPECT().GetLastAccepted().Return(preferredID) + preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) + + manager := blkexecutor.NewMockManager(ctrl) + manager.EXPECT().Preferred().Return(preferredID) + 
manager.EXPECT().GetStatelessBlock(preferredID).Return(preferredBlock, nil) + manager.EXPECT().GetState(preferredID).Return(preferredState, true) + + unsignedTx := txs.NewMockUnsignedTx(ctrl) + unsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification + unsignedTx.EXPECT().Visit(gomock.Any()).Return(errTest) // Fail execution + tx := &txs.Tx{Unsigned: unsignedTx} + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Remove([]*txs.Tx{tx}) + mempool.EXPECT().MarkDropped(tx.ID(), errTest) + // Second loop iteration + mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().RequestBuildBlock() + + return New( + &txexecutor.Backend{ + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + manager, + &mockable.Clock{}, + mempool, + ) + }, + expectedErr: ErrNoTransactions, // The only tx was invalid + }, + { + name: "tx has non-unique inputs", + builderFunc: func(ctrl *gomock.Controller) Builder { + preferredID := ids.GenerateTestID() + preferredHeight := uint64(1337) + preferredTimestamp := time.Now() + preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock.EXPECT().Height().Return(preferredHeight) + preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) + + preferredState := states.NewMockChain(ctrl) + preferredState.EXPECT().GetLastAccepted().Return(preferredID) + preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) + + manager := blkexecutor.NewMockManager(ctrl) + manager.EXPECT().Preferred().Return(preferredID) + manager.EXPECT().GetStatelessBlock(preferredID).Return(preferredBlock, nil) + manager.EXPECT().GetState(preferredID).Return(preferredState, true) + manager.EXPECT().VerifyUniqueInputs(preferredID, gomock.Any()).Return(errTest) + + unsignedTx := txs.NewMockUnsignedTx(ctrl) + unsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification + unsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) // Pass execution + tx := 
&txs.Tx{Unsigned: unsignedTx} + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Remove([]*txs.Tx{tx}) + mempool.EXPECT().MarkDropped(tx.ID(), errTest) + // Second loop iteration + mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().RequestBuildBlock() + + return New( + &txexecutor.Backend{ + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + manager, + &mockable.Clock{}, + mempool, + ) + }, + expectedErr: ErrNoTransactions, // The only tx was invalid + }, + { + name: "txs consume same input", + builderFunc: func(ctrl *gomock.Controller) Builder { + preferredID := ids.GenerateTestID() + preferredHeight := uint64(1337) + preferredTimestamp := time.Now() + preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock.EXPECT().Height().Return(preferredHeight) + preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) + + preferredState := states.NewMockChain(ctrl) + preferredState.EXPECT().GetLastAccepted().Return(preferredID) + preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) + + // tx1 and tx2 both consume [inputID]. + // tx1 is added to the block first, so tx2 should be dropped. 
+ inputID := ids.GenerateTestID() + unsignedTx1 := txs.NewMockUnsignedTx(ctrl) + unsignedTx1.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification + unsignedTx1.EXPECT().Visit(gomock.Any()).DoAndReturn( // Pass execution + func(visitor txs.Visitor) error { + executor, ok := visitor.(*txexecutor.Executor) + require.True(t, ok) + executor.Inputs.Add(inputID) + return nil + }, + ) + unsignedTx1.EXPECT().SetBytes(gomock.Any()).AnyTimes() + tx1 := &txs.Tx{Unsigned: unsignedTx1} + // Set the bytes of tx1 to something other than nil + // so we can check that the remainingSize is updated + tx1Bytes := []byte{1, 2, 3} + tx1.SetBytes(nil, tx1Bytes) + + unsignedTx2 := txs.NewMockUnsignedTx(ctrl) + unsignedTx2.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification + unsignedTx2.EXPECT().Visit(gomock.Any()).DoAndReturn( // Pass execution + func(visitor txs.Visitor) error { + executor, ok := visitor.(*txexecutor.Executor) + require.True(t, ok) + executor.Inputs.Add(inputID) + return nil + }, + ) + tx2 := &txs.Tx{Unsigned: unsignedTx2} + + manager := blkexecutor.NewMockManager(ctrl) + manager.EXPECT().Preferred().Return(preferredID) + manager.EXPECT().GetStatelessBlock(preferredID).Return(preferredBlock, nil) + manager.EXPECT().GetState(preferredID).Return(preferredState, true) + manager.EXPECT().VerifyUniqueInputs(preferredID, gomock.Any()).Return(nil) + // Assert created block has one tx, tx1, + // and other fields are set correctly. 
+ manager.EXPECT().NewBlock(gomock.Any()).DoAndReturn( + func(block *blocks.StandardBlock) snowman.Block { + require.Len(t, block.Transactions, 1) + require.Equal(t, tx1, block.Transactions[0]) + require.Equal(t, preferredHeight+1, block.Height()) + require.Equal(t, preferredID, block.Parent()) + return nil + }, + ) + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Peek(targetBlockSize).Return(tx1) + mempool.EXPECT().Remove([]*txs.Tx{tx1}) + // Second loop iteration + mempool.EXPECT().Peek(targetBlockSize - len(tx1Bytes)).Return(tx2) + mempool.EXPECT().Remove([]*txs.Tx{tx2}) + mempool.EXPECT().MarkDropped(tx2.ID(), blkexecutor.ErrConflictingBlockTxs) + // Third loop iteration + mempool.EXPECT().Peek(targetBlockSize - len(tx1Bytes)).Return(nil) + mempool.EXPECT().RequestBuildBlock() + + // To marshal the tx/block + codec := codec.NewMockManager(ctrl) + codec.EXPECT().Marshal(gomock.Any(), gomock.Any()).Return([]byte{1, 2, 3}, nil).AnyTimes() + codec.EXPECT().Size(gomock.Any(), gomock.Any()).Return(2, nil).AnyTimes() + + return New( + &txexecutor.Backend{ + Codec: codec, + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + manager, + &mockable.Clock{}, + mempool, + ) + }, + expectedErr: nil, + }, + { + name: "preferred timestamp after now", + builderFunc: func(ctrl *gomock.Controller) Builder { + preferredID := ids.GenerateTestID() + preferredHeight := uint64(1337) + preferredTimestamp := time.Now() + preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock.EXPECT().Height().Return(preferredHeight) + preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) + + // Clock reads just before the preferred timestamp. + // Created block should have the preferred timestamp since it's later. 
+ clock := &mockable.Clock{} + clock.Set(preferredTimestamp.Add(-2 * time.Second)) + + preferredState := states.NewMockChain(ctrl) + preferredState.EXPECT().GetLastAccepted().Return(preferredID) + preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) + + manager := blkexecutor.NewMockManager(ctrl) + manager.EXPECT().Preferred().Return(preferredID) + manager.EXPECT().GetStatelessBlock(preferredID).Return(preferredBlock, nil) + manager.EXPECT().GetState(preferredID).Return(preferredState, true) + manager.EXPECT().VerifyUniqueInputs(preferredID, gomock.Any()).Return(nil) + // Assert that the created block has the right timestamp + manager.EXPECT().NewBlock(gomock.Any()).DoAndReturn( + func(block *blocks.StandardBlock) snowman.Block { + require.Equal(t, preferredTimestamp.Unix(), block.Timestamp().Unix()) + return nil + }, + ) + + inputID := ids.GenerateTestID() + unsignedTx := txs.NewMockUnsignedTx(ctrl) + unsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification + unsignedTx.EXPECT().Visit(gomock.Any()).DoAndReturn( // Pass execution + func(visitor txs.Visitor) error { + executor, ok := visitor.(*txexecutor.Executor) + require.True(t, ok) + executor.Inputs.Add(inputID) + return nil + }, + ) + unsignedTx.EXPECT().SetBytes(gomock.Any()).AnyTimes() + tx := &txs.Tx{Unsigned: unsignedTx} + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Peek(gomock.Any()).Return(tx) + mempool.EXPECT().Remove([]*txs.Tx{tx}) + // Second loop iteration + mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().RequestBuildBlock() + + // To marshal the tx/block + codec := codec.NewMockManager(ctrl) + codec.EXPECT().Marshal(gomock.Any(), gomock.Any()).Return([]byte{1, 2, 3}, nil).AnyTimes() + codec.EXPECT().Size(gomock.Any(), gomock.Any()).Return(2, nil).AnyTimes() + + return New( + &txexecutor.Backend{ + Codec: codec, + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + manager, + clock, + mempool, + ) + }, + expectedErr: nil, + 
}, + { + name: "preferred timestamp before now", + builderFunc: func(ctrl *gomock.Controller) Builder { + preferredID := ids.GenerateTestID() + preferredHeight := uint64(1337) + // preferred block's timestamp is after the time reported by clock + now := time.Now() + preferredTimestamp := now.Add(-2 * time.Second) + preferredBlock := blocks.NewMockBlock(ctrl) + preferredBlock.EXPECT().Height().Return(preferredHeight) + preferredBlock.EXPECT().Timestamp().Return(preferredTimestamp) + + // Clock reads after the preferred timestamp. + // Created block should have [now] timestamp since it's later. + clock := &mockable.Clock{} + clock.Set(now) + + preferredState := states.NewMockChain(ctrl) + preferredState.EXPECT().GetLastAccepted().Return(preferredID) + preferredState.EXPECT().GetTimestamp().Return(preferredTimestamp) + + manager := blkexecutor.NewMockManager(ctrl) + manager.EXPECT().Preferred().Return(preferredID) + manager.EXPECT().GetStatelessBlock(preferredID).Return(preferredBlock, nil) + manager.EXPECT().GetState(preferredID).Return(preferredState, true) + manager.EXPECT().VerifyUniqueInputs(preferredID, gomock.Any()).Return(nil) + // Assert that the created block has the right timestamp + manager.EXPECT().NewBlock(gomock.Any()).DoAndReturn( + func(block *blocks.StandardBlock) snowman.Block { + require.Equal(t, now.Unix(), block.Timestamp().Unix()) + return nil + }, + ) + + inputID := ids.GenerateTestID() + unsignedTx := txs.NewMockUnsignedTx(ctrl) + unsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) // Pass semantic verification + unsignedTx.EXPECT().Visit(gomock.Any()).DoAndReturn( // Pass execution + func(visitor txs.Visitor) error { + executor, ok := visitor.(*txexecutor.Executor) + require.True(t, ok) + executor.Inputs.Add(inputID) + return nil + }, + ) + unsignedTx.EXPECT().SetBytes(gomock.Any()).AnyTimes() + tx := &txs.Tx{Unsigned: unsignedTx} + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Peek(gomock.Any()).Return(tx) + 
mempool.EXPECT().Remove([]*txs.Tx{tx}) + // Second loop iteration + mempool.EXPECT().Peek(gomock.Any()).Return(nil) + mempool.EXPECT().RequestBuildBlock() + + // To marshal the tx/block + codec := codec.NewMockManager(ctrl) + codec.EXPECT().Marshal(gomock.Any(), gomock.Any()).Return([]byte{1, 2, 3}, nil).AnyTimes() + codec.EXPECT().Size(gomock.Any(), gomock.Any()).Return(2, nil).AnyTimes() + + return New( + &txexecutor.Backend{ + Codec: codec, + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + manager, + clock, + mempool, + ) + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + builder := tt.builderFunc(ctrl) + _, err := builder.BuildBlock(context.Background()) + require.ErrorIs(err, tt.expectedErr) + }) + } +} + +func TestBlockBuilderAddLocalTx(t *testing.T) { + transactions := createTxs() + + require := require.New(t) + + registerer := prometheus.NewRegistry() + toEngine := make(chan common.Message, 100) + mempool, err := mempool.New("mempool", registerer, toEngine) + require.NoError(err) + // add a tx to the mempool + tx := transactions[0] + txID := tx.ID() + err = mempool.Add(tx) + require.NoError(err) + + has := mempool.Has(txID) + require.True(has) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + parser, err := blocks.NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + }) + require.NoError(err) + + backend := &txexecutor.Backend{ + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + Codec: parser.Codec(), + } + + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + baseDB := versiondb.New(baseDBManager.Current().Database) + + state, err := states.New(baseDB, parser, registerer) + require.NoError(err) + + clk := &mockable.Clock{} + onAccept := func(*txs.Tx) error { return nil } + now := time.Now() + parentTimestamp := now.Add(-2 * time.Second) + parentID := ids.GenerateTestID() + cm := parser.Codec() + txs, 
err := createParentTxs(cm) + require.NoError(err) + parentBlk, err := blocks.NewStandardBlock(parentID, 0, parentTimestamp, txs, cm) + require.NoError(err) + state.AddBlock(parentBlk) + state.SetLastAccepted(parentBlk.ID()) + + metrics, err := metrics.New("", registerer) + require.NoError(err) + + manager := blkexecutor.NewManager(mempool, metrics, state, backend, clk, onAccept) + + manager.SetPreference(parentBlk.ID()) + + builder := New(backend, manager, clk, mempool) + + // show that build block fails if tx is invalid + _, err = builder.BuildBlock(context.Background()) + require.ErrorIs(err, ErrNoTransactions) +} + +func createTxs() []*txs.Tx { + return []*txs.Tx{{ + Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: ids.GenerateTestID(), + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: ids.GenerateTestID()}, + Out: &secp256k1fx.TransferOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + }}, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.ID{'t', 'x', 'I', 'D'}, + OutputIndex: 1, + }, + Asset: avax.Asset{ID: ids.GenerateTestID()}, + In: &secp256k1fx.TransferInput{ + Amt: uint64(54321), + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }}, + Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, + }}, + Creds: []*fxs.FxCredential{ + { + Verifiable: &secp256k1fx.Credential{}, + }, + }, + }} +} + +func createParentTxs(cm codec.Manager) ([]*txs.Tx, error) { + countTxs := 1 + testTxs := make([]*txs.Tx, 0, countTxs) + for i := 0; i < countTxs; i++ { + // Create the tx + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: ids.ID{1, 2, 3}}, + Out: &secp256k1fx.TransferOutput{ + Amt: uint64(12345), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + 
}, + }, + }}, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.ID{'t', 'x', 'p', 'a', 'r', 'e', 'n', 't'}, + OutputIndex: 1, + }, + Asset: avax.Asset{ID: ids.ID{1, 2, 3}}, + In: &secp256k1fx.TransferInput{ + Amt: uint64(54321), + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, + }, + }, + }}, + Memo: []byte{1, 2, 9, 4, 5, 6, 7, 8}, + }}} + if err := tx.SignSECP256K1Fx(cm, [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { + return nil, err + } + testTxs = append(testTxs, tx) + } + return testTxs, nil +} diff --git a/avalanchego/vms/avm/blocks/executor/block.go b/avalanchego/vms/avm/blocks/executor/block.go new file mode 100644 index 00000000..b2743abe --- /dev/null +++ b/avalanchego/vms/avm/blocks/executor/block.go @@ -0,0 +1,332 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package executor + +import ( + "context" + "errors" + "fmt" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs/executor" +) + +const SyncBound = 10 * time.Second + +var ( + _ snowman.Block = (*Block)(nil) + + ErrUnexpectedMerkleRoot = errors.New("unexpected merkle root") + ErrTimestampBeyondSyncBound = errors.New("proposed timestamp is too far in the future relative to local time") + ErrEmptyBlock = errors.New("block contains no transactions") + ErrChildBlockEarlierThanParent = errors.New("proposed timestamp before current chain time") + ErrConflictingBlockTxs = errors.New("block contains conflicting transactions") + ErrIncorrectHeight = errors.New("block has incorrect height") + ErrBlockNotFound = errors.New("block not found") +) 
+ +// Exported for testing in avm package. +type Block struct { + blocks.Block + manager *manager + rejected bool +} + +func (b *Block) Verify(context.Context) error { + blkID := b.ID() + if _, ok := b.manager.blkIDToState[blkID]; ok { + // This block has already been verified. + return nil + } + + // Currently we don't populate the blocks merkle root. + merkleRoot := b.Block.MerkleRoot() + if merkleRoot != ids.Empty { + return fmt.Errorf("%w: %s", ErrUnexpectedMerkleRoot, merkleRoot) + } + + // Only allow timestamp to reasonably far forward + newChainTime := b.Timestamp() + now := b.manager.clk.Time() + maxNewChainTime := now.Add(SyncBound) + if newChainTime.After(maxNewChainTime) { + return fmt.Errorf( + "%w, proposed time (%s), local time (%s)", + ErrTimestampBeyondSyncBound, + newChainTime, + now, + ) + } + + txs := b.Txs() + if len(txs) == 0 { + return ErrEmptyBlock + } + + // Syntactic verification is generally pretty fast, so we verify this first + // before performing any possible DB reads. + for _, tx := range txs { + err := tx.Unsigned.Visit(&executor.SyntacticVerifier{ + Backend: b.manager.backend, + Tx: tx, + }) + if err != nil { + txID := tx.ID() + b.manager.mempool.MarkDropped(txID, err) + return err + } + } + + // Verify that the parent exists. + parentID := b.Parent() + parent, err := b.manager.GetStatelessBlock(parentID) + if err != nil { + return err + } + + // Verify that currentBlkHeight = parentBlkHeight + 1. + expectedHeight := parent.Height() + 1 + height := b.Height() + if expectedHeight != height { + return fmt.Errorf( + "%w: expected height %d, got %d", + ErrIncorrectHeight, + expectedHeight, + height, + ) + } + + stateDiff, err := states.NewDiff(parentID, b.manager) + if err != nil { + return err + } + + parentChainTime := stateDiff.GetTimestamp() + // The proposed timestamp must not be before the parent's timestamp. 
+ if newChainTime.Before(parentChainTime) { + return fmt.Errorf( + "%w: proposed timestamp (%s), chain time (%s)", + ErrChildBlockEarlierThanParent, + newChainTime, + parentChainTime, + ) + } + + stateDiff.SetTimestamp(newChainTime) + + blockState := &blockState{ + statelessBlock: b.Block, + onAcceptState: stateDiff, + atomicRequests: make(map[ids.ID]*atomic.Requests), + } + + for _, tx := range txs { + // Verify that the tx is valid according to the current state of the + // chain. + err := tx.Unsigned.Visit(&executor.SemanticVerifier{ + Backend: b.manager.backend, + State: stateDiff, + Tx: tx, + }) + if err != nil { + txID := tx.ID() + b.manager.mempool.MarkDropped(txID, err) + return err + } + + // Apply the txs state changes to the state. + // + // Note: This must be done inside the same loop as semantic verification + // to ensure that semantic verification correctly accounts for + // transactions that occurred earlier in the block. + executor := &executor.Executor{ + Codec: b.manager.backend.Codec, + State: stateDiff, + Tx: tx, + } + err = tx.Unsigned.Visit(executor) + if err != nil { + txID := tx.ID() + b.manager.mempool.MarkDropped(txID, err) + return err + } + + // Verify that the transaction we just executed didn't consume inputs + // that were already imported in a previous transaction. + if blockState.importedInputs.Overlaps(executor.Inputs) { + txID := tx.ID() + b.manager.mempool.MarkDropped(txID, ErrConflictingBlockTxs) + return ErrConflictingBlockTxs + } + blockState.importedInputs.Union(executor.Inputs) + + // Now that the tx would be marked as accepted, we should add it to the + // state for the next transaction in the block. 
+ stateDiff.AddTx(tx) + + for chainID, txRequests := range executor.AtomicRequests { + // Add/merge in the atomic requests represented by [tx] + chainRequests, exists := blockState.atomicRequests[chainID] + if !exists { + blockState.atomicRequests[chainID] = txRequests + continue + } + + chainRequests.PutRequests = append(chainRequests.PutRequests, txRequests.PutRequests...) + chainRequests.RemoveRequests = append(chainRequests.RemoveRequests, txRequests.RemoveRequests...) + } + } + + // Verify that none of the transactions consumed any inputs that were + // already imported in a currently processing block. + err = b.manager.VerifyUniqueInputs(parentID, blockState.importedInputs) + if err != nil { + return err + } + + // Now that the block has been executed, we can add the block data to the + // state diff. + stateDiff.SetLastAccepted(blkID) + stateDiff.AddBlock(b.Block) + + b.manager.blkIDToState[blkID] = blockState + b.manager.mempool.Remove(txs) + return nil +} + +func (b *Block) Accept(context.Context) error { + blkID := b.ID() + defer b.manager.free(blkID) + + b.manager.backend.Ctx.Log.Debug( + "accepting block", + zap.Stringer("blkID", blkID), + zap.Uint64("height", b.Height()), + zap.Stringer("parentID", b.Parent()), + ) + + txs := b.Txs() + for _, tx := range txs { + if err := b.manager.onAccept(tx); err != nil { + return fmt.Errorf( + "failed to mark tx %q as accepted: %w", + blkID, + err, + ) + } + } + + b.manager.lastAccepted = blkID + b.manager.mempool.Remove(txs) + + blkState, ok := b.manager.blkIDToState[blkID] + if !ok { + return fmt.Errorf("%w: %s", ErrBlockNotFound, blkID) + } + + // Update the state to reflect the changes made in [onAcceptState]. + blkState.onAcceptState.Apply(b.manager.state) + + defer b.manager.state.Abort() + batch, err := b.manager.state.CommitBatch() + if err != nil { + return fmt.Errorf( + "failed to stage state diff for block %s: %w", + blkID, + err, + ) + } + + // Note that this method writes [batch] to the database. 
+ if err := b.manager.backend.Ctx.SharedMemory.Apply(blkState.atomicRequests, batch); err != nil { + return fmt.Errorf("failed to apply state diff to shared memory: %w", err) + } + return b.manager.metrics.MarkBlockAccepted(b) +} + +func (b *Block) Reject(context.Context) error { + blkID := b.ID() + defer b.manager.free(blkID) + + b.manager.backend.Ctx.Log.Verbo( + "rejecting block", + zap.Stringer("blkID", blkID), + zap.Uint64("height", b.Height()), + zap.Stringer("parentID", b.Parent()), + ) + + for _, tx := range b.Txs() { + if err := b.manager.VerifyTx(tx); err != nil { + b.manager.backend.Ctx.Log.Debug("dropping invalidated tx", + zap.Stringer("txID", tx.ID()), + zap.Stringer("blkID", blkID), + zap.Error(err), + ) + continue + } + if err := b.manager.mempool.Add(tx); err != nil { + b.manager.backend.Ctx.Log.Debug("dropping valid tx", + zap.Stringer("txID", tx.ID()), + zap.Stringer("blkID", blkID), + zap.Error(err), + ) + } + } + + b.rejected = true + return nil +} + +func (b *Block) Status() choices.Status { + // If this block's reference was rejected, we should report it as rejected. + // + // We don't persist the rejection, but that's fine. The consensus engine + // will hold the same reference to the block until it no longer needs it. + // After the consensus engine has released the reference to the block that + // was verified, it may get a new reference that isn't marked as rejected. + // The consensus engine may then try to issue the block, but will discover + // that it was rejected due to a conflicting block having been accepted. + if b.rejected { + return choices.Rejected + } + + blkID := b.ID() + // If this block is the last accepted block, we don't need to go to disk to + // check the status. + if b.manager.lastAccepted == blkID { + return choices.Accepted + } + // Check if the block is in memory. If so, it's processing. + if _, ok := b.manager.blkIDToState[blkID]; ok { + return choices.Processing + } + // Block isn't in memory. 
Check in the database. + _, err := b.manager.state.GetBlock(blkID) + switch err { + case nil: + return choices.Accepted + + case database.ErrNotFound: + // choices.Unknown means we don't have the bytes of the block. + // In this case, we do, so we return choices.Processing. + return choices.Processing + + default: + // TODO: correctly report this error to the consensus engine. + b.manager.backend.Ctx.Log.Error( + "dropping unhandled database error", + zap.Error(err), + ) + return choices.Processing + } +} diff --git a/avalanchego/vms/avm/blocks/executor/block_test.go b/avalanchego/vms/avm/blocks/executor/block_test.go new file mode 100644 index 00000000..781ede46 --- /dev/null +++ b/avalanchego/vms/avm/blocks/executor/block_test.go @@ -0,0 +1,1072 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package executor + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/metrics" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/executor" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" +) + +func TestBlockVerify(t *testing.T) { + type test struct { + name string + blockFunc func(*gomock.Controller) *Block + expectedErr error + postVerify func(*require.Assertions, *Block) + } + tests := []test{ + { + name: "block 
already verified", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + b := &Block{ + Block: mockBlock, + manager: &manager{ + blkIDToState: map[ids.ID]*blockState{}, + }, + } + b.manager.blkIDToState[b.ID()] = &blockState{ + statelessBlock: b.Block, + } + return b + }, + expectedErr: nil, + }, + { + name: "unexpected merkle root", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.GenerateTestID()).AnyTimes() + return &Block{ + Block: mockBlock, + manager: &manager{}, + } + }, + expectedErr: ErrUnexpectedMerkleRoot, + }, + { + name: "block timestamp too far in the future", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + now := time.Now() + tooFarInFutureTime := now.Add(SyncBound + 1) + mockBlock.EXPECT().Timestamp().Return(tooFarInFutureTime).AnyTimes() + clk := &mockable.Clock{} + clk.Set(now) + return &Block{ + Block: mockBlock, + manager: &manager{ + clk: clk, + }, + } + }, + expectedErr: ErrTimestampBeyondSyncBound, + }, + { + name: "block contains no transactions", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().Timestamp().Return(time.Now()).AnyTimes() + mockBlock.EXPECT().Txs().Return(nil).AnyTimes() + return &Block{ + Block: mockBlock, + manager: &manager{ + blkIDToState: map[ids.ID]*blockState{}, + clk: &mockable.Clock{}, + }, + } + }, + expectedErr: ErrEmptyBlock, + }, + { + name: "block transaction fails verification", + blockFunc: func(ctrl *gomock.Controller) 
*Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().Timestamp().Return(time.Now()).AnyTimes() + mockUnsignedTx := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(errTest) + errTx := &txs.Tx{ + Unsigned: mockUnsignedTx, + } + mockBlock.EXPECT().Txs().Return([]*txs.Tx{errTx}).AnyTimes() + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().MarkDropped(errTx.ID(), errTest).Times(1) + return &Block{ + Block: mockBlock, + manager: &manager{ + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), + blkIDToState: map[ids.ID]*blockState{}, + clk: &mockable.Clock{}, + }, + } + }, + expectedErr: errTest, + }, + { + name: "parent doesn't exist", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().Timestamp().Return(time.Now()).AnyTimes() + + mockUnsignedTx := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) + tx := &txs.Tx{ + Unsigned: mockUnsignedTx, + } + mockBlock.EXPECT().Txs().Return([]*txs.Tx{tx}).AnyTimes() + + parentID := ids.GenerateTestID() + mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() + + mockState := states.NewMockState(ctrl) + mockState.EXPECT().GetBlock(parentID).Return(nil, errTest) + return &Block{ + Block: mockBlock, + manager: &manager{ + state: mockState, + blkIDToState: map[ids.ID]*blockState{}, + clk: &mockable.Clock{}, + }, + } + }, + expectedErr: errTest, + }, + { + name: "block height isn't parent height + 1", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + 
mockBlock.EXPECT().Timestamp().Return(time.Now()).AnyTimes() + blockHeight := uint64(1337) + mockBlock.EXPECT().Height().Return(blockHeight).AnyTimes() + + mockUnsignedTx := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) + tx := &txs.Tx{ + Unsigned: mockUnsignedTx, + } + mockBlock.EXPECT().Txs().Return([]*txs.Tx{tx}).AnyTimes() + + parentID := ids.GenerateTestID() + mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() + + mockState := states.NewMockState(ctrl) + mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock.EXPECT().Height().Return(blockHeight) // Should be blockHeight - 1 + mockState.EXPECT().GetBlock(parentID).Return(mockParentBlock, nil) + + return &Block{ + Block: mockBlock, + manager: &manager{ + state: mockState, + blkIDToState: map[ids.ID]*blockState{}, + clk: &mockable.Clock{}, + }, + } + }, + expectedErr: ErrIncorrectHeight, + }, + { + name: "block timestamp before parent timestamp", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + blockTimestamp := time.Now() + mockBlock.EXPECT().Timestamp().Return(blockTimestamp).AnyTimes() + blockHeight := uint64(1337) + mockBlock.EXPECT().Height().Return(blockHeight).AnyTimes() + + mockUnsignedTx := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil) + tx := &txs.Tx{ + Unsigned: mockUnsignedTx, + } + mockBlock.EXPECT().Txs().Return([]*txs.Tx{tx}).AnyTimes() + + parentID := ids.GenerateTestID() + mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() + + mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock.EXPECT().Height().Return(blockHeight - 1) + + mockParentState := states.NewMockDiff(ctrl) + mockParentState.EXPECT().GetLastAccepted().Return(parentID) + mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp.Add(1)) + + return &Block{ + Block: 
mockBlock, + manager: &manager{ + blkIDToState: map[ids.ID]*blockState{ + parentID: { + onAcceptState: mockParentState, + statelessBlock: mockParentBlock, + }, + }, + clk: &mockable.Clock{}, + lastAccepted: parentID, + }, + } + }, + expectedErr: ErrChildBlockEarlierThanParent, + }, + { + name: "tx fails semantic verification", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + blockTimestamp := time.Now() + mockBlock.EXPECT().Timestamp().Return(blockTimestamp).AnyTimes() + blockHeight := uint64(1337) + mockBlock.EXPECT().Height().Return(blockHeight).AnyTimes() + + mockUnsignedTx := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Syntactic verification passes + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(errTest).Times(1) // Semantic verification fails + tx := &txs.Tx{ + Unsigned: mockUnsignedTx, + } + mockBlock.EXPECT().Txs().Return([]*txs.Tx{tx}).AnyTimes() + + parentID := ids.GenerateTestID() + mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() + + mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock.EXPECT().Height().Return(blockHeight - 1) + + mockParentState := states.NewMockDiff(ctrl) + mockParentState.EXPECT().GetLastAccepted().Return(parentID) + mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().MarkDropped(tx.ID(), errTest).Times(1) + return &Block{ + Block: mockBlock, + manager: &manager{ + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), + blkIDToState: map[ids.ID]*blockState{ + parentID: { + onAcceptState: mockParentState, + statelessBlock: mockParentBlock, + }, + }, + clk: &mockable.Clock{}, + lastAccepted: parentID, + }, + } + }, + expectedErr: errTest, + }, + { + name: "tx fails execution", + blockFunc: func(ctrl *gomock.Controller) 
*Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + blockTimestamp := time.Now() + mockBlock.EXPECT().Timestamp().Return(blockTimestamp).AnyTimes() + blockHeight := uint64(1337) + mockBlock.EXPECT().Height().Return(blockHeight).AnyTimes() + + mockUnsignedTx := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Syntactic verification passes + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Semantic verification passes + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(errTest).Times(1) // Execution fails + tx := &txs.Tx{ + Unsigned: mockUnsignedTx, + } + mockBlock.EXPECT().Txs().Return([]*txs.Tx{tx}).AnyTimes() + + parentID := ids.GenerateTestID() + mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() + + mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock.EXPECT().Height().Return(blockHeight - 1) + + mockParentState := states.NewMockDiff(ctrl) + mockParentState.EXPECT().GetLastAccepted().Return(parentID) + mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().MarkDropped(tx.ID(), errTest).Times(1) + return &Block{ + Block: mockBlock, + manager: &manager{ + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), + backend: &executor.Backend{}, + blkIDToState: map[ids.ID]*blockState{ + parentID: { + onAcceptState: mockParentState, + statelessBlock: mockParentBlock, + }, + }, + clk: &mockable.Clock{}, + lastAccepted: parentID, + }, + } + }, + expectedErr: errTest, + }, + { + name: "tx imported inputs overlap", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + blockTimestamp := time.Now() + 
mockBlock.EXPECT().Timestamp().Return(blockTimestamp).AnyTimes() + blockHeight := uint64(1337) + mockBlock.EXPECT().Height().Return(blockHeight).AnyTimes() + + // tx1 and tx2 both consume imported input [inputID] + inputID := ids.GenerateTestID() + mockUnsignedTx1 := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx1.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Syntactic verification passes + mockUnsignedTx1.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Semantic verification fails + mockUnsignedTx1.EXPECT().Visit(gomock.Any()).DoAndReturn( + func(visitor txs.Visitor) error { + executor, ok := visitor.(*executor.Executor) + if !ok { + return errors.New("wrong visitor type") + } + executor.Inputs.Add(inputID) + return nil + }, + ).Times(1) + mockUnsignedTx2 := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx2.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Syntactic verification passes + mockUnsignedTx2.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Semantic verification fails + mockUnsignedTx2.EXPECT().Visit(gomock.Any()).DoAndReturn( + func(visitor txs.Visitor) error { + executor, ok := visitor.(*executor.Executor) + if !ok { + return errors.New("wrong visitor type") + } + executor.Inputs.Add(inputID) + return nil + }, + ).Times(1) + tx1 := &txs.Tx{ + Unsigned: mockUnsignedTx1, + } + tx2 := &txs.Tx{ + Unsigned: mockUnsignedTx2, + } + mockBlock.EXPECT().Txs().Return([]*txs.Tx{tx1, tx2}).AnyTimes() + + parentID := ids.GenerateTestID() + mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() + + mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock.EXPECT().Height().Return(blockHeight - 1) + + mockParentState := states.NewMockDiff(ctrl) + mockParentState.EXPECT().GetLastAccepted().Return(parentID) + mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().MarkDropped(tx2.ID(), ErrConflictingBlockTxs).Times(1) + return &Block{ + Block: mockBlock, + manager: &manager{ + 
mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), + backend: &executor.Backend{}, + blkIDToState: map[ids.ID]*blockState{ + parentID: { + onAcceptState: mockParentState, + statelessBlock: mockParentBlock, + }, + }, + clk: &mockable.Clock{}, + lastAccepted: parentID, + }, + } + }, + expectedErr: ErrConflictingBlockTxs, + }, + { + name: "tx input overlaps with other tx", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + blockTimestamp := time.Now() + mockBlock.EXPECT().Timestamp().Return(blockTimestamp).AnyTimes() + blockHeight := uint64(1337) + mockBlock.EXPECT().Height().Return(blockHeight).AnyTimes() + + // tx1 and parent block both consume [inputID] + inputID := ids.GenerateTestID() + mockUnsignedTx := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Syntactic verification passes + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Semantic verification fails + mockUnsignedTx.EXPECT().Visit(gomock.Any()).DoAndReturn( + func(visitor txs.Visitor) error { + executor, ok := visitor.(*executor.Executor) + if !ok { + return errors.New("wrong visitor type") + } + executor.Inputs.Add(inputID) + return nil + }, + ).Times(1) + tx := &txs.Tx{ + Unsigned: mockUnsignedTx, + } + mockBlock.EXPECT().Txs().Return([]*txs.Tx{tx}).AnyTimes() + + parentID := ids.GenerateTestID() + mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() + + mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock.EXPECT().Height().Return(blockHeight - 1) + + mockParentState := states.NewMockDiff(ctrl) + mockParentState.EXPECT().GetLastAccepted().Return(parentID) + mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) + + return &Block{ + Block: mockBlock, + manager: &manager{ + backend: &executor.Backend{}, + blkIDToState: map[ids.ID]*blockState{ + 
parentID: { + onAcceptState: mockParentState, + statelessBlock: mockParentBlock, + importedInputs: set.Set[ids.ID]{inputID: struct{}{}}, + }, + }, + clk: &mockable.Clock{}, + lastAccepted: parentID, + }, + } + }, + expectedErr: ErrConflictingParentTxs, + }, + { + name: "happy path", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.Empty).AnyTimes() + mockBlock.EXPECT().MerkleRoot().Return(ids.Empty).AnyTimes() + blockTimestamp := time.Now() + mockBlock.EXPECT().Timestamp().Return(blockTimestamp).AnyTimes() + blockHeight := uint64(1337) + mockBlock.EXPECT().Height().Return(blockHeight).AnyTimes() + + mockUnsignedTx := txs.NewMockUnsignedTx(ctrl) + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Syntactic verification passes + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Semantic verification fails + mockUnsignedTx.EXPECT().Visit(gomock.Any()).Return(nil).Times(1) // Execution passes + tx := &txs.Tx{ + Unsigned: mockUnsignedTx, + } + mockBlock.EXPECT().Txs().Return([]*txs.Tx{tx}).AnyTimes() + + parentID := ids.GenerateTestID() + mockBlock.EXPECT().Parent().Return(parentID).AnyTimes() + + mockParentBlock := blocks.NewMockBlock(ctrl) + mockParentBlock.EXPECT().Height().Return(blockHeight - 1) + + mockParentState := states.NewMockDiff(ctrl) + mockParentState.EXPECT().GetLastAccepted().Return(parentID) + mockParentState.EXPECT().GetTimestamp().Return(blockTimestamp) + + mockMempool := mempool.NewMockMempool(ctrl) + mockMempool.EXPECT().Remove([]*txs.Tx{tx}) + return &Block{ + Block: mockBlock, + manager: &manager{ + mempool: mockMempool, + metrics: metrics.NewMockMetrics(ctrl), + backend: &executor.Backend{}, + blkIDToState: map[ids.ID]*blockState{ + parentID: { + onAcceptState: mockParentState, + statelessBlock: mockParentBlock, + }, + }, + clk: &mockable.Clock{}, + lastAccepted: parentID, + }, + } + }, + expectedErr: nil, + postVerify: 
func(require *require.Assertions, b *Block) { + // Assert block is in the cache + blockState, ok := b.manager.blkIDToState[b.ID()] + require.True(ok) + require.Equal(b.Block, blockState.statelessBlock) + + // Assert block is added to on accept state + persistedBlock, err := blockState.onAcceptState.GetBlock(b.ID()) + require.NoError(err) + require.Equal(b.Block, persistedBlock) + + // Assert block is set to last accepted + lastAccepted := b.ID() + require.Equal(lastAccepted, blockState.onAcceptState.GetLastAccepted()) + + // Assert txs are added to on accept state + blockTxs := b.Txs() + for _, tx := range blockTxs { + _, err := blockState.onAcceptState.GetTx(tx.ID()) + require.NoError(err) + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + b := tt.blockFunc(ctrl) + err := b.Verify(context.Background()) + require.ErrorIs(err, tt.expectedErr) + if tt.postVerify != nil { + tt.postVerify(require, b) + } + }) + } +} + +func TestBlockAccept(t *testing.T) { + type test struct { + name string + blockFunc func(*gomock.Controller) *Block + expectedErr error + } + tests := []test{ + { + name: "block not found", + blockFunc: func(ctrl *gomock.Controller) *Block { + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() + mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() + mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + mockBlock.EXPECT().Txs().Return([]*txs.Tx{}).AnyTimes() + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Remove(gomock.Any()).AnyTimes() + + return &Block{ + Block: mockBlock, + manager: &manager{ + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), + backend: &executor.Backend{ + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + blkIDToState: map[ids.ID]*blockState{}, + }, + } + }, + expectedErr: ErrBlockNotFound, + }, + { + name: 
"can't get commit batch", + blockFunc: func(ctrl *gomock.Controller) *Block { + blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() + mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + mockBlock.EXPECT().Txs().Return([]*txs.Tx{}).AnyTimes() + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Remove(gomock.Any()).AnyTimes() + + mockManagerState := states.NewMockState(ctrl) + mockManagerState.EXPECT().CommitBatch().Return(nil, errTest) + mockManagerState.EXPECT().Abort() + + mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState.EXPECT().Apply(mockManagerState) + + return &Block{ + Block: mockBlock, + manager: &manager{ + state: mockManagerState, + mempool: mempool, + backend: &executor.Backend{ + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + blkIDToState: map[ids.ID]*blockState{ + blockID: { + onAcceptState: mockOnAcceptState, + }, + }, + }, + } + }, + expectedErr: errTest, + }, + { + name: "can't apply shared memory", + blockFunc: func(ctrl *gomock.Controller) *Block { + blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() + mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + mockBlock.EXPECT().Txs().Return([]*txs.Tx{}).AnyTimes() + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Remove(gomock.Any()).AnyTimes() + + mockManagerState := states.NewMockState(ctrl) + // Note the returned batch is nil but not used + // because we mock the call to shared memory + mockManagerState.EXPECT().CommitBatch().Return(nil, nil) + mockManagerState.EXPECT().Abort() + + mockSharedMemory := atomic.NewMockSharedMemory(ctrl) + mockSharedMemory.EXPECT().Apply(gomock.Any(), gomock.Any()).Return(errTest) + + mockOnAcceptState := 
states.NewMockDiff(ctrl) + mockOnAcceptState.EXPECT().Apply(mockManagerState) + + return &Block{ + Block: mockBlock, + manager: &manager{ + state: mockManagerState, + mempool: mempool, + backend: &executor.Backend{ + Ctx: &snow.Context{ + SharedMemory: mockSharedMemory, + Log: logging.NoLog{}, + }, + }, + blkIDToState: map[ids.ID]*blockState{ + blockID: { + onAcceptState: mockOnAcceptState, + }, + }, + }, + } + }, + expectedErr: errTest, + }, + { + name: "failed to apply metrics", + blockFunc: func(ctrl *gomock.Controller) *Block { + blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() + mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + mockBlock.EXPECT().Txs().Return([]*txs.Tx{}).AnyTimes() + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Remove(gomock.Any()).AnyTimes() + + mockManagerState := states.NewMockState(ctrl) + // Note the returned batch is nil but not used + // because we mock the call to shared memory + mockManagerState.EXPECT().CommitBatch().Return(nil, nil) + mockManagerState.EXPECT().Abort() + + mockSharedMemory := atomic.NewMockSharedMemory(ctrl) + mockSharedMemory.EXPECT().Apply(gomock.Any(), gomock.Any()).Return(nil) + + mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState.EXPECT().Apply(mockManagerState) + + metrics := metrics.NewMockMetrics(ctrl) + metrics.EXPECT().MarkBlockAccepted(gomock.Any()).Return(errTest) + + return &Block{ + Block: mockBlock, + manager: &manager{ + state: mockManagerState, + mempool: mempool, + metrics: metrics, + backend: &executor.Backend{ + Ctx: &snow.Context{ + SharedMemory: mockSharedMemory, + Log: logging.NoLog{}, + }, + }, + blkIDToState: map[ids.ID]*blockState{ + blockID: { + onAcceptState: mockOnAcceptState, + }, + }, + }, + } + }, + expectedErr: errTest, + }, + { + name: "no error", + blockFunc: func(ctrl *gomock.Controller) *Block { 
+ blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() + mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + mockBlock.EXPECT().Txs().Return([]*txs.Tx{}).AnyTimes() + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Remove(gomock.Any()).AnyTimes() + + mockManagerState := states.NewMockState(ctrl) + // Note the returned batch is nil but not used + // because we mock the call to shared memory + mockManagerState.EXPECT().CommitBatch().Return(nil, nil) + mockManagerState.EXPECT().Abort() + + mockSharedMemory := atomic.NewMockSharedMemory(ctrl) + mockSharedMemory.EXPECT().Apply(gomock.Any(), gomock.Any()).Return(nil) + + mockOnAcceptState := states.NewMockDiff(ctrl) + mockOnAcceptState.EXPECT().Apply(mockManagerState) + + metrics := metrics.NewMockMetrics(ctrl) + metrics.EXPECT().MarkBlockAccepted(gomock.Any()).Return(nil) + + return &Block{ + Block: mockBlock, + manager: &manager{ + state: mockManagerState, + mempool: mempool, + metrics: metrics, + backend: &executor.Backend{ + Ctx: &snow.Context{ + SharedMemory: mockSharedMemory, + Log: logging.NoLog{}, + }, + }, + blkIDToState: map[ids.ID]*blockState{ + blockID: { + onAcceptState: mockOnAcceptState, + }, + }, + }, + } + }, + expectedErr: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + b := tt.blockFunc(ctrl) + err := b.Accept(context.Background()) + require.ErrorIs(err, tt.expectedErr) + if err == nil { + // Make sure block is removed from cache + _, ok := b.manager.blkIDToState[b.ID()] + require.False(ok) + } + }) + } +} + +func TestBlockReject(t *testing.T) { + type test struct { + name string + blockFunc func(*gomock.Controller) *Block + } + tests := []test{ + { + name: "one tx passes verification; one fails syntactic verification; one 
fails semantic verification; one fails execution", + blockFunc: func(ctrl *gomock.Controller) *Block { + blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() + mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + + unsignedValidTx := txs.NewMockUnsignedTx(ctrl) + unsignedValidTx.EXPECT().SetBytes(gomock.Any()) + unsignedValidTx.EXPECT().Visit(gomock.Any()).Return(nil).AnyTimes() // Passes verification and execution + + unsignedSyntacticallyInvalidTx := txs.NewMockUnsignedTx(ctrl) + unsignedSyntacticallyInvalidTx.EXPECT().SetBytes(gomock.Any()) + unsignedSyntacticallyInvalidTx.EXPECT().Visit(gomock.Any()).Return(errTest) // Fails syntactic verification + + unsignedSemanticallyInvalidTx := txs.NewMockUnsignedTx(ctrl) + unsignedSemanticallyInvalidTx.EXPECT().SetBytes(gomock.Any()) + unsignedSemanticallyInvalidTx.EXPECT().Visit(gomock.Any()).Return(nil) // Passes syntactic verification + unsignedSemanticallyInvalidTx.EXPECT().Visit(gomock.Any()).Return(errTest) // Fails semantic verification + + unsignedExecutionFailsTx := txs.NewMockUnsignedTx(ctrl) + unsignedExecutionFailsTx.EXPECT().SetBytes(gomock.Any()) + unsignedExecutionFailsTx.EXPECT().Visit(gomock.Any()).Return(nil) // Passes syntactic verification + unsignedExecutionFailsTx.EXPECT().Visit(gomock.Any()).Return(nil) // Passes semantic verification + unsignedExecutionFailsTx.EXPECT().Visit(gomock.Any()).Return(errTest) // Fails execution + + // Give each tx a unique ID + validTx := &txs.Tx{Unsigned: unsignedValidTx} + validTx.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) + syntacticallyInvalidTx := &txs.Tx{Unsigned: unsignedSyntacticallyInvalidTx} + syntacticallyInvalidTx.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) + semanticallyInvalidTx := &txs.Tx{Unsigned: unsignedSemanticallyInvalidTx} + semanticallyInvalidTx.SetBytes(utils.RandomBytes(16), 
utils.RandomBytes(16)) + executionFailsTx := &txs.Tx{Unsigned: unsignedExecutionFailsTx} + executionFailsTx.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) + + mockBlock.EXPECT().Txs().Return([]*txs.Tx{ + validTx, + syntacticallyInvalidTx, + semanticallyInvalidTx, + executionFailsTx, + }) + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Add(validTx).Return(nil) // Only add the one that passes verification + + preferredID := ids.GenerateTestID() + mockPreferredState := states.NewMockDiff(ctrl) + mockPreferredState.EXPECT().GetLastAccepted().Return(ids.GenerateTestID()).AnyTimes() + mockPreferredState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() + + return &Block{ + Block: mockBlock, + manager: &manager{ + preferred: preferredID, + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), + backend: &executor.Backend{ + Bootstrapped: true, + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + blkIDToState: map[ids.ID]*blockState{ + preferredID: { + onAcceptState: mockPreferredState, + }, + blockID: {}, + }, + }, + } + }, + }, + { + name: "all txs valid", + blockFunc: func(ctrl *gomock.Controller) *Block { + blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + mockBlock.EXPECT().Height().Return(uint64(0)).AnyTimes() + mockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + + unsignedTx1 := txs.NewMockUnsignedTx(ctrl) + unsignedTx1.EXPECT().SetBytes(gomock.Any()) + unsignedTx1.EXPECT().Visit(gomock.Any()).Return(nil).AnyTimes() // Passes verification and execution + + unsignedTx2 := txs.NewMockUnsignedTx(ctrl) + unsignedTx2.EXPECT().SetBytes(gomock.Any()) + unsignedTx2.EXPECT().Visit(gomock.Any()).Return(nil).AnyTimes() // Passes verification and execution + + // Give each tx a unique ID + tx1 := &txs.Tx{Unsigned: unsignedTx1} + tx1.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) + tx2 := &txs.Tx{Unsigned: unsignedTx2} + 
tx2.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) + + mockBlock.EXPECT().Txs().Return([]*txs.Tx{ + tx1, + tx2, + }) + + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Add(tx1).Return(nil) + mempool.EXPECT().Add(tx2).Return(nil) + + preferredID := ids.GenerateTestID() + mockPreferredState := states.NewMockDiff(ctrl) + mockPreferredState.EXPECT().GetLastAccepted().Return(ids.GenerateTestID()).AnyTimes() + mockPreferredState.EXPECT().GetTimestamp().Return(time.Now()).AnyTimes() + + return &Block{ + Block: mockBlock, + manager: &manager{ + preferred: preferredID, + mempool: mempool, + metrics: metrics.NewMockMetrics(ctrl), + backend: &executor.Backend{ + Bootstrapped: true, + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + blkIDToState: map[ids.ID]*blockState{ + preferredID: { + onAcceptState: mockPreferredState, + }, + blockID: {}, + }, + }, + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + b := tt.blockFunc(ctrl) + err := b.Reject(context.Background()) + require.NoError(err) + require.True(b.rejected) + _, ok := b.manager.blkIDToState[b.ID()] + require.False(ok) + }) + } +} + +func TestBlockStatus(t *testing.T) { + type test struct { + name string + blockFunc func(ctrl *gomock.Controller) *Block + expected choices.Status + } + tests := []test{ + { + name: "block is rejected", + blockFunc: func(ctrl *gomock.Controller) *Block { + return &Block{ + rejected: true, + } + }, + expected: choices.Rejected, + }, + { + name: "block is last accepted", + blockFunc: func(ctrl *gomock.Controller) *Block { + blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + return &Block{ + Block: mockBlock, + manager: &manager{ + lastAccepted: blockID, + }, + } + }, + expected: choices.Accepted, + }, + { + name: "block is processing", + blockFunc: func(ctrl 
*gomock.Controller) *Block { + blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + return &Block{ + Block: mockBlock, + manager: &manager{ + blkIDToState: map[ids.ID]*blockState{ + blockID: {}, + }, + }, + } + }, + expected: choices.Processing, + }, + { + name: "block is accepted but not last accepted", + blockFunc: func(ctrl *gomock.Controller) *Block { + blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + + mockState := states.NewMockState(ctrl) + mockState.EXPECT().GetBlock(blockID).Return(nil, nil) + + return &Block{ + Block: mockBlock, + manager: &manager{ + blkIDToState: map[ids.ID]*blockState{}, + state: mockState, + }, + } + }, + expected: choices.Accepted, + }, + { + name: "block is unknown", + blockFunc: func(ctrl *gomock.Controller) *Block { + blockID := ids.GenerateTestID() + mockBlock := blocks.NewMockBlock(ctrl) + mockBlock.EXPECT().ID().Return(blockID).AnyTimes() + + mockState := states.NewMockState(ctrl) + mockState.EXPECT().GetBlock(blockID).Return(nil, database.ErrNotFound) + + return &Block{ + Block: mockBlock, + manager: &manager{ + blkIDToState: map[ids.ID]*blockState{}, + state: mockState, + }, + } + }, + expected: choices.Processing, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + b := tt.blockFunc(ctrl) + require.Equal(tt.expected, b.Status()) + }) + } +} diff --git a/avalanchego/vms/avm/blocks/executor/manager.go b/avalanchego/vms/avm/blocks/executor/manager.go new file mode 100644 index 00000000..0a233b6a --- /dev/null +++ b/avalanchego/vms/avm/blocks/executor/manager.go @@ -0,0 +1,210 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package executor + +import ( + "errors" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/metrics" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/executor" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" +) + +var ( + _ Manager = (*manager)(nil) + + ErrChainNotSynced = errors.New("chain not synced") + ErrConflictingParentTxs = errors.New("block contains a transaction that conflicts with a transaction in a parent block") +) + +type Manager interface { + states.Versions + + // Returns the ID of the most recently accepted block. + LastAccepted() ids.ID + + SetPreference(blkID ids.ID) + Preferred() ids.ID + + GetBlock(blkID ids.ID) (snowman.Block, error) + GetStatelessBlock(blkID ids.ID) (blocks.Block, error) + NewBlock(blocks.Block) snowman.Block + + // VerifyTx verifies that the transaction can be issued based on the + // currently preferred state. + VerifyTx(tx *txs.Tx) error + + // VerifyUniqueInputs verifies that the inputs are not duplicated in the + // provided blk or any of its ancestors pinned in memory. 
+ VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error +} + +func NewManager( + mempool mempool.Mempool, + metrics metrics.Metrics, + state states.State, + backend *executor.Backend, + clk *mockable.Clock, + onAccept func(*txs.Tx) error, +) Manager { + lastAccepted := state.GetLastAccepted() + return &manager{ + backend: backend, + state: state, + metrics: metrics, + mempool: mempool, + clk: clk, + onAccept: onAccept, + blkIDToState: map[ids.ID]*blockState{}, + lastAccepted: lastAccepted, + preferred: lastAccepted, + } +} + +type manager struct { + backend *executor.Backend + state states.State + metrics metrics.Metrics + mempool mempool.Mempool + clk *mockable.Clock + // Invariant: onAccept is called when [tx] is being marked as accepted, but + // before its state changes are applied. + // Invariant: any error returned by onAccept should be considered fatal. + onAccept func(*txs.Tx) error + + // blkIDToState is a map from a block's ID to the state of the block. + // Blocks are put into this map when they are verified. + // Blocks are removed from this map when they are decided. + blkIDToState map[ids.ID]*blockState + + // lastAccepted is the ID of the last block that had Accept() called on it. + lastAccepted ids.ID + preferred ids.ID +} + +type blockState struct { + statelessBlock blocks.Block + onAcceptState states.Diff + importedInputs set.Set[ids.ID] + atomicRequests map[ids.ID]*atomic.Requests +} + +func (m *manager) GetState(blkID ids.ID) (states.Chain, bool) { + // If the block is in the map, it is processing. 
+ if state, ok := m.blkIDToState[blkID]; ok { + return state.onAcceptState, true + } + return m.state, blkID == m.lastAccepted +} + +func (m *manager) LastAccepted() ids.ID { + return m.lastAccepted +} + +func (m *manager) SetPreference(blockID ids.ID) { + m.preferred = blockID +} + +func (m *manager) Preferred() ids.ID { + return m.preferred +} + +func (m *manager) GetBlock(blkID ids.ID) (snowman.Block, error) { + blk, err := m.GetStatelessBlock(blkID) + if err != nil { + return nil, err + } + return m.NewBlock(blk), nil +} + +func (m *manager) GetStatelessBlock(blkID ids.ID) (blocks.Block, error) { + // See if the block is in memory. + if blkState, ok := m.blkIDToState[blkID]; ok { + return blkState.statelessBlock, nil + } + // The block isn't in memory. Check the database. + return m.state.GetBlock(blkID) +} + +func (m *manager) NewBlock(blk blocks.Block) snowman.Block { + return &Block{ + Block: blk, + manager: m, + } +} + +func (m *manager) VerifyTx(tx *txs.Tx) error { + if !m.backend.Bootstrapped { + return ErrChainNotSynced + } + + err := tx.Unsigned.Visit(&executor.SyntacticVerifier{ + Backend: m.backend, + Tx: tx, + }) + if err != nil { + return err + } + + stateDiff, err := states.NewDiff(m.preferred, m) + if err != nil { + return err + } + + err = tx.Unsigned.Visit(&executor.SemanticVerifier{ + Backend: m.backend, + State: stateDiff, + Tx: tx, + }) + if err != nil { + return err + } + + executor := &executor.Executor{ + Codec: m.backend.Codec, + State: stateDiff, + Tx: tx, + } + err = tx.Unsigned.Visit(executor) + if err != nil { + return err + } + + return m.VerifyUniqueInputs(m.preferred, executor.Inputs) +} + +func (m *manager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { + if inputs.Len() == 0 { + return nil + } + + // Check for conflicts in ancestors. + for { + state, ok := m.blkIDToState[blkID] + if !ok { + // The parent state isn't pinned in memory. + // This means the parent must be accepted already. 
+ return nil + } + + if state.importedInputs.Overlaps(inputs) { + return ErrConflictingParentTxs + } + + blk := state.statelessBlock + blkID = blk.Parent() + } +} + +func (m *manager) free(blkID ids.ID) { + delete(m.blkIDToState, blkID) +} diff --git a/avalanchego/vms/avm/blocks/executor/manager_test.go b/avalanchego/vms/avm/blocks/executor/manager_test.go new file mode 100644 index 00000000..f5c43d1f --- /dev/null +++ b/avalanchego/vms/avm/blocks/executor/manager_test.go @@ -0,0 +1,349 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package executor + +import ( + "errors" + "testing" + "time" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/executor" +) + +var ( + errTest = errors.New("test error") + errTestSyntacticVerifyFail = errors.New("test error") + errTestSemanticVerifyFail = errors.New("test error") + errTestExecutionFail = errors.New("test error") +) + +func TestManagerGetStatelessBlock(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := states.NewMockState(ctrl) + m := &manager{ + state: state, + blkIDToState: map[ids.ID]*blockState{}, + } + + // Case: block is in memory + { + statelessBlk := blocks.NewMockBlock(ctrl) + blkID := ids.GenerateTestID() + blk := &blockState{ + statelessBlock: statelessBlk, + } + m.blkIDToState[blkID] = blk + gotBlk, err := m.GetStatelessBlock(blkID) + require.NoError(err) + require.Equal(statelessBlk, gotBlk) + } + + // Case: block isn't in memory + { + blkID := ids.GenerateTestID() + blk := blocks.NewMockBlock(ctrl) + state.EXPECT().GetBlock(blkID).Return(blk, nil) + gotBlk, err := 
m.GetStatelessBlock(blkID) + require.NoError(err) + require.Equal(blk, gotBlk) + } + + // Case: error while getting block from state + { + blkID := ids.GenerateTestID() + state.EXPECT().GetBlock(blkID).Return(nil, errTest) + _, err := m.GetStatelessBlock(blkID) + require.ErrorIs(err, errTest) + } +} + +func TestManagerGetState(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := states.NewMockState(ctrl) + m := &manager{ + state: state, + blkIDToState: map[ids.ID]*blockState{}, + lastAccepted: ids.GenerateTestID(), + } + + // Case: Block is in memory + { + diff := states.NewMockDiff(ctrl) + blkID := ids.GenerateTestID() + m.blkIDToState[blkID] = &blockState{ + onAcceptState: diff, + } + gotState, ok := m.GetState(blkID) + require.True(ok) + require.Equal(diff, gotState) + } + + // Case: Block isn't in memory; block isn't last accepted + { + blkID := ids.GenerateTestID() + gotState, ok := m.GetState(blkID) + require.False(ok) + require.Equal(state, gotState) + } + + // Case: Block isn't in memory; block is last accepted + { + gotState, ok := m.GetState(m.lastAccepted) + require.True(ok) + require.Equal(state, gotState) + } +} + +func TestManagerVerifyTx(t *testing.T) { + type test struct { + name string + txF func(*gomock.Controller) *txs.Tx + managerF func(*gomock.Controller) *manager + expectedErr error + } + + inputID := ids.GenerateTestID() + tests := []test{ + { + name: "not bootstrapped", + txF: func(*gomock.Controller) *txs.Tx { + return &txs.Tx{} + }, + managerF: func(ctrl *gomock.Controller) *manager { + return &manager{ + backend: &executor.Backend{}, + } + }, + expectedErr: ErrChainNotSynced, + }, + { + name: "fails syntactic verification", + txF: func(ctrl *gomock.Controller) *txs.Tx { + unsigned := txs.NewMockUnsignedTx(ctrl) + unsigned.EXPECT().Visit(gomock.Any()).Return(errTestSyntacticVerifyFail) + return &txs.Tx{ + Unsigned: unsigned, + } + }, + managerF: func(*gomock.Controller) *manager { 
+ return &manager{ + backend: &executor.Backend{ + Bootstrapped: true, + }, + } + }, + expectedErr: errTestSyntacticVerifyFail, + }, + { + name: "fails semantic verification", + txF: func(ctrl *gomock.Controller) *txs.Tx { + unsigned := txs.NewMockUnsignedTx(ctrl) + // Syntactic verification passes + unsigned.EXPECT().Visit(gomock.Any()).Return(nil) + // Semantic verification fails + unsigned.EXPECT().Visit(gomock.Any()).Return(errTestSemanticVerifyFail) + return &txs.Tx{ + Unsigned: unsigned, + } + }, + managerF: func(ctrl *gomock.Controller) *manager { + preferred := ids.GenerateTestID() + + // These values don't matter for this test + state := states.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(preferred) + state.EXPECT().GetTimestamp().Return(time.Time{}) + + return &manager{ + backend: &executor.Backend{ + Bootstrapped: true, + }, + state: state, + lastAccepted: preferred, + preferred: preferred, + } + }, + expectedErr: errTestSemanticVerifyFail, + }, + { + name: "fails execution", + txF: func(ctrl *gomock.Controller) *txs.Tx { + unsigned := txs.NewMockUnsignedTx(ctrl) + // Syntactic verification passes + unsigned.EXPECT().Visit(gomock.Any()).Return(nil) + // Semantic verification passes + unsigned.EXPECT().Visit(gomock.Any()).Return(nil) + // Execution fails + unsigned.EXPECT().Visit(gomock.Any()).Return(errTestExecutionFail) + return &txs.Tx{ + Unsigned: unsigned, + } + }, + managerF: func(ctrl *gomock.Controller) *manager { + preferred := ids.GenerateTestID() + + // These values don't matter for this test + state := states.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(preferred) + state.EXPECT().GetTimestamp().Return(time.Time{}) + + return &manager{ + backend: &executor.Backend{ + Bootstrapped: true, + }, + state: state, + lastAccepted: preferred, + preferred: preferred, + } + }, + expectedErr: errTestExecutionFail, + }, + { + name: "non-unique inputs", + txF: func(ctrl *gomock.Controller) *txs.Tx { + unsigned := 
txs.NewMockUnsignedTx(ctrl) + // Syntactic verification passes + unsigned.EXPECT().Visit(gomock.Any()).Return(nil) + // Semantic verification passes + unsigned.EXPECT().Visit(gomock.Any()).Return(nil) + // Execution passes + unsigned.EXPECT().Visit(gomock.Any()).DoAndReturn(func(e *executor.Executor) error { + e.Inputs.Add(inputID) + return nil + }) + return &txs.Tx{ + Unsigned: unsigned, + } + }, + managerF: func(ctrl *gomock.Controller) *manager { + lastAcceptedID := ids.GenerateTestID() + + preferredID := ids.GenerateTestID() + preferred := blocks.NewMockBlock(ctrl) + preferred.EXPECT().Parent().Return(lastAcceptedID).AnyTimes() + + // These values don't matter for this test + diffState := states.NewMockDiff(ctrl) + diffState.EXPECT().GetLastAccepted().Return(preferredID) + diffState.EXPECT().GetTimestamp().Return(time.Time{}) + + return &manager{ + backend: &executor.Backend{ + Bootstrapped: true, + }, + blkIDToState: map[ids.ID]*blockState{ + preferredID: { + statelessBlock: preferred, + onAcceptState: diffState, + importedInputs: set.Set[ids.ID]{inputID: struct{}{}}, + }, + }, + lastAccepted: lastAcceptedID, + preferred: preferredID, + } + }, + expectedErr: ErrConflictingParentTxs, + }, + { + name: "happy path", + txF: func(ctrl *gomock.Controller) *txs.Tx { + unsigned := txs.NewMockUnsignedTx(ctrl) + // Syntactic verification passes + unsigned.EXPECT().Visit(gomock.Any()).Return(nil) + // Semantic verification passes + unsigned.EXPECT().Visit(gomock.Any()).Return(nil) + // Execution passes + unsigned.EXPECT().Visit(gomock.Any()).Return(nil) + return &txs.Tx{ + Unsigned: unsigned, + } + }, + managerF: func(ctrl *gomock.Controller) *manager { + preferred := ids.GenerateTestID() + + // These values don't matter for this test + state := states.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(preferred) + state.EXPECT().GetTimestamp().Return(time.Time{}) + + return &manager{ + backend: &executor.Backend{ + Bootstrapped: true, + }, + state: state, + 
lastAccepted: preferred, + preferred: preferred, + } + }, + expectedErr: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + m := test.managerF(ctrl) + tx := test.txF(ctrl) + err := m.VerifyTx(tx) + require.ErrorIs(err, test.expectedErr) + }) + } +} + +func TestVerifyUniqueInputs(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Case: No inputs + { + m := &manager{} + err := m.VerifyUniqueInputs(ids.GenerateTestID(), set.Set[ids.ID]{}) + require.NoError(err) + } + + // blk0 is blk1's parent + blk0ID, blk1ID := ids.GenerateTestID(), ids.GenerateTestID() + blk0, blk1 := blocks.NewMockBlock(ctrl), blocks.NewMockBlock(ctrl) + blk1.EXPECT().Parent().Return(blk0ID).AnyTimes() + blk0.EXPECT().Parent().Return(ids.Empty).AnyTimes() // blk0's parent is accepted + + inputID := ids.GenerateTestID() + m := &manager{ + blkIDToState: map[ids.ID]*blockState{ + blk0ID: { + statelessBlock: blk0, + importedInputs: set.Set[ids.ID]{inputID: struct{}{}}, + }, + blk1ID: { + statelessBlock: blk1, + importedInputs: set.Set[ids.ID]{ids.GenerateTestID(): struct{}{}}, + }, + }, + } + // [blk1]'s parent, [blk0], has [inputID] as an input + err := m.VerifyUniqueInputs(blk1ID, set.Set[ids.ID]{inputID: struct{}{}}) + require.ErrorIs(err, ErrConflictingParentTxs) + + err = m.VerifyUniqueInputs(blk1ID, set.Set[ids.ID]{ids.GenerateTestID(): struct{}{}}) + require.NoError(err) +} diff --git a/avalanchego/vms/avm/blocks/executor/mock_manager.go b/avalanchego/vms/avm/blocks/executor/mock_manager.go new file mode 100644 index 00000000..a727d06b --- /dev/null +++ b/avalanchego/vms/avm/blocks/executor/mock_manager.go @@ -0,0 +1,170 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. 
+// Source: github.com/ava-labs/avalanchego/vms/avm/blocks/executor (interfaces: Manager) + +// Package executor is a generated GoMock package. +package executor + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" + set "github.com/ava-labs/avalanchego/utils/set" + blocks "github.com/ava-labs/avalanchego/vms/avm/blocks" + states "github.com/ava-labs/avalanchego/vms/avm/states" + txs "github.com/ava-labs/avalanchego/vms/avm/txs" + gomock "github.com/golang/mock/gomock" +) + +// MockManager is a mock of Manager interface. +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager. +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance. +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// GetBlock mocks base method. +func (m *MockManager) GetBlock(arg0 ids.ID) (snowman.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlock", arg0) + ret0, _ := ret[0].(snowman.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlock indicates an expected call of GetBlock. +func (mr *MockManagerMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), arg0) +} + +// GetState mocks base method. 
+func (m *MockManager) GetState(arg0 ids.ID) (states.Chain, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetState", arg0) + ret0, _ := ret[0].(states.Chain) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetState indicates an expected call of GetState. +func (mr *MockManagerMockRecorder) GetState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), arg0) +} + +// GetStatelessBlock mocks base method. +func (m *MockManager) GetStatelessBlock(arg0 ids.ID) (blocks.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStatelessBlock", arg0) + ret0, _ := ret[0].(blocks.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStatelessBlock indicates an expected call of GetStatelessBlock. +func (mr *MockManagerMockRecorder) GetStatelessBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), arg0) +} + +// LastAccepted mocks base method. +func (m *MockManager) LastAccepted() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LastAccepted") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// LastAccepted indicates an expected call of LastAccepted. +func (mr *MockManagerMockRecorder) LastAccepted() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockManager)(nil).LastAccepted)) +} + +// NewBlock mocks base method. +func (m *MockManager) NewBlock(arg0 blocks.Block) snowman.Block { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBlock", arg0) + ret0, _ := ret[0].(snowman.Block) + return ret0 +} + +// NewBlock indicates an expected call of NewBlock. 
+func (mr *MockManagerMockRecorder) NewBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlock", reflect.TypeOf((*MockManager)(nil).NewBlock), arg0) +} + +// Preferred mocks base method. +func (m *MockManager) Preferred() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Preferred") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// Preferred indicates an expected call of Preferred. +func (mr *MockManagerMockRecorder) Preferred() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Preferred", reflect.TypeOf((*MockManager)(nil).Preferred)) +} + +// SetPreference mocks base method. +func (m *MockManager) SetPreference(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetPreference", arg0) +} + +// SetPreference indicates an expected call of SetPreference. +func (mr *MockManagerMockRecorder) SetPreference(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetPreference", reflect.TypeOf((*MockManager)(nil).SetPreference), arg0) +} + +// VerifyTx mocks base method. +func (m *MockManager) VerifyTx(arg0 *txs.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyTx", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyTx indicates an expected call of VerifyTx. +func (mr *MockManagerMockRecorder) VerifyTx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTx", reflect.TypeOf((*MockManager)(nil).VerifyTx), arg0) +} + +// VerifyUniqueInputs mocks base method. +func (m *MockManager) VerifyUniqueInputs(arg0 ids.ID, arg1 set.Set[ids.ID]) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "VerifyUniqueInputs", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// VerifyUniqueInputs indicates an expected call of VerifyUniqueInputs. 
+func (mr *MockManagerMockRecorder) VerifyUniqueInputs(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyUniqueInputs", reflect.TypeOf((*MockManager)(nil).VerifyUniqueInputs), arg0, arg1) +} diff --git a/avalanchego/vms/avm/blocks/mock_block.go b/avalanchego/vms/avm/blocks/mock_block.go new file mode 100644 index 00000000..83537a9a --- /dev/null +++ b/avalanchego/vms/avm/blocks/mock_block.go @@ -0,0 +1,166 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/avm/blocks (interfaces: Block) + +// Package blocks is a generated GoMock package. +package blocks + +import ( + reflect "reflect" + time "time" + + codec "github.com/ava-labs/avalanchego/codec" + ids "github.com/ava-labs/avalanchego/ids" + snow "github.com/ava-labs/avalanchego/snow" + txs "github.com/ava-labs/avalanchego/vms/avm/txs" + gomock "github.com/golang/mock/gomock" +) + +// MockBlock is a mock of Block interface. +type MockBlock struct { + ctrl *gomock.Controller + recorder *MockBlockMockRecorder +} + +// MockBlockMockRecorder is the mock recorder for MockBlock. +type MockBlockMockRecorder struct { + mock *MockBlock +} + +// NewMockBlock creates a new mock instance. +func NewMockBlock(ctrl *gomock.Controller) *MockBlock { + mock := &MockBlock{ctrl: ctrl} + mock.recorder = &MockBlockMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBlock) EXPECT() *MockBlockMockRecorder { + return m.recorder +} + +// Bytes mocks base method. +func (m *MockBlock) Bytes() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bytes") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Bytes indicates an expected call of Bytes. 
+func (mr *MockBlockMockRecorder) Bytes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bytes", reflect.TypeOf((*MockBlock)(nil).Bytes)) +} + +// Height mocks base method. +func (m *MockBlock) Height() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Height") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Height indicates an expected call of Height. +func (mr *MockBlockMockRecorder) Height() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Height", reflect.TypeOf((*MockBlock)(nil).Height)) +} + +// ID mocks base method. +func (m *MockBlock) ID() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ID") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// ID indicates an expected call of ID. +func (mr *MockBlockMockRecorder) ID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockBlock)(nil).ID)) +} + +// InitCtx mocks base method. +func (m *MockBlock) InitCtx(arg0 *snow.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "InitCtx", arg0) +} + +// InitCtx indicates an expected call of InitCtx. +func (mr *MockBlockMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockBlock)(nil).InitCtx), arg0) +} + +// MerkleRoot mocks base method. +func (m *MockBlock) MerkleRoot() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MerkleRoot") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// MerkleRoot indicates an expected call of MerkleRoot. +func (mr *MockBlockMockRecorder) MerkleRoot() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MerkleRoot", reflect.TypeOf((*MockBlock)(nil).MerkleRoot)) +} + +// Parent mocks base method. 
+func (m *MockBlock) Parent() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Parent") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// Parent indicates an expected call of Parent. +func (mr *MockBlockMockRecorder) Parent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parent", reflect.TypeOf((*MockBlock)(nil).Parent)) +} + +// Timestamp mocks base method. +func (m *MockBlock) Timestamp() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Timestamp") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// Timestamp indicates an expected call of Timestamp. +func (mr *MockBlockMockRecorder) Timestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timestamp", reflect.TypeOf((*MockBlock)(nil).Timestamp)) +} + +// Txs mocks base method. +func (m *MockBlock) Txs() []*txs.Tx { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Txs") + ret0, _ := ret[0].([]*txs.Tx) + return ret0 +} + +// Txs indicates an expected call of Txs. +func (mr *MockBlockMockRecorder) Txs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Txs", reflect.TypeOf((*MockBlock)(nil).Txs)) +} + +// initialize mocks base method. +func (m *MockBlock) initialize(arg0 []byte, arg1 codec.Manager) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "initialize", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// initialize indicates an expected call of initialize. 
+func (mr *MockBlockMockRecorder) initialize(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "initialize", reflect.TypeOf((*MockBlock)(nil).initialize), arg0, arg1) +} diff --git a/avalanchego/vms/avm/blocks/parser.go b/avalanchego/vms/avm/blocks/parser.go new file mode 100644 index 00000000..17023b20 --- /dev/null +++ b/avalanchego/vms/avm/blocks/parser.go @@ -0,0 +1,110 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package blocks + +import ( + "fmt" + "reflect" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +// CodecVersion is the current default codec version +const CodecVersion = txs.CodecVersion + +var _ Parser = (*parser)(nil) + +type Parser interface { + txs.Parser + + ParseBlock(bytes []byte) (Block, error) + ParseGenesisBlock(bytes []byte) (Block, error) + + InitializeBlock(block Block) error + InitializeGenesisBlock(block Block) error +} + +type parser struct { + txs.Parser +} + +func NewParser(fxs []fxs.Fx) (Parser, error) { + p, err := txs.NewParser(fxs) + if err != nil { + return nil, err + } + c := p.CodecRegistry() + gc := p.GenesisCodecRegistry() + + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&StandardBlock{}), + gc.RegisterType(&StandardBlock{}), + ) + return &parser{ + Parser: p, + }, errs.Err +} + +func NewCustomParser( + typeToFxIndex map[reflect.Type]int, + clock *mockable.Clock, + log logging.Logger, + fxs []fxs.Fx, +) (Parser, error) { + p, err := txs.NewCustomParser(typeToFxIndex, clock, log, fxs) + if err != nil { + return nil, err + } + c := p.CodecRegistry() + gc := p.GenesisCodecRegistry() + + errs := wrappers.Errs{} + errs.Add( + 
c.RegisterType(&StandardBlock{}), + gc.RegisterType(&StandardBlock{}), + ) + return &parser{ + Parser: p, + }, errs.Err +} + +func (p *parser) ParseBlock(bytes []byte) (Block, error) { + return parse(p.Codec(), bytes) +} + +func (p *parser) ParseGenesisBlock(bytes []byte) (Block, error) { + return parse(p.GenesisCodec(), bytes) +} + +func parse(cm codec.Manager, bytes []byte) (Block, error) { + var blk Block + if _, err := cm.Unmarshal(bytes, &blk); err != nil { + return nil, err + } + return blk, blk.initialize(bytes, cm) +} + +func (p *parser) InitializeBlock(block Block) error { + return initialize(block, p.Codec()) +} + +func (p *parser) InitializeGenesisBlock(block Block) error { + return initialize(block, p.GenesisCodec()) +} + +func initialize(blk Block, cm codec.Manager) error { + // We serialize this block as a pointer so that it can be deserialized into + // a Block + bytes, err := cm.Marshal(CodecVersion, &blk) + if err != nil { + return fmt.Errorf("couldn't marshal block: %w", err) + } + return blk.initialize(bytes, cm) +} diff --git a/avalanchego/vms/avm/blocks/standard_block.go b/avalanchego/vms/avm/blocks/standard_block.go new file mode 100644 index 00000000..2383a24a --- /dev/null +++ b/avalanchego/vms/avm/blocks/standard_block.go @@ -0,0 +1,92 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package blocks + +import ( + "fmt" + "time" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +var _ Block = (*StandardBlock)(nil) + +type StandardBlock struct { + // parent's ID + PrntID ids.ID `serialize:"true" json:"parentID"` + // This block's height. The genesis block is at height 0. 
+ Hght uint64 `serialize:"true" json:"height"` + Time uint64 `serialize:"true" json:"time"` + Root ids.ID `serialize:"true" json:"merkleRoot"` + // List of transactions contained in this block. + Transactions []*txs.Tx `serialize:"true" json:"txs"` + + id ids.ID + bytes []byte +} + +func (b *StandardBlock) initialize(bytes []byte, cm codec.Manager) error { + b.id = hashing.ComputeHash256Array(bytes) + b.bytes = bytes + for _, tx := range b.Transactions { + if err := tx.Initialize(cm); err != nil { + return fmt.Errorf("failed to initialize tx: %w", err) + } + } + return nil +} + +func (b *StandardBlock) InitCtx(ctx *snow.Context) { + for _, tx := range b.Transactions { + tx.Unsigned.InitCtx(ctx) + } +} + +func (b *StandardBlock) ID() ids.ID { + return b.id +} + +func (b *StandardBlock) Parent() ids.ID { + return b.PrntID +} + +func (b *StandardBlock) Height() uint64 { + return b.Hght +} + +func (b *StandardBlock) Timestamp() time.Time { + return time.Unix(int64(b.Time), 0) +} + +func (b *StandardBlock) MerkleRoot() ids.ID { + return b.Root +} + +func (b *StandardBlock) Txs() []*txs.Tx { + return b.Transactions +} + +func (b *StandardBlock) Bytes() []byte { + return b.bytes +} + +func NewStandardBlock( + parentID ids.ID, + height uint64, + timestamp time.Time, + txs []*txs.Tx, + cm codec.Manager, +) (*StandardBlock, error) { + blk := &StandardBlock{ + PrntID: parentID, + Hght: height, + Time: uint64(timestamp.Unix()), + Transactions: txs, + } + return blk, initialize(blk, cm) +} diff --git a/avalanchego/vms/avm/chain_state.go b/avalanchego/vms/avm/chain_state.go new file mode 100644 index 00000000..62e80527 --- /dev/null +++ b/avalanchego/vms/avm/chain_state.go @@ -0,0 +1,48 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +var _ states.Chain = (*chainState)(nil) + +// chainState wraps the disk state and filters non-accepted transactions from +// being returned in GetTx. +type chainState struct { + states.State +} + +func (s *chainState) GetTx(txID ids.ID) (*txs.Tx, error) { + tx, err := s.State.GetTx(txID) + if err != nil { + return nil, err + } + + // Before the linearization, transactions were persisted before they were + // marked as Accepted. However, this function aims to only return accepted + // transactions. + status, err := s.State.GetStatus(txID) + if err == database.ErrNotFound { + // If the status wasn't persisted, then the transaction was written + // after the linearization, and is accepted. + return tx, nil + } + if err != nil { + return nil, err + } + + // If the status was persisted, then the transaction was written before the + // linearization. If it wasn't marked as accepted, then we treat it as if it + // doesn't exist. + if status != choices.Accepted { + return nil, database.ErrNotFound + } + return tx, nil +} diff --git a/avalanchego/vms/avm/client.go b/avalanchego/vms/avm/client.go index c1eeaa80..33d3b21f 100644 --- a/avalanchego/vms/avm/client.go +++ b/avalanchego/vms/avm/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/rpc" @@ -20,16 +20,27 @@ import ( cjson "github.com/ava-labs/avalanchego/utils/json" ) -var _ Client = &client{} +var _ Client = (*client)(nil) // Client for interacting with an AVM (X-Chain) instance type Client interface { WalletClient + // GetBlock returns the block with the given id. + GetBlock(ctx context.Context, blkID ids.ID, options ...rpc.Option) ([]byte, error) + // GetBlockByHeight returns the block at the given [height]. + GetBlockByHeight(ctx context.Context, height uint64, options ...rpc.Option) ([]byte, error) + // GetHeight returns the height of the last accepted block. + GetHeight(ctx context.Context, options ...rpc.Option) (uint64, error) // GetTxStatus returns the status of [txID] + // + // Deprecated: GetTxStatus only returns Accepted or Unknown, GetTx should be + // used instead to determine if the tx was accepted. GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (choices.Status, error) // ConfirmTx attempts to confirm [txID] by repeatedly checking its status. // Note: ConfirmTx will block until either the context is done or the client // returns a decided status. + // TODO: Move this function off of the Client interface into a utility + // function. 
ConfirmTx(ctx context.Context, txID ids.ID, freq time.Duration, options ...rpc.Option) (choices.Status, error) // GetTx returns the byte representation of [txID] GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) @@ -59,10 +70,17 @@ type Client interface { GetAssetDescription(ctx context.Context, assetID string, options ...rpc.Option) (*GetAssetDescriptionReply, error) // GetBalance returns the balance of [assetID] held by [addr]. // If [includePartial], balance includes partial owned (i.e. in a multisig) funds. + // + // Deprecated: GetUTXOs should be used instead. GetBalance(ctx context.Context, addr ids.ShortID, assetID string, includePartial bool, options ...rpc.Option) (*GetBalanceReply, error) // GetAllBalances returns all asset balances for [addr] + // + // Deprecated: GetUTXOs should be used instead. GetAllBalances(ctx context.Context, addr ids.ShortID, includePartial bool, options ...rpc.Option) ([]Balance, error) // CreateAsset creates a new asset and returns its assetID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. CreateAsset( ctx context.Context, user api.UserPass, @@ -76,6 +94,9 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // CreateFixedCapAsset creates a new fixed cap asset and returns its assetID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. CreateFixedCapAsset( ctx context.Context, user api.UserPass, @@ -88,6 +109,9 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // CreateVariableCapAsset creates a new variable cap asset and returns its assetID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. 
CreateVariableCapAsset( ctx context.Context, user api.UserPass, @@ -100,6 +124,9 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // CreateNFTAsset creates a new NFT asset and returns its assetID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. CreateNFTAsset( ctx context.Context, user api.UserPass, @@ -111,14 +138,25 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // CreateAddress creates a new address controlled by [user] + // + // Deprecated: Keys should no longer be stored on the node. CreateAddress(ctx context.Context, user api.UserPass, options ...rpc.Option) (ids.ShortID, error) // ListAddresses returns all addresses on this chain controlled by [user] + // + // Deprecated: Keys should no longer be stored on the node. ListAddresses(ctx context.Context, user api.UserPass, options ...rpc.Option) ([]ids.ShortID, error) // ExportKey returns the private key corresponding to [addr] controlled by [user] - ExportKey(ctx context.Context, user api.UserPass, addr ids.ShortID, options ...rpc.Option) (*crypto.PrivateKeySECP256K1R, error) + // + // Deprecated: Keys should no longer be stored on the node. + ExportKey(ctx context.Context, user api.UserPass, addr ids.ShortID, options ...rpc.Option) (*secp256k1.PrivateKey, error) // ImportKey imports [privateKey] to [user] - ImportKey(ctx context.Context, user api.UserPass, privateKey *crypto.PrivateKeySECP256K1R, options ...rpc.Option) (ids.ShortID, error) + // + // Deprecated: Keys should no longer be stored on the node. + ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (ids.ShortID, error) // Mint [amount] of [assetID] to be owned by [to] + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. 
Mint( ctx context.Context, user api.UserPass, @@ -130,6 +168,9 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // SendNFT sends an NFT and returns the ID of the newly created transaction + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. SendNFT( ctx context.Context, user api.UserPass, @@ -141,6 +182,9 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // MintNFT issues a MintNFT transaction and returns the ID of the newly created transaction + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. MintNFT( ctx context.Context, user api.UserPass, @@ -153,9 +197,15 @@ type Client interface { ) (ids.ID, error) // Import sends an import transaction to import funds from [sourceChain] and // returns the ID of the newly created transaction + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. Import(ctx context.Context, user api.UserPass, to ids.ShortID, sourceChain string, options ...rpc.Option) (ids.ID, error) // Export sends an asset from this chain to the P/C-Chain. // After this tx is accepted, the AVAX must be imported to the P/C-chain with an importTx. // Returns the ID of the newly created atomic transaction + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. Export( ctx context.Context, user api.UserPass, @@ -183,17 +233,49 @@ func NewClient(uri, chain string) Client { chain, ) return &client{ - requester: rpc.NewEndpointRequester(path, "avm"), + requester: rpc.NewEndpointRequester(path), } } +func (c *client) GetBlock(ctx context.Context, blkID ids.ID, options ...rpc.Option) ([]byte, error) { + res := &api.FormattedBlock{} + err := c.requester.SendRequest(ctx, "avm.getBlock", &api.GetBlockArgs{ + BlockID: blkID, + Encoding: formatting.HexNC, + }, res, options...) 
+ if err != nil { + return nil, err + } + + return formatting.Decode(res.Encoding, res.Block) +} + +func (c *client) GetBlockByHeight(ctx context.Context, height uint64, options ...rpc.Option) ([]byte, error) { + res := &api.FormattedBlock{} + err := c.requester.SendRequest(ctx, "avm.getBlockByHeight", &api.GetBlockByHeightArgs{ + Height: height, + Encoding: formatting.HexNC, + }, res, options...) + if err != nil { + return nil, err + } + + return formatting.Decode(res.Encoding, res.Block) +} + +func (c *client) GetHeight(ctx context.Context, options ...rpc.Option) (uint64, error) { + res := &api.GetHeightResponse{} + err := c.requester.SendRequest(ctx, "avm.getHeight", struct{}{}, res, options...) + return uint64(res.Height), err +} + func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Option) (ids.ID, error) { txStr, err := formatting.Encode(formatting.Hex, txBytes) if err != nil { return ids.ID{}, err } res := &api.JSONTxID{} - err = c.requester.SendRequest(ctx, "issueTx", &api.FormattedTx{ + err = c.requester.SendRequest(ctx, "avm.issueTx", &api.FormattedTx{ Tx: txStr, Encoding: formatting.Hex, }, res, options...) @@ -201,12 +283,12 @@ func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Opt } func (c *client) IssueStopVertex(ctx context.Context, options ...rpc.Option) error { - return c.requester.SendRequest(ctx, "issueStopVertex", &struct{}{}, &struct{}{}, options...) + return c.requester.SendRequest(ctx, "avm.issueStopVertex", &struct{}{}, &struct{}{}, options...) } func (c *client) GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (choices.Status, error) { res := &GetTxStatusReply{} - err := c.requester.SendRequest(ctx, "getTxStatus", &api.JSONTxID{ + err := c.requester.SendRequest(ctx, "avm.getTxStatus", &api.JSONTxID{ TxID: txID, }, res, options...) 
return res.Status, err @@ -234,7 +316,7 @@ func (c *client) ConfirmTx(ctx context.Context, txID ids.ID, freq time.Duration, func (c *client) GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) { res := &api.FormattedTx{} - err := c.requester.SendRequest(ctx, "getTx", &api.GetTxArgs{ + err := c.requester.SendRequest(ctx, "avm.getTx", &api.GetTxArgs{ TxID: txID, Encoding: formatting.Hex, }, res, options...) @@ -270,7 +352,7 @@ func (c *client) GetAtomicUTXOs( options ...rpc.Option, ) ([][]byte, ids.ShortID, ids.ID, error) { res := &api.GetUTXOsReply{} - err := c.requester.SendRequest(ctx, "getUTXOs", &api.GetUTXOsArgs{ + err := c.requester.SendRequest(ctx, "avm.getUTXOs", &api.GetUTXOsArgs{ Addresses: ids.ShortIDsToStrings(addrs), SourceChain: sourceChain, Limit: cjson.Uint32(limit), @@ -302,7 +384,7 @@ func (c *client) GetAtomicUTXOs( func (c *client) GetAssetDescription(ctx context.Context, assetID string, options ...rpc.Option) (*GetAssetDescriptionReply, error) { res := &GetAssetDescriptionReply{} - err := c.requester.SendRequest(ctx, "getAssetDescription", &GetAssetDescriptionArgs{ + err := c.requester.SendRequest(ctx, "avm.getAssetDescription", &GetAssetDescriptionArgs{ AssetID: assetID, }, res, options...) 
return res, err @@ -316,7 +398,7 @@ func (c *client) GetBalance( options ...rpc.Option, ) (*GetBalanceReply, error) { res := &GetBalanceReply{} - err := c.requester.SendRequest(ctx, "getBalance", &GetBalanceArgs{ + err := c.requester.SendRequest(ctx, "avm.getBalance", &GetBalanceArgs{ Address: addr.String(), AssetID: assetID, IncludePartial: includePartial, @@ -331,7 +413,7 @@ func (c *client) GetAllBalances( options ...rpc.Option, ) ([]Balance, error) { res := &GetAllBalancesReply{} - err := c.requester.SendRequest(ctx, "getAllBalances", &GetAllBalancesArgs{ + err := c.requester.SendRequest(ctx, "avm.getAllBalances", &GetAllBalancesArgs{ JSONAddress: api.JSONAddress{Address: addr.String()}, IncludePartial: includePartial, }, res, options...) @@ -377,7 +459,7 @@ func (c *client) CreateAsset( Minters: ids.ShortIDsToStrings(clientMinter.Minters), } } - err := c.requester.SendRequest(ctx, "createAsset", &CreateAssetArgs{ + err := c.requester.SendRequest(ctx, "avm.createAsset", &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -411,7 +493,7 @@ func (c *client) CreateFixedCapAsset( Address: clientHolder.Address.String(), } } - err := c.requester.SendRequest(ctx, "createAsset", &CreateAssetArgs{ + err := c.requester.SendRequest(ctx, "avm.createAsset", &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -444,7 +526,7 @@ func (c *client) CreateVariableCapAsset( Minters: ids.ShortIDsToStrings(clientMinter.Minters), } } - err := c.requester.SendRequest(ctx, "createAsset", &CreateAssetArgs{ + err := c.requester.SendRequest(ctx, "avm.createAsset", &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -476,7 +558,7 @@ func (c *client) CreateNFTAsset( Minters: ids.ShortIDsToStrings(clientMinter.Minters), } } 
- err := c.requester.SendRequest(ctx, "createNFTAsset", &CreateNFTAssetArgs{ + err := c.requester.SendRequest(ctx, "avm.createNFTAsset", &CreateNFTAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -491,7 +573,7 @@ func (c *client) CreateNFTAsset( func (c *client) CreateAddress(ctx context.Context, user api.UserPass, options ...rpc.Option) (ids.ShortID, error) { res := &api.JSONAddress{} - err := c.requester.SendRequest(ctx, "createAddress", &user, res, options...) + err := c.requester.SendRequest(ctx, "avm.createAddress", &user, res, options...) if err != nil { return ids.ShortID{}, err } @@ -500,25 +582,25 @@ func (c *client) CreateAddress(ctx context.Context, user api.UserPass, options . func (c *client) ListAddresses(ctx context.Context, user api.UserPass, options ...rpc.Option) ([]ids.ShortID, error) { res := &api.JSONAddresses{} - err := c.requester.SendRequest(ctx, "listAddresses", &user, res, options...) + err := c.requester.SendRequest(ctx, "avm.listAddresses", &user, res, options...) if err != nil { return nil, err } return address.ParseToIDs(res.Addresses) } -func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr ids.ShortID, options ...rpc.Option) (*crypto.PrivateKeySECP256K1R, error) { +func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr ids.ShortID, options ...rpc.Option) (*secp256k1.PrivateKey, error) { res := &ExportKeyReply{} - err := c.requester.SendRequest(ctx, "exportKey", &ExportKeyArgs{ + err := c.requester.SendRequest(ctx, "avm.exportKey", &ExportKeyArgs{ UserPass: user, Address: addr.String(), }, res, options...) 
return res.PrivateKey, err } -func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *crypto.PrivateKeySECP256K1R, options ...rpc.Option) (ids.ShortID, error) { +func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (ids.ShortID, error) { res := &api.JSONAddress{} - err := c.requester.SendRequest(ctx, "importKey", &ImportKeyArgs{ + err := c.requester.SendRequest(ctx, "avm.importKey", &ImportKeyArgs{ UserPass: user, PrivateKey: privateKey, }, res, options...) @@ -540,7 +622,7 @@ func (c *client) Send( options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "send", &SendArgs{ + err := c.requester.SendRequest(ctx, "avm.send", &SendArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -574,7 +656,7 @@ func (c *client) SendMultiple( To: clientOutput.To.String(), } } - err := c.requester.SendRequest(ctx, "sendMultiple", &SendMultipleArgs{ + err := c.requester.SendRequest(ctx, "avm.sendMultiple", &SendMultipleArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -597,7 +679,7 @@ func (c *client) Mint( options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "mint", &MintArgs{ + err := c.requester.SendRequest(ctx, "avm.mint", &MintArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -621,7 +703,7 @@ func (c *client) SendNFT( options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "sendNFT", &SendNFTArgs{ + err := c.requester.SendRequest(ctx, "avm.sendNFT", &SendNFTArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -649,7 +731,7 @@ 
func (c *client) MintNFT( return ids.ID{}, err } res := &api.JSONTxID{} - err = c.requester.SendRequest(ctx, "mintNFT", &MintNFTArgs{ + err = c.requester.SendRequest(ctx, "avm.mintNFT", &MintNFTArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -665,7 +747,7 @@ func (c *client) MintNFT( func (c *client) Import(ctx context.Context, user api.UserPass, to ids.ShortID, sourceChain string, options ...rpc.Option) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "import", &ImportArgs{ + err := c.requester.SendRequest(ctx, "avm.import", &ImportArgs{ UserPass: user, To: to.String(), SourceChain: sourceChain, @@ -685,7 +767,7 @@ func (c *client) Export( options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "export", &ExportArgs{ + err := c.requester.SendRequest(ctx, "avm.export", &ExportArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, diff --git a/avalanchego/vms/avm/client_test.go b/avalanchego/vms/avm/client_test.go index 878a1c0f..e8013b15 100644 --- a/avalanchego/vms/avm/client_test.go +++ b/avalanchego/vms/avm/client_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -21,11 +21,11 @@ type mockClient struct { } func (mc *mockClient) SendRequest( - ctx context.Context, - method string, + _ context.Context, + _ string, inData interface{}, - reply interface{}, - options ...rpc.Option, + _ interface{}, + _ ...rpc.Option, ) error { mc.require.Equal(inData, mc.expectedInData) return nil diff --git a/avalanchego/vms/avm/config/config.go b/avalanchego/vms/avm/config/config.go new file mode 100644 index 00000000..045b4474 --- /dev/null +++ b/avalanchego/vms/avm/config/config.go @@ -0,0 +1,13 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package config + +// Struct collecting all the foundational parameters of the AVM +type Config struct { + // Fee that is burned by every non-asset creating transaction + TxFee uint64 + + // Fee that must be burned by every asset creating transaction + CreateAssetTxFee uint64 +} diff --git a/avalanchego/vms/avm/dag_state.go b/avalanchego/vms/avm/dag_state.go new file mode 100644 index 00000000..927a8559 --- /dev/null +++ b/avalanchego/vms/avm/dag_state.go @@ -0,0 +1,84 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" +) + +var _ states.Chain = (*dagState)(nil) + +type dagState struct { + states.Chain + vm *VM +} + +func (s *dagState) GetUTXOFromID(utxoID *avax.UTXOID) (*avax.UTXO, error) { + inputID := utxoID.InputID() + utxo, err := s.GetUTXO(inputID) + if err == nil { + // If the UTXO exists in the base state, then we can immediately return + // it. 
+ return utxo, nil + } + if err != database.ErrNotFound { + s.vm.ctx.Log.Error("fetching UTXO returned unexpected error", + zap.Stringer("txID", utxoID.TxID), + zap.Uint32("index", utxoID.OutputIndex), + zap.Stringer("utxoID", inputID), + zap.Error(err), + ) + return nil, err + } + + // The UTXO doesn't exist in the base state, so we need to check if the UTXO + // could exist from a currently processing tx. + inputTxID, inputIndex := utxoID.InputSource() + parent := UniqueTx{ + vm: s.vm, + txID: inputTxID, + } + + // If the parent doesn't exist or is otherwise invalid, then this UTXO isn't + // available. + if err := parent.verifyWithoutCacheWrites(); err != nil { + return nil, database.ErrNotFound + } + + // If the parent was accepted, the UTXO should have been in the base state. + // This means the UTXO was already consumed by a conflicting tx. + if status := parent.Status(); status.Decided() { + return nil, database.ErrNotFound + } + + parentUTXOs := parent.UTXOs() + + // At this point we have only verified the TxID portion of [utxoID] as being + // potentially valid. It is still possible that a user specified an invalid + // index. So, we must bounds check the parents UTXOs. + // + // Invariant: len(parentUTXOs) <= MaxInt32. This guarantees that casting + // inputIndex to an int, even on 32-bit architectures, will not overflow. + if uint32(len(parentUTXOs)) <= inputIndex { + return nil, database.ErrNotFound + } + return parentUTXOs[int(inputIndex)], nil +} + +func (s *dagState) GetTx(txID ids.ID) (*txs.Tx, error) { + tx := &UniqueTx{ + vm: s.vm, + txID: txID, + } + if status := tx.Status(); !status.Fetched() { + return nil, database.ErrNotFound + } + return tx.Tx, nil +} diff --git a/avalanchego/vms/avm/factory.go b/avalanchego/vms/avm/factory.go index a1ccf7b1..1e2c6f68 100644 --- a/avalanchego/vms/avm/factory.go +++ b/avalanchego/vms/avm/factory.go @@ -1,29 +1,20 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( - "time" - - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/avm/config" ) -var _ vms.Factory = &Factory{} +var _ vms.Factory = (*Factory)(nil) type Factory struct { - TxFee uint64 - CreateAssetTxFee uint64 - - // Time of the Banff network upgrade - BanffTime time.Time -} - -func (f *Factory) IsBanffActivated(timestamp time.Time) bool { - return !timestamp.Before(f.BanffTime) + config.Config } -func (f *Factory) New(*snow.Context) (interface{}, error) { - return &VM{Factory: *f}, nil +func (f *Factory) New(logging.Logger) (interface{}, error) { + return &VM{Config: f.Config}, nil } diff --git a/avalanchego/vms/avm/fx_test.go b/avalanchego/vms/avm/fx_test.go index 1e78246d..ebb525a2 100644 --- a/avalanchego/vms/avm/fx_test.go +++ b/avalanchego/vms/avm/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -8,6 +8,14 @@ import ( "testing" ) +var ( + errCalledInitialize = errors.New("unexpectedly called Initialize") + errCalledBootstrapping = errors.New("unexpectedly called Bootstrapping") + errCalledBootstrapped = errors.New("unexpectedly called Bootstrapped") + errCalledVerifyTransfer = errors.New("unexpectedly called VerifyTransfer") + errCalledVerifyOperation = errors.New("unexpectedly called VerifyOperation") +) + type FxTest struct { T *testing.T @@ -40,9 +48,9 @@ func (fx *FxTest) Initialize(vm interface{}) error { return nil } if fx.T != nil { - fx.T.Fatalf("Unexpectedly called Initialize") + fx.T.Fatal(errCalledInitialize) } - return errors.New("Unexpectedly called Initialize") + return errCalledInitialize } func (fx *FxTest) Bootstrapping() error { @@ -53,9 +61,9 @@ func (fx *FxTest) Bootstrapping() error { return nil } if fx.T != nil { - fx.T.Fatalf("Unexpectedly called Bootstrapping") + fx.T.Fatal(errCalledBootstrapping) } - return errors.New("Unexpectedly called Bootstrapping") + return errCalledBootstrapping } func (fx *FxTest) Bootstrapped() error { @@ -66,9 +74,9 @@ func (fx *FxTest) Bootstrapped() error { return nil } if fx.T != nil { - fx.T.Fatalf("Unexpectedly called Bootstrapped") + fx.T.Fatal(errCalledBootstrapped) } - return errors.New("Unexpectedly called Bootstrapped") + return errCalledBootstrapped } func (fx *FxTest) VerifyTransfer(tx, in, cred, utxo interface{}) error { @@ -79,9 +87,9 @@ func (fx *FxTest) VerifyTransfer(tx, in, cred, utxo interface{}) error { return nil } if fx.T != nil { - fx.T.Fatalf("Unexpectedly called VerifyTransfer") + fx.T.Fatal(errCalledVerifyTransfer) } - return errors.New("Unexpectedly called VerifyTransfer") + return errCalledVerifyTransfer } func (fx *FxTest) VerifyOperation(tx, op, cred interface{}, utxos []interface{}) error { @@ -92,7 +100,7 @@ func (fx *FxTest) VerifyOperation(tx, op, cred interface{}, utxos []interface{}) return nil } if fx.T != nil { - fx.T.Fatalf("Unexpectedly called 
VerifyOperation") + fx.T.Fatal(errCalledVerifyOperation) } - return errors.New("Unexpectedly called VerifyOperation") + return errCalledVerifyOperation } diff --git a/avalanchego/vms/avm/fxs/fx.go b/avalanchego/vms/avm/fxs/fx.go index 7db0ab8c..e16ad4a8 100644 --- a/avalanchego/vms/avm/fxs/fx.go +++ b/avalanchego/vms/avm/fxs/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package fxs @@ -14,9 +14,9 @@ import ( ) var ( - _ Fx = &secp256k1fx.Fx{} - _ Fx = &nftfx.Fx{} - _ Fx = &propertyfx.Fx{} + _ Fx = (*secp256k1fx.Fx)(nil) + _ Fx = (*nftfx.Fx)(nil) + _ Fx = (*propertyfx.Fx)(nil) ) type ParsedFx struct { diff --git a/avalanchego/vms/avm/genesis.go b/avalanchego/vms/avm/genesis.go index b1c8f899..506d2465 100644 --- a/avalanchego/vms/avm/genesis.go +++ b/avalanchego/vms/avm/genesis.go @@ -1,31 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( - "sort" - "strings" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/avm/txs" ) +var _ utils.Sortable[*GenesisAsset] = (*GenesisAsset)(nil) + type Genesis struct { Txs []*GenesisAsset `serialize:"true"` } -func (g *Genesis) Less(i, j int) bool { return strings.Compare(g.Txs[i].Alias, g.Txs[j].Alias) == -1 } - -func (g *Genesis) Len() int { return len(g.Txs) } - -func (g *Genesis) Swap(i, j int) { g.Txs[j], g.Txs[i] = g.Txs[i], g.Txs[j] } - -func (g *Genesis) Sort() { sort.Sort(g) } - -func (g *Genesis) IsSortedAndUnique() bool { return utils.IsSortedAndUnique(g) } - type GenesisAsset struct { Alias string `serialize:"true"` txs.CreateAssetTx `serialize:"true"` } + +func (g *GenesisAsset) Less(other *GenesisAsset) bool { + return g.Alias < other.Alias +} diff --git a/avalanchego/vms/avm/genesis_test.go b/avalanchego/vms/avm/genesis_test.go new file mode 100644 index 00000000..10c7aac4 --- /dev/null +++ b/avalanchego/vms/avm/genesis_test.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGenesisAssetLess(t *testing.T) { + require := require.New(t) + + var g1, g2 GenesisAsset + require.False(g1.Less(&g2)) + require.False(g2.Less(&g1)) + + g1 = GenesisAsset{ + Alias: "a", + } + g2 = GenesisAsset{ + Alias: "aa", + } + require.True(g1.Less(&g2)) + require.False(g2.Less(&g1)) +} diff --git a/avalanchego/vms/avm/health.go b/avalanchego/vms/avm/health.go index 9c788b7b..725418b1 100644 --- a/avalanchego/vms/avm/health.go +++ b/avalanchego/vms/avm/health.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm +import "context" + // TODO: add health checks -func (vm *VM) HealthCheck() (interface{}, error) { +func (*VM) HealthCheck(context.Context) (interface{}, error) { return nil, nil } diff --git a/avalanchego/vms/avm/index_test.go b/avalanchego/vms/avm/index_test.go index 93adce16..8a2b681c 100644 --- a/avalanchego/vms/avm/index_test.go +++ b/avalanchego/vms/avm/index_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( + "context" "encoding/binary" "encoding/json" "fmt" @@ -22,7 +23,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/txs" @@ -44,7 +46,7 @@ func TestIndexTransaction_Ordered(t *testing.T) { avaxID := genesisTx.ID() vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -75,9 +77,7 @@ func TestIndexTransaction_Ordered(t *testing.T) { utxo := buildPlatformUTXO(utxoID, txAssetID, addr) // save utxo to state - if err := vm.state.PutUTXO(utxo); err != nil { - t.Fatal("Error saving utxo", err) - } + vm.state.AddUTXO(utxo) // issue transaction if _, err := vm.IssueTx(tx.Bytes()); err != nil { @@ -94,7 +94,7 @@ func TestIndexTransaction_Ordered(t *testing.T) { ctx.Lock.Lock() // get pending transactions - txs := vm.PendingTxs() + txs := vm.PendingTxs(context.Background()) if len(txs) != 1 { t.Fatalf("Should have returned %d 
tx(s)", 1) } @@ -105,7 +105,7 @@ func TestIndexTransaction_Ordered(t *testing.T) { var inputUTXOs []*avax.UTXO for _, utxoID := range uniqueParsedTX.InputUTXOs() { - utxo, err := vm.getUTXO(utxoID) + utxo, err := vm.dagState.GetUTXOFromID(utxoID) if err != nil { t.Fatal(err) } @@ -138,7 +138,7 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) { avaxID := genesisTx.ID() vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -167,9 +167,7 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) { utxo := buildPlatformUTXO(utxoID, txAssetID, addr) // save utxo to state - if err := vm.state.PutUTXO(utxo); err != nil { - t.Fatal("Error saving utxo", err) - } + vm.state.AddUTXO(utxo) // issue transaction if _, err := vm.IssueTx(tx.Bytes()); err != nil { @@ -186,7 +184,7 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) { ctx.Lock.Lock() // get pending transactions - txs := vm.PendingTxs() + txs := vm.PendingTxs(context.Background()) if len(txs) != 1 { t.Fatalf("Should have returned %d tx(s)", 1) } @@ -197,7 +195,7 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) { var inputUTXOs []*avax.UTXO for _, utxoID := range uniqueParsedTX.InputUTXOs() { - utxo, err := vm.getUTXO(utxoID) + utxo, err := vm.dagState.GetUTXOFromID(utxoID) if err != nil { t.Fatal(err) } @@ -230,7 +228,7 @@ func TestIndexTransaction_MultipleAddresses(t *testing.T) { avaxID := genesisTx.ID() vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -263,13 +261,11 @@ func TestIndexTransaction_MultipleAddresses(t *testing.T) { utxo := buildPlatformUTXO(utxoID, txAssetID, addr) // save utxo to state - 
if err := vm.state.PutUTXO(utxo); err != nil { - t.Fatal("Error saving utxo", err) - } + vm.state.AddUTXO(utxo) var inputUTXOs []*avax.UTXO //nolint:prealloc for _, utxoID := range tx.Unsigned.InputUTXOs() { - utxo, err := vm.getUTXO(utxoID) + utxo, err := vm.dagState.GetUTXOFromID(utxoID) if err != nil { t.Fatal(err) } @@ -295,7 +291,7 @@ func TestIndexTransaction_UnorderedWrites(t *testing.T) { avaxID := genesisTx.ID() vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -324,9 +320,7 @@ func TestIndexTransaction_UnorderedWrites(t *testing.T) { utxo := buildPlatformUTXO(utxoID, txAssetID, addr) // save utxo to state - if err := vm.state.PutUTXO(utxo); err != nil { - t.Fatal("Error saving utxo", err) - } + vm.state.AddUTXO(utxo) // issue transaction if _, err := vm.IssueTx(tx.Bytes()); err != nil { @@ -343,7 +337,7 @@ func TestIndexTransaction_UnorderedWrites(t *testing.T) { ctx.Lock.Lock() // get pending transactions - txs := vm.PendingTxs() + txs := vm.PendingTxs(context.Background()) if len(txs) != 1 { t.Fatalf("Should have returned %d tx(s)", 1) } @@ -354,7 +348,7 @@ func TestIndexTransaction_UnorderedWrites(t *testing.T) { var inputUTXOs []*avax.UTXO for _, utxoID := range uniqueParsedTX.InputUTXOs() { - utxo, err := vm.getUTXO(utxoID) + utxo, err := vm.dagState.GetUTXOFromID(utxoID) if err != nil { t.Fatal(err) } @@ -382,7 +376,7 @@ func TestIndexer_Read(t *testing.T) { _, vm, _, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -485,14 +479,14 @@ func buildPlatformUTXO(utxoID avax.UTXOID, txAssetID avax.Asset, addr ids.ShortI } } -func signTX(codec codec.Manager, tx *txs.Tx, key *crypto.PrivateKeySECP256K1R) error { - return tx.SignSECP256K1Fx(codec, 
[][]*crypto.PrivateKeySECP256K1R{{key}}) +func signTX(codec codec.Manager, tx *txs.Tx, key *secp256k1.PrivateKey) error { + return tx.SignSECP256K1Fx(codec, [][]*secp256k1.PrivateKey{{key}}) } func buildTX(utxoID avax.UTXOID, txAssetID avax.Asset, address ...ids.ShortID) *txs.Tx { return &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: []*avax.TransferableInput{{ UTXOID: utxoID, @@ -521,7 +515,9 @@ func setupTestVM(t *testing.T, ctx *snow.Context, baseDBManager manager.Manager, avmConfigBytes, err := json.Marshal(config) require.NoError(t, err) appSender := &common.SenderTest{T: t} - if err := vm.Initialize( + + err = vm.Initialize( + context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), genesisBytes, @@ -533,16 +529,18 @@ func setupTestVM(t *testing.T, ctx *snow.Context, baseDBManager manager.Manager, Fx: &secp256k1fx.Fx{}, }}, appSender, - ); err != nil { + ) + if err != nil { t.Fatal(err) } + vm.batchTimeout = 0 - if err := vm.SetState(snow.Bootstrapping); err != nil { + if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { t.Fatal(err) } - if err := vm.SetState(snow.NormalOp); err != nil { + if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { t.Fatal(err) } return vm @@ -587,12 +585,14 @@ func assertIndexedTX(t *testing.T, db database.Database, index uint64, sourceAdd } } -// Sets up test tx IDs in DB in the following structure for the indexer to pick them up: -// [address] prefix DB -// [assetID] prefix DB -// - "idx": 2 -// - 0: txID1 -// - 1: txID1 +// Sets up test tx IDs in DB in the following structure for the indexer to pick +// them up: +// +// [address] prefix DB +// [assetID] prefix DB +// - "idx": 2 +// - 0: txID1 +// - 1: txID1 func setupTestTxsInDB(t *testing.T, db *versiondb.Database, address ids.ShortID, assetID ids.ID, txCount int) []ids.ID { var testTxs []ids.ID for i := 0; i < txCount; i++ { 
diff --git a/avalanchego/vms/avm/metrics.go b/avalanchego/vms/avm/metrics.go deleted file mode 100644 index c3291fa3..00000000 --- a/avalanchego/vms/avm/metrics.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/utils/metric" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -type metrics struct { - numTxRefreshes, numTxRefreshHits, numTxRefreshMisses prometheus.Counter - - apiRequestMetric metric.APIInterceptor -} - -func (m *metrics) Initialize( - namespace string, - registerer prometheus.Registerer, -) error { - m.numTxRefreshes = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_refreshes", - Help: "Number of times unique txs have been refreshed", - }) - m.numTxRefreshHits = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_refresh_hits", - Help: "Number of times unique txs have not been unique, but were cached", - }) - m.numTxRefreshMisses = prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_refresh_misses", - Help: "Number of times unique txs have not been unique and weren't cached", - }) - - apiRequestMetric, err := metric.NewAPIInterceptor(namespace, registerer) - m.apiRequestMetric = apiRequestMetric - errs := wrappers.Errs{} - errs.Add( - err, - registerer.Register(m.numTxRefreshes), - registerer.Register(m.numTxRefreshHits), - registerer.Register(m.numTxRefreshMisses), - ) - return errs.Err -} diff --git a/avalanchego/vms/avm/metrics/metrics.go b/avalanchego/vms/avm/metrics/metrics.go new file mode 100644 index 00000000..233dd65a --- /dev/null +++ b/avalanchego/vms/avm/metrics/metrics.go @@ -0,0 +1,103 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +var _ Metrics = (*metrics)(nil) + +type Metrics interface { + metric.APIInterceptor + + IncTxRefreshes() + IncTxRefreshHits() + IncTxRefreshMisses() + + // MarkBlockAccepted updates all metrics relating to the acceptance of a + // block, including the underlying acceptance of the contained transactions. + MarkBlockAccepted(b blocks.Block) error + // MarkTxAccepted updates all metrics relating to the acceptance of a + // transaction. + // + // Note: This is not intended to be called during the acceptance of a block, + // as MarkBlockAccepted already handles updating transaction related + // metrics. + MarkTxAccepted(tx *txs.Tx) error +} + +type metrics struct { + txMetrics *txMetrics + + numTxRefreshes, numTxRefreshHits, numTxRefreshMisses prometheus.Counter + + metric.APIInterceptor +} + +func (m *metrics) IncTxRefreshes() { + m.numTxRefreshes.Inc() +} + +func (m *metrics) IncTxRefreshHits() { + m.numTxRefreshHits.Inc() +} + +func (m *metrics) IncTxRefreshMisses() { + m.numTxRefreshMisses.Inc() +} + +func (m *metrics) MarkBlockAccepted(b blocks.Block) error { + for _, tx := range b.Txs() { + if err := tx.Unsigned.Visit(m.txMetrics); err != nil { + return err + } + } + return nil +} + +func (m *metrics) MarkTxAccepted(tx *txs.Tx) error { + return tx.Unsigned.Visit(m.txMetrics) +} + +func New( + namespace string, + registerer prometheus.Registerer, +) (Metrics, error) { + txMetrics, err := newTxMetrics(namespace, registerer) + errs := wrappers.Errs{Err: err} + + m := &metrics{txMetrics: txMetrics} + + m.numTxRefreshes = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "tx_refreshes", + Help: "Number of times unique txs have been refreshed", + }) + 
m.numTxRefreshHits = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "tx_refresh_hits", + Help: "Number of times unique txs have not been unique, but were cached", + }) + m.numTxRefreshMisses = prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "tx_refresh_misses", + Help: "Number of times unique txs have not been unique and weren't cached", + }) + + apiRequestMetric, err := metric.NewAPIInterceptor(namespace, registerer) + m.APIInterceptor = apiRequestMetric + errs.Add( + err, + registerer.Register(m.numTxRefreshes), + registerer.Register(m.numTxRefreshHits), + registerer.Register(m.numTxRefreshMisses), + ) + return m, errs.Err +} diff --git a/avalanchego/vms/avm/metrics/mock_metrics.go b/avalanchego/vms/avm/metrics/mock_metrics.go new file mode 100644 index 00000000..d002c1dc --- /dev/null +++ b/avalanchego/vms/avm/metrics/mock_metrics.go @@ -0,0 +1,131 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/avm/metrics (interfaces: Metrics) + +// Package metrics is a generated GoMock package. +package metrics + +import ( + http "net/http" + reflect "reflect" + + blocks "github.com/ava-labs/avalanchego/vms/avm/blocks" + txs "github.com/ava-labs/avalanchego/vms/avm/txs" + gomock "github.com/golang/mock/gomock" + rpc "github.com/gorilla/rpc/v2" +) + +// MockMetrics is a mock of Metrics interface. +type MockMetrics struct { + ctrl *gomock.Controller + recorder *MockMetricsMockRecorder +} + +// MockMetricsMockRecorder is the mock recorder for MockMetrics. +type MockMetricsMockRecorder struct { + mock *MockMetrics +} + +// NewMockMetrics creates a new mock instance. 
+func NewMockMetrics(ctrl *gomock.Controller) *MockMetrics { + mock := &MockMetrics{ctrl: ctrl} + mock.recorder = &MockMetricsMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMetrics) EXPECT() *MockMetricsMockRecorder { + return m.recorder +} + +// AfterRequest mocks base method. +func (m *MockMetrics) AfterRequest(arg0 *rpc.RequestInfo) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AfterRequest", arg0) +} + +// AfterRequest indicates an expected call of AfterRequest. +func (mr *MockMetricsMockRecorder) AfterRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AfterRequest", reflect.TypeOf((*MockMetrics)(nil).AfterRequest), arg0) +} + +// IncTxRefreshHits mocks base method. +func (m *MockMetrics) IncTxRefreshHits() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "IncTxRefreshHits") +} + +// IncTxRefreshHits indicates an expected call of IncTxRefreshHits. +func (mr *MockMetricsMockRecorder) IncTxRefreshHits() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncTxRefreshHits", reflect.TypeOf((*MockMetrics)(nil).IncTxRefreshHits)) +} + +// IncTxRefreshMisses mocks base method. +func (m *MockMetrics) IncTxRefreshMisses() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "IncTxRefreshMisses") +} + +// IncTxRefreshMisses indicates an expected call of IncTxRefreshMisses. +func (mr *MockMetricsMockRecorder) IncTxRefreshMisses() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncTxRefreshMisses", reflect.TypeOf((*MockMetrics)(nil).IncTxRefreshMisses)) +} + +// IncTxRefreshes mocks base method. +func (m *MockMetrics) IncTxRefreshes() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "IncTxRefreshes") +} + +// IncTxRefreshes indicates an expected call of IncTxRefreshes. 
+func (mr *MockMetricsMockRecorder) IncTxRefreshes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncTxRefreshes", reflect.TypeOf((*MockMetrics)(nil).IncTxRefreshes)) +} + +// InterceptRequest mocks base method. +func (m *MockMetrics) InterceptRequest(arg0 *rpc.RequestInfo) *http.Request { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InterceptRequest", arg0) + ret0, _ := ret[0].(*http.Request) + return ret0 +} + +// InterceptRequest indicates an expected call of InterceptRequest. +func (mr *MockMetricsMockRecorder) InterceptRequest(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InterceptRequest", reflect.TypeOf((*MockMetrics)(nil).InterceptRequest), arg0) +} + +// MarkBlockAccepted mocks base method. +func (m *MockMetrics) MarkBlockAccepted(arg0 blocks.Block) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkBlockAccepted", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkBlockAccepted indicates an expected call of MarkBlockAccepted. +func (mr *MockMetricsMockRecorder) MarkBlockAccepted(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkBlockAccepted", reflect.TypeOf((*MockMetrics)(nil).MarkBlockAccepted), arg0) +} + +// MarkTxAccepted mocks base method. +func (m *MockMetrics) MarkTxAccepted(arg0 *txs.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MarkTxAccepted", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// MarkTxAccepted indicates an expected call of MarkTxAccepted. 
+func (mr *MockMetricsMockRecorder) MarkTxAccepted(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkTxAccepted", reflect.TypeOf((*MockMetrics)(nil).MarkTxAccepted), arg0) +} diff --git a/avalanchego/vms/avm/metrics/tx_metrics.go b/avalanchego/vms/avm/metrics/tx_metrics.go new file mode 100644 index 00000000..217eeb18 --- /dev/null +++ b/avalanchego/vms/avm/metrics/tx_metrics.go @@ -0,0 +1,78 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package metrics + +import ( + "fmt" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +var _ txs.Visitor = (*txMetrics)(nil) + +type txMetrics struct { + numBaseTxs, + numCreateAssetTxs, + numOperationTxs, + numImportTxs, + numExportTxs prometheus.Counter +} + +func newTxMetrics( + namespace string, + registerer prometheus.Registerer, +) (*txMetrics, error) { + errs := wrappers.Errs{} + m := &txMetrics{ + numBaseTxs: newTxMetric(namespace, "base", registerer, &errs), + numCreateAssetTxs: newTxMetric(namespace, "create_asset", registerer, &errs), + numOperationTxs: newTxMetric(namespace, "operation", registerer, &errs), + numImportTxs: newTxMetric(namespace, "import", registerer, &errs), + numExportTxs: newTxMetric(namespace, "export", registerer, &errs), + } + return m, errs.Err +} + +func newTxMetric( + namespace string, + txName string, + registerer prometheus.Registerer, + errs *wrappers.Errs, +) prometheus.Counter { + txMetric := prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: fmt.Sprintf("%s_txs_accepted", txName), + Help: fmt.Sprintf("Number of %s transactions accepted", txName), + }) + errs.Add(registerer.Register(txMetric)) + return txMetric +} + +func (m *txMetrics) BaseTx(*txs.BaseTx) error { + m.numBaseTxs.Inc() + return nil +} + +func (m 
*txMetrics) CreateAssetTx(*txs.CreateAssetTx) error { + m.numCreateAssetTxs.Inc() + return nil +} + +func (m *txMetrics) OperationTx(*txs.OperationTx) error { + m.numOperationTxs.Inc() + return nil +} + +func (m *txMetrics) ImportTx(*txs.ImportTx) error { + m.numImportTxs.Inc() + return nil +} + +func (m *txMetrics) ExportTx(*txs.ExportTx) error { + m.numExportTxs.Inc() + return nil +} diff --git a/avalanchego/vms/avm/network/atomic.go b/avalanchego/vms/avm/network/atomic.go new file mode 100644 index 00000000..c6b011da --- /dev/null +++ b/avalanchego/vms/avm/network/atomic.go @@ -0,0 +1,138 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "time" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" +) + +var _ Atomic = (*atomic)(nil) + +type Atomic interface { + common.AppHandler + + Set(common.AppHandler) +} + +type atomic struct { + handler utils.Atomic[common.AppHandler] +} + +func NewAtomic(h common.AppHandler) Atomic { + a := &atomic{} + a.handler.Set(h) + return a +} + +func (a *atomic) CrossChainAppRequest( + ctx context.Context, + chainID ids.ID, + requestID uint32, + deadline time.Time, + msg []byte, +) error { + h := a.handler.Get() + return h.CrossChainAppRequest( + ctx, + chainID, + requestID, + deadline, + msg, + ) +} + +func (a *atomic) CrossChainAppRequestFailed( + ctx context.Context, + chainID ids.ID, + requestID uint32, +) error { + h := a.handler.Get() + return h.CrossChainAppRequestFailed( + ctx, + chainID, + requestID, + ) +} + +func (a *atomic) CrossChainAppResponse( + ctx context.Context, + chainID ids.ID, + requestID uint32, + msg []byte, +) error { + h := a.handler.Get() + return h.CrossChainAppResponse( + ctx, + chainID, + requestID, + msg, + ) +} + +func (a *atomic) AppRequest( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + 
deadline time.Time, + msg []byte, +) error { + h := a.handler.Get() + return h.AppRequest( + ctx, + nodeID, + requestID, + deadline, + msg, + ) +} + +func (a *atomic) AppRequestFailed( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, +) error { + h := a.handler.Get() + return h.AppRequestFailed( + ctx, + nodeID, + requestID, + ) +} + +func (a *atomic) AppResponse( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + msg []byte, +) error { + h := a.handler.Get() + return h.AppResponse( + ctx, + nodeID, + requestID, + msg, + ) +} + +func (a *atomic) AppGossip( + ctx context.Context, + nodeID ids.NodeID, + msg []byte, +) error { + h := a.handler.Get() + return h.AppGossip( + ctx, + nodeID, + msg, + ) +} + +func (a *atomic) Set(h common.AppHandler) { + a.handler.Set(h) +} diff --git a/avalanchego/vms/avm/network/network.go b/avalanchego/vms/avm/network/network.go new file mode 100644 index 00000000..6e83d97e --- /dev/null +++ b/avalanchego/vms/avm/network/network.go @@ -0,0 +1,202 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "sync" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/components/message" +) + +// We allow [recentTxsCacheSize] to be fairly large because we only store hashes +// in the cache, not entire transactions. +const recentTxsCacheSize = 512 + +var _ Network = (*network)(nil) + +type Network interface { + common.AppHandler + + // IssueTx verifies the transaction at the currently preferred state, adds + // it to the mempool, and gossips it to the network. 
+ // + // Invariant: Assumes the context lock is held. + IssueTx(context.Context, *txs.Tx) error +} + +type network struct { + // We embed a noop handler for all unhandled messages + common.AppHandler + + ctx *snow.Context + parser txs.Parser + manager executor.Manager + mempool mempool.Mempool + appSender common.AppSender + + // gossip related attributes + recentTxsLock sync.Mutex + recentTxs *cache.LRU[ids.ID, struct{}] +} + +func New( + ctx *snow.Context, + parser txs.Parser, + manager executor.Manager, + mempool mempool.Mempool, + appSender common.AppSender, +) Network { + return &network{ + AppHandler: common.NewNoOpAppHandler(ctx.Log), + + ctx: ctx, + parser: parser, + manager: manager, + mempool: mempool, + appSender: appSender, + + recentTxs: &cache.LRU[ids.ID, struct{}]{ + Size: recentTxsCacheSize, + }, + } +} + +func (n *network) AppGossip(ctx context.Context, nodeID ids.NodeID, msgBytes []byte) error { + n.ctx.Log.Debug("called AppGossip message handler", + zap.Stringer("nodeID", nodeID), + zap.Int("messageLen", len(msgBytes)), + ) + + msgIntf, err := message.Parse(msgBytes) + if err != nil { + n.ctx.Log.Debug("dropping AppGossip message", + zap.String("reason", "failed to parse message"), + ) + return nil + } + + msg, ok := msgIntf.(*message.Tx) + if !ok { + n.ctx.Log.Debug("dropping unexpected message", + zap.Stringer("nodeID", nodeID), + ) + return nil + } + + tx, err := n.parser.ParseTx(msg.Tx) + if err != nil { + n.ctx.Log.Verbo("received invalid tx", + zap.Stringer("nodeID", nodeID), + zap.Binary("tx", msg.Tx), + zap.Error(err), + ) + return nil + } + + // We need to grab the context lock here to avoid racy behavior with + // transaction verification + mempool modifications. 
+ n.ctx.Lock.Lock() + err = n.issueTx(tx) + n.ctx.Lock.Unlock() + if err == nil { + txID := tx.ID() + n.gossipTx(ctx, txID, msgBytes) + } + return nil +} + +func (n *network) IssueTx(ctx context.Context, tx *txs.Tx) error { + if err := n.issueTx(tx); err != nil { + return err + } + + txBytes := tx.Bytes() + msg := &message.Tx{ + Tx: txBytes, + } + msgBytes, err := message.Build(msg) + if err != nil { + return err + } + + txID := tx.ID() + n.gossipTx(ctx, txID, msgBytes) + return nil +} + +// returns nil if the tx is in the mempool +func (n *network) issueTx(tx *txs.Tx) error { + txID := tx.ID() + if n.mempool.Has(txID) { + // The tx is already in the mempool + return nil + } + + if reason := n.mempool.GetDropReason(txID); reason != nil { + // If the tx is being dropped - just ignore it + // + // TODO: Should we allow re-verification of the transaction even if it + // failed previously? + return reason + } + + // Verify the tx at the currently preferred state + if err := n.manager.VerifyTx(tx); err != nil { + n.ctx.Log.Debug("tx failed verification", + zap.Stringer("txID", txID), + zap.Error(err), + ) + + n.mempool.MarkDropped(txID, err) + return err + } + + if err := n.mempool.Add(tx); err != nil { + n.ctx.Log.Debug("tx failed to be added to the mempool", + zap.Stringer("txID", txID), + zap.Error(err), + ) + + n.mempool.MarkDropped(txID, err) + return err + } + + n.mempool.RequestBuildBlock() + return nil +} + +func (n *network) gossipTx(ctx context.Context, txID ids.ID, msgBytes []byte) { + // This lock is just to ensure there isn't racy behavior between checking if + // the tx was gossiped and marking the tx as gossiped. + n.recentTxsLock.Lock() + _, has := n.recentTxs.Get(txID) + n.recentTxs.Put(txID, struct{}{}) + n.recentTxsLock.Unlock() + + // Don't gossip a transaction if it has been recently gossiped. 
+ if has { + return + } + + n.ctx.Log.Debug("gossiping tx", + zap.Stringer("txID", txID), + ) + + if err := n.appSender.SendAppGossip(ctx, msgBytes); err != nil { + n.ctx.Log.Error("failed to gossip tx", + zap.Stringer("txID", txID), + zap.Error(err), + ) + } +} diff --git a/avalanchego/vms/avm/network/network_test.go b/avalanchego/vms/avm/network/network_test.go new file mode 100644 index 00000000..ec32a6d3 --- /dev/null +++ b/avalanchego/vms/avm/network/network_test.go @@ -0,0 +1,343 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package network + +import ( + "context" + "errors" + "testing" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/message" + "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/propertyfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var errTest = errors.New("test error") + +func TestNetworkAppGossip(t *testing.T) { + testTx := &txs.Tx{ + Unsigned: &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: 1, + BlockchainID: ids.GenerateTestID(), + Ins: []*avax.TransferableInput{}, + Outs: []*avax.TransferableOutput{}, + }, + }, + } + + parser, err := txs.NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + }) + require.NoError(t, err) + require.NoError(t, parser.InitializeTx(testTx)) + + type test struct { + name string + msgBytesFunc func() []byte + mempoolFunc func(*gomock.Controller) mempool.Mempool + appSenderFunc 
func(*gomock.Controller) common.AppSender + } + + tests := []test{ + { + // Shouldn't attempt to issue or gossip the tx + name: "invalid message bytes", + msgBytesFunc: func() []byte { + return []byte{0x00} + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + // Unused in this test + return nil + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Unused in this test + return nil + }, + }, + { + // Shouldn't attempt to issue or gossip the tx + name: "invalid tx bytes", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: []byte{0x00}, + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + // Unused in this test + return mempool.NewMockMempool(ctrl) + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Unused in this test + return common.NewMockSender(ctrl) + }, + }, + { + // Issue returns nil because mempool has tx. We should gossip the tx. + name: "issuance succeeds", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(true) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()) + return appSender + }, + }, + { + // Issue returns error because tx was dropped. We shouldn't gossip the tx. 
+ name: "issuance fails", + msgBytesFunc: func() []byte { + msg := message.Tx{ + Tx: testTx.Bytes(), + } + msgBytes, err := message.Build(&msg) + require.NoError(t, err) + return msgBytes + }, + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + return mempool + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Unused in this test + return common.NewMockSender(ctrl) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + parser, err := txs.NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + &nftfx.Fx{}, + &propertyfx.Fx{}, + }) + require.NoError(err) + + n := New( + &snow.Context{ + Log: logging.NoLog{}, + }, + parser, + executor.NewMockManager(ctrl), // Manager is unused in this test + tt.mempoolFunc(ctrl), + tt.appSenderFunc(ctrl), + ) + err = n.AppGossip(context.Background(), ids.GenerateTestNodeID(), tt.msgBytesFunc()) + require.NoError(err) + }) + } +} + +func TestNetworkIssueTx(t *testing.T) { + type test struct { + name string + mempoolFunc func(*gomock.Controller) mempool.Mempool + managerFunc func(*gomock.Controller) executor.Manager + appSenderFunc func(*gomock.Controller) common.AppSender + expectedErr error + } + + tests := []test{ + { + name: "mempool has transaction", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(true) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + // Unused in this test + return executor.NewMockManager(ctrl) + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Should gossip the tx + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), 
gomock.Any()).Return(nil) + return appSender + }, + expectedErr: nil, + }, + { + name: "transaction marked as dropped in mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(errTest) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + // Unused in this test + return executor.NewMockManager(ctrl) + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "transaction invalid", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + manager := executor.NewMockManager(ctrl) + manager.EXPECT().VerifyTx(gomock.Any()).Return(errTest) + return manager + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "can't add transaction to mempool", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(errTest) + mempool.EXPECT().MarkDropped(gomock.Any(), gomock.Any()) + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + manager := executor.NewMockManager(ctrl) + manager.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return manager + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { 
+ // Shouldn't gossip the tx + return common.NewMockSender(ctrl) + }, + expectedErr: errTest, + }, + { + name: "happy path", + mempoolFunc: func(ctrl *gomock.Controller) mempool.Mempool { + mempool := mempool.NewMockMempool(ctrl) + mempool.EXPECT().Has(gomock.Any()).Return(false) + mempool.EXPECT().GetDropReason(gomock.Any()).Return(nil) + mempool.EXPECT().Add(gomock.Any()).Return(nil) + mempool.EXPECT().RequestBuildBlock() + return mempool + }, + managerFunc: func(ctrl *gomock.Controller) executor.Manager { + manager := executor.NewMockManager(ctrl) + manager.EXPECT().VerifyTx(gomock.Any()).Return(nil) + return manager + }, + appSenderFunc: func(ctrl *gomock.Controller) common.AppSender { + // Should gossip the tx + appSender := common.NewMockSender(ctrl) + appSender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()).Return(nil) + return appSender + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + parser, err := txs.NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + &nftfx.Fx{}, + &propertyfx.Fx{}, + }) + require.NoError(err) + + n := New( + &snow.Context{ + Log: logging.NoLog{}, + }, + parser, + tt.managerFunc(ctrl), + tt.mempoolFunc(ctrl), + tt.appSenderFunc(ctrl), + ) + err = n.IssueTx(context.Background(), &txs.Tx{}) + require.ErrorIs(err, tt.expectedErr) + }) + } +} + +func TestNetworkGossipTx(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + parser, err := txs.NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + }) + require.NoError(err) + + appSender := common.NewMockSender(ctrl) + + nIntf := New( + &snow.Context{ + Log: logging.NoLog{}, + }, + parser, + executor.NewMockManager(ctrl), + mempool.NewMockMempool(ctrl), + appSender, + ) + n, ok := nIntf.(*network) + require.True(ok) + + // Case: Tx was recently gossiped + txID := ids.GenerateTestID() + n.recentTxs.Put(txID, struct{}{}) + 
n.gossipTx(context.Background(), txID, []byte{}) + // Didn't make a call to SendAppGossip + + // Case: Tx was not recently gossiped + msgBytes := []byte{1, 2, 3} + appSender.EXPECT().SendAppGossip(gomock.Any(), msgBytes).Return(nil) + n.gossipTx(context.Background(), ids.GenerateTestID(), msgBytes) + // Did make a call to SendAppGossip +} diff --git a/avalanchego/vms/avm/pubsub_filterer.go b/avalanchego/vms/avm/pubsub_filterer.go index bbe64529..24297034 100644 --- a/avalanchego/vms/avm/pubsub_filterer.go +++ b/avalanchego/vms/avm/pubsub_filterer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -10,18 +10,18 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" ) -var _ pubsub.Filterer = &filterer{} +var _ pubsub.Filterer = (*connector)(nil) -type filterer struct { +type connector struct { tx *txs.Tx } func NewPubSubFilterer(tx *txs.Tx) pubsub.Filterer { - return &filterer{tx: tx} + return &connector{tx: tx} } // Apply the filter on the addresses. -func (f *filterer) Filter(filters []pubsub.Filter) ([]bool, interface{}) { +func (f *connector) Filter(filters []pubsub.Filter) ([]bool, interface{}) { resp := make([]bool, len(filters)) for _, utxo := range f.tx.UTXOs() { addressable, ok := utxo.Out.(avax.Addressable) diff --git a/avalanchego/vms/avm/pubsub_filterer_test.go b/avalanchego/vms/avm/pubsub_filterer_test.go index 5ae243f4..9d5fb5c2 100644 --- a/avalanchego/vms/avm/pubsub_filterer_test.go +++ b/avalanchego/vms/avm/pubsub_filterer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm diff --git a/avalanchego/vms/avm/service.go b/avalanchego/vms/avm/service.go index b946b8c2..499b9aff 100644 --- a/avalanchego/vms/avm/service.go +++ b/avalanchego/vms/avm/service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -12,12 +12,15 @@ import ( "go.uber.org/zap" "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/keystore" @@ -37,33 +40,151 @@ const ( ) var ( - errUnknownAssetID = errors.New("unknown asset ID") - errTxNotCreateAsset = errors.New("transaction doesn't create an asset") - errNoMinters = errors.New("no minters provided") - errNoHoldersOrMinters = errors.New("no minters or initialHolders provided") - errZeroAmount = errors.New("amount must be positive") - errNoOutputs = errors.New("no outputs to send") - errSpendOverflow = errors.New("spent amount overflows uint64") - errInvalidMintAmount = errors.New("amount minted must be positive") - errAddressesCantMintAsset = errors.New("provided addresses don't have the authority to mint the provided asset") - errInvalidUTXO = errors.New("invalid utxo") - errNilTxID = errors.New("nil transaction ID") - errNoAddresses = errors.New("no addresses provided") - errNoKeys = errors.New("from addresses have no keys or funds") - errMissingPrivateKey = 
errors.New("argument 'privateKey' not given") + errTxNotCreateAsset = errors.New("transaction doesn't create an asset") + errNoMinters = errors.New("no minters provided") + errNoHoldersOrMinters = errors.New("no minters or initialHolders provided") + errZeroAmount = errors.New("amount must be positive") + errNoOutputs = errors.New("no outputs to send") + errInvalidMintAmount = errors.New("amount minted must be positive") + errNilTxID = errors.New("nil transaction ID") + errNoAddresses = errors.New("no addresses provided") + errNoKeys = errors.New("from addresses have no keys or funds") + errMissingPrivateKey = errors.New("argument 'privateKey' not given") + errNotLinearized = errors.New("chain is not linearized") ) -// Service defines the base service for the asset vm -type Service struct{ vm *VM } - // FormattedAssetID defines a JSON formatted struct containing an assetID as a string type FormattedAssetID struct { AssetID ids.ID `json:"assetID"` } +// Service defines the base service for the asset vm +type Service struct{ vm *VM } + +// GetBlock returns the requested block. 
+func (s *Service) GetBlock(_ *http.Request, args *api.GetBlockArgs, reply *api.GetBlockResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "avm"), + zap.String("method", "getBlock"), + zap.Stringer("blkID", args.BlockID), + zap.Stringer("encoding", args.Encoding), + ) + + if s.vm.chainManager == nil { + return errNotLinearized + } + block, err := s.vm.chainManager.GetStatelessBlock(args.BlockID) + if err != nil { + return fmt.Errorf("couldn't get block with id %s: %w", args.BlockID, err) + } + reply.Encoding = args.Encoding + + if args.Encoding == formatting.JSON { + block.InitCtx(s.vm.ctx) + for _, tx := range block.Txs() { + err := tx.Unsigned.Visit(&txInit{ + tx: tx, + ctx: s.vm.ctx, + typeToFxIndex: s.vm.typeToFxIndex, + fxs: s.vm.fxs, + }) + if err != nil { + return err + } + } + reply.Block = block + return nil + } + + reply.Block, err = formatting.Encode(args.Encoding, block.Bytes()) + if err != nil { + return fmt.Errorf("couldn't encode block %s as string: %w", args.BlockID, err) + } + + return nil +} + +// GetBlockByHeight returns the block at the given height. 
+func (s *Service) GetBlockByHeight(_ *http.Request, args *api.GetBlockByHeightArgs, reply *api.GetBlockResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "avm"), + zap.String("method", "getBlockByHeight"), + zap.Uint64("height", args.Height), + ) + + if s.vm.chainManager == nil { + return errNotLinearized + } + reply.Encoding = args.Encoding + + blockID, err := s.vm.state.GetBlockID(args.Height) + if err != nil { + return fmt.Errorf("couldn't get block at height %d: %w", args.Height, err) + } + block, err := s.vm.chainManager.GetStatelessBlock(blockID) + if err != nil { + s.vm.ctx.Log.Error("couldn't get accepted block", + zap.Stringer("blkID", blockID), + zap.Error(err), + ) + return fmt.Errorf("couldn't get block with id %s: %w", blockID, err) + } + + if args.Encoding == formatting.JSON { + block.InitCtx(s.vm.ctx) + for _, tx := range block.Txs() { + err := tx.Unsigned.Visit(&txInit{ + tx: tx, + ctx: s.vm.ctx, + typeToFxIndex: s.vm.typeToFxIndex, + fxs: s.vm.fxs, + }) + if err != nil { + return err + } + } + reply.Block = block + return nil + } + + reply.Block, err = formatting.Encode(args.Encoding, block.Bytes()) + if err != nil { + return fmt.Errorf("couldn't encode block %s as string: %w", blockID, err) + } + + return nil +} + +// GetHeight returns the height of the last accepted block. 
+func (s *Service) GetHeight(_ *http.Request, _ *struct{}, reply *api.GetHeightResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "avm"), + zap.String("method", "getHeight"), + ) + + if s.vm.chainManager == nil { + return errNotLinearized + } + + blockID := s.vm.state.GetLastAccepted() + block, err := s.vm.chainManager.GetStatelessBlock(blockID) + if err != nil { + s.vm.ctx.Log.Error("couldn't get last accepted block", + zap.Stringer("blkID", blockID), + zap.Error(err), + ) + return fmt.Errorf("couldn't get block with id %s: %w", blockID, err) + } + + reply.Height = json.Uint64(block.Height()) + return nil +} + // IssueTx attempts to issue a transaction into consensus -func (service *Service) IssueTx(r *http.Request, args *api.FormattedTx, reply *api.JSONTxID) error { - service.vm.ctx.Log.Debug("AVM: IssueTx called", +func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, reply *api.JSONTxID) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "avm"), + zap.String("method", "issueTx"), logging.UserString("tx", args.Tx), ) @@ -71,7 +192,7 @@ func (service *Service) IssueTx(r *http.Request, args *api.FormattedTx, reply *a if err != nil { return fmt.Errorf("problem decoding transaction: %w", err) } - txID, err := service.vm.IssueTx(txBytes) + txID, err := s.vm.IssueTx(txBytes) if err != nil { return err } @@ -80,8 +201,9 @@ func (service *Service) IssueTx(r *http.Request, args *api.FormattedTx, reply *a return nil } -func (service *Service) IssueStopVertex(_ *http.Request, _ *struct{}, _ *struct{}) error { - return service.vm.issueStopVertex() +// TODO: After the chain is linearized, remove this. 
+func (s *Service) IssueStopVertex(_ *http.Request, _, _ *struct{}) error { + return s.vm.issueStopVertex() } // GetTxStatusReply defines the GetTxStatus replies returned from the API @@ -106,10 +228,12 @@ type GetAddressTxsReply struct { } // GetAddressTxs returns list of transactions for a given address -func (service *Service) GetAddressTxs(r *http.Request, args *GetAddressTxsArgs, reply *GetAddressTxsReply) error { +func (s *Service) GetAddressTxs(_ *http.Request, args *GetAddressTxsArgs, reply *GetAddressTxsReply) error { cursor := uint64(args.Cursor) pageSize := uint64(args.PageSize) - service.vm.ctx.Log.Debug("AVM: GetAddressTxs called", + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "getAddressTxs"), logging.UserString("address", args.Address), logging.UserString("assetID", args.AssetID), zap.Uint64("cursor", cursor), @@ -122,18 +246,18 @@ func (service *Service) GetAddressTxs(r *http.Request, args *GetAddressTxsArgs, } // Parse to address - address, err := avax.ParseServiceAddress(service.vm, args.Address) + address, err := avax.ParseServiceAddress(s.vm, args.Address) if err != nil { return fmt.Errorf("couldn't parse argument 'address' to address: %w", err) } // Lookup assetID - assetID, err := service.vm.lookupAssetID(args.AssetID) + assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { return fmt.Errorf("specified `assetID` is invalid: %w", err) } - service.vm.ctx.Log.Debug("fetching transactions", + s.vm.ctx.Log.Debug("fetching transactions", logging.UserString("address", args.Address), logging.UserString("assetID", args.AssetID), zap.Uint64("cursor", cursor), @@ -141,11 +265,11 @@ func (service *Service) GetAddressTxs(r *http.Request, args *GetAddressTxsArgs, ) // Read transactions from the indexer - reply.TxIDs, err = service.vm.addressTxsIndexer.Read(address[:], assetID, cursor, pageSize) + reply.TxIDs, err = s.vm.addressTxsIndexer.Read(address[:], assetID, cursor, pageSize) if err != 
nil { return err } - service.vm.ctx.Log.Debug("fetched transactions", + s.vm.ctx.Log.Debug("fetched transactions", logging.UserString("address", args.Address), logging.UserString("assetID", args.AssetID), zap.Int("numTxs", len(reply.TxIDs)), @@ -159,8 +283,13 @@ func (service *Service) GetAddressTxs(r *http.Request, args *GetAddressTxsArgs, } // GetTxStatus returns the status of the specified transaction -func (service *Service) GetTxStatus(r *http.Request, args *api.JSONTxID, reply *GetTxStatusReply) error { - service.vm.ctx.Log.Debug("AVM: GetTxStatus called", +// +// Deprecated: GetTxStatus only returns Accepted or Unknown, GetTx should be +// used instead to determine if the tx was accepted. +func (s *Service) GetTxStatus(_ *http.Request, args *api.JSONTxID, reply *GetTxStatusReply) error { + s.vm.ctx.Log.Debug("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "getTxStatus"), zap.Stringer("txID", args.TxID), ) @@ -168,18 +297,26 @@ func (service *Service) GetTxStatus(r *http.Request, args *api.JSONTxID, reply * return errNilTxID } - tx := UniqueTx{ - vm: service.vm, - txID: args.TxID, + chainState := &chainState{ + State: s.vm.state, + } + _, err := chainState.GetTx(args.TxID) + switch err { + case nil: + reply.Status = choices.Accepted + case database.ErrNotFound: + reply.Status = choices.Unknown + default: + return err } - - reply.Status = tx.Status() return nil } // GetTx returns the specified transaction -func (service *Service) GetTx(r *http.Request, args *api.GetTxArgs, reply *api.GetTxReply) error { - service.vm.ctx.Log.Debug("AVM: GetTx called", +func (s *Service) GetTx(_ *http.Request, args *api.GetTxArgs, reply *api.GetTxReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "avm"), + zap.String("method", "getTx"), zap.Stringer("txID", args.TxID), ) @@ -187,27 +324,25 @@ func (service *Service) GetTx(r *http.Request, args *api.GetTxArgs, reply *api.G return errNilTxID } - tx := UniqueTx{ - vm: 
service.vm, - txID: args.TxID, + chainState := &chainState{ + State: s.vm.state, } - if status := tx.Status(); !status.Fetched() { - return errUnknownTx + tx, err := chainState.GetTx(args.TxID) + if err != nil { + return err } reply.Encoding = args.Encoding - if args.Encoding == formatting.JSON { reply.Tx = tx return tx.Unsigned.Visit(&txInit{ - tx: tx.Tx, - ctx: service.vm.ctx, - typeToFxIndex: service.vm.typeToFxIndex, - fxs: service.vm.fxs, + tx: tx, + ctx: s.vm.ctx, + typeToFxIndex: s.vm.typeToFxIndex, + fxs: s.vm.fxs, }) } - var err error reply.Tx, err = formatting.Encode(args.Encoding, tx.Bytes()) if err != nil { return fmt.Errorf("couldn't encode tx as string: %w", err) @@ -216,8 +351,10 @@ func (service *Service) GetTx(r *http.Request, args *api.GetTxArgs, reply *api.G } // GetUTXOs gets all utxos for passed in addresses -func (service *Service) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply *api.GetUTXOsReply) error { - service.vm.ctx.Log.Debug("AVM: GetUTXOs called", +func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, reply *api.GetUTXOsReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "avm"), + zap.String("method", "getUTXOs"), logging.UserStrings("addresses", args.Addresses), ) @@ -230,16 +367,16 @@ func (service *Service) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply var sourceChain ids.ID if args.SourceChain == "" { - sourceChain = service.vm.ctx.ChainID + sourceChain = s.vm.ctx.ChainID } else { - chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) + chainID, err := s.vm.ctx.BCLookup.Lookup(args.SourceChain) if err != nil { return fmt.Errorf("problem parsing source chainID %q: %w", args.SourceChain, err) } sourceChain = chainID } - addrSet, err := avax.ParseServiceAddresses(service.vm, args.Addresses) + addrSet, err := avax.ParseServiceAddresses(s.vm, args.Addresses) if err != nil { return err } @@ -247,7 +384,7 @@ func (service *Service) GetUTXOs(r *http.Request, args 
*api.GetUTXOsArgs, reply startAddr := ids.ShortEmpty startUTXO := ids.Empty if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" { - startAddr, err = avax.ParseServiceAddress(service.vm, args.StartIndex.Address) + startAddr, err = avax.ParseServiceAddress(s.vm, args.StartIndex.Address) if err != nil { return fmt.Errorf("couldn't parse start index address %q: %w", args.StartIndex.Address, err) } @@ -266,16 +403,16 @@ func (service *Service) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply if limit <= 0 || int(maxPageSize) < limit { limit = int(maxPageSize) } - if sourceChain == service.vm.ctx.ChainID { + if sourceChain == s.vm.ctx.ChainID { utxos, endAddr, endUTXOID, err = avax.GetPaginatedUTXOs( - service.vm.state, + s.vm.state, addrSet, startAddr, startUTXO, limit, ) } else { - utxos, endAddr, endUTXOID, err = service.vm.GetAtomicUTXOs( + utxos, endAddr, endUTXOID, err = s.vm.GetAtomicUTXOs( sourceChain, addrSet, startAddr, @@ -288,7 +425,7 @@ func (service *Service) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply } reply.UTXOs = make([]string, len(utxos)) - codec := service.vm.parser.Codec() + codec := s.vm.parser.Codec() for i, utxo := range utxos { b, err := codec.Marshal(txs.CodecVersion, utxo) if err != nil { @@ -300,7 +437,7 @@ func (service *Service) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply } } - endAddress, err := service.vm.FormatLocalAddress(endAddr) + endAddress, err := s.vm.FormatLocalAddress(endAddr) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } @@ -326,22 +463,24 @@ type GetAssetDescriptionReply struct { } // GetAssetDescription creates an empty account with the name passed in -func (service *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescriptionArgs, reply *GetAssetDescriptionReply) error { - service.vm.ctx.Log.Debug("AVM: GetAssetDescription called", +func (s *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescriptionArgs, reply 
*GetAssetDescriptionReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "avm"), + zap.String("method", "getAssetDescription"), logging.UserString("assetID", args.AssetID), ) - assetID, err := service.vm.lookupAssetID(args.AssetID) + assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { return err } - tx := &UniqueTx{ - vm: service.vm, - txID: assetID, + chainState := &chainState{ + State: s.vm.state, } - if status := tx.Status(); !status.Fetched() { - return errUnknownAssetID + tx, err := chainState.GetTx(assetID) + if err != nil { + return err } createAssetTx, ok := tx.Unsigned.(*txs.CreateAssetTx) if !ok { @@ -374,31 +513,33 @@ type GetBalanceReply struct { // (1 out of 1 multisig) by the address and with a locktime in the past. // Otherwise, returned balance includes assets held only partially by the // address, and includes balances with locktime in the future. -func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply *GetBalanceReply) error { - service.vm.ctx.Log.Debug("AVM: GetBalance called", +func (s *Service) GetBalance(_ *http.Request, args *GetBalanceArgs, reply *GetBalanceReply) error { + s.vm.ctx.Log.Debug("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "getBalance"), logging.UserString("address", args.Address), logging.UserString("assetID", args.AssetID), ) - addr, err := avax.ParseServiceAddress(service.vm, args.Address) + addr, err := avax.ParseServiceAddress(s.vm, args.Address) if err != nil { return fmt.Errorf("problem parsing address '%s': %w", args.Address, err) } - assetID, err := service.vm.lookupAssetID(args.AssetID) + assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { return err } - addrSet := ids.ShortSet{} + addrSet := set.Set[ids.ShortID]{} addrSet.Add(addr) - utxos, err := avax.GetAllUTXOs(service.vm.state, addrSet) + utxos, err := avax.GetAllUTXOs(s.vm.state, addrSet) if err != nil { return fmt.Errorf("problem retrieving UTXOs: %w", 
err) } - now := service.vm.clock.Unix() + now := s.vm.clock.Unix() reply.UTXOIDs = make([]avax.UTXOID, 0, len(utxos)) for _, utxo := range utxos { if utxo.AssetID() != assetID { @@ -440,30 +581,34 @@ type GetAllBalancesReply struct { } // GetAllBalances returns a map where: -// Key: ID of an asset such that [args.Address] has a non-zero balance of the asset -// Value: The balance of the asset held by the address +// +// Key: ID of an asset such that [args.Address] has a non-zero balance of the asset +// Value: The balance of the asset held by the address +// // If ![args.IncludePartial], returns only unlocked balance/UTXOs with a 1-out-of-1 multisig. // Otherwise, returned balance/UTXOs includes assets held only partially by the // address, and includes balances with locktime in the future. -func (service *Service) GetAllBalances(r *http.Request, args *GetAllBalancesArgs, reply *GetAllBalancesReply) error { - service.vm.ctx.Log.Debug("AVM: GetAllBalances called", +func (s *Service) GetAllBalances(_ *http.Request, args *GetAllBalancesArgs, reply *GetAllBalancesReply) error { + s.vm.ctx.Log.Debug("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "getAllBalances"), logging.UserString("address", args.Address), ) - address, err := avax.ParseServiceAddress(service.vm, args.Address) + address, err := avax.ParseServiceAddress(s.vm, args.Address) if err != nil { return fmt.Errorf("problem parsing address '%s': %w", args.Address, err) } - addrSet := ids.ShortSet{} + addrSet := set.Set[ids.ShortID]{} addrSet.Add(address) - utxos, err := avax.GetAllUTXOs(service.vm.state, addrSet) + utxos, err := avax.GetAllUTXOs(s.vm.state, addrSet) if err != nil { return fmt.Errorf("couldn't get address's UTXOs: %w", err) } - now := service.vm.clock.Unix() - assetIDs := ids.Set{} // IDs of assets the address has a non-zero balance of + now := s.vm.clock.Unix() + assetIDs := set.Set[ids.ID]{} // IDs of assets the address has a non-zero balance of balances := 
make(map[ids.ID]uint64) // key: ID (as bytes). value: balance of that asset for _, utxo := range utxos { // TODO make this not specific to *secp256k1fx.TransferOutput @@ -489,7 +634,7 @@ func (service *Service) GetAllBalances(r *http.Request, args *GetAllBalancesArgs reply.Balances = make([]Balance, assetIDs.Len()) i := 0 for assetID := range assetIDs { - alias := service.vm.PrimaryAliasOrDefault(assetID) + alias := s.vm.PrimaryAliasOrDefault(assetID) reply.Balances[i] = Balance{ AssetID: alias, Balance: json.Uint64(balances[assetID]), @@ -529,8 +674,10 @@ type AssetIDChangeAddr struct { } // CreateAsset returns ID of the newly created asset -func (service *Service) CreateAsset(r *http.Request, args *CreateAssetArgs, reply *AssetIDChangeAddr) error { - service.vm.ctx.Log.Debug("AVM: CreateAsset called", +func (s *Service) CreateAsset(_ *http.Request, args *CreateAssetArgs, reply *AssetIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "createAsset"), logging.UserString("name", args.Name), logging.UserString("symbol", args.Symbol), zap.Int("numInitialHolders", len(args.InitialHolders)), @@ -542,13 +689,13 @@ func (service *Service) CreateAsset(r *http.Request, args *CreateAssetArgs, repl } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.vm, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { return err } // Get the UTXOs/keys for the from addresses - utxos, kc, err := service.vm.LoadUser(args.Username, args.Password, fromAddrs) + utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { return err } @@ -557,16 +704,16 @@ func (service *Service) CreateAsset(r *http.Request, args *CreateAssetArgs, repl if len(kc.Keys) == 0 { return errNoKeys } - changeAddr, err := service.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) + changeAddr, err := 
s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { return err } - amountsSpent, ins, keys, err := service.vm.Spend( + amountsSpent, ins, keys, err := s.vm.Spend( utxos, kc, map[ids.ID]uint64{ - service.vm.feeAssetID: service.vm.CreateAssetTxFee, + s.vm.feeAssetID: s.vm.CreateAssetTxFee, }, ) if err != nil { @@ -574,11 +721,11 @@ func (service *Service) CreateAsset(r *http.Request, args *CreateAssetArgs, repl } outs := []*avax.TransferableOutput{} - if amountSpent := amountsSpent[service.vm.feeAssetID]; amountSpent > service.vm.CreateAssetTxFee { + if amountSpent := amountsSpent[s.vm.feeAssetID]; amountSpent > s.vm.CreateAssetTxFee { outs = append(outs, &avax.TransferableOutput{ - Asset: avax.Asset{ID: service.vm.feeAssetID}, + Asset: avax.Asset{ID: s.vm.feeAssetID}, Out: &secp256k1fx.TransferOutput{ - Amt: amountSpent - service.vm.CreateAssetTxFee, + Amt: amountSpent - s.vm.CreateAssetTxFee, OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, @@ -593,7 +740,7 @@ func (service *Service) CreateAsset(r *http.Request, args *CreateAssetArgs, repl Outs: make([]verify.State, 0, len(args.InitialHolders)+len(args.MinterSets)), } for _, holder := range args.InitialHolders { - addr, err := avax.ParseServiceAddress(service.vm, holder.Address) + addr, err := avax.ParseServiceAddress(s.vm, holder.Address) if err != nil { return err } @@ -612,20 +759,20 @@ func (service *Service) CreateAsset(r *http.Request, args *CreateAssetArgs, repl Addrs: make([]ids.ShortID, 0, len(owner.Minters)), }, } - minterAddrsSet, err := avax.ParseServiceAddresses(service.vm, owner.Minters) + minterAddrsSet, err := avax.ParseServiceAddresses(s.vm, owner.Minters) if err != nil { return err } minter.Addrs = minterAddrsSet.List() - ids.SortShortIDs(minter.Addrs) + utils.Sort(minter.Addrs) initialState.Outs = append(initialState.Outs, minter) } - initialState.Sort(service.vm.parser.Codec()) + initialState.Sort(s.vm.parser.Codec()) tx := txs.Tx{Unsigned: 
&txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: service.vm.ctx.NetworkID, - BlockchainID: service.vm.ctx.ChainID, + NetworkID: s.vm.ctx.NetworkID, + BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, }}, @@ -634,40 +781,44 @@ func (service *Service) CreateAsset(r *http.Request, args *CreateAssetArgs, repl Denomination: args.Denomination, States: []*txs.InitialState{initialState}, }} - if err := tx.SignSECP256K1Fx(service.vm.parser.Codec(), keys); err != nil { + if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { return err } - assetID, err := service.vm.IssueTx(tx.Bytes()) + assetID, err := s.vm.IssueTx(tx.Bytes()) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } reply.AssetID = assetID - reply.ChangeAddr, err = service.vm.FormatLocalAddress(changeAddr) + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) return err } // CreateFixedCapAsset returns ID of the newly created asset -func (service *Service) CreateFixedCapAsset(r *http.Request, args *CreateAssetArgs, reply *AssetIDChangeAddr) error { - service.vm.ctx.Log.Debug("AVM: CreateFixedCapAsset called", +func (s *Service) CreateFixedCapAsset(r *http.Request, args *CreateAssetArgs, reply *AssetIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "createFixedCapAsset"), logging.UserString("name", args.Name), logging.UserString("symbol", args.Symbol), zap.Int("numInitialHolders", len(args.InitialHolders)), ) - return service.CreateAsset(nil, args, reply) + return s.CreateAsset(r, args, reply) } // CreateVariableCapAsset returns ID of the newly created asset -func (service *Service) CreateVariableCapAsset(r *http.Request, args *CreateAssetArgs, reply *AssetIDChangeAddr) error { - service.vm.ctx.Log.Debug("AVM: CreateVariableCapAsset called", +func (s *Service) CreateVariableCapAsset(r *http.Request, args *CreateAssetArgs, reply *AssetIDChangeAddr) error { + 
s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "createVariableCapAsset"), logging.UserString("name", args.Name), logging.UserString("symbol", args.Symbol), zap.Int("numMinters", len(args.MinterSets)), ) - return service.CreateAsset(nil, args, reply) + return s.CreateAsset(r, args, reply) } // CreateNFTAssetArgs are arguments for passing into CreateNFTAsset requests @@ -679,8 +830,10 @@ type CreateNFTAssetArgs struct { } // CreateNFTAsset returns ID of the newly created asset -func (service *Service) CreateNFTAsset(r *http.Request, args *CreateNFTAssetArgs, reply *AssetIDChangeAddr) error { - service.vm.ctx.Log.Debug("AVM: CreateNFTAsset called", +func (s *Service) CreateNFTAsset(_ *http.Request, args *CreateNFTAssetArgs, reply *AssetIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "createNFTAsset"), logging.UserString("name", args.Name), logging.UserString("symbol", args.Symbol), zap.Int("numMinters", len(args.MinterSets)), @@ -691,13 +844,13 @@ func (service *Service) CreateNFTAsset(r *http.Request, args *CreateNFTAssetArgs } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.vm, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { return err } // Get the UTXOs/keys for the from addresses - utxos, kc, err := service.vm.LoadUser(args.Username, args.Password, fromAddrs) + utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { return err } @@ -706,16 +859,16 @@ func (service *Service) CreateNFTAsset(r *http.Request, args *CreateNFTAssetArgs if len(kc.Keys) == 0 { return errNoKeys } - changeAddr, err := service.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) + changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { return err } - amountsSpent, ins, keys, err := 
service.vm.Spend( + amountsSpent, ins, keys, err := s.vm.Spend( utxos, kc, map[ids.ID]uint64{ - service.vm.feeAssetID: service.vm.CreateAssetTxFee, + s.vm.feeAssetID: s.vm.CreateAssetTxFee, }, ) if err != nil { @@ -723,11 +876,11 @@ func (service *Service) CreateNFTAsset(r *http.Request, args *CreateNFTAssetArgs } outs := []*avax.TransferableOutput{} - if amountSpent := amountsSpent[service.vm.feeAssetID]; amountSpent > service.vm.CreateAssetTxFee { + if amountSpent := amountsSpent[s.vm.feeAssetID]; amountSpent > s.vm.CreateAssetTxFee { outs = append(outs, &avax.TransferableOutput{ - Asset: avax.Asset{ID: service.vm.feeAssetID}, + Asset: avax.Asset{ID: s.vm.feeAssetID}, Out: &secp256k1fx.TransferOutput{ - Amt: amountSpent - service.vm.CreateAssetTxFee, + Amt: amountSpent - s.vm.CreateAssetTxFee, OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, @@ -748,20 +901,20 @@ func (service *Service) CreateNFTAsset(r *http.Request, args *CreateNFTAssetArgs Threshold: uint32(owner.Threshold), }, } - minterAddrsSet, err := avax.ParseServiceAddresses(service.vm, owner.Minters) + minterAddrsSet, err := avax.ParseServiceAddresses(s.vm, owner.Minters) if err != nil { return err } minter.Addrs = minterAddrsSet.List() - ids.SortShortIDs(minter.Addrs) + utils.Sort(minter.Addrs) initialState.Outs = append(initialState.Outs, minter) } - initialState.Sort(service.vm.parser.Codec()) + initialState.Sort(s.vm.parser.Codec()) tx := txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: service.vm.ctx.NetworkID, - BlockchainID: service.vm.ctx.ChainID, + NetworkID: s.vm.ctx.NetworkID, + BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, }}, @@ -770,27 +923,29 @@ func (service *Service) CreateNFTAsset(r *http.Request, args *CreateNFTAssetArgs Denomination: 0, // NFTs are non-fungible States: []*txs.InitialState{initialState}, }} - if err := tx.SignSECP256K1Fx(service.vm.parser.Codec(), keys); err != nil { + if err := 
tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { return err } - assetID, err := service.vm.IssueTx(tx.Bytes()) + assetID, err := s.vm.IssueTx(tx.Bytes()) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } reply.AssetID = assetID - reply.ChangeAddr, err = service.vm.FormatLocalAddress(changeAddr) + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) return err } // CreateAddress creates an address for the user [args.Username] -func (service *Service) CreateAddress(r *http.Request, args *api.UserPass, reply *api.JSONAddress) error { - service.vm.ctx.Log.Debug("AVM: CreateAddress called", +func (s *Service) CreateAddress(_ *http.Request, args *api.UserPass, reply *api.JSONAddress) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "createAddress"), logging.UserString("username", args.Username), ) - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -801,7 +956,7 @@ func (service *Service) CreateAddress(r *http.Request, args *api.UserPass, reply return err } - reply.Address, err = service.vm.FormatLocalAddress(sk.PublicKey().Address()) + reply.Address, err = s.vm.FormatLocalAddress(sk.PublicKey().Address()) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } @@ -812,12 +967,14 @@ func (service *Service) CreateAddress(r *http.Request, args *api.UserPass, reply } // ListAddresses returns all of the addresses controlled by user [args.Username] -func (service *Service) ListAddresses(_ *http.Request, args *api.UserPass, response *api.JSONAddresses) error { - service.vm.ctx.Log.Debug("AVM: ListAddresses called", +func (s *Service) ListAddresses(_ *http.Request, args *api.UserPass, response *api.JSONAddresses) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", 
"avm"), + zap.String("method", "listAddresses"), logging.UserString("username", args.Username), ) - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -832,7 +989,7 @@ func (service *Service) ListAddresses(_ *http.Request, args *api.UserPass, respo } for _, address := range addresses { - addr, err := service.vm.FormatLocalAddress(address) + addr, err := s.vm.FormatLocalAddress(address) if err != nil { // Drop any potential error closing the database to report the // original error @@ -853,21 +1010,23 @@ type ExportKeyArgs struct { // ExportKeyReply is the response for ExportKey type ExportKeyReply struct { // The decrypted PrivateKey for the Address provided in the arguments - PrivateKey *crypto.PrivateKeySECP256K1R `json:"privateKey"` + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` } // ExportKey returns a private key from the provided user -func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { - service.vm.ctx.Log.Debug("AVM: ExportKey called", +func (s *Service) ExportKey(_ *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "exportKey"), logging.UserString("username", args.Username), ) - addr, err := avax.ParseServiceAddress(service.vm, args.Address) + addr, err := avax.ParseServiceAddress(s.vm, args.Address) if err != nil { return fmt.Errorf("problem parsing address %q: %w", args.Address, err) } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -885,7 +1044,7 @@ func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *E // 
ImportKeyArgs are arguments for ImportKey type ImportKeyArgs struct { api.UserPass - PrivateKey *crypto.PrivateKeySECP256K1R `json:"privateKey"` + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` } // ImportKeyReply is the response for ImportKey @@ -895,8 +1054,10 @@ type ImportKeyReply struct { } // ImportKey adds a private key to the provided user -func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *api.JSONAddress) error { - service.vm.ctx.Log.Debug("AVM: ImportKey called", +func (s *Service) ImportKey(_ *http.Request, args *ImportKeyArgs, reply *api.JSONAddress) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "importKey"), logging.UserString("username", args.Username), ) @@ -904,7 +1065,7 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *a return errMissingPrivateKey } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -915,7 +1076,7 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *a } newAddress := args.PrivateKey.PublicKey().Address() - reply.Address, err = service.vm.FormatLocalAddress(newAddress) + reply.Address, err = s.vm.FormatLocalAddress(newAddress) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } @@ -960,8 +1121,8 @@ type SendMultipleArgs struct { } // Send returns the ID of the newly created transaction -func (service *Service) Send(r *http.Request, args *SendArgs, reply *api.JSONTxIDChangeAddr) error { - return service.SendMultiple(r, &SendMultipleArgs{ +func (s *Service) Send(r *http.Request, args *SendArgs, reply *api.JSONTxIDChangeAddr) error { + return s.SendMultiple(r, &SendMultipleArgs{ JSONSpendHeader: args.JSONSpendHeader, Outputs: []SendOutput{args.SendOutput}, Memo: args.Memo, @@ -969,8 
+1130,10 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *api.JSONTxI } // SendMultiple sends a transaction with multiple outputs. -func (service *Service) SendMultiple(r *http.Request, args *SendMultipleArgs, reply *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("AVM: SendMultiple called", +func (s *Service) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "sendMultiple"), logging.UserString("username", args.Username), ) @@ -983,13 +1146,13 @@ func (service *Service) SendMultiple(r *http.Request, args *SendMultipleArgs, re } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.vm, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { return err } // Load user's UTXOs/keys - utxos, kc, err := service.vm.LoadUser(args.Username, args.Password, fromAddrs) + utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { return err } @@ -998,7 +1161,7 @@ func (service *Service) SendMultiple(r *http.Request, args *SendMultipleArgs, re if len(kc.Keys) == 0 { return errNoKeys } - changeAddr, err := service.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) + changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { return err } @@ -1016,7 +1179,7 @@ func (service *Service) SendMultiple(r *http.Request, args *SendMultipleArgs, re } assetID, ok := assetIDs[output.AssetID] // Asset ID of next output if !ok { - assetID, err = service.vm.lookupAssetID(output.AssetID) + assetID, err = s.vm.lookupAssetID(output.AssetID) if err != nil { return fmt.Errorf("couldn't find asset %s", output.AssetID) } @@ -1030,7 +1193,7 @@ func (service *Service) SendMultiple(r *http.Request, args *SendMultipleArgs, re amounts[assetID] = newAmount // Parse the to 
address - to, err := avax.ParseServiceAddress(service.vm, output.To) + to, err := avax.ParseServiceAddress(s.vm, output.To) if err != nil { return fmt.Errorf("problem parsing to address %q: %w", output.To, err) } @@ -1054,13 +1217,13 @@ func (service *Service) SendMultiple(r *http.Request, args *SendMultipleArgs, re amountsWithFee[assetID] = amount } - amountWithFee, err := safemath.Add64(amounts[service.vm.feeAssetID], service.vm.TxFee) + amountWithFee, err := safemath.Add64(amounts[s.vm.feeAssetID], s.vm.TxFee) if err != nil { return fmt.Errorf("problem calculating required spend amount: %w", err) } - amountsWithFee[service.vm.feeAssetID] = amountWithFee + amountsWithFee[s.vm.feeAssetID] = amountWithFee - amountsSpent, ins, keys, err := service.vm.Spend( + amountsSpent, ins, keys, err := s.vm.Spend( utxos, kc, amountsWithFee, @@ -1087,26 +1250,26 @@ func (service *Service) SendMultiple(r *http.Request, args *SendMultipleArgs, re }) } } - avax.SortTransferableOutputs(outs, service.vm.parser.Codec()) + avax.SortTransferableOutputs(outs, s.vm.parser.Codec()) tx := txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: service.vm.ctx.NetworkID, - BlockchainID: service.vm.ctx.ChainID, + NetworkID: s.vm.ctx.NetworkID, + BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, Memo: memoBytes, }}} - if err := tx.SignSECP256K1Fx(service.vm.parser.Codec(), keys); err != nil { + if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { return err } - txID, err := service.vm.IssueTx(tx.Bytes()) + txID, err := s.vm.IssueTx(tx.Bytes()) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } reply.TxID = txID - reply.ChangeAddr, err = service.vm.FormatLocalAddress(changeAddr) + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) return err } @@ -1119,8 +1282,10 @@ type MintArgs struct { } // Mint issues a transaction that mints more of the asset -func (service *Service) Mint(r *http.Request, args *MintArgs, reply 
*api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("AVM: Mint called", +func (s *Service) Mint(_ *http.Request, args *MintArgs, reply *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "mint"), logging.UserString("username", args.Username), ) @@ -1128,24 +1293,24 @@ func (service *Service) Mint(r *http.Request, args *MintArgs, reply *api.JSONTxI return errInvalidMintAmount } - assetID, err := service.vm.lookupAssetID(args.AssetID) + assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { return err } - to, err := avax.ParseServiceAddress(service.vm, args.To) + to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { return fmt.Errorf("problem parsing to address %q: %w", args.To, err) } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.vm, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { return err } // Get the UTXOs/keys for the from addresses - feeUTXOs, feeKc, err := service.vm.LoadUser(args.Username, args.Password, fromAddrs) + feeUTXOs, feeKc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { return err } @@ -1154,16 +1319,16 @@ func (service *Service) Mint(r *http.Request, args *MintArgs, reply *api.JSONTxI if len(feeKc.Keys) == 0 { return errNoKeys } - changeAddr, err := service.vm.selectChangeAddr(feeKc.Keys[0].PublicKey().Address(), args.ChangeAddr) + changeAddr, err := s.vm.selectChangeAddr(feeKc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { return err } - amountsSpent, ins, keys, err := service.vm.Spend( + amountsSpent, ins, keys, err := s.vm.Spend( feeUTXOs, feeKc, map[ids.ID]uint64{ - service.vm.feeAssetID: service.vm.TxFee, + s.vm.feeAssetID: s.vm.TxFee, }, ) if err != nil { @@ -1171,11 +1336,11 @@ func (service *Service) Mint(r *http.Request, args *MintArgs, reply *api.JSONTxI } outs := []*avax.TransferableOutput{} - if 
amountSpent := amountsSpent[service.vm.feeAssetID]; amountSpent > service.vm.TxFee { + if amountSpent := amountsSpent[s.vm.feeAssetID]; amountSpent > s.vm.TxFee { outs = append(outs, &avax.TransferableOutput{ - Asset: avax.Asset{ID: service.vm.feeAssetID}, + Asset: avax.Asset{ID: s.vm.feeAssetID}, Out: &secp256k1fx.TransferOutput{ - Amt: amountSpent - service.vm.TxFee, + Amt: amountSpent - s.vm.TxFee, OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, @@ -1186,12 +1351,12 @@ func (service *Service) Mint(r *http.Request, args *MintArgs, reply *api.JSONTxI } // Get all UTXOs/keys for the user - utxos, kc, err := service.vm.LoadUser(args.Username, args.Password, nil) + utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, nil) if err != nil { return err } - ops, opKeys, err := service.vm.Mint( + ops, opKeys, err := s.vm.Mint( utxos, kc, map[ids.ID]uint64{ @@ -1206,24 +1371,24 @@ func (service *Service) Mint(r *http.Request, args *MintArgs, reply *api.JSONTxI tx := txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: service.vm.ctx.NetworkID, - BlockchainID: service.vm.ctx.ChainID, + NetworkID: s.vm.ctx.NetworkID, + BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, }}, Ops: ops, }} - if err := tx.SignSECP256K1Fx(service.vm.parser.Codec(), keys); err != nil { + if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { return err } - txID, err := service.vm.IssueTx(tx.Bytes()) + txID, err := s.vm.IssueTx(tx.Bytes()) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } reply.TxID = txID - reply.ChangeAddr, err = service.vm.FormatLocalAddress(changeAddr) + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) return err } @@ -1236,31 +1401,33 @@ type SendNFTArgs struct { } // SendNFT sends an NFT -func (service *Service) SendNFT(r *http.Request, args *SendNFTArgs, reply *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("AVM: SendNFT called", +func (s *Service) 
SendNFT(_ *http.Request, args *SendNFTArgs, reply *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "sendNFT"), logging.UserString("username", args.Username), ) // Parse the asset ID - assetID, err := service.vm.lookupAssetID(args.AssetID) + assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { return err } // Parse the to address - to, err := avax.ParseServiceAddress(service.vm, args.To) + to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { return fmt.Errorf("problem parsing to address %q: %w", args.To, err) } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.vm, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { return err } // Get the UTXOs/keys for the from addresses - utxos, kc, err := service.vm.LoadUser(args.Username, args.Password, fromAddrs) + utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { return err } @@ -1269,16 +1436,16 @@ func (service *Service) SendNFT(r *http.Request, args *SendNFTArgs, reply *api.J if len(kc.Keys) == 0 { return errNoKeys } - changeAddr, err := service.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) + changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { return err } - amountsSpent, ins, secpKeys, err := service.vm.Spend( + amountsSpent, ins, secpKeys, err := s.vm.Spend( utxos, kc, map[ids.ID]uint64{ - service.vm.feeAssetID: service.vm.TxFee, + s.vm.feeAssetID: s.vm.TxFee, }, ) if err != nil { @@ -1286,11 +1453,11 @@ func (service *Service) SendNFT(r *http.Request, args *SendNFTArgs, reply *api.J } outs := []*avax.TransferableOutput{} - if amountSpent := amountsSpent[service.vm.feeAssetID]; amountSpent > service.vm.TxFee { + if amountSpent := amountsSpent[s.vm.feeAssetID]; amountSpent > s.vm.TxFee { outs = append(outs, 
&avax.TransferableOutput{ - Asset: avax.Asset{ID: service.vm.feeAssetID}, + Asset: avax.Asset{ID: s.vm.feeAssetID}, Out: &secp256k1fx.TransferOutput{ - Amt: amountSpent - service.vm.TxFee, + Amt: amountSpent - s.vm.TxFee, OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, @@ -1300,7 +1467,7 @@ func (service *Service) SendNFT(r *http.Request, args *SendNFTArgs, reply *api.J }) } - ops, nftKeys, err := service.vm.SpendNFT( + ops, nftKeys, err := s.vm.SpendNFT( utxos, kc, assetID, @@ -1313,27 +1480,27 @@ func (service *Service) SendNFT(r *http.Request, args *SendNFTArgs, reply *api.J tx := txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: service.vm.ctx.NetworkID, - BlockchainID: service.vm.ctx.ChainID, + NetworkID: s.vm.ctx.NetworkID, + BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, }}, Ops: ops, }} - if err := tx.SignSECP256K1Fx(service.vm.parser.Codec(), secpKeys); err != nil { + if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), secpKeys); err != nil { return err } - if err := tx.SignNFTFx(service.vm.parser.Codec(), nftKeys); err != nil { + if err := tx.SignNFTFx(s.vm.parser.Codec(), nftKeys); err != nil { return err } - txID, err := service.vm.IssueTx(tx.Bytes()) + txID, err := s.vm.IssueTx(tx.Bytes()) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } reply.TxID = txID - reply.ChangeAddr, err = service.vm.FormatLocalAddress(changeAddr) + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) return err } @@ -1347,17 +1514,19 @@ type MintNFTArgs struct { } // MintNFT issues a MintNFT transaction and returns the ID of the newly created transaction -func (service *Service) MintNFT(r *http.Request, args *MintNFTArgs, reply *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("AVM: MintNFT called", +func (s *Service) MintNFT(_ *http.Request, args *MintNFTArgs, reply *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", 
"avm"), + zap.String("method", "mintNFT"), logging.UserString("username", args.Username), ) - assetID, err := service.vm.lookupAssetID(args.AssetID) + assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { return err } - to, err := avax.ParseServiceAddress(service.vm, args.To) + to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { return fmt.Errorf("problem parsing to address %q: %w", args.To, err) } @@ -1368,13 +1537,13 @@ func (service *Service) MintNFT(r *http.Request, args *MintNFTArgs, reply *api.J } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.vm, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { return err } // Get the UTXOs/keys for the from addresses - feeUTXOs, feeKc, err := service.vm.LoadUser(args.Username, args.Password, fromAddrs) + feeUTXOs, feeKc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { return err } @@ -1383,16 +1552,16 @@ func (service *Service) MintNFT(r *http.Request, args *MintNFTArgs, reply *api.J if len(feeKc.Keys) == 0 { return errNoKeys } - changeAddr, err := service.vm.selectChangeAddr(feeKc.Keys[0].PublicKey().Address(), args.ChangeAddr) + changeAddr, err := s.vm.selectChangeAddr(feeKc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { return err } - amountsSpent, ins, secpKeys, err := service.vm.Spend( + amountsSpent, ins, secpKeys, err := s.vm.Spend( feeUTXOs, feeKc, map[ids.ID]uint64{ - service.vm.feeAssetID: service.vm.TxFee, + s.vm.feeAssetID: s.vm.TxFee, }, ) if err != nil { @@ -1400,11 +1569,11 @@ func (service *Service) MintNFT(r *http.Request, args *MintNFTArgs, reply *api.J } outs := []*avax.TransferableOutput{} - if amountSpent := amountsSpent[service.vm.feeAssetID]; amountSpent > service.vm.TxFee { + if amountSpent := amountsSpent[s.vm.feeAssetID]; amountSpent > s.vm.TxFee { outs = append(outs, &avax.TransferableOutput{ - Asset: avax.Asset{ID: service.vm.feeAssetID}, + Asset: 
avax.Asset{ID: s.vm.feeAssetID}, Out: &secp256k1fx.TransferOutput{ - Amt: amountSpent - service.vm.TxFee, + Amt: amountSpent - s.vm.TxFee, OutputOwners: secp256k1fx.OutputOwners{ Locktime: 0, Threshold: 1, @@ -1415,12 +1584,12 @@ func (service *Service) MintNFT(r *http.Request, args *MintNFTArgs, reply *api.J } // Get all UTXOs/keys - utxos, kc, err := service.vm.LoadUser(args.Username, args.Password, nil) + utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, nil) if err != nil { return err } - ops, nftKeys, err := service.vm.MintNFT( + ops, nftKeys, err := s.vm.MintNFT( utxos, kc, assetID, @@ -1433,27 +1602,27 @@ func (service *Service) MintNFT(r *http.Request, args *MintNFTArgs, reply *api.J tx := txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: service.vm.ctx.NetworkID, - BlockchainID: service.vm.ctx.ChainID, + NetworkID: s.vm.ctx.NetworkID, + BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, }}, Ops: ops, }} - if err := tx.SignSECP256K1Fx(service.vm.parser.Codec(), secpKeys); err != nil { + if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), secpKeys); err != nil { return err } - if err := tx.SignNFTFx(service.vm.parser.Codec(), nftKeys); err != nil { + if err := tx.SignNFTFx(s.vm.parser.Codec(), nftKeys); err != nil { return err } - txID, err := service.vm.IssueTx(tx.Bytes()) + txID, err := s.vm.IssueTx(tx.Bytes()) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } reply.TxID = txID - reply.ChangeAddr, err = service.vm.FormatLocalAddress(changeAddr) + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) return err } @@ -1472,46 +1641,48 @@ type ImportArgs struct { // Import imports an asset to this chain from the P/C-Chain. // The AVAX must have already been exported from the P/C-Chain. 
// Returns the ID of the newly created atomic transaction -func (service *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSONTxID) error { - service.vm.ctx.Log.Debug("AVM: Import called", +func (s *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSONTxID) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "import"), logging.UserString("username", args.Username), ) - chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) + chainID, err := s.vm.ctx.BCLookup.Lookup(args.SourceChain) if err != nil { return fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) } - to, err := avax.ParseServiceAddress(service.vm, args.To) + to, err := avax.ParseServiceAddress(s.vm, args.To) if err != nil { return fmt.Errorf("problem parsing to address %q: %w", args.To, err) } - utxos, kc, err := service.vm.LoadUser(args.Username, args.Password, nil) + utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, nil) if err != nil { return err } - atomicUTXOs, _, _, err := service.vm.GetAtomicUTXOs(chainID, kc.Addrs, ids.ShortEmpty, ids.Empty, int(maxPageSize)) + atomicUTXOs, _, _, err := s.vm.GetAtomicUTXOs(chainID, kc.Addrs, ids.ShortEmpty, ids.Empty, int(maxPageSize)) if err != nil { return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) } - amountsSpent, importInputs, importKeys, err := service.vm.SpendAll(atomicUTXOs, kc) + amountsSpent, importInputs, importKeys, err := s.vm.SpendAll(atomicUTXOs, kc) if err != nil { return err } ins := []*avax.TransferableInput{} - keys := [][]*crypto.PrivateKeySECP256K1R{} + keys := [][]*secp256k1.PrivateKey{} - if amountSpent := amountsSpent[service.vm.feeAssetID]; amountSpent < service.vm.TxFee { + if amountSpent := amountsSpent[s.vm.feeAssetID]; amountSpent < s.vm.TxFee { var localAmountsSpent map[ids.ID]uint64 - localAmountsSpent, ins, keys, err = service.vm.Spend( + localAmountsSpent, ins, keys, err = s.vm.Spend( utxos, 
kc, map[ids.ID]uint64{ - service.vm.feeAssetID: service.vm.TxFee - amountSpent, + s.vm.feeAssetID: s.vm.TxFee - amountSpent, }, ) if err != nil { @@ -1528,7 +1699,7 @@ func (service *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSO // Because we ensured that we had enough inputs for the fee, we can // safely just remove it without concern for underflow. - amountsSpent[service.vm.feeAssetID] -= service.vm.TxFee + amountsSpent[s.vm.feeAssetID] -= s.vm.TxFee keys = append(keys, importKeys...) @@ -1548,23 +1719,23 @@ func (service *Service) Import(_ *http.Request, args *ImportArgs, reply *api.JSO }) } } - avax.SortTransferableOutputs(outs, service.vm.parser.Codec()) + avax.SortTransferableOutputs(outs, s.vm.parser.Codec()) tx := txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: service.vm.ctx.NetworkID, - BlockchainID: service.vm.ctx.ChainID, + NetworkID: s.vm.ctx.NetworkID, + BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, }}, SourceChain: chainID, ImportedIns: importInputs, }} - if err := tx.SignSECP256K1Fx(service.vm.parser.Codec(), keys); err != nil { + if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { return err } - txID, err := service.vm.IssueTx(tx.Bytes()) + txID, err := s.vm.IssueTx(tx.Bytes()) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -1593,21 +1764,23 @@ type ExportArgs struct { // Export sends an asset from this chain to the P/C-Chain. // After this tx is accepted, the AVAX must be imported to the P/C-chain with an importTx. 
// Returns the ID of the newly created atomic transaction -func (service *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("AVM: Export called", +func (s *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "avm"), + zap.String("method", "export"), logging.UserString("username", args.Username), ) // Parse the asset ID - assetID, err := service.vm.lookupAssetID(args.AssetID) + assetID, err := s.vm.lookupAssetID(args.AssetID) if err != nil { return err } // Get the chainID and parse the to address - chainID, to, err := service.vm.ParseAddress(args.To) + chainID, to, err := s.vm.ParseAddress(args.To) if err != nil { - chainID, err = service.vm.ctx.BCLookup.Lookup(args.TargetChain) + chainID, err = s.vm.ctx.BCLookup.Lookup(args.TargetChain) if err != nil { return err } @@ -1622,13 +1795,13 @@ func (service *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSO } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.vm, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.vm, args.From) if err != nil { return err } // Get the UTXOs/keys for the from addresses - utxos, kc, err := service.vm.LoadUser(args.Username, args.Password, fromAddrs) + utxos, kc, err := s.vm.LoadUser(args.Username, args.Password, fromAddrs) if err != nil { return err } @@ -1637,24 +1810,24 @@ func (service *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSO if len(kc.Keys) == 0 { return errNoKeys } - changeAddr, err := service.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) + changeAddr, err := s.vm.selectChangeAddr(kc.Keys[0].PublicKey().Address(), args.ChangeAddr) if err != nil { return err } amounts := map[ids.ID]uint64{} - if assetID == service.vm.feeAssetID { - amountWithFee, err := safemath.Add64(uint64(args.Amount), service.vm.TxFee) + 
if assetID == s.vm.feeAssetID { + amountWithFee, err := safemath.Add64(uint64(args.Amount), s.vm.TxFee) if err != nil { return fmt.Errorf("problem calculating required spend amount: %w", err) } - amounts[service.vm.feeAssetID] = amountWithFee + amounts[s.vm.feeAssetID] = amountWithFee } else { - amounts[service.vm.feeAssetID] = service.vm.TxFee + amounts[s.vm.feeAssetID] = s.vm.TxFee amounts[assetID] = uint64(args.Amount) } - amountsSpent, ins, keys, err := service.vm.Spend(utxos, kc, amounts) + amountsSpent, ins, keys, err := s.vm.Spend(utxos, kc, amounts) if err != nil { return err } @@ -1688,28 +1861,28 @@ func (service *Service) Export(_ *http.Request, args *ExportArgs, reply *api.JSO }) } } - avax.SortTransferableOutputs(outs, service.vm.parser.Codec()) + avax.SortTransferableOutputs(outs, s.vm.parser.Codec()) tx := txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: service.vm.ctx.NetworkID, - BlockchainID: service.vm.ctx.ChainID, + NetworkID: s.vm.ctx.NetworkID, + BlockchainID: s.vm.ctx.ChainID, Outs: outs, Ins: ins, }}, DestinationChain: chainID, ExportedOuts: exportOuts, }} - if err := tx.SignSECP256K1Fx(service.vm.parser.Codec(), keys); err != nil { + if err := tx.SignSECP256K1Fx(s.vm.parser.Codec(), keys); err != nil { return err } - txID, err := service.vm.IssueTx(tx.Bytes()) + txID, err := s.vm.IssueTx(tx.Bytes()) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } reply.TxID = txID - reply.ChangeAddr, err = service.vm.FormatLocalAddress(changeAddr) + reply.ChangeAddr, err = s.vm.FormatLocalAddress(changeAddr) return err } diff --git a/avalanchego/vms/avm/service_test.go b/avalanchego/vms/avm/service_test.go index 98585542..d6b0b81e 100644 --- a/avalanchego/vms/avm/service_test.go +++ b/avalanchego/vms/avm/service_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package avm import ( "bytes" + "context" "fmt" "math/rand" "testing" @@ -12,24 +13,32 @@ import ( stdjson "encoding/json" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" + "github.com/ava-labs/avalanchego/vms/avm/states" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/index" @@ -190,7 +199,7 @@ func verifyTxFeeDeducted(t *testing.T, s *Service, fromAddrs []ids.ShortID, numT func TestServiceIssueTx(t *testing.T) { genesisBytes, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -218,62 +227,52 @@ func TestServiceIssueTx(t *testing.T) { } func TestServiceGetTxStatus(t *testing.T) { - genesisBytes, vm, s, _, _ := setup(t, true) + require := require.New(t) + + 
genesisBytes, vm, s, issuer := setupWithIssuer(t, true) + ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - vm.ctx.Lock.Unlock() + require.NoError(vm.Shutdown(context.Background())) + ctx.Lock.Unlock() }() statusArgs := &api.JSONTxID{} statusReply := &GetTxStatusReply{} - if err := s.GetTxStatus(nil, statusArgs, statusReply); err == nil { - t.Fatal("Expected empty transaction to return an error") - } + err := s.GetTxStatus(nil, statusArgs, statusReply) + require.ErrorIs(err, errNilTxID) - tx := NewTx(t, genesisBytes, vm) - statusArgs.TxID = tx.ID() - statusReply = &GetTxStatusReply{} - if err := s.GetTxStatus(nil, statusArgs, statusReply); err != nil { - t.Fatal(err) - } - if expected := choices.Unknown; expected != statusReply.Status { - t.Fatalf( - "Expected an unsubmitted tx to have status %q, got %q", - expected.String(), statusReply.Status.String(), - ) - } + newTx := newAvaxBaseTxWithOutputs(t, genesisBytes, vm) + txID := newTx.ID() - txStr, err := formatting.Encode(formatting.Hex, tx.Bytes()) - if err != nil { - t.Fatal(err) - } - txArgs := &api.FormattedTx{ - Tx: txStr, - Encoding: formatting.Hex, - } - txReply := &api.JSONTxID{} - if err := s.IssueTx(nil, txArgs, txReply); err != nil { - t.Fatal(err) + statusArgs = &api.JSONTxID{ + TxID: txID, } statusReply = &GetTxStatusReply{} - if err := s.GetTxStatus(nil, statusArgs, statusReply); err != nil { - t.Fatal(err) - } - if expected := choices.Processing; expected != statusReply.Status { - t.Fatalf( - "Expected a submitted tx to have status %q, got %q", - expected.String(), statusReply.Status.String(), - ) - } + require.NoError(s.GetTxStatus(nil, statusArgs, statusReply)) + require.Equal(choices.Unknown, statusReply.Status) + + _, err = vm.IssueTx(newTx.Bytes()) + require.NoError(err) + ctx.Lock.Unlock() + + msg := <-issuer + require.Equal(common.PendingTxs, msg) + ctx.Lock.Lock() + + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 1) + 
require.NoError(txs[0].Accept(context.Background())) + + statusReply = &GetTxStatusReply{} + require.NoError(s.GetTxStatus(nil, statusArgs, statusReply)) + require.Equal(choices.Accepted, statusReply.Status) } // Test the GetBalance method when argument Strict is true func TestServiceGetBalanceStrict(t *testing.T) { _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -303,8 +302,8 @@ func TestServiceGetBalanceStrict(t *testing.T) { }, } // Insert the UTXO - err = vm.state.PutUTXO(twoOfTwoUTXO) - require.NoError(t, err) + vm.state.AddUTXO(twoOfTwoUTXO) + require.NoError(t, vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs := &GetBalanceArgs{ @@ -348,8 +347,8 @@ func TestServiceGetBalanceStrict(t *testing.T) { }, } // Insert the UTXO - err = vm.state.PutUTXO(oneOfTwoUTXO) - require.NoError(t, err) + vm.state.AddUTXO(oneOfTwoUTXO) + require.NoError(t, vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs = &GetBalanceArgs{ @@ -395,8 +394,8 @@ func TestServiceGetBalanceStrict(t *testing.T) { }, } // Insert the UTXO - err = vm.state.PutUTXO(futureUTXO) - require.NoError(t, err) + vm.state.AddUTXO(futureUTXO) + require.NoError(t, vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs = &GetBalanceArgs{ @@ -430,7 +429,7 @@ func TestServiceGetTxs(t *testing.T) { vm.addressTxsIndexer, err = index.NewIndexer(vm.db, vm.ctx.Log, "", prometheus.NewRegistry(), false) require.NoError(t, err) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -470,7 +469,7 @@ func TestServiceGetTxs(t *testing.T) { func TestServiceGetAllBalances(t *testing.T) { _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := 
vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -499,8 +498,8 @@ func TestServiceGetAllBalances(t *testing.T) { }, } // Insert the UTXO - err = vm.state.PutUTXO(twoOfTwoUTXO) - require.NoError(t, err) + vm.state.AddUTXO(twoOfTwoUTXO) + require.NoError(t, vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs := &GetAllBalancesArgs{ @@ -541,8 +540,8 @@ func TestServiceGetAllBalances(t *testing.T) { }, } // Insert the UTXO - err = vm.state.PutUTXO(oneOfTwoUTXO) - require.NoError(t, err) + vm.state.AddUTXO(oneOfTwoUTXO) + require.NoError(t, vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs = &GetAllBalancesArgs{ @@ -586,8 +585,8 @@ func TestServiceGetAllBalances(t *testing.T) { }, } // Insert the UTXO - err = vm.state.PutUTXO(futureUTXO) - require.NoError(t, err) + vm.state.AddUTXO(futureUTXO) + require.NoError(t, vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs = &GetAllBalancesArgs{ @@ -629,8 +628,8 @@ func TestServiceGetAllBalances(t *testing.T) { }, } // Insert the UTXO - err = vm.state.PutUTXO(otherAssetUTXO) - require.NoError(t, err) + vm.state.AddUTXO(otherAssetUTXO) + require.NoError(t, vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs = &GetAllBalancesArgs{ @@ -663,7 +662,7 @@ func TestServiceGetAllBalances(t *testing.T) { func TestServiceGetTx(t *testing.T) { _, vm, s, _, genesisTx := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -687,116 +686,109 @@ func TestServiceGetTx(t *testing.T) { } func TestServiceGetTxJSON_BaseTx(t *testing.T) { + require := require.New(t) + genesisBytes, vm, s, issuer := setupWithIssuer(t, true) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) 
ctx.Lock.Unlock() }() newTx := newAvaxBaseTxWithOutputs(t, genesisBytes, vm) txID, err := vm.IssueTx(newTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if txID != newTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } + require.NoError(err) + require.Equal(newTx.ID(), txID) ctx.Lock.Unlock() msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, msg) ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 1) + require.NoError(txs[0].Accept(context.Background())) reply := api.GetTxReply{} err = s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, }, &reply) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, reply.Encoding, formatting.JSON) + require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(t, err) + require.NoError(err) jsonString := string(jsonTxBytes) // fxID in the VM is really set to 11111111111111111111111111111111LpoYY for [secp256k1fx.TransferOutput] - require.Contains(t, jsonString, "\"memo\":\"0x0102030405060708\"") - require.Contains(t, jsonString, "\"inputs\":[{\"txID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"outputIndex\":2,\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"input\":{\"amount\":50000,\"signatureIndices\":[0]}}]") - require.Contains(t, jsonString, "\"outputs\":[{\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"output\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":49000,\"locktime\":0,\"threshold\":1}}]") + require.Contains(jsonString, "\"memo\":\"0x0102030405060708\"") + require.Contains(jsonString, 
"\"inputs\":[{\"txID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"outputIndex\":2,\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"input\":{\"amount\":50000,\"signatureIndices\":[0]}}]") + require.Contains(jsonString, "\"outputs\":[{\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"output\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":49000,\"locktime\":0,\"threshold\":1}}]") } func TestServiceGetTxJSON_ExportTx(t *testing.T) { + require := require.New(t) + genesisBytes, vm, s, issuer := setupWithIssuer(t, true) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() newTx := newAvaxExportTxWithOutputs(t, genesisBytes, vm) txID, err := vm.IssueTx(newTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if txID != newTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } + require.NoError(err) + require.Equal(newTx.ID(), txID) ctx.Lock.Unlock() msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - ctx.Lock.Lock() + require.Equal(common.PendingTxs, msg) - if txs := vm.PendingTxs(); len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + ctx.Lock.Lock() + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 1) + require.NoError(txs[0].Accept(context.Background())) reply := api.GetTxReply{} err = s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, }, &reply) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, reply.Encoding, formatting.JSON) + require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(t, err) + require.NoError(err) jsonString := string(jsonTxBytes) // fxID in the VM is really set to 11111111111111111111111111111111LpoYY for 
[secp256k1fx.TransferOutput] - require.Contains(t, jsonString, "\"inputs\":[{\"txID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"outputIndex\":2,\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"input\":{\"amount\":50000,\"signatureIndices\":[0]}}]") - require.Contains(t, jsonString, "\"exportedOutputs\":[{\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"output\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":49000,\"locktime\":0,\"threshold\":1}}]}") + require.Contains(jsonString, "\"inputs\":[{\"txID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"outputIndex\":2,\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"input\":{\"amount\":50000,\"signatureIndices\":[0]}}]") + require.Contains(jsonString, "\"exportedOutputs\":[{\"assetID\":\"2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ\",\"fxID\":\"11111111111111111111111111111111LpoYY\",\"output\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":49000,\"locktime\":0,\"threshold\":1}}]}") } func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) err := vm.Initialize( + context.Background(), ctx, - manager.NewMemDB(version.Semantic1_0_0), + baseDBManager, genesisBytes, nil, nil, @@ -817,41 +809,25 @@ func 
TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { }, &common.SenderTest{T: t}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.batchTimeout = 0 - err = vm.SetState(snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) txID, err := vm.IssueTx(createAssetTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - if txID != createAssetTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } + require.NoError(err) + require.Equal(createAssetTx.ID(), txID) ctx.Lock.Unlock() msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, msg) ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 1) + require.NoError(txs[0].Accept(context.Background())) reply := api.GetTxReply{} s := &Service{vm: vm} @@ -859,34 +835,39 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { TxID: txID, Encoding: formatting.JSON, }, &reply) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, reply.Encoding, formatting.JSON) + require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(t, err) + require.NoError(err) jsonString := string(jsonTxBytes) // contains the address in the right format - require.Contains(t, jsonString, "\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":1,\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":2,\"locktime\":0,\"threshold\":1}]}") - require.Contains(t, jsonString, 
"\"initialStates\":[{\"fxIndex\":0,\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1}]},{\"fxIndex\":1,\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":1,\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":2,\"locktime\":0,\"threshold\":1}]},{\"fxIndex\":2,\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1}]}]},\"credentials\":[]}") + require.Contains(jsonString, "\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":1,\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":2,\"locktime\":0,\"threshold\":1}]}") + require.Contains(jsonString, 
"\"initialStates\":[{\"fxIndex\":0,\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1}]},{\"fxIndex\":1,\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":1,\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"groupID\":2,\"locktime\":0,\"threshold\":1}]},{\"fxIndex\":2,\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1},{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"locktime\":0,\"threshold\":1}]}]},\"credentials\":[]}") } func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) err := vm.Initialize( + context.Background(), ctx, - manager.NewMemDB(version.Semantic1_0_0), + baseDBManager, genesisBytes, nil, nil, @@ -907,52 +888,34 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { }, &common.SenderTest{T: t}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.batchTimeout = 0 - err = vm.SetState(snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil 
{ - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) _, err = vm.IssueTx(createAssetTx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) mintNFTTx := buildOperationTxWithOp(buildNFTxMintOp(createAssetTx, key, 2, 1)) - err = mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}) - require.NoError(t, err) + err = mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}) + require.NoError(err) txID, err := vm.IssueTx(mintNFTTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - if txID != mintNFTTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } + require.NoError(err) + require.Equal(mintNFTTx.ID(), txID) ctx.Lock.Unlock() msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, msg) ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 2 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 2) + require.NoError(txs[0].Accept(context.Background())) + require.NoError(txs[1].Accept(context.Background())) reply := api.GetTxReply{} s := &Service{vm: vm} @@ -960,38 +923,43 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { TxID: txID, Encoding: formatting.JSON, }, &reply) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, reply.Encoding, formatting.JSON) + require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(t, err) + require.NoError(err) jsonString := string(jsonTxBytes) // assert memo and payload are in hex - require.Contains(t, jsonString, "\"memo\":\"0x\"") - require.Contains(t, jsonString, "\"payload\":\"0x68656c6c6f\"") + require.Contains(jsonString, "\"memo\":\"0x\"") + require.Contains(jsonString, 
"\"payload\":\"0x68656c6c6f\"") // contains the address in the right format - require.Contains(t, jsonString, "\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") + require.Contains(jsonString, "\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") // contains the fxID - require.Contains(t, jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":2}],\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\"") - require.Contains(t, jsonString, "\"credentials\":[{\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"credential\":{\"signatures\":[\"0x571f18cfdb254263ab6b987f742409bd5403eafe08b4dbc297c5cd8d1c85eb8812e4541e11d3dc692cd14b5f4bccc1835ec001df6d8935ce881caf97017c2a4801\"]}}]") + require.Contains(jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":2}],\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\"") + require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"credential\":{\"signatures\":[\"0x571f18cfdb254263ab6b987f742409bd5403eafe08b4dbc297c5cd8d1c85eb8812e4541e11d3dc692cd14b5f4bccc1835ec001df6d8935ce881caf97017c2a4801\"]}}]") } func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + genesisBytes := 
BuildGenesisTest(t) issuer := make(chan common.Message, 1) err := vm.Initialize( + context.Background(), ctx, - manager.NewMemDB(version.Semantic1_0_0), + baseDBManager, genesisBytes, nil, nil, @@ -1012,55 +980,37 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { }, &common.SenderTest{T: t}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.batchTimeout = 0 - err = vm.SetState(snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) _, err = vm.IssueTx(createAssetTx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) mintOp1 := buildNFTxMintOp(createAssetTx, key, 2, 1) mintOp2 := buildNFTxMintOp(createAssetTx, key, 3, 2) mintNFTTx := buildOperationTxWithOp(mintOp1, mintOp2) - err = mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}, {key}}) - require.NoError(t, err) + err = mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}}) + require.NoError(err) txID, err := vm.IssueTx(mintNFTTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - if txID != mintNFTTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } + require.NoError(err) + require.Equal(mintNFTTx.ID(), txID) ctx.Lock.Unlock() msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, msg) ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 2 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 2) + require.NoError(txs[0].Accept(context.Background())) + require.NoError(txs[1].Accept(context.Background())) reply := api.GetTxReply{} s := &Service{vm: vm} @@ -1068,37 +1018,42 @@ func 
TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { TxID: txID, Encoding: formatting.JSON, }, &reply) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, reply.Encoding, formatting.JSON) + require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(t, err) + require.NoError(err) jsonString := string(jsonTxBytes) // contains the address in the right format - require.Contains(t, jsonString, "\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") + require.Contains(jsonString, "\"outputs\":[{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") // contains the fxID - require.Contains(t, jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":2}],\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\"") - require.Contains(t, jsonString, "\"credentials\":[{\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"credential\":{\"signatures\":[\"0x2400cf2cf978697b3484d5340609b524eb9dfa401e5b2bd5d1bc6cee2a6b1ae41926550f00ae0651c312c35e225cb3f39b506d96c5170fb38a820dcfed11ccd801\"]}},{\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"credential\":{\"signatures\":[\"0x2400cf2cf978697b3484d5340609b524eb9dfa401e5b2bd5d1bc6cee2a6b1ae41926550f00ae0651c312c35e225cb3f39b506d96c5170fb38a820dcfed11ccd801\"]}}]") + require.Contains(jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":2}],\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\"") + require.Contains(jsonString, 
"\"credentials\":[{\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"credential\":{\"signatures\":[\"0x2400cf2cf978697b3484d5340609b524eb9dfa401e5b2bd5d1bc6cee2a6b1ae41926550f00ae0651c312c35e225cb3f39b506d96c5170fb38a820dcfed11ccd801\"]}},{\"fxID\":\"TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES\",\"credential\":{\"signatures\":[\"0x2400cf2cf978697b3484d5340609b524eb9dfa401e5b2bd5d1bc6cee2a6b1ae41926550f00ae0651c312c35e225cb3f39b506d96c5170fb38a820dcfed11ccd801\"]}}]") } func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) err := vm.Initialize( + context.Background(), ctx, - manager.NewMemDB(version.Semantic1_0_0), + baseDBManager, genesisBytes, nil, nil, @@ -1119,52 +1074,34 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { }, &common.SenderTest{T: t}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.batchTimeout = 0 - err = vm.SetState(snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) _, err = vm.IssueTx(createAssetTx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) mintSecpOpTx := buildOperationTxWithOp(buildSecpMintOp(createAssetTx, key, 0)) - err = 
mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}) - require.NoError(t, err) + err = mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}) + require.NoError(err) txID, err := vm.IssueTx(mintSecpOpTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - if txID != mintSecpOpTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } + require.NoError(err) + require.Equal(mintSecpOpTx.ID(), txID) ctx.Lock.Unlock() msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, msg) ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 2 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 2) + require.NoError(txs[0].Accept(context.Background())) + require.NoError(txs[1].Accept(context.Background())) reply := api.GetTxReply{} s := &Service{vm: vm} @@ -1172,40 +1109,45 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { TxID: txID, Encoding: formatting.JSON, }, &reply) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, reply.Encoding, formatting.JSON) + require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(t, err) + require.NoError(err) jsonString := string(jsonTxBytes) // ensure memo is in hex - require.Contains(t, jsonString, "\"memo\":\"0x\"") + require.Contains(jsonString, "\"memo\":\"0x\"") // contains the address in the right format - require.Contains(t, jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") - require.Contains(t, jsonString, "\"transferOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":1,\"locktime\":0,\"threshold\":1}}}]}") + require.Contains(jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") + require.Contains(jsonString, 
"\"transferOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":1,\"locktime\":0,\"threshold\":1}}}]}") // contains the fxID - require.Contains(t, jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":0}],\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\"") - require.Contains(t, jsonString, "\"credentials\":[{\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"credential\":{\"signatures\":[\"0x6d7406d5e1bdb1d80de542e276e2d162b0497d0df1170bec72b14d40e84ecf7929cb571211d60149404413a9342fdfa0a2b5d07b48e6f3eaea1e2f9f183b480500\"]}}]") + require.Contains(jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":0}],\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\"") + require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"credential\":{\"signatures\":[\"0x6d7406d5e1bdb1d80de542e276e2d162b0497d0df1170bec72b14d40e84ecf7929cb571211d60149404413a9342fdfa0a2b5d07b48e6f3eaea1e2f9f183b480500\"]}}]") } func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) err := vm.Initialize( + context.Background(), ctx, - manager.NewMemDB(version.Semantic1_0_0), + baseDBManager, 
genesisBytes, nil, nil, @@ -1226,55 +1168,37 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { }, &common.SenderTest{T: t}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.batchTimeout = 0 - err = vm.SetState(snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) _, err = vm.IssueTx(createAssetTx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) op1 := buildSecpMintOp(createAssetTx, key, 0) op2 := buildSecpMintOp(createAssetTx, key, 1) mintSecpOpTx := buildOperationTxWithOp(op1, op2) - err = mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}, {key}}) - require.NoError(t, err) + err = mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}}) + require.NoError(err) txID, err := vm.IssueTx(mintSecpOpTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - if txID != mintSecpOpTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } + require.NoError(err) + require.Equal(mintSecpOpTx.ID(), txID) ctx.Lock.Unlock() msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, msg) ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 2 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 2) + require.NoError(txs[0].Accept(context.Background())) + require.NoError(txs[1].Accept(context.Background())) reply := api.GetTxReply{} s := &Service{vm: vm} @@ -1282,38 +1206,43 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { TxID: txID, Encoding: formatting.JSON, }, &reply) - require.NoError(t, err) + require.NoError(err) - 
require.Equal(t, reply.Encoding, formatting.JSON) + require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(t, err) + require.NoError(err) jsonString := string(jsonTxBytes) // contains the address in the right format - require.Contains(t, jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") - require.Contains(t, jsonString, "\"transferOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":1,\"locktime\":0,\"threshold\":1}}}") + require.Contains(jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") + require.Contains(jsonString, "\"transferOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"],\"amount\":1,\"locktime\":0,\"threshold\":1}}}") // contains the fxID - require.Contains(t, jsonString, "\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":1}],\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\"") - require.Contains(t, jsonString, "\"credentials\":[{\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"credential\":{\"signatures\":[\"0xcc650f48341601c348d8634e8d207e07ea7b4ee4fbdeed3055fa1f1e4f4e27556d25056447a3bd5d949e5f1cbb0155bb20216ac3a4055356e3c82dca74323e7401\"]}},{\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"credential\":{\"signatures\":[\"0xcc650f48341601c348d8634e8d207e07ea7b4ee4fbdeed3055fa1f1e4f4e27556d25056447a3bd5d949e5f1cbb0155bb20216ac3a4055356e3c82dca74323e7401\"]}}]") + require.Contains(jsonString, "\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":1}],\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\"") + require.Contains(jsonString, 
"\"credentials\":[{\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"credential\":{\"signatures\":[\"0xcc650f48341601c348d8634e8d207e07ea7b4ee4fbdeed3055fa1f1e4f4e27556d25056447a3bd5d949e5f1cbb0155bb20216ac3a4055356e3c82dca74323e7401\"]}},{\"fxID\":\"LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq\",\"credential\":{\"signatures\":[\"0xcc650f48341601c348d8634e8d207e07ea7b4ee4fbdeed3055fa1f1e4f4e27556d25056447a3bd5d949e5f1cbb0155bb20216ac3a4055356e3c82dca74323e7401\"]}}]") } func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) err := vm.Initialize( + context.Background(), ctx, - manager.NewMemDB(version.Semantic1_0_0), + baseDBManager, genesisBytes, nil, nil, @@ -1334,51 +1263,34 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { }, &common.SenderTest{T: t}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.batchTimeout = 0 - err = vm.SetState(snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) _, err = vm.IssueTx(createAssetTx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + mintPropertyFxOpTx := buildOperationTxWithOp(buildPropertyFxMintOp(createAssetTx, key, 4)) - err = 
mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}) - require.NoError(t, err) + err = mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}) + require.NoError(err) txID, err := vm.IssueTx(mintPropertyFxOpTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - if txID != mintPropertyFxOpTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } + require.NoError(err) + require.Equal(mintPropertyFxOpTx.ID(), txID) ctx.Lock.Unlock() msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, msg) ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 2 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 2) + require.NoError(txs[0].Accept(context.Background())) + require.NoError(txs[1].Accept(context.Background())) reply := api.GetTxReply{} s := &Service{vm: vm} @@ -1386,39 +1298,44 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { TxID: txID, Encoding: formatting.JSON, }, &reply) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, reply.Encoding, formatting.JSON) + require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(t, err) + require.NoError(err) jsonString := string(jsonTxBytes) // ensure memo is in hex - require.Contains(t, jsonString, "\"memo\":\"0x\"") + require.Contains(jsonString, "\"memo\":\"0x\"") // contains the address in the right format - require.Contains(t, jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") + require.Contains(jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") // contains the fxID - require.Contains(t, jsonString, 
"\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":4}],\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\"") - require.Contains(t, jsonString, "\"credentials\":[{\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"credential\":{\"signatures\":[\"0xa3a00a03d3f1551ff696d6c0abdde73ae7002cd6dcce1c37d720de3b7ed80757411c9698cd9681a0fa55ca685904ca87056a3b8abc858a8ac08f45483b32a80201\"]}}]") + require.Contains(jsonString, "\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":4}],\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\"") + require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"credential\":{\"signatures\":[\"0xa3a00a03d3f1551ff696d6c0abdde73ae7002cd6dcce1c37d720de3b7ed80757411c9698cd9681a0fa55ca685904ca87056a3b8abc858a8ac08f45483b32a80201\"]}}]") } func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) + ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) err := vm.Initialize( + context.Background(), ctx, - manager.NewMemDB(version.Semantic1_0_0), + baseDBManager, genesisBytes, nil, nil, @@ -1439,55 +1356,37 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) }, &common.SenderTest{T: t}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) 
vm.batchTimeout = 0 - err = vm.SetState(snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] - createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) _, err = vm.IssueTx(createAssetTx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) op1 := buildPropertyFxMintOp(createAssetTx, key, 4) op2 := buildPropertyFxMintOp(createAssetTx, key, 5) mintPropertyFxOpTx := buildOperationTxWithOp(op1, op2) - err = mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}, {key}}) - require.NoError(t, err) + err = mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}}) + require.NoError(err) txID, err := vm.IssueTx(mintPropertyFxOpTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - if txID != mintPropertyFxOpTx.ID() { - t.Fatalf("Issue Tx returned wrong TxID") - } + require.NoError(err) + require.Equal(mintPropertyFxOpTx.ID(), txID) ctx.Lock.Unlock() msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, msg) ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 2 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + txs := vm.PendingTxs(context.Background()) + require.Len(txs, 2) + require.NoError(txs[0].Accept(context.Background())) + require.NoError(txs[1].Accept(context.Background())) reply := api.GetTxReply{} s := &Service{vm: vm} @@ -1495,26 +1394,26 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) TxID: txID, Encoding: formatting.JSON, }, &reply) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, reply.Encoding, formatting.JSON) + require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) - require.NoError(t, 
err) + require.NoError(err) jsonString := string(jsonTxBytes) // contains the address in the right format - require.Contains(t, jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") + require.Contains(jsonString, "\"mintOutput\":{\"addresses\":[\"X-testing1lnk637g0edwnqc2tn8tel39652fswa3xk4r65e\"]") // contains the fxID - require.Contains(t, jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":4}],\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\"") - require.Contains(t, jsonString, "\"credentials\":[{\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"credential\":{\"signatures\":[\"0x25b7ca14df108d4a32877bda4f10d84eda6d653c620f4c8d124265bdcf0ac91f45712b58b33f4b62a19698325a3c89adff214b77f772d9f311742860039abb5601\"]}},{\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"credential\":{\"signatures\":[\"0x25b7ca14df108d4a32877bda4f10d84eda6d653c620f4c8d124265bdcf0ac91f45712b58b33f4b62a19698325a3c89adff214b77f772d9f311742860039abb5601\"]}}]") + require.Contains(jsonString, "\"operations\":[{\"assetID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"inputIDs\":[{\"txID\":\"2MDgrsBHMRsEPa4D4NA1Bo1pjkVLUK173S3dd9BgT2nCJNiDuS\",\"outputIndex\":4}],\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\"") + require.Contains(jsonString, "\"credentials\":[{\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"credential\":{\"signatures\":[\"0x25b7ca14df108d4a32877bda4f10d84eda6d653c620f4c8d124265bdcf0ac91f45712b58b33f4b62a19698325a3c89adff214b77f772d9f311742860039abb5601\"]}},{\"fxID\":\"2mcwQKiD8VEspmMJpL1dc7okQQ5dDVAWeCBZ7FWBFAbxpv3t7w\",\"credential\":{\"signatures\":[\"0x25b7ca14df108d4a32877bda4f10d84eda6d653c620f4c8d124265bdcf0ac91f45712b58b33f4b62a19698325a3c89adff214b77f772d9f311742860039abb5601\"]}}]") } func 
newAvaxBaseTxWithOutputs(t *testing.T, genesisBytes []byte, vm *VM) *txs.Tx { avaxTx := GetAVAXTxFromGenesisTest(genesisBytes, t) key := keys[0] tx := buildBaseTx(avaxTx, vm, key) - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx @@ -1524,7 +1423,7 @@ func newAvaxExportTxWithOutputs(t *testing.T, genesisBytes []byte, vm *VM) *txs. avaxTx := GetAVAXTxFromGenesisTest(genesisBytes, t) key := keys[0] tx := buildExportTx(avaxTx, vm, key) - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return tx @@ -1539,10 +1438,10 @@ func newAvaxCreateAssetTxWithOutputs(t *testing.T, vm *VM) *txs.Tx { return tx } -func buildBaseTx(avaxTx *txs.Tx, vm *VM, key *crypto.PrivateKeySECP256K1R) *txs.Tx { +func buildBaseTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { return &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, Ins: []*avax.TransferableInput{{ @@ -1574,11 +1473,11 @@ func buildBaseTx(avaxTx *txs.Tx, vm *VM, key *crypto.PrivateKeySECP256K1R) *txs. 
}} } -func buildExportTx(avaxTx *txs.Tx, vm *VM, key *crypto.PrivateKeySECP256K1R) *txs.Tx { +func buildExportTx(avaxTx *txs.Tx, vm *VM, key *secp256k1.PrivateKey) *txs.Tx { return &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{ BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ @@ -1607,10 +1506,10 @@ func buildExportTx(avaxTx *txs.Tx, vm *VM, key *crypto.PrivateKeySECP256K1R) *tx }} } -func buildCreateAssetTx(key *crypto.PrivateKeySECP256K1R) *txs.Tx { +func buildCreateAssetTx(key *secp256k1.PrivateKey) *txs.Tx { return &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, }}, Name: "Team Rocket", @@ -1673,7 +1572,7 @@ func buildCreateAssetTx(key *crypto.PrivateKeySECP256K1R) *txs.Tx { }} } -func buildNFTxMintOp(createAssetTx *txs.Tx, key *crypto.PrivateKeySECP256K1R, outputIndex, groupID uint32) *txs.Operation { +func buildNFTxMintOp(createAssetTx *txs.Tx, key *secp256k1.PrivateKey, outputIndex, groupID uint32) *txs.Operation { return &txs.Operation{ Asset: avax.Asset{ID: createAssetTx.ID()}, UTXOIDs: []*avax.UTXOID{{ @@ -1694,7 +1593,7 @@ func buildNFTxMintOp(createAssetTx *txs.Tx, key *crypto.PrivateKeySECP256K1R, ou } } -func buildPropertyFxMintOp(createAssetTx *txs.Tx, key *crypto.PrivateKeySECP256K1R, outputIndex uint32) *txs.Operation { +func buildPropertyFxMintOp(createAssetTx *txs.Tx, key *secp256k1.PrivateKey, outputIndex uint32) *txs.Operation { return &txs.Operation{ Asset: avax.Asset{ID: createAssetTx.ID()}, UTXOIDs: []*avax.UTXOID{{ @@ -1715,7 +1614,7 @@ func buildPropertyFxMintOp(createAssetTx *txs.Tx, key *crypto.PrivateKeySECP256K } } -func buildSecpMintOp(createAssetTx *txs.Tx, key *crypto.PrivateKeySECP256K1R, outputIndex uint32) *txs.Operation { +func buildSecpMintOp(createAssetTx *txs.Tx, key *secp256k1.PrivateKey, outputIndex 
uint32) *txs.Operation { return &txs.Operation{ Asset: avax.Asset{ID: createAssetTx.ID()}, UTXOIDs: []*avax.UTXOID{{ @@ -1749,7 +1648,7 @@ func buildSecpMintOp(createAssetTx *txs.Tx, key *crypto.PrivateKeySECP256K1R, ou func buildOperationTxWithOp(op ...*txs.Operation) *txs.Tx { return &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, }}, Ops: op, @@ -1759,7 +1658,7 @@ func buildOperationTxWithOp(op ...*txs.Operation) *txs.Tx { func TestServiceGetNilTx(t *testing.T) { _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -1773,7 +1672,7 @@ func TestServiceGetNilTx(t *testing.T) { func TestServiceGetUnknownTx(t *testing.T) { _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -1787,7 +1686,7 @@ func TestServiceGetUnknownTx(t *testing.T) { func TestServiceGetUTXOs(t *testing.T) { _, vm, s, m, _ := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -1812,10 +1711,9 @@ func TestServiceGetUTXOs(t *testing.T) { }, }, } - if err := vm.state.PutUTXO(utxo); err != nil { - t.Fatal(err) - } + vm.state.AddUTXO(utxo) } + require.NoError(t, vm.state.Commit()) sm := m.NewSharedMemory(constants.PlatformChainID) @@ -2054,7 +1952,7 @@ func TestServiceGetUTXOs(t *testing.T) { func TestGetAssetDescription(t *testing.T) { _, vm, s, _, genesisTx := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -2081,7 +1979,7 @@ func TestGetAssetDescription(t *testing.T) { func TestGetBalance(t 
*testing.T) { _, vm, s, _, genesisTx := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -2112,7 +2010,7 @@ func TestCreateFixedCapAsset(t *testing.T) { t.Run(tc.name, func(t *testing.T) { _, vm, s, _, _ := setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -2161,7 +2059,7 @@ func TestCreateVariableCapAsset(t *testing.T) { t.Run(tc.name, func(t *testing.T) { _, vm, s, _, _ := setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -2211,7 +2109,7 @@ func TestCreateVariableCapAsset(t *testing.T) { if status := createAssetTx.Status(); status != choices.Processing { t.Fatalf("CreateVariableCapAssetTx status should have been Processing, but was %s", status) } - if err := createAssetTx.Accept(); err != nil { + if err := createAssetTx.Accept(context.Background()); err != nil { t.Fatalf("Failed to accept CreateVariableCapAssetTx due to: %s", err) } @@ -2244,7 +2142,7 @@ func TestCreateVariableCapAsset(t *testing.T) { if status := mintTx.Status(); status != choices.Processing { t.Fatalf("MintTx status should have been Processing, but was %s", status) } - if err := mintTx.Accept(); err != nil { + if err := mintTx.Accept(context.Background()); err != nil { t.Fatalf("Failed to accept MintTx due to: %s", err) } @@ -2278,7 +2176,7 @@ func TestNFTWorkflow(t *testing.T) { t.Run(tc.name, func(t *testing.T) { _, vm, s, _, _ := setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -2328,7 +2226,7 @@ func TestNFTWorkflow(t *testing.T) { if createNFTTx.Status() != 
choices.Processing { t.Fatalf("CreateNFTTx should have been processing after creating the NFT") } - if err := createNFTTx.Accept(); err != nil { + if err := createNFTTx.Accept(context.Background()); err != nil { t.Fatalf("Failed to accept CreateNFT transaction: %s", err) } else if err := verifyTxFeeDeducted(t, s, fromAddrs, 1); err != nil { t.Fatal(err) @@ -2369,7 +2267,7 @@ func TestNFTWorkflow(t *testing.T) { } // Accept the transaction so that we can send the newly minted NFT - if err := mintNFTTx.Accept(); err != nil { + if err := mintNFTTx.Accept(context.Background()); err != nil { t.Fatalf("Failed to accept MintNFTTx: %s", err) } @@ -2399,18 +2297,17 @@ func TestNFTWorkflow(t *testing.T) { func TestImportExportKey(t *testing.T) { _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() }() - factory := crypto.FactorySECP256K1R{} - skIntf, err := factory.NewPrivateKey() + factory := secp256k1.Factory{} + sk, err := factory.NewPrivateKey() if err != nil { t.Fatalf("problem generating private key: %s", err) } - sk := skIntf.(*crypto.PrivateKeySECP256K1R) importArgs := &ImportKeyArgs{ UserPass: api.UserPass{ @@ -2420,7 +2317,7 @@ func TestImportExportKey(t *testing.T) { PrivateKey: sk, } importReply := &api.JSONAddress{} - if err = s.ImportKey(nil, importArgs, importReply); err != nil { + if err := s.ImportKey(nil, importArgs, importReply); err != nil { t.Fatal(err) } @@ -2436,7 +2333,7 @@ func TestImportExportKey(t *testing.T) { Address: addrStr, } exportReply := &ExportKeyReply{} - if err = s.ExportKey(nil, exportArgs, exportReply); err != nil { + if err := s.ExportKey(nil, exportArgs, exportReply); err != nil { t.Fatal(err) } @@ -2449,18 +2346,17 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { _, vm, s, _, _ := setup(t, true) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { + if err := 
vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() }() - factory := crypto.FactorySECP256K1R{} - skIntf, err := factory.NewPrivateKey() + factory := secp256k1.Factory{} + sk, err := factory.NewPrivateKey() if err != nil { t.Fatalf("problem generating private key: %s", err) } - sk := skIntf.(*crypto.PrivateKeySECP256K1R) args := ImportKeyArgs{ UserPass: api.UserPass{ Username: username, @@ -2469,7 +2365,7 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { PrivateKey: sk, } reply := api.JSONAddress{} - if err = s.ImportKey(nil, &args, &reply); err != nil { + if err := s.ImportKey(nil, &args, &reply); err != nil { t.Fatal(err) } @@ -2483,7 +2379,7 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { } reply2 := api.JSONAddress{} - if err = s.ImportKey(nil, &args, &reply2); err != nil { + if err := s.ImportKey(nil, &args, &reply2); err != nil { t.Fatal(err) } @@ -2512,7 +2408,7 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { func TestSend(t *testing.T) { _, vm, s, _, genesisTx := setupWithKeys(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -2569,7 +2465,7 @@ func TestSendMultiple(t *testing.T) { t.Run(tc.name, func(t *testing.T) { _, vm, s, _, genesisTx := setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -2627,7 +2523,7 @@ func TestSendMultiple(t *testing.T) { t.Fatal("Transaction ID returned by SendMultiple does not match the transaction found in vm's pending transactions") } - if _, err = vm.GetTx(reply.TxID); err != nil { + if _, err := vm.GetTx(context.Background(), reply.TxID); err != nil { t.Fatalf("Failed to retrieve created transaction: %s", err) } }) @@ -2637,7 +2533,7 @@ func TestSendMultiple(t *testing.T) { func TestCreateAndListAddresses(t *testing.T) { _, vm, s, _, _ := 
setup(t, true) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -2678,7 +2574,7 @@ func TestImport(t *testing.T) { t.Run(tc.name, func(t *testing.T) { _, vm, s, m, genesisTx := setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -2733,3 +2629,451 @@ func TestImport(t *testing.T) { }) } } + +func TestServiceGetBlock(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + blockID := ids.GenerateTestID() + + type test struct { + name string + serviceAndExpectedBlockFunc func(ctrl *gomock.Controller) (*Service, interface{}) + encoding formatting.Encoding + expectedErr error + } + + tests := []test{ + { + name: "chain not linearized", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + return &Service{ + vm: &VM{ + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, nil + }, + encoding: formatting.Hex, + expectedErr: errNotLinearized, + }, + { + name: "block not found", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(nil, database.ErrNotFound) + return &Service{ + vm: &VM{ + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, nil + }, + encoding: formatting.Hex, + expectedErr: database.ErrNotFound, + }, + { + name: "JSON format", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + block := blocks.NewMockBlock(ctrl) + block.EXPECT().InitCtx(gomock.Any()) + block.EXPECT().Txs().Return(nil) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + chainManager: 
manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, block + }, + encoding: formatting.JSON, + expectedErr: nil, + }, + { + name: "hex format", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + block := blocks.NewMockBlock(ctrl) + blockBytes := []byte("hi mom") + block.EXPECT().Bytes().Return(blockBytes) + + expected, err := formatting.Encode(formatting.Hex, blockBytes) + require.NoError(err) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, expected + }, + encoding: formatting.Hex, + expectedErr: nil, + }, + { + name: "hexc format", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + block := blocks.NewMockBlock(ctrl) + blockBytes := []byte("hi mom") + block.EXPECT().Bytes().Return(blockBytes) + + expected, err := formatting.Encode(formatting.HexC, blockBytes) + require.NoError(err) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, expected + }, + encoding: formatting.HexC, + expectedErr: nil, + }, + { + name: "hexnc format", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + block := blocks.NewMockBlock(ctrl) + blockBytes := []byte("hi mom") + block.EXPECT().Bytes().Return(blockBytes) + + expected, err := formatting.Encode(formatting.HexNC, blockBytes) + require.NoError(err) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, expected + }, + encoding: formatting.HexNC, + expectedErr: nil, + }, + } + + for _, tt := 
range tests { + t.Run(tt.name, func(t *testing.T) { + service, expected := tt.serviceAndExpectedBlockFunc(ctrl) + + args := &api.GetBlockArgs{ + BlockID: blockID, + Encoding: tt.encoding, + } + reply := &api.GetBlockResponse{} + err := service.GetBlock(nil, args, reply) + require.ErrorIs(err, tt.expectedErr) + if err == nil { + require.Equal(tt.encoding, reply.Encoding) + require.Equal(expected, reply.Block) + } + }) + } +} + +func TestServiceGetBlockByHeight(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + blockID := ids.GenerateTestID() + blockHeight := uint64(1337) + + type test struct { + name string + serviceAndExpectedBlockFunc func(ctrl *gomock.Controller) (*Service, interface{}) + encoding formatting.Encoding + expectedErr error + } + + tests := []test{ + { + name: "chain not linearized", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + return &Service{ + vm: &VM{ + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, nil + }, + encoding: formatting.Hex, + expectedErr: errNotLinearized, + }, + { + name: "block height not found", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + state := states.NewMockState(ctrl) + state.EXPECT().GetBlockID(blockHeight).Return(ids.Empty, database.ErrNotFound) + + manager := executor.NewMockManager(ctrl) + return &Service{ + vm: &VM{ + state: state, + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, nil + }, + encoding: formatting.Hex, + expectedErr: database.ErrNotFound, + }, + { + name: "block not found", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + state := states.NewMockState(ctrl) + state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(nil, database.ErrNotFound) + return &Service{ + vm: &VM{ + state: 
state, + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, nil + }, + encoding: formatting.Hex, + expectedErr: database.ErrNotFound, + }, + { + name: "JSON format", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + block := blocks.NewMockBlock(ctrl) + block.EXPECT().InitCtx(gomock.Any()) + block.EXPECT().Txs().Return(nil) + + state := states.NewMockState(ctrl) + state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + state: state, + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, block + }, + encoding: formatting.JSON, + expectedErr: nil, + }, + { + name: "hex format", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + block := blocks.NewMockBlock(ctrl) + blockBytes := []byte("hi mom") + block.EXPECT().Bytes().Return(blockBytes) + + state := states.NewMockState(ctrl) + state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + + expected, err := formatting.Encode(formatting.Hex, blockBytes) + require.NoError(err) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + state: state, + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, expected + }, + encoding: formatting.Hex, + expectedErr: nil, + }, + { + name: "hexc format", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + block := blocks.NewMockBlock(ctrl) + blockBytes := []byte("hi mom") + block.EXPECT().Bytes().Return(blockBytes) + + state := states.NewMockState(ctrl) + state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + + expected, err := formatting.Encode(formatting.HexC, blockBytes) + require.NoError(err) + + manager := executor.NewMockManager(ctrl) + 
manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + state: state, + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, expected + }, + encoding: formatting.HexC, + expectedErr: nil, + }, + { + name: "hexnc format", + serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + block := blocks.NewMockBlock(ctrl) + blockBytes := []byte("hi mom") + block.EXPECT().Bytes().Return(blockBytes) + + state := states.NewMockState(ctrl) + state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) + + expected, err := formatting.Encode(formatting.HexNC, blockBytes) + require.NoError(err) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + state: state, + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + }, expected + }, + encoding: formatting.HexNC, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + service, expected := tt.serviceAndExpectedBlockFunc(ctrl) + + args := &api.GetBlockByHeightArgs{ + Height: blockHeight, + Encoding: tt.encoding, + } + reply := &api.GetBlockResponse{} + err := service.GetBlockByHeight(nil, args, reply) + require.ErrorIs(err, tt.expectedErr) + if err == nil { + require.Equal(tt.encoding, reply.Encoding) + require.Equal(expected, reply.Block) + } + }) + } +} + +func TestServiceGetHeight(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + blockID := ids.GenerateTestID() + blockHeight := uint64(1337) + + type test struct { + name string + serviceFunc func(ctrl *gomock.Controller) *Service + expectedErr error + } + + tests := []test{ + { + name: "chain not linearized", + serviceFunc: func(ctrl *gomock.Controller) *Service { + return &Service{ + vm: &VM{ + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + } + }, + expectedErr: 
errNotLinearized, + }, + { + name: "block not found", + serviceFunc: func(ctrl *gomock.Controller) *Service { + state := states.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(blockID) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(nil, database.ErrNotFound) + return &Service{ + vm: &VM{ + state: state, + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + } + }, + expectedErr: database.ErrNotFound, + }, + { + name: "happy path", + serviceFunc: func(ctrl *gomock.Controller) *Service { + state := states.NewMockState(ctrl) + state.EXPECT().GetLastAccepted().Return(blockID) + + block := blocks.NewMockBlock(ctrl) + block.EXPECT().Height().Return(blockHeight) + + manager := executor.NewMockManager(ctrl) + manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) + return &Service{ + vm: &VM{ + state: state, + chainManager: manager, + ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + }, + } + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + service := tt.serviceFunc(ctrl) + + reply := &api.GetHeightResponse{} + err := service.GetHeight(nil, nil, reply) + require.ErrorIs(err, tt.expectedErr) + if err == nil { + require.Equal(json.Uint64(blockHeight), reply.Height) + } + }) + } +} diff --git a/avalanchego/vms/avm/state_test.go b/avalanchego/vms/avm/state_test.go index 4e054e80..32a3874f 100644 --- a/avalanchego/vms/avm/state_test.go +++ b/avalanchego/vms/avm/state_test.go @@ -1,16 +1,20 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( + "context" "math" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -33,7 +37,7 @@ func TestSetsAndGets(t *testing.T) { ) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -52,7 +56,7 @@ func TestSetsAndGets(t *testing.T) { utxoID := utxo.InputID() tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ @@ -70,29 +74,25 @@ func TestSetsAndGets(t *testing.T) { }, }}, }}} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { + if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { t.Fatal(err) } - if err := state.PutUTXO(utxo); err != nil { - t.Fatal(err) - } - if err := state.PutTx(ids.Empty, tx); err != nil { - t.Fatal(err) - } - if err := state.PutStatus(ids.Empty, choices.Accepted); err != nil { - t.Fatal(err) - } + txID := tx.ID() + + state.AddUTXO(utxo) + state.AddTx(tx) + state.AddStatus(txID, choices.Accepted) resultUTXO, err := state.GetUTXO(utxoID) if err != nil { t.Fatal(err) } - resultTx, err := state.GetTx(ids.Empty) + resultTx, err := state.GetTx(txID) if err != nil { t.Fatal(err) } - resultStatus, err := state.GetStatus(ids.Empty) + resultStatus, err := state.GetStatus(txID) if err != nil { t.Fatal(err) } @@ -124,7 +124,7 @@ func 
TestFundingNoAddresses(t *testing.T) { ) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -141,12 +141,8 @@ func TestFundingNoAddresses(t *testing.T) { Out: &avax.TestVerifiable{}, } - if err := state.PutUTXO(utxo); err != nil { - t.Fatal(err) - } - if err := state.DeleteUTXO(utxo.InputID()); err != nil { - t.Fatal(err) - } + state.AddUTXO(utxo) + state.DeleteUTXO(utxo.InputID()) } func TestFundingAddresses(t *testing.T) { @@ -165,7 +161,7 @@ func TestFundingAddresses(t *testing.T) { ) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -184,27 +180,18 @@ func TestFundingAddresses(t *testing.T) { }, } - if err := state.PutUTXO(utxo); err != nil { - t.Fatal(err) - } + state.AddUTXO(utxo) + require.NoError(t, state.Commit()) + utxos, err := state.UTXOIDs([]byte{0}, ids.Empty, math.MaxInt32) - if err != nil { - t.Fatal(err) - } - if len(utxos) != 1 { - t.Fatalf("Should have returned 1 utxoIDs") - } - if utxoID := utxos[0]; utxoID != utxo.InputID() { - t.Fatalf("Returned wrong utxoID") - } - if err := state.DeleteUTXO(utxo.InputID()); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + require.Len(t, utxos, 1) + require.Equal(t, utxo.InputID(), utxos[0]) + + state.DeleteUTXO(utxo.InputID()) + require.NoError(t, state.Commit()) + utxos, err = state.UTXOIDs([]byte{0}, ids.Empty, math.MaxInt32) - if err != nil { - t.Fatal(err) - } - if len(utxos) != 0 { - t.Fatalf("Should have returned 0 utxoIDs") - } + require.NoError(t, err) + require.Empty(t, utxos) } diff --git a/avalanchego/vms/avm/states/diff.go b/avalanchego/vms/avm/states/diff.go new file mode 100644 index 00000000..5b77c0a7 --- /dev/null +++ b/avalanchego/vms/avm/states/diff.go @@ -0,0 +1,172 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package states + +import ( + "errors" + "fmt" + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" +) + +var ( + _ Diff = (*diff)(nil) + + ErrMissingParentState = errors.New("missing parent state") +) + +type Diff interface { + Chain + + Apply(Chain) +} + +type diff struct { + parentID ids.ID + stateVersions Versions + + // map of modified UTXOID -> *UTXO if the UTXO is nil, it has been removed + modifiedUTXOs map[ids.ID]*avax.UTXO + addedTxs map[ids.ID]*txs.Tx // map of txID -> tx + addedBlockIDs map[uint64]ids.ID // map of height -> blockID + addedBlocks map[ids.ID]blocks.Block // map of blockID -> block + + lastAccepted ids.ID + timestamp time.Time +} + +func NewDiff( + parentID ids.ID, + stateVersions Versions, +) (Diff, error) { + parentState, ok := stateVersions.GetState(parentID) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrMissingParentState, parentID) + } + return &diff{ + parentID: parentID, + stateVersions: stateVersions, + modifiedUTXOs: make(map[ids.ID]*avax.UTXO), + addedTxs: make(map[ids.ID]*txs.Tx), + addedBlockIDs: make(map[uint64]ids.ID), + addedBlocks: make(map[ids.ID]blocks.Block), + lastAccepted: parentState.GetLastAccepted(), + timestamp: parentState.GetTimestamp(), + }, nil +} + +func (d *diff) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { + if utxo, modified := d.modifiedUTXOs[utxoID]; modified { + if utxo == nil { + return nil, database.ErrNotFound + } + return utxo, nil + } + + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + return parentState.GetUTXO(utxoID) +} + +func (d *diff) GetUTXOFromID(utxoID *avax.UTXOID) (*avax.UTXO, error) { + return d.GetUTXO(utxoID.InputID()) +} + +func (d *diff) 
AddUTXO(utxo *avax.UTXO) { + d.modifiedUTXOs[utxo.InputID()] = utxo +} + +func (d *diff) DeleteUTXO(utxoID ids.ID) { + d.modifiedUTXOs[utxoID] = nil +} + +func (d *diff) GetTx(txID ids.ID) (*txs.Tx, error) { + if tx, exists := d.addedTxs[txID]; exists { + return tx, nil + } + + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + return parentState.GetTx(txID) +} + +func (d *diff) AddTx(tx *txs.Tx) { + d.addedTxs[tx.ID()] = tx +} + +func (d *diff) GetBlockID(height uint64) (ids.ID, error) { + if blkID, exists := d.addedBlockIDs[height]; exists { + return blkID, nil + } + + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return ids.Empty, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + return parentState.GetBlockID(height) +} + +func (d *diff) GetBlock(blkID ids.ID) (blocks.Block, error) { + if blk, exists := d.addedBlocks[blkID]; exists { + return blk, nil + } + + parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + return parentState.GetBlock(blkID) +} + +func (d *diff) AddBlock(blk blocks.Block) { + blkID := blk.ID() + d.addedBlockIDs[blk.Height()] = blkID + d.addedBlocks[blkID] = blk +} + +func (d *diff) GetLastAccepted() ids.ID { + return d.lastAccepted +} + +func (d *diff) SetLastAccepted(lastAccepted ids.ID) { + d.lastAccepted = lastAccepted +} + +func (d *diff) GetTimestamp() time.Time { + return d.timestamp +} + +func (d *diff) SetTimestamp(t time.Time) { + d.timestamp = t +} + +func (d *diff) Apply(state Chain) { + for utxoID, utxo := range d.modifiedUTXOs { + if utxo != nil { + state.AddUTXO(utxo) + } else { + state.DeleteUTXO(utxoID) + } + } + + for _, tx := range d.addedTxs { + state.AddTx(tx) + } + + for _, blk := range d.addedBlocks { + state.AddBlock(blk) + } + + state.SetLastAccepted(d.lastAccepted) + state.SetTimestamp(d.timestamp) +} 
diff --git a/avalanchego/vms/avm/states/mock_states.go b/avalanchego/vms/avm/states/mock_states.go new file mode 100644 index 00000000..a5b5f6ea --- /dev/null +++ b/avalanchego/vms/avm/states/mock_states.go @@ -0,0 +1,767 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/avm/states (interfaces: Chain,State,Diff) + +// Package states is a generated GoMock package. +package states + +import ( + reflect "reflect" + time "time" + + database "github.com/ava-labs/avalanchego/database" + ids "github.com/ava-labs/avalanchego/ids" + choices "github.com/ava-labs/avalanchego/snow/choices" + blocks "github.com/ava-labs/avalanchego/vms/avm/blocks" + txs "github.com/ava-labs/avalanchego/vms/avm/txs" + avax "github.com/ava-labs/avalanchego/vms/components/avax" + gomock "github.com/golang/mock/gomock" +) + +// MockChain is a mock of Chain interface. +type MockChain struct { + ctrl *gomock.Controller + recorder *MockChainMockRecorder +} + +// MockChainMockRecorder is the mock recorder for MockChain. +type MockChainMockRecorder struct { + mock *MockChain +} + +// NewMockChain creates a new mock instance. +func NewMockChain(ctrl *gomock.Controller) *MockChain { + mock := &MockChain{ctrl: ctrl} + mock.recorder = &MockChainMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockChain) EXPECT() *MockChainMockRecorder { + return m.recorder +} + +// AddBlock mocks base method. +func (m *MockChain) AddBlock(arg0 blocks.Block) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddBlock", arg0) +} + +// AddBlock indicates an expected call of AddBlock. 
+func (mr *MockChainMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockChain)(nil).AddBlock), arg0) +} + +// AddTx mocks base method. +func (m *MockChain) AddTx(arg0 *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTx", arg0) +} + +// AddTx indicates an expected call of AddTx. +func (mr *MockChainMockRecorder) AddTx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockChain)(nil).AddTx), arg0) +} + +// AddUTXO mocks base method. +func (m *MockChain) AddUTXO(arg0 *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddUTXO", arg0) +} + +// AddUTXO indicates an expected call of AddUTXO. +func (mr *MockChainMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockChain)(nil).AddUTXO), arg0) +} + +// DeleteUTXO mocks base method. +func (m *MockChain) DeleteUTXO(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteUTXO", arg0) +} + +// DeleteUTXO indicates an expected call of DeleteUTXO. +func (mr *MockChainMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockChain)(nil).DeleteUTXO), arg0) +} + +// GetBlock mocks base method. +func (m *MockChain) GetBlock(arg0 ids.ID) (blocks.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlock", arg0) + ret0, _ := ret[0].(blocks.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlock indicates an expected call of GetBlock. 
+func (mr *MockChainMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockChain)(nil).GetBlock), arg0) +} + +// GetBlockID mocks base method. +func (m *MockChain) GetBlockID(arg0 uint64) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockID", arg0) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockID indicates an expected call of GetBlockID. +func (mr *MockChainMockRecorder) GetBlockID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockID", reflect.TypeOf((*MockChain)(nil).GetBlockID), arg0) +} + +// GetLastAccepted mocks base method. +func (m *MockChain) GetLastAccepted() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLastAccepted") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// GetLastAccepted indicates an expected call of GetLastAccepted. +func (mr *MockChainMockRecorder) GetLastAccepted() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastAccepted", reflect.TypeOf((*MockChain)(nil).GetLastAccepted)) +} + +// GetTimestamp mocks base method. +func (m *MockChain) GetTimestamp() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTimestamp") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetTimestamp indicates an expected call of GetTimestamp. +func (mr *MockChainMockRecorder) GetTimestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockChain)(nil).GetTimestamp)) +} + +// GetTx mocks base method. +func (m *MockChain) GetTx(arg0 ids.ID) (*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTx", arg0) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTx indicates an expected call of GetTx. 
+func (mr *MockChainMockRecorder) GetTx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockChain)(nil).GetTx), arg0) +} + +// GetUTXO mocks base method. +func (m *MockChain) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXO", arg0) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXO indicates an expected call of GetUTXO. +func (mr *MockChainMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockChain)(nil).GetUTXO), arg0) +} + +// GetUTXOFromID mocks base method. +func (m *MockChain) GetUTXOFromID(arg0 *avax.UTXOID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXOFromID", arg0) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXOFromID indicates an expected call of GetUTXOFromID. +func (mr *MockChainMockRecorder) GetUTXOFromID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXOFromID", reflect.TypeOf((*MockChain)(nil).GetUTXOFromID), arg0) +} + +// SetLastAccepted mocks base method. +func (m *MockChain) SetLastAccepted(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLastAccepted", arg0) +} + +// SetLastAccepted indicates an expected call of SetLastAccepted. +func (mr *MockChainMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockChain)(nil).SetLastAccepted), arg0) +} + +// SetTimestamp mocks base method. +func (m *MockChain) SetTimestamp(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTimestamp", arg0) +} + +// SetTimestamp indicates an expected call of SetTimestamp. 
+func (mr *MockChainMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockChain)(nil).SetTimestamp), arg0) +} + +// MockState is a mock of State interface. +type MockState struct { + ctrl *gomock.Controller + recorder *MockStateMockRecorder +} + +// MockStateMockRecorder is the mock recorder for MockState. +type MockStateMockRecorder struct { + mock *MockState +} + +// NewMockState creates a new mock instance. +func NewMockState(ctrl *gomock.Controller) *MockState { + mock := &MockState{ctrl: ctrl} + mock.recorder = &MockStateMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockState) EXPECT() *MockStateMockRecorder { + return m.recorder +} + +// Abort mocks base method. +func (m *MockState) Abort() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Abort") +} + +// Abort indicates an expected call of Abort. +func (mr *MockStateMockRecorder) Abort() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Abort", reflect.TypeOf((*MockState)(nil).Abort)) +} + +// AddBlock mocks base method. +func (m *MockState) AddBlock(arg0 blocks.Block) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddBlock", arg0) +} + +// AddBlock indicates an expected call of AddBlock. +func (mr *MockStateMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockState)(nil).AddBlock), arg0) +} + +// AddStatus mocks base method. +func (m *MockState) AddStatus(arg0 ids.ID, arg1 choices.Status) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddStatus", arg0, arg1) +} + +// AddStatus indicates an expected call of AddStatus. 
+func (mr *MockStateMockRecorder) AddStatus(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddStatus", reflect.TypeOf((*MockState)(nil).AddStatus), arg0, arg1) +} + +// AddTx mocks base method. +func (m *MockState) AddTx(arg0 *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTx", arg0) +} + +// AddTx indicates an expected call of AddTx. +func (mr *MockStateMockRecorder) AddTx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockState)(nil).AddTx), arg0) +} + +// AddUTXO mocks base method. +func (m *MockState) AddUTXO(arg0 *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddUTXO", arg0) +} + +// AddUTXO indicates an expected call of AddUTXO. +func (mr *MockStateMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockState)(nil).AddUTXO), arg0) +} + +// Close mocks base method. +func (m *MockState) Close() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close") + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockStateMockRecorder) Close() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockState)(nil).Close)) +} + +// Commit mocks base method. +func (m *MockState) Commit() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Commit") + ret0, _ := ret[0].(error) + return ret0 +} + +// Commit indicates an expected call of Commit. +func (mr *MockStateMockRecorder) Commit() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Commit", reflect.TypeOf((*MockState)(nil).Commit)) +} + +// CommitBatch mocks base method. 
+func (m *MockState) CommitBatch() (database.Batch, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CommitBatch") + ret0, _ := ret[0].(database.Batch) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CommitBatch indicates an expected call of CommitBatch. +func (mr *MockStateMockRecorder) CommitBatch() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CommitBatch", reflect.TypeOf((*MockState)(nil).CommitBatch)) +} + +// DeleteUTXO mocks base method. +func (m *MockState) DeleteUTXO(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteUTXO", arg0) +} + +// DeleteUTXO indicates an expected call of DeleteUTXO. +func (mr *MockStateMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockState)(nil).DeleteUTXO), arg0) +} + +// GetBlock mocks base method. +func (m *MockState) GetBlock(arg0 ids.ID) (blocks.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlock", arg0) + ret0, _ := ret[0].(blocks.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlock indicates an expected call of GetBlock. +func (mr *MockStateMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockState)(nil).GetBlock), arg0) +} + +// GetBlockID mocks base method. +func (m *MockState) GetBlockID(arg0 uint64) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockID", arg0) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockID indicates an expected call of GetBlockID. 
+func (mr *MockStateMockRecorder) GetBlockID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockID", reflect.TypeOf((*MockState)(nil).GetBlockID), arg0) +} + +// GetLastAccepted mocks base method. +func (m *MockState) GetLastAccepted() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLastAccepted") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// GetLastAccepted indicates an expected call of GetLastAccepted. +func (mr *MockStateMockRecorder) GetLastAccepted() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastAccepted", reflect.TypeOf((*MockState)(nil).GetLastAccepted)) +} + +// GetStatus mocks base method. +func (m *MockState) GetStatus(arg0 ids.ID) (choices.Status, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStatus", arg0) + ret0, _ := ret[0].(choices.Status) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStatus indicates an expected call of GetStatus. +func (mr *MockStateMockRecorder) GetStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatus", reflect.TypeOf((*MockState)(nil).GetStatus), arg0) +} + +// GetTimestamp mocks base method. +func (m *MockState) GetTimestamp() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTimestamp") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetTimestamp indicates an expected call of GetTimestamp. +func (mr *MockStateMockRecorder) GetTimestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockState)(nil).GetTimestamp)) +} + +// GetTx mocks base method. 
+func (m *MockState) GetTx(arg0 ids.ID) (*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTx", arg0) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTx indicates an expected call of GetTx. +func (mr *MockStateMockRecorder) GetTx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockState)(nil).GetTx), arg0) +} + +// GetUTXO mocks base method. +func (m *MockState) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXO", arg0) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXO indicates an expected call of GetUTXO. +func (mr *MockStateMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockState)(nil).GetUTXO), arg0) +} + +// GetUTXOFromID mocks base method. +func (m *MockState) GetUTXOFromID(arg0 *avax.UTXOID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXOFromID", arg0) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXOFromID indicates an expected call of GetUTXOFromID. +func (mr *MockStateMockRecorder) GetUTXOFromID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXOFromID", reflect.TypeOf((*MockState)(nil).GetUTXOFromID), arg0) +} + +// InitializeChainState mocks base method. +func (m *MockState) InitializeChainState(arg0 ids.ID, arg1 time.Time) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InitializeChainState", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// InitializeChainState indicates an expected call of InitializeChainState. 
+func (mr *MockStateMockRecorder) InitializeChainState(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitializeChainState", reflect.TypeOf((*MockState)(nil).InitializeChainState), arg0, arg1) +} + +// IsInitialized mocks base method. +func (m *MockState) IsInitialized() (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsInitialized") + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// IsInitialized indicates an expected call of IsInitialized. +func (mr *MockStateMockRecorder) IsInitialized() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsInitialized", reflect.TypeOf((*MockState)(nil).IsInitialized)) +} + +// SetInitialized mocks base method. +func (m *MockState) SetInitialized() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetInitialized") + ret0, _ := ret[0].(error) + return ret0 +} + +// SetInitialized indicates an expected call of SetInitialized. +func (mr *MockStateMockRecorder) SetInitialized() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetInitialized", reflect.TypeOf((*MockState)(nil).SetInitialized)) +} + +// SetLastAccepted mocks base method. +func (m *MockState) SetLastAccepted(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLastAccepted", arg0) +} + +// SetLastAccepted indicates an expected call of SetLastAccepted. +func (mr *MockStateMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockState)(nil).SetLastAccepted), arg0) +} + +// SetTimestamp mocks base method. +func (m *MockState) SetTimestamp(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTimestamp", arg0) +} + +// SetTimestamp indicates an expected call of SetTimestamp. 
+func (mr *MockStateMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockState)(nil).SetTimestamp), arg0) +} + +// UTXOIDs mocks base method. +func (m *MockState) UTXOIDs(arg0 []byte, arg1 ids.ID, arg2 int) ([]ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UTXOIDs", arg0, arg1, arg2) + ret0, _ := ret[0].([]ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UTXOIDs indicates an expected call of UTXOIDs. +func (mr *MockStateMockRecorder) UTXOIDs(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UTXOIDs", reflect.TypeOf((*MockState)(nil).UTXOIDs), arg0, arg1, arg2) +} + +// MockDiff is a mock of Diff interface. +type MockDiff struct { + ctrl *gomock.Controller + recorder *MockDiffMockRecorder +} + +// MockDiffMockRecorder is the mock recorder for MockDiff. +type MockDiffMockRecorder struct { + mock *MockDiff +} + +// NewMockDiff creates a new mock instance. +func NewMockDiff(ctrl *gomock.Controller) *MockDiff { + mock := &MockDiff{ctrl: ctrl} + mock.recorder = &MockDiffMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDiff) EXPECT() *MockDiffMockRecorder { + return m.recorder +} + +// AddBlock mocks base method. +func (m *MockDiff) AddBlock(arg0 blocks.Block) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddBlock", arg0) +} + +// AddBlock indicates an expected call of AddBlock. +func (mr *MockDiffMockRecorder) AddBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddBlock", reflect.TypeOf((*MockDiff)(nil).AddBlock), arg0) +} + +// AddTx mocks base method. 
+func (m *MockDiff) AddTx(arg0 *txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddTx", arg0) +} + +// AddTx indicates an expected call of AddTx. +func (mr *MockDiffMockRecorder) AddTx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddTx", reflect.TypeOf((*MockDiff)(nil).AddTx), arg0) +} + +// AddUTXO mocks base method. +func (m *MockDiff) AddUTXO(arg0 *avax.UTXO) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "AddUTXO", arg0) +} + +// AddUTXO indicates an expected call of AddUTXO. +func (mr *MockDiffMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUTXO", reflect.TypeOf((*MockDiff)(nil).AddUTXO), arg0) +} + +// Apply mocks base method. +func (m *MockDiff) Apply(arg0 Chain) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Apply", arg0) +} + +// Apply indicates an expected call of Apply. +func (mr *MockDiffMockRecorder) Apply(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Apply", reflect.TypeOf((*MockDiff)(nil).Apply), arg0) +} + +// DeleteUTXO mocks base method. +func (m *MockDiff) DeleteUTXO(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteUTXO", arg0) +} + +// DeleteUTXO indicates an expected call of DeleteUTXO. +func (mr *MockDiffMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockDiff)(nil).DeleteUTXO), arg0) +} + +// GetBlock mocks base method. +func (m *MockDiff) GetBlock(arg0 ids.ID) (blocks.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlock", arg0) + ret0, _ := ret[0].(blocks.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlock indicates an expected call of GetBlock. 
+func (mr *MockDiffMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockDiff)(nil).GetBlock), arg0) +} + +// GetBlockID mocks base method. +func (m *MockDiff) GetBlockID(arg0 uint64) (ids.ID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlockID", arg0) + ret0, _ := ret[0].(ids.ID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlockID indicates an expected call of GetBlockID. +func (mr *MockDiffMockRecorder) GetBlockID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlockID", reflect.TypeOf((*MockDiff)(nil).GetBlockID), arg0) +} + +// GetLastAccepted mocks base method. +func (m *MockDiff) GetLastAccepted() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLastAccepted") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// GetLastAccepted indicates an expected call of GetLastAccepted. +func (mr *MockDiffMockRecorder) GetLastAccepted() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastAccepted", reflect.TypeOf((*MockDiff)(nil).GetLastAccepted)) +} + +// GetTimestamp mocks base method. +func (m *MockDiff) GetTimestamp() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTimestamp") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// GetTimestamp indicates an expected call of GetTimestamp. +func (mr *MockDiffMockRecorder) GetTimestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTimestamp", reflect.TypeOf((*MockDiff)(nil).GetTimestamp)) +} + +// GetTx mocks base method. +func (m *MockDiff) GetTx(arg0 ids.ID) (*txs.Tx, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTx", arg0) + ret0, _ := ret[0].(*txs.Tx) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTx indicates an expected call of GetTx. 
+func (mr *MockDiffMockRecorder) GetTx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTx", reflect.TypeOf((*MockDiff)(nil).GetTx), arg0) +} + +// GetUTXO mocks base method. +func (m *MockDiff) GetUTXO(arg0 ids.ID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXO", arg0) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXO indicates an expected call of GetUTXO. +func (mr *MockDiffMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXO", reflect.TypeOf((*MockDiff)(nil).GetUTXO), arg0) +} + +// GetUTXOFromID mocks base method. +func (m *MockDiff) GetUTXOFromID(arg0 *avax.UTXOID) (*avax.UTXO, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetUTXOFromID", arg0) + ret0, _ := ret[0].(*avax.UTXO) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetUTXOFromID indicates an expected call of GetUTXOFromID. +func (mr *MockDiffMockRecorder) GetUTXOFromID(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUTXOFromID", reflect.TypeOf((*MockDiff)(nil).GetUTXOFromID), arg0) +} + +// SetLastAccepted mocks base method. +func (m *MockDiff) SetLastAccepted(arg0 ids.ID) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetLastAccepted", arg0) +} + +// SetLastAccepted indicates an expected call of SetLastAccepted. +func (mr *MockDiffMockRecorder) SetLastAccepted(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetLastAccepted", reflect.TypeOf((*MockDiff)(nil).SetLastAccepted), arg0) +} + +// SetTimestamp mocks base method. +func (m *MockDiff) SetTimestamp(arg0 time.Time) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetTimestamp", arg0) +} + +// SetTimestamp indicates an expected call of SetTimestamp. 
+func (mr *MockDiffMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTimestamp", reflect.TypeOf((*MockDiff)(nil).SetTimestamp), arg0) +} diff --git a/avalanchego/vms/avm/states/state.go b/avalanchego/vms/avm/states/state.go index 6c4ae746..f6394e73 100644 --- a/avalanchego/vms/avm/states/state.go +++ b/avalanchego/vms/avm/states/state.go @@ -1,63 +1,578 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package states import ( + "fmt" + "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/cache/metercacher" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/vms/avm/blocks" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" ) +const ( + statusCacheSize = 8192 + txCacheSize = 8192 + blockIDCacheSize = 8192 + blockCacheSize = 2048 +) + var ( utxoPrefix = []byte("utxo") statusPrefix = []byte("status") - singletonPrefix = []byte("singleton") txPrefix = []byte("tx") + blockIDPrefix = []byte("blockID") + blockPrefix = []byte("block") + singletonPrefix = []byte("singleton") + + isInitializedKey = []byte{0x00} + timestampKey = []byte{0x01} + lastAcceptedKey = []byte{0x02} - _ State = &state{} + _ State = (*state)(nil) ) +type ReadOnlyChain interface { + avax.UTXOGetter + + // TODO: Remove GetUTXOFromID after the DAG linearization + GetUTXOFromID(utxoID *avax.UTXOID) (*avax.UTXO, error) + + GetTx(txID ids.ID) (*txs.Tx, error) + GetBlockID(height uint64) 
(ids.ID, error) + GetBlock(blkID ids.ID) (blocks.Block, error) + GetLastAccepted() ids.ID + GetTimestamp() time.Time +} + +type Chain interface { + ReadOnlyChain + avax.UTXOAdder + avax.UTXODeleter + + AddTx(tx *txs.Tx) + AddBlock(block blocks.Block) + SetLastAccepted(blkID ids.ID) + SetTimestamp(t time.Time) +} + // State persistently maintains a set of UTXOs, transaction, statuses, and // singletons. type State interface { - avax.UTXOState - avax.StatusState - avax.SingletonState - TxState + Chain + avax.UTXOReader + + IsInitialized() (bool, error) + SetInitialized() error + + // InitializeChainState is called after the VM has been linearized. Calling + // [GetLastAccepted] or [GetTimestamp] before calling this function will + // return uninitialized data. + // + // Invariant: After the chain is linearized, this function is expected to be + // called during startup. + InitializeChainState(stopVertexID ids.ID, genesisTimestamp time.Time) error + + // TODO: deprecate statuses. We should only persist accepted state + // Status returns a status from storage. + GetStatus(id ids.ID) (choices.Status, error) + // AddStatus saves a status in storage. + AddStatus(id ids.ID, status choices.Status) + + // Discard uncommitted changes to the database. + Abort() + + // Commit changes to the base database. + Commit() error + + // Returns a batch of unwritten changes that, when written, will commit all + // pending changes to the base database. + CommitBatch() (database.Batch, error) + + Close() error } +/* + * VMDB + * |- utxos + * | '-- utxoDB + * |- statuses + * | '-- statusDB + * |-. txs + * | '-- txID -> tx bytes + * |-. blockIDs + * | '-- height -> blockID + * |-. blocks + * | '-- blockID -> block bytes + * '-. 
singletons + * |-- initializedKey -> nil + * |-- timestampKey -> timestamp + * '-- lastAcceptedKey -> lastAccepted + */ type state struct { - avax.UTXOState - avax.StatusState - avax.SingletonState - TxState + parser blocks.Parser + db *versiondb.Database + + modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO if the UTXO is nil, it has been removed + utxoDB database.Database + utxoState avax.UTXOState + + addedStatuses map[ids.ID]choices.Status + statusCache cache.Cacher[ids.ID, *choices.Status] // cache of id -> choices.Status. If the entry is nil, it is not in the database + statusDB database.Database + + addedTxs map[ids.ID]*txs.Tx // map of txID -> *txs.Tx + txCache cache.Cacher[ids.ID, *txs.Tx] // cache of txID -> *txs.Tx. If the entry is nil, it is not in the database + txDB database.Database + + addedBlockIDs map[uint64]ids.ID // map of height -> blockID + blockIDCache cache.Cacher[uint64, ids.ID] // cache of height -> blockID. If the entry is ids.Empty, it is not in the database + blockIDDB database.Database + + addedBlocks map[ids.ID]blocks.Block // map of blockID -> Block + blockCache cache.Cacher[ids.ID, blocks.Block] // cache of blockID -> Block. If the entry is nil, it is not in the database + blockDB database.Database + + // [lastAccepted] is the most recently accepted block. 
+ lastAccepted, persistedLastAccepted ids.ID + timestamp, persistedTimestamp time.Time + singletonDB database.Database } -func New(db database.Database, parser txs.Parser, metrics prometheus.Registerer) (State, error) { +func New( + db *versiondb.Database, + parser blocks.Parser, + metrics prometheus.Registerer, +) (State, error) { utxoDB := prefixdb.New(utxoPrefix, db) statusDB := prefixdb.New(statusPrefix, db) - singletonDB := prefixdb.New(singletonPrefix, db) txDB := prefixdb.New(txPrefix, db) + blockIDDB := prefixdb.New(blockIDPrefix, db) + blockDB := prefixdb.New(blockPrefix, db) + singletonDB := prefixdb.New(singletonPrefix, db) - utxoState, err := avax.NewMeteredUTXOState(utxoDB, parser.Codec(), metrics) + statusCache, err := metercacher.New[ids.ID, *choices.Status]( + "status_cache", + metrics, + &cache.LRU[ids.ID, *choices.Status]{Size: statusCacheSize}, + ) + if err != nil { + return nil, err + } + + txCache, err := metercacher.New[ids.ID, *txs.Tx]( + "tx_cache", + metrics, + &cache.LRU[ids.ID, *txs.Tx]{Size: txCacheSize}, + ) if err != nil { return nil, err } - statusState, err := avax.NewMeteredStatusState(statusDB, metrics) + blockIDCache, err := metercacher.New[uint64, ids.ID]( + "block_id_cache", + metrics, + &cache.LRU[uint64, ids.ID]{Size: blockIDCacheSize}, + ) if err != nil { return nil, err } - txState, err := NewTxState(txDB, parser, metrics) + blockCache, err := metercacher.New[ids.ID, blocks.Block]( + "block_cache", + metrics, + &cache.LRU[ids.ID, blocks.Block]{Size: blockCacheSize}, + ) + if err != nil { + return nil, err + } + + utxoState, err := avax.NewMeteredUTXOState(utxoDB, parser.Codec(), metrics) return &state{ - UTXOState: utxoState, - StatusState: statusState, - SingletonState: avax.NewSingletonState(singletonDB), - TxState: txState, + parser: parser, + db: db, + + modifiedUTXOs: make(map[ids.ID]*avax.UTXO), + utxoDB: utxoDB, + utxoState: utxoState, + + addedStatuses: make(map[ids.ID]choices.Status), + statusCache: statusCache, + 
statusDB: statusDB, + + addedTxs: make(map[ids.ID]*txs.Tx), + txCache: txCache, + txDB: txDB, + + addedBlockIDs: make(map[uint64]ids.ID), + blockIDCache: blockIDCache, + blockIDDB: blockIDDB, + + addedBlocks: make(map[ids.ID]blocks.Block), + blockCache: blockCache, + blockDB: blockDB, + + singletonDB: singletonDB, }, err } + +func (s *state) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { + if utxo, exists := s.modifiedUTXOs[utxoID]; exists { + if utxo == nil { + return nil, database.ErrNotFound + } + return utxo, nil + } + return s.utxoState.GetUTXO(utxoID) +} + +func (s *state) GetUTXOFromID(utxoID *avax.UTXOID) (*avax.UTXO, error) { + return s.GetUTXO(utxoID.InputID()) +} + +func (s *state) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, error) { + return s.utxoState.UTXOIDs(addr, start, limit) +} + +func (s *state) AddUTXO(utxo *avax.UTXO) { + s.modifiedUTXOs[utxo.InputID()] = utxo +} + +func (s *state) DeleteUTXO(utxoID ids.ID) { + s.modifiedUTXOs[utxoID] = nil +} + +func (s *state) GetTx(txID ids.ID) (*txs.Tx, error) { + if tx, exists := s.addedTxs[txID]; exists { + return tx, nil + } + if tx, exists := s.txCache.Get(txID); exists { + if tx == nil { + return nil, database.ErrNotFound + } + return tx, nil + } + + txBytes, err := s.txDB.Get(txID[:]) + if err == database.ErrNotFound { + s.txCache.Put(txID, nil) + return nil, database.ErrNotFound + } + if err != nil { + return nil, err + } + + // The key was in the database + tx, err := s.parser.ParseGenesisTx(txBytes) + if err != nil { + return nil, err + } + + s.txCache.Put(txID, tx) + return tx, nil +} + +func (s *state) AddTx(tx *txs.Tx) { + s.addedTxs[tx.ID()] = tx +} + +func (s *state) GetBlockID(height uint64) (ids.ID, error) { + if blkID, exists := s.addedBlockIDs[height]; exists { + return blkID, nil + } + if blkID, cached := s.blockIDCache.Get(height); cached { + if blkID == ids.Empty { + return ids.Empty, database.ErrNotFound + } + + return blkID, nil + } + + heightKey := 
database.PackUInt64(height) + + blkID, err := database.GetID(s.blockIDDB, heightKey) + if err == database.ErrNotFound { + s.blockIDCache.Put(height, ids.Empty) + return ids.Empty, database.ErrNotFound + } + if err != nil { + return ids.Empty, err + } + + s.blockIDCache.Put(height, blkID) + return blkID, nil +} + +func (s *state) GetBlock(blkID ids.ID) (blocks.Block, error) { + if blk, exists := s.addedBlocks[blkID]; exists { + return blk, nil + } + if blk, cached := s.blockCache.Get(blkID); cached { + if blk == nil { + return nil, database.ErrNotFound + } + + return blk, nil + } + + blkBytes, err := s.blockDB.Get(blkID[:]) + if err == database.ErrNotFound { + s.blockCache.Put(blkID, nil) + return nil, database.ErrNotFound + } + if err != nil { + return nil, err + } + + blk, err := s.parser.ParseBlock(blkBytes) + if err != nil { + return nil, err + } + + s.blockCache.Put(blkID, blk) + return blk, nil +} + +func (s *state) AddBlock(block blocks.Block) { + blkID := block.ID() + s.addedBlockIDs[block.Height()] = blkID + s.addedBlocks[blkID] = block +} + +func (s *state) InitializeChainState(stopVertexID ids.ID, genesisTimestamp time.Time) error { + lastAccepted, err := database.GetID(s.singletonDB, lastAcceptedKey) + if err == database.ErrNotFound { + return s.initializeChainState(stopVertexID, genesisTimestamp) + } else if err != nil { + return err + } + s.lastAccepted = lastAccepted + s.persistedLastAccepted = lastAccepted + s.timestamp, err = database.GetTimestamp(s.singletonDB, timestampKey) + s.persistedTimestamp = s.timestamp + return err +} + +func (s *state) initializeChainState(stopVertexID ids.ID, genesisTimestamp time.Time) error { + genesis, err := blocks.NewStandardBlock( + stopVertexID, + 0, + genesisTimestamp, + nil, + s.parser.Codec(), + ) + if err != nil { + return err + } + + s.SetLastAccepted(genesis.ID()) + s.SetTimestamp(genesis.Timestamp()) + s.AddBlock(genesis) + return s.Commit() +} + +func (s *state) IsInitialized() (bool, error) { + return 
s.singletonDB.Has(isInitializedKey) +} + +func (s *state) SetInitialized() error { + return s.singletonDB.Put(isInitializedKey, nil) +} + +func (s *state) GetLastAccepted() ids.ID { + return s.lastAccepted +} + +func (s *state) SetLastAccepted(lastAccepted ids.ID) { + s.lastAccepted = lastAccepted +} + +func (s *state) GetTimestamp() time.Time { + return s.timestamp +} + +func (s *state) SetTimestamp(t time.Time) { + s.timestamp = t +} + +// TODO: remove status support +func (s *state) GetStatus(id ids.ID) (choices.Status, error) { + if status, exists := s.addedStatuses[id]; exists { + return status, nil + } + if status, found := s.statusCache.Get(id); found { + if status == nil { + return choices.Unknown, database.ErrNotFound + } + return *status, nil + } + + val, err := database.GetUInt32(s.statusDB, id[:]) + if err == database.ErrNotFound { + s.statusCache.Put(id, nil) + return choices.Unknown, database.ErrNotFound + } + if err != nil { + return choices.Unknown, err + } + + status := choices.Status(val) + if err := status.Valid(); err != nil { + return choices.Unknown, err + } + + s.statusCache.Put(id, &status) + return status, nil +} + +// TODO: remove status support +func (s *state) AddStatus(id ids.ID, status choices.Status) { + s.addedStatuses[id] = status +} + +func (s *state) Commit() error { + defer s.Abort() + batch, err := s.CommitBatch() + if err != nil { + return err + } + return batch.Write() +} + +func (s *state) Abort() { + s.db.Abort() +} + +func (s *state) CommitBatch() (database.Batch, error) { + if err := s.write(); err != nil { + return nil, err + } + return s.db.CommitBatch() +} + +func (s *state) Close() error { + errs := wrappers.Errs{} + errs.Add( + s.utxoDB.Close(), + s.statusDB.Close(), + s.txDB.Close(), + s.blockIDDB.Close(), + s.blockDB.Close(), + s.singletonDB.Close(), + s.db.Close(), + ) + return errs.Err +} + +func (s *state) write() error { + errs := wrappers.Errs{} + errs.Add( + s.writeUTXOs(), + s.writeTxs(), + s.writeBlockIDs(), 
+ s.writeBlocks(), + s.writeMetadata(), + s.writeStatuses(), + ) + return errs.Err +} + +func (s *state) writeUTXOs() error { + for utxoID, utxo := range s.modifiedUTXOs { + delete(s.modifiedUTXOs, utxoID) + + if utxo != nil { + if err := s.utxoState.PutUTXO(utxo); err != nil { + return fmt.Errorf("failed to add utxo: %w", err) + } + } else { + if err := s.utxoState.DeleteUTXO(utxoID); err != nil { + return fmt.Errorf("failed to remove utxo: %w", err) + } + } + } + return nil +} + +func (s *state) writeTxs() error { + for txID, tx := range s.addedTxs { + txID := txID + txBytes := tx.Bytes() + + delete(s.addedTxs, txID) + s.txCache.Put(txID, tx) + if err := s.txDB.Put(txID[:], txBytes); err != nil { + return fmt.Errorf("failed to add tx: %w", err) + } + } + return nil +} + +func (s *state) writeBlockIDs() error { + for height, blkID := range s.addedBlockIDs { + heightKey := database.PackUInt64(height) + + delete(s.addedBlockIDs, height) + s.blockIDCache.Put(height, blkID) + if err := database.PutID(s.blockIDDB, heightKey, blkID); err != nil { + return fmt.Errorf("failed to add blockID: %w", err) + } + } + return nil +} + +func (s *state) writeBlocks() error { + for blkID, blk := range s.addedBlocks { + blkID := blkID + blkBytes := blk.Bytes() + + delete(s.addedBlocks, blkID) + s.blockCache.Put(blkID, blk) + if err := s.blockDB.Put(blkID[:], blkBytes); err != nil { + return fmt.Errorf("failed to add block: %w", err) + } + } + return nil +} + +func (s *state) writeMetadata() error { + if !s.persistedTimestamp.Equal(s.timestamp) { + if err := database.PutTimestamp(s.singletonDB, timestampKey, s.timestamp); err != nil { + return fmt.Errorf("failed to write timestamp: %w", err) + } + s.persistedTimestamp = s.timestamp + } + if s.persistedLastAccepted != s.lastAccepted { + if err := database.PutID(s.singletonDB, lastAcceptedKey, s.lastAccepted); err != nil { + return fmt.Errorf("failed to write last accepted: %w", err) + } + s.persistedLastAccepted = s.lastAccepted + } + 
return nil +} + +func (s *state) writeStatuses() error { + for id, status := range s.addedStatuses { + id := id + status := status + + delete(s.addedStatuses, id) + s.statusCache.Put(id, &status) + if err := database.PutUInt32(s.statusDB, id[:], uint32(status)); err != nil { + return fmt.Errorf("failed to add status: %w", err) + } + } + return nil +} diff --git a/avalanchego/vms/avm/states/state_test.go b/avalanchego/vms/avm/states/state_test.go new file mode 100644 index 00000000..35533a84 --- /dev/null +++ b/avalanchego/vms/avm/states/state_test.go @@ -0,0 +1,314 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package states + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var ( + parser blocks.Parser + populatedUTXO *avax.UTXO + populatedUTXOID ids.ID + populatedTx *txs.Tx + populatedTxID ids.ID + populatedBlk blocks.Block + populatedBlkHeight uint64 + populatedBlkID ids.ID +) + +func init() { + var err error + parser, err = blocks.NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + }) + if err != nil { + panic(err) + } + + populatedUTXO = &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: ids.GenerateTestID(), + }, + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 1, + }, + } + populatedUTXOID = populatedUTXO.InputID() + + populatedTx = &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + 
BlockchainID: ids.GenerateTestID(), + }}} + err = parser.InitializeTx(populatedTx) + if err != nil { + panic(err) + } + populatedTxID = populatedTx.ID() + + populatedBlk, err = blocks.NewStandardBlock( + ids.GenerateTestID(), + 1, + time.Now(), + []*txs.Tx{ + { + Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + BlockchainID: ids.GenerateTestID(), + }}, + }, + }, + parser.Codec(), + ) + if err != nil { + panic(err) + } + populatedBlkHeight = populatedBlk.Height() + populatedBlkID = populatedBlk.ID() +} + +type versions struct { + chains map[ids.ID]Chain +} + +func (v *versions) GetState(blkID ids.ID) (Chain, bool) { + c, ok := v.chains[blkID] + return c, ok +} + +func TestState(t *testing.T) { + db := memdb.New() + vdb := versiondb.New(db) + s, err := New(vdb, parser, prometheus.NewRegistry()) + require.NoError(t, err) + + s.AddUTXO(populatedUTXO) + s.AddTx(populatedTx) + s.AddBlock(populatedBlk) + require.NoError(t, s.Commit()) + + s, err = New(vdb, parser, prometheus.NewRegistry()) + require.NoError(t, err) + + ChainUTXOTest(t, s) + ChainTxTest(t, s) + ChainBlockTest(t, s) +} + +func TestDiff(t *testing.T) { + db := memdb.New() + vdb := versiondb.New(db) + s, err := New(vdb, parser, prometheus.NewRegistry()) + require.NoError(t, err) + + s.AddUTXO(populatedUTXO) + s.AddTx(populatedTx) + s.AddBlock(populatedBlk) + require.NoError(t, s.Commit()) + + parentID := ids.GenerateTestID() + d, err := NewDiff(parentID, &versions{ + chains: map[ids.ID]Chain{ + parentID: s, + }, + }) + require.NoError(t, err) + + ChainUTXOTest(t, d) + ChainTxTest(t, d) + ChainBlockTest(t, d) +} + +func ChainUTXOTest(t *testing.T, c Chain) { + require := require.New(t) + + fetchedUTXO, err := c.GetUTXO(populatedUTXOID) + require.NoError(err) + + // Compare IDs because [fetchedUTXO] isn't initialized + require.Equal(populatedUTXO.InputID(), fetchedUTXO.InputID()) + + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: ids.GenerateTestID(), + }, + Asset: avax.Asset{ + ID: ids.GenerateTestID(), + }, + 
Out: &secp256k1fx.TransferOutput{ + Amt: 1, + }, + } + utxoID := utxo.InputID() + + _, err = c.GetUTXO(utxoID) + require.ErrorIs(err, database.ErrNotFound) + + c.AddUTXO(utxo) + + fetchedUTXO, err = c.GetUTXO(utxoID) + require.NoError(err) + require.Equal(utxo, fetchedUTXO) + + c.DeleteUTXO(utxoID) + + _, err = c.GetUTXO(utxoID) + require.ErrorIs(err, database.ErrNotFound) +} + +func ChainTxTest(t *testing.T, c Chain) { + require := require.New(t) + + fetchedTx, err := c.GetTx(populatedTxID) + require.NoError(err) + + // Compare IDs because [fetchedTx] differs between nil and empty fields + require.Equal(populatedTx.ID(), fetchedTx.ID()) + + // Pull again for the cached path + fetchedTx, err = c.GetTx(populatedTxID) + require.NoError(err) + require.Equal(populatedTx.ID(), fetchedTx.ID()) + + tx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + BlockchainID: ids.GenerateTestID(), + }}} + require.NoError(parser.InitializeTx(tx)) + txID := tx.ID() + + _, err = c.GetTx(txID) + require.ErrorIs(err, database.ErrNotFound) + + // Pull again for the cached path + _, err = c.GetTx(txID) + require.ErrorIs(err, database.ErrNotFound) + + c.AddTx(tx) + + fetchedTx, err = c.GetTx(txID) + require.NoError(err) + require.Equal(tx, fetchedTx) +} + +func ChainBlockTest(t *testing.T, c Chain) { + require := require.New(t) + + fetchedBlkID, err := c.GetBlockID(populatedBlkHeight) + require.NoError(err) + require.Equal(populatedBlkID, fetchedBlkID) + + fetchedBlk, err := c.GetBlock(populatedBlkID) + require.NoError(err) + require.Equal(populatedBlk.ID(), fetchedBlk.ID()) + + // Pull again for the cached path + fetchedBlkID, err = c.GetBlockID(populatedBlkHeight) + require.NoError(err) + require.Equal(populatedBlkID, fetchedBlkID) + + fetchedBlk, err = c.GetBlock(populatedBlkID) + require.NoError(err) + require.Equal(populatedBlk.ID(), fetchedBlk.ID()) + + blk, err := blocks.NewStandardBlock( + ids.GenerateTestID(), + 10, + time.Now(), + []*txs.Tx{ + { + Unsigned: 
&txs.BaseTx{BaseTx: avax.BaseTx{ + BlockchainID: ids.GenerateTestID(), + }}, + }, + }, + parser.Codec(), + ) + if err != nil { + panic(err) + } + blkID := blk.ID() + blkHeight := blk.Height() + + _, err = c.GetBlockID(blkHeight) + require.ErrorIs(err, database.ErrNotFound) + + _, err = c.GetBlock(blkID) + require.ErrorIs(err, database.ErrNotFound) + + // Pull again for the cached path + _, err = c.GetBlockID(blkHeight) + require.ErrorIs(err, database.ErrNotFound) + + _, err = c.GetBlock(blkID) + require.ErrorIs(err, database.ErrNotFound) + + c.AddBlock(blk) + + fetchedBlkID, err = c.GetBlockID(blkHeight) + require.NoError(err) + require.Equal(blkID, fetchedBlkID) + + fetchedBlk, err = c.GetBlock(blkID) + require.NoError(err) + require.Equal(blk, fetchedBlk) +} + +func TestInitializeChainState(t *testing.T) { + require := require.New(t) + + db := memdb.New() + vdb := versiondb.New(db) + s, err := New(vdb, parser, prometheus.NewRegistry()) + require.NoError(err) + + stopVertexID := ids.GenerateTestID() + genesisTimestamp := version.CortinaDefaultTime + err = s.InitializeChainState(stopVertexID, genesisTimestamp) + require.NoError(err) + + lastAcceptedID := s.GetLastAccepted() + genesis, err := s.GetBlock(lastAcceptedID) + require.NoError(err) + require.Equal(stopVertexID, genesis.Parent()) + require.Equal(genesisTimestamp.UnixNano(), genesis.Timestamp().UnixNano()) + + childBlock, err := blocks.NewStandardBlock( + genesis.ID(), + genesis.Height()+1, + genesisTimestamp, + nil, + parser.Codec(), + ) + require.NoError(err) + + s.AddBlock(childBlock) + s.SetLastAccepted(childBlock.ID()) + err = s.Commit() + require.NoError(err) + + err = s.InitializeChainState(stopVertexID, genesisTimestamp) + require.NoError(err) + + lastAcceptedID = s.GetLastAccepted() + lastAccepted, err := s.GetBlock(lastAcceptedID) + require.NoError(err) + require.Equal(genesis.ID(), lastAccepted.Parent()) +} diff --git a/avalanchego/vms/avm/states/tx_state.go 
b/avalanchego/vms/avm/states/tx_state.go deleted file mode 100644 index ca0c48ff..00000000 --- a/avalanchego/vms/avm/states/tx_state.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package states - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/cache/metercacher" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/avm/txs" -) - -const txCacheSize = 8192 - -var _ TxState = &txState{} - -// TxState is a thin wrapper around a database to provide, caching, -// serialization, and de-serialization of transactions. -type TxState interface { - // Tx attempts to load a transaction from storage. - GetTx(txID ids.ID) (*txs.Tx, error) - - // PutTx saves the provided transaction to storage. - PutTx(txID ids.ID, tx *txs.Tx) error - - // DeleteTx removes the provided transaction from storage. - DeleteTx(txID ids.ID) error -} - -type txState struct { - parser txs.Parser - - // Caches TxID -> *Tx. If the *Tx is nil, that means the tx is not in - // storage. 
- txCache cache.Cacher - txDB database.Database -} - -func NewTxState(db database.Database, parser txs.Parser, metrics prometheus.Registerer) (TxState, error) { - cache, err := metercacher.New( - "tx_cache", - metrics, - &cache.LRU{Size: txCacheSize}, - ) - return &txState{ - parser: parser, - - txCache: cache, - txDB: db, - }, err -} - -func (s *txState) GetTx(txID ids.ID) (*txs.Tx, error) { - if txIntf, found := s.txCache.Get(txID); found { - if txIntf == nil { - return nil, database.ErrNotFound - } - return txIntf.(*txs.Tx), nil - } - - txBytes, err := s.txDB.Get(txID[:]) - if err == database.ErrNotFound { - s.txCache.Put(txID, nil) - return nil, database.ErrNotFound - } - if err != nil { - return nil, err - } - - // The key was in the database - tx, err := s.parser.ParseGenesis(txBytes) - if err != nil { - return nil, err - } - - s.txCache.Put(txID, tx) - return tx, nil -} - -func (s *txState) PutTx(txID ids.ID, tx *txs.Tx) error { - s.txCache.Put(txID, tx) - return s.txDB.Put(txID[:], tx.Bytes()) -} - -func (s *txState) DeleteTx(txID ids.ID) error { - s.txCache.Put(txID, nil) - return s.txDB.Delete(txID[:]) -} diff --git a/avalanchego/vms/avm/states/tx_state_test.go b/avalanchego/vms/avm/states/tx_state_test.go deleted file mode 100644 index d6d07269..00000000 --- a/avalanchego/vms/avm/states/tx_state_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package states - -import ( - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/vms/avm/fxs" - "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/nftfx" - "github.com/ava-labs/avalanchego/vms/propertyfx" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -var ( - networkID uint32 = 10 - chainID = ids.ID{5, 4, 3, 2, 1} - assetID = ids.ID{1, 2, 3} - keys = crypto.BuildTestKeys() -) - -func TestTxState(t *testing.T) { - require := require.New(t) - - db := memdb.New() - parser, err := txs.NewParser([]fxs.Fx{ - &secp256k1fx.Fx{}, - &nftfx.Fx{}, - &propertyfx.Fx{}, - }) - require.NoError(err) - - stateIntf, err := NewTxState(db, parser, prometheus.NewRegistry()) - require.NoError(err) - - s := stateIntf.(*txState) - - _, err = s.GetTx(ids.Empty) - require.Equal(database.ErrNotFound, err) - - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAvax, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - }, - }, - } - - err = tx.SignSECP256K1Fx(parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}) - require.NoError(err) - - err = s.PutTx(ids.Empty, tx) - require.NoError(err) - - loadedTx, err := s.GetTx(ids.Empty) - require.NoError(err) - require.Equal(tx.ID(), loadedTx.ID()) - - s.txCache.Flush() - - loadedTx, err = s.GetTx(ids.Empty) - require.NoError(err) - 
require.Equal(tx.ID(), loadedTx.ID()) - - err = s.DeleteTx(ids.Empty) - require.NoError(err) - - _, err = s.GetTx(ids.Empty) - require.Equal(database.ErrNotFound, err) - - s.txCache.Flush() - - _, err = s.GetTx(ids.Empty) - require.Equal(database.ErrNotFound, err) -} diff --git a/avalanchego/vms/avm/states/versions.go b/avalanchego/vms/avm/states/versions.go new file mode 100644 index 00000000..581ec3b3 --- /dev/null +++ b/avalanchego/vms/avm/states/versions.go @@ -0,0 +1,14 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package states + +import ( + "github.com/ava-labs/avalanchego/ids" +) + +type Versions interface { + // GetState returns the state of the chain after [blkID] has been accepted. + // If the state is not known, `false` will be returned. + GetState(blkID ids.ID) (Chain, bool) +} diff --git a/avalanchego/vms/avm/static_client.go b/avalanchego/vms/avm/static_client.go index cb82a7fa..78014785 100644 --- a/avalanchego/vms/avm/static_client.go +++ b/avalanchego/vms/avm/static_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ StaticClient = &staticClient{} +var _ StaticClient = (*staticClient)(nil) // StaticClient for interacting with the AVM static api type StaticClient interface { @@ -25,13 +25,12 @@ type staticClient struct { // NewClient returns an AVM client for interacting with the avm static api func NewStaticClient(uri string) StaticClient { return &staticClient{requester: rpc.NewEndpointRequester( - uri+"/ext/vm/avm", - "avm", + uri + "/ext/vm/avm", )} } func (c *staticClient) BuildGenesis(ctx context.Context, args *BuildGenesisArgs, options ...rpc.Option) (resp *BuildGenesisReply, err error) { resp = &BuildGenesisReply{} - err = c.requester.SendRequest(ctx, "buildGenesis", args, resp, options...) + err = c.requester.SendRequest(ctx, "avm.buildGenesis", args, resp, options...) return resp, err } diff --git a/avalanchego/vms/avm/static_service.go b/avalanchego/vms/avm/static_service.go index 040dff34..27554606 100644 --- a/avalanchego/vms/avm/static_service.go +++ b/avalanchego/vms/avm/static_service.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm @@ -11,6 +11,7 @@ import ( stdjson "encoding/json" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" @@ -26,23 +27,23 @@ import ( var ( errUnknownAssetType = errors.New("unknown asset type") - _ avax.TransferableIn = &secp256k1fx.TransferInput{} - _ verify.State = &secp256k1fx.MintOutput{} - _ avax.TransferableOut = &secp256k1fx.TransferOutput{} - _ fxs.FxOperation = &secp256k1fx.MintOperation{} - _ verify.Verifiable = &secp256k1fx.Credential{} - - _ verify.State = &nftfx.MintOutput{} - _ verify.State = &nftfx.TransferOutput{} - _ fxs.FxOperation = &nftfx.MintOperation{} - _ fxs.FxOperation = &nftfx.TransferOperation{} - _ verify.Verifiable = &nftfx.Credential{} - - _ verify.State = &propertyfx.MintOutput{} - _ verify.State = &propertyfx.OwnedOutput{} - _ fxs.FxOperation = &propertyfx.MintOperation{} - _ fxs.FxOperation = &propertyfx.BurnOperation{} - _ verify.Verifiable = &propertyfx.Credential{} + _ avax.TransferableIn = (*secp256k1fx.TransferInput)(nil) + _ verify.State = (*secp256k1fx.MintOutput)(nil) + _ avax.TransferableOut = (*secp256k1fx.TransferOutput)(nil) + _ fxs.FxOperation = (*secp256k1fx.MintOperation)(nil) + _ verify.Verifiable = (*secp256k1fx.Credential)(nil) + + _ verify.State = (*nftfx.MintOutput)(nil) + _ verify.State = (*nftfx.TransferOutput)(nil) + _ fxs.FxOperation = (*nftfx.MintOperation)(nil) + _ fxs.FxOperation = (*nftfx.TransferOperation)(nil) + _ verify.Verifiable = (*nftfx.Credential)(nil) + + _ verify.State = (*propertyfx.MintOutput)(nil) + _ verify.State = (*propertyfx.OwnedOutput)(nil) + _ fxs.FxOperation = (*propertyfx.MintOperation)(nil) + _ fxs.FxOperation = (*propertyfx.BurnOperation)(nil) + _ verify.Verifiable = (*propertyfx.Credential)(nil) ) // StaticService defines the base service for the asset vm @@ -75,7 +76,7 @@ type 
BuildGenesisReply struct { // BuildGenesis returns the UTXOs such that at least one address in [args.Addresses] is // referenced in the UTXO. -func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { +func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { parser, err := txs.NewParser([]fxs.Fx{ &secp256k1fx.Fx{}, &nftfx.Fx{}, @@ -175,10 +176,10 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r initialState.Sort(genesisCodec) asset.States = append(asset.States, initialState) } - asset.Sort() + utils.Sort(asset.States) g.Txs = append(g.Txs, &asset) } - g.Sort() + utils.Sort(g.Txs) b, err := genesisCodec.Marshal(txs.CodecVersion, &g) if err != nil { diff --git a/avalanchego/vms/avm/static_service_test.go b/avalanchego/vms/avm/static_service_test.go index df8b285f..ed2b7e43 100644 --- a/avalanchego/vms/avm/static_service_test.go +++ b/avalanchego/vms/avm/static_service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -20,7 +20,7 @@ var addrStrArray = []string{ "Jz9ayEDt7dx9hDx45aXALujWmL9ZUuqe7", } -var testHRP = constants.NetworkIDToHRP[networkID] +var testHRP = constants.NetworkIDToHRP[constants.UnitTestID] func TestBuildGenesis(t *testing.T) { ss := CreateStaticService() diff --git a/avalanchego/vms/avm/tx_execute.go b/avalanchego/vms/avm/tx_execute.go deleted file mode 100644 index 1e84b147..00000000 --- a/avalanchego/vms/avm/tx_execute.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avm - -import ( - "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/components/avax" -) - -var _ txs.Visitor = &executeTx{} - -type executeTx struct { - tx *txs.Tx - batch database.Batch - sharedMemory atomic.SharedMemory - parser txs.Parser -} - -func (et *executeTx) BaseTx(t *txs.BaseTx) error { - return et.batch.Write() -} - -func (et *executeTx) ImportTx(t *txs.ImportTx) error { - utxoIDs := make([][]byte, len(t.ImportedIns)) - for i, in := range t.ImportedIns { - inputID := in.UTXOID.InputID() - utxoIDs[i] = inputID[:] - } - return et.sharedMemory.Apply( - map[ids.ID]*atomic.Requests{ - t.SourceChain: { - RemoveRequests: utxoIDs, - }, - }, - et.batch, - ) -} - -func (et *executeTx) ExportTx(t *txs.ExportTx) error { - txID := et.tx.ID() - - elems := make([]*atomic.Element, len(t.ExportedOuts)) - codec := et.parser.Codec() - for i, out := range t.ExportedOuts { - utxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: uint32(len(t.Outs) + i), - }, - Asset: avax.Asset{ID: out.AssetID()}, - Out: out.Out, - } - - utxoBytes, err := codec.Marshal(txs.CodecVersion, utxo) - if err != nil { - return err - } - - inputID := utxo.InputID() - elem := &atomic.Element{ - Key: inputID[:], - Value: utxoBytes, - } - if out, ok := utxo.Out.(avax.Addressable); ok { - elem.Traits = out.Addresses() - } - - elems[i] = elem - } - - return et.sharedMemory.Apply( - map[ids.ID]*atomic.Requests{ - t.DestinationChain: { - PutRequests: elems, - }, - }, - et.batch, - ) -} - -func (et *executeTx) CreateAssetTx(t *txs.CreateAssetTx) error { - return et.BaseTx(&t.BaseTx) -} - -func (et *executeTx) OperationTx(t *txs.OperationTx) error { - return et.BaseTx(&t.BaseTx) -} diff --git a/avalanchego/vms/avm/tx_init.go b/avalanchego/vms/avm/tx_init.go index b0d023fe..2b016bdd 100644 --- 
a/avalanchego/vms/avm/tx_init.go +++ b/avalanchego/vms/avm/tx_init.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/txs" ) -var _ txs.Visitor = &txInit{} +var _ txs.Visitor = (*txInit)(nil) // Init initializes FxID where required type txInit struct { diff --git a/avalanchego/vms/avm/tx_semantic_verify.go b/avalanchego/vms/avm/tx_semantic_verify.go deleted file mode 100644 index e66039cf..00000000 --- a/avalanchego/vms/avm/tx_semantic_verify.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" -) - -var _ txs.Visitor = &txSemanticVerify{} - -// SemanticVerify that this transaction is well-formed. -type txSemanticVerify struct { - tx *txs.Tx - vm *VM -} - -func (t *txSemanticVerify) BaseTx(tx *txs.BaseTx) error { - for i, in := range tx.Ins { - // Note: Verification of the length of [t.tx.Creds] happens during - // syntactic verification, which happens before semantic verification. 
- cred := t.tx.Creds[i].Verifiable - if err := t.vm.verifyTransfer(t.tx.Unsigned, in, cred); err != nil { - return err - } - } - - for _, out := range tx.Outs { - fxIndex, err := t.vm.getFx(out.Out) - if err != nil { - return err - } - - if assetID := out.AssetID(); !t.vm.verifyFxUsage(fxIndex, assetID) { - return errIncompatibleFx - } - } - return nil -} - -func (t *txSemanticVerify) ImportTx(tx *txs.ImportTx) error { - if err := t.BaseTx(&tx.BaseTx); err != nil { - return err - } - - if !t.vm.bootstrapped { - return nil - } - - if err := verify.SameSubnet(t.vm.ctx, tx.SourceChain); err != nil { - return err - } - - utxoIDs := make([][]byte, len(tx.ImportedIns)) - for i, in := range tx.ImportedIns { - inputID := in.UTXOID.InputID() - utxoIDs[i] = inputID[:] - } - - allUTXOBytes, err := t.vm.ctx.SharedMemory.Get(tx.SourceChain, utxoIDs) - if err != nil { - return err - } - - codec := t.vm.parser.Codec() - offset := tx.BaseTx.NumCredentials() - for i, in := range tx.ImportedIns { - utxo := avax.UTXO{} - if _, err := codec.Unmarshal(allUTXOBytes[i], &utxo); err != nil { - return err - } - - // Note: Verification of the length of [t.tx.Creds] happens during - // syntactic verification, which happens before semantic verification. - cred := t.tx.Creds[i+offset].Verifiable - if err := t.vm.verifyTransferOfUTXO(tx, in, cred, &utxo); err != nil { - return err - } - } - return nil -} - -func (t *txSemanticVerify) ExportTx(tx *txs.ExportTx) error { - if t.vm.bootstrapped { - if err := verify.SameSubnet(t.vm.ctx, tx.DestinationChain); err != nil { - return err - } - } - - now := t.vm.clock.Time() - for _, out := range tx.ExportedOuts { - fxIndex, err := t.vm.getFx(out.Out) - if err != nil { - return err - } - - assetID := out.AssetID() - if !t.vm.IsBanffActivated(now) { - // TODO: Remove this check once the Banff network upgrade is - // complete. - // - // Banff network upgrade allows exporting of all assets to the - // P-chain. 
- if assetID != t.vm.ctx.AVAXAssetID && tx.DestinationChain == constants.PlatformChainID { - return errWrongAssetID - } - } - - if !t.vm.verifyFxUsage(fxIndex, assetID) { - return errIncompatibleFx - } - } - - return t.BaseTx(&tx.BaseTx) -} - -func (t *txSemanticVerify) OperationTx(tx *txs.OperationTx) error { - if err := t.BaseTx(&tx.BaseTx); err != nil { - return err - } - - offset := tx.BaseTx.NumCredentials() - for i, op := range tx.Ops { - // Note: Verification of the length of [t.tx.Creds] happens during - // syntactic verification, which happens before semantic verification. - cred := t.tx.Creds[i+offset].Verifiable - if err := t.vm.verifyOperation(tx, op, cred); err != nil { - return err - } - } - return nil -} - -func (t *txSemanticVerify) CreateAssetTx(tx *txs.CreateAssetTx) error { - return t.BaseTx((&tx.BaseTx)) -} diff --git a/avalanchego/vms/avm/tx_semantic_verify_test.go b/avalanchego/vms/avm/tx_semantic_verify_test.go deleted file mode 100644 index 65e44182..00000000 --- a/avalanchego/vms/avm/tx_semantic_verify_test.go +++ /dev/null @@ -1,1684 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avm - -import ( - "math" - "testing" - "time" - - "github.com/ava-labs/avalanchego/api/keystore" - "github.com/ava-labs/avalanchego/chains/atomic" - "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/avm/fxs" - "github.com/ava-labs/avalanchego/vms/avm/txs" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -func TestBaseTxSemanticVerify(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - }, - }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - err := tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err != nil { - t.Fatal(err) - } -} - -func TestBaseTxSemanticVerifyUnknownFx(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVMWithArgs( - t, - []*common.Fx{{ - ID: ids.GenerateTestID(), - Fx: &FxTest{ - InitializeF: func(vmIntf interface{}) error { - vm := vmIntf.(secp256k1fx.VM) - return vm.CodecRegistry().RegisterType(&avax.TestVerifiable{}) - }, - }, - }}, - 
nil, - ) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }, - }, - }, - }, - Creds: []*fxs.FxCredential{{ - Verifiable: &avax.TestVerifiable{}, - }}, - } - if err := vm.parser.InitializeTx(tx); err != nil { - t.Fatal(err) - } - - err := tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("should have erred due to an unknown feature extension") - } -} - -func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - }, - }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - err := tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("should have erred due to an asset ID mismatch") - } -} - -func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { - ctx := NewContext(t) - vm := &VM{} - ctx.Lock.Lock() - 
defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - fx := &FxTest{} - fx.InitializeF = func(vmIntf interface{}) error { - vm := vmIntf.(secp256k1fx.VM) - return vm.CodecRegistry().RegisterType(&avax.TestTransferable{}) - } - - genesisBytes := BuildGenesisTest(t) - issuer := make(chan common.Message, 1) - err := vm.Initialize( - ctx, - manager.NewMemDB(version.Semantic1_0_0), - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty, - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.ID{1}, - Fx: fx, - }, - }, - nil, - ) - if err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 - - if err = vm.SetState(snow.Bootstrapping); err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil { - t.Fatal(err) - } - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &avax.TestTransferable{}, - }}, - }, - }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - err = tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("should have erred due to an unsupported fx") - } -} - -func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - 
In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }, - }, - }, - }, - Creds: []*fxs.FxCredential{ - { - Verifiable: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{{}}, - }, - }, - }, - } - if err := vm.parser.InitializeTx(tx); err != nil { - t.Fatal(err) - } - - err := tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("Invalid credential should have failed verification") - } -} - -func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - }, - }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - err := tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("Unknown UTXO should have failed verification") - } -} - -func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: math.MaxUint32, - }, - Asset: 
avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - }, - }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - err := tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("Invalid UTXO should have failed verification") - } -} - -func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { - genesisBytes, issuer, vm, _ := GenesisVM(t) - ctx := vm.ctx - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - pendingTx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: genesisTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }, - }} - if err := pendingTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - txID, err := vm.IssueTx(pendingTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - ctx.Lock.Unlock() - - <-issuer - - ctx.Lock.Lock() - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - vm.PendingTxs() - - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: 
&secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - }, - }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - err = tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("Invalid UTXO should have failed verification") - } -} - -func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { - genesisBytes, issuer, vm, _ := GenesisVM(t) - ctx := vm.ctx - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - pendingTx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: genesisTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }, - }} - if err := pendingTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - txID, err := vm.IssueTx(pendingTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - ctx.Lock.Unlock() - - <-issuer - - ctx.Lock.Lock() - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - vm.PendingTxs() - - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 
startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - }, - }} - - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - err = tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("Wrong asset ID should have failed verification") - } -} - -func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - ctx := NewContext(t) - - issuer := make(chan common.Message, 1) - - ctx.Lock.Lock() - - vm := &VM{} - - fx := &FxTest{} - fx.InitializeF = func(vmIntf interface{}) error { - vm := vmIntf.(secp256k1fx.VM) - return vm.CodecRegistry().RegisterType(&avax.TestVerifiable{}) - } - - err := vm.Initialize( - ctx, - manager.NewMemDB(version.Semantic1_0_0), - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.ID{1}, - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty, - Fx: fx, - }, - }, - nil, - ) - if err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 - - if err = vm.SetState(snow.Bootstrapping); err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil { - t.Fatal(err) - } - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - pendingTx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: genesisTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }, - }} - if err := 
pendingTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - txID, err := vm.IssueTx(pendingTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - ctx.Lock.Unlock() - - <-issuer - - ctx.Lock.Lock() - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - vm.PendingTxs() - - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }, - }, - }, - }, - Creds: []*fxs.FxCredential{{ - Verifiable: &avax.TestVerifiable{}, - }}, - } - if err := vm.parser.InitializeTx(tx); err != nil { - t.Fatal(err) - } - - err = tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("Unsupported feature extension should have failed verification") - } -} - -func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - ctx := NewContext(t) - - issuer := make(chan common.Message, 1) - - ctx.Lock.Lock() - - vm := &VM{} - - fx := &FxTest{} - fx.InitializeF = func(vmIntf interface{}) error { - vm := vmIntf.(secp256k1fx.VM) - return vm.CodecRegistry().RegisterType(&avax.TestVerifiable{}) - } - - err := vm.Initialize( - ctx, - manager.NewMemDB(version.Semantic1_0_0), - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.ID{1}, - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty, - Fx: fx, - }, - }, - nil, - ) - if err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 - - if err = vm.SetState(snow.Bootstrapping); err != nil { - t.Fatal(err) - } - - err = vm.SetState(snow.NormalOp) - if err != nil { - t.Fatal(err) - } - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) 
- - pendingTx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: genesisTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Locktime: 0, - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }, - }} - if err := pendingTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - txID, err := vm.IssueTx(pendingTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - ctx.Lock.Unlock() - - <-issuer - - ctx.Lock.Lock() - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - vm.PendingTxs() - - tx := &txs.Tx{ - Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }, - }, - }, - }, - Creds: []*fxs.FxCredential{{ - Verifiable: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{{}}, - }, - }}, - } - - if err := vm.parser.InitializeTx(tx); err != nil { - t.Fatal(err) - } - - err = tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("Invalid signature should have failed verification") - } -} - -func TestBaseTxSemanticVerifyMalformedOutput(t *testing.T) { - _, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err 
:= vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - txBytes := []byte{ - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x66, - 0x05, 0x04, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x70, 0xae, 0x33, 0xb5, - 0x60, 0x9c, 0xd8, 0x9a, 0x72, 0x92, 0x4f, 0xa2, - 0x88, 0x3f, 0x9b, 0xf1, 0xc6, 0xd8, 0x9f, 0x07, - 0x09, 0x9b, 0x2a, 0xd7, 0x1b, 0xe1, 0x7c, 0x5d, - 0x44, 0x93, 0x23, 0xdb, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x50, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x70, 0xae, 0x33, 0xb5, 0x60, 0x9c, 0xd8, 0x9a, - 0x72, 0x92, 0x4f, 0xa2, 0x88, 0x3f, 0x9b, 0xf1, - 0xc6, 0xd8, 0x9f, 0x07, 0x09, 0x9b, 0x2a, 0xd7, - 0x1b, 0xe1, 0x7c, 0x5d, 0x44, 0x93, 0x23, 0xdb, - 0x00, 0x00, 0x00, 0x01, 0x70, 0xae, 0x33, 0xb5, - 0x60, 0x9c, 0xd8, 0x9a, 0x72, 0x92, 0x4f, 0xa2, - 0x88, 0x3f, 0x9b, 0xf1, 0xc6, 0xd8, 0x9f, 0x07, - 0x09, 0x9b, 0x2a, 0xd7, 0x1b, 0xe1, 0x7c, 0x5d, - 0x44, 0x93, 0x23, 0xdb, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x50, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x09, - 0x00, 0x00, 0x00, 0x01, 0x50, 0x6b, 0xd9, 0x2d, - 0xe5, 0xeb, 0xc2, 0xbf, 0x8f, 0xaa, 0xf1, 0x7d, - 0xbb, 0xae, 0xb3, 0xf3, 0x13, 0x9e, 0xae, 0xb4, - 0xad, 0x32, 0x95, 0x6e, 0x92, 0x74, 0xf9, 0x53, - 0x0e, 0xcc, 0x03, 0xd8, 0x02, 0xab, 0x1c, 0x16, - 0x52, 0xd0, 0xe3, 0xfc, 0xe5, 0x93, 0xa9, 0x8e, - 0x96, 0x1e, 0x83, 0xf0, 0x12, 0x27, 0x66, 0x9f, - 0x03, 0x56, 0x9f, 0x17, 0x1b, 0xd1, 0x22, 0x90, - 0xfd, 0x64, 0xf5, 0x73, 0x01, - } - - tx := &txs.Tx{} - if _, err := vm.parser.Codec().Unmarshal(txBytes, tx); err == nil { - t.Fatalf("should have failed to unmarshal the tx") - } -} - -func TestBaseTxSemanticVerifyInvalidFxOutput(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVMWithArgs( - 
t, - []*common.Fx{{ - ID: ids.GenerateTestID(), - Fx: &FxTest{ - InitializeF: func(vmIntf interface{}) error { - vm := vmIntf.(secp256k1fx.VM) - return vm.CodecRegistry().RegisterType(&avax.TestTransferable{}) - }, - }, - }}, - nil, - ) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - tx := &txs.Tx{Unsigned: &txs.BaseTx{ - BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 2, - }, - Asset: avax.Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: genesisTx.ID()}, - Out: &avax.TestTransferable{ - Val: 1, - }, - }}, - }, - }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - err := tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx, - vm: vm, - }) - if err == nil { - t.Fatalf("should have erred due to sending funds to an un-authorized fx") - } -} - -func TestExportTxSemanticVerify(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - avaxID := genesisTx.ID() - rawTx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: 
[]*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }} - - if err := rawTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - tx, err := vm.ParseTx(rawTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - utx, ok := tx.(*UniqueTx) - if !ok { - t.Fatalf("wrong tx type") - } - - err = rawTx.Unsigned.Visit(&txSemanticVerify{ - tx: utx.Tx, - vm: vm, - }) - if err != nil { - t.Fatal(err) - } -} - -func TestExportTxSemanticVerifyUnknownCredFx(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - avaxID := genesisTx.ID() - rawTx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }} - if err := rawTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - tx, err := vm.ParseTx(rawTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - utx, ok := tx.(*UniqueTx) - if !ok { - t.Fatalf("wrong tx type") - } - - 
utx.Tx.Creds[0].Verifiable = nil - err = rawTx.Unsigned.Visit(&txSemanticVerify{ - tx: utx.Tx, - vm: vm, - }) - if err == nil { - t.Fatalf("should have erred due to an unknown credential fx") - } -} - -func TestExportTxSemanticVerifyMissingUTXO(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - avaxID := genesisTx.ID() - rawTx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 1000, - }, - Asset: avax.Asset{ID: avaxID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }} - - if err := rawTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - tx, err := vm.ParseTx(rawTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - utx, ok := tx.(*UniqueTx) - if !ok { - t.Fatalf("wrong tx type") - } - - err = rawTx.Unsigned.Visit(&txSemanticVerify{ - tx: utx.Tx, - vm: vm, - }) - if err == nil { - t.Fatalf("should have erred due to an unknown utxo") - } -} - -// Test that we can't create an output of by consuming a UTXO that doesn't exist -func TestExportTxSemanticVerifyInvalidAssetID(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := 
GetAVAXTxFromGenesisTest(genesisBytes, t) - avaxID := genesisTx.ID() - assetID := avaxID - // so the inputs below are sorted - copy(assetID[len(assetID)-5:], []byte{255, 255, 255, 255}) - rawTx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }, - { - UTXOID: avax.UTXOID{ - TxID: assetID, // This tx doesn't exist - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, // This asset doesn't exist - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }, - }, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }} - if err := rawTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{ - { - keys[0], - }, - { - keys[0], - }, - }); err != nil { - t.Fatal(err) - } - - tx, err := vm.ParseTx(rawTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - utx, ok := tx.(*UniqueTx) - if !ok { - t.Fatalf("wrong tx type") - } - - err = rawTx.Unsigned.Visit(&txSemanticVerify{ - tx: utx.Tx, - vm: vm, - }) - if err == nil { - t.Fatalf("should have erred due to an invalid asset ID") - } -} - -func TestExportTxSemanticVerifyInvalidFx(t *testing.T) { - genesisBytes := BuildGenesisTest(t) - ctx := NewContext(t) - - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - - m := atomic.NewMemory(prefixdb.New([]byte{0}, baseDBManager.Current().Database)) - ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - - 
ctx.Lock.Lock() - - userKeystore, err := keystore.CreateTestKeystore() - if err != nil { - t.Fatal(err) - } - if err := userKeystore.CreateUser(username, password); err != nil { - t.Fatal(err) - } - ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - - avaxID := genesisTx.ID() - - issuer := make(chan common.Message, 1) - vm := &VM{} - err = vm.Initialize( - ctx, - baseDBManager.NewPrefixDBManager([]byte{1}), - genesisBytes, - nil, - nil, - issuer, - []*common.Fx{ - { - ID: ids.Empty, - Fx: &secp256k1fx.Fx{}, - }, - { - ID: ids.Empty.Prefix(0), - Fx: &FxTest{ - InitializeF: func(vmIntf interface{}) error { - vm := vmIntf.(secp256k1fx.VM) - return vm.CodecRegistry().RegisterType(&avax.TestVerifiable{}) - }, - }, - }, - }, - nil, - ) - if err != nil { - t.Fatal(err) - } - vm.batchTimeout = 0 - - if err := vm.SetState(snow.Bootstrapping); err != nil { - t.Fatal(err) - } - - if err := vm.SetState(snow.NormalOp); err != nil { - t.Fatal(err) - } - - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - rawTx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }} - if err := rawTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { - t.Fatal(err) - } - - tx, err := 
vm.ParseTx(rawTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - utx, ok := tx.(*UniqueTx) - if !ok { - t.Fatalf("wrong tx type") - } - - utx.Tx.Creds[0].Verifiable = &avax.TestVerifiable{} - err = rawTx.Unsigned.Visit(&txSemanticVerify{ - tx: utx.Tx, - vm: vm, - }) - if err == nil { - t.Fatalf("should have erred due to using an invalid fxID") - } -} - -func TestExportTxSemanticVerifyInvalidTransfer(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - genesisTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - avaxID := genesisTx.ID() - rawTx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }}, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: avaxID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }} - if err := rawTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[1]}}); err != nil { - t.Fatal(err) - } - - tx, err := vm.ParseTx(rawTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - utx, ok := tx.(*UniqueTx) - if !ok { - t.Fatalf("wrong tx type") - } - - err = rawTx.Unsigned.Visit(&txSemanticVerify{ - tx: utx.Tx, - vm: vm, - }) - if err == nil { - t.Fatalf("should have erred due to an invalid credential") - } -} - -func TestExportTxSemanticVerifyTransferCustomAssetBeforeBanff(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := 
vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - vm.clock.Set(testBanffTime.Add(-time.Second)) - - genesisAvaxTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - avaxID := genesisAvaxTx.ID() - - genesisCustomAssetTx := GetCreateTxFromGenesisTest(t, genesisBytes, "myFixedCapAsset") - customAssetID := genesisCustomAssetTx.ID() - - rawTx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: customAssetID, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: customAssetID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }, - { - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }, - }, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{ - { - Asset: avax.Asset{ID: customAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - { - Asset: avax.Asset{ID: avaxID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - }} - - err := rawTx.SignSECP256K1Fx( - vm.parser.Codec(), - [][]*crypto.PrivateKeySECP256K1R{ - {keys[0]}, - {keys[0]}, - }, - ) - if err != nil { - t.Fatal(err) - } - - tx, err := vm.ParseTx(rawTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - utx, ok := tx.(*UniqueTx) - if !ok { - t.Fatalf("wrong tx type") - } - - err = rawTx.Unsigned.Visit(&txSemanticVerify{ - tx: utx.Tx, - vm: vm, - }) - if err != errWrongAssetID { - t.Fatalf("should have erred 
due to an invalid assetID") - } -} - -func TestExportTxSemanticVerifyTransferCustomAssetAfterBanff(t *testing.T) { - genesisBytes, _, vm, _ := GenesisVM(t) - ctx := vm.ctx - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - ctx.Lock.Unlock() - }() - - vm.clock.Set(testBanffTime.Add(time.Second)) - - genesisAvaxTx := GetAVAXTxFromGenesisTest(genesisBytes, t) - avaxID := genesisAvaxTx.ID() - - genesisCustomAssetTx := GetCreateTxFromGenesisTest(t, genesisBytes, "myFixedCapAsset") - customAssetID := genesisCustomAssetTx.ID() - - rawTx := &txs.Tx{Unsigned: &txs.ExportTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: customAssetID, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: customAssetID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }, - { - UTXOID: avax.UTXOID{ - TxID: avaxID, - OutputIndex: 2, - }, - Asset: avax.Asset{ID: avaxID}, - In: &secp256k1fx.TransferInput{ - Amt: startBalance, - Input: secp256k1fx.Input{SigIndices: []uint32{0}}, - }, - }, - }, - }}, - DestinationChain: constants.PlatformChainID, - ExportedOuts: []*avax.TransferableOutput{ - { - Asset: avax.Asset{ID: customAssetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - { - Asset: avax.Asset{ID: avaxID}, - Out: &secp256k1fx.TransferOutput{ - Amt: startBalance - vm.TxFee, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - }} - - err := rawTx.SignSECP256K1Fx( - vm.parser.Codec(), - [][]*crypto.PrivateKeySECP256K1R{ - {keys[0]}, - {keys[0]}, - }, - ) - if err != nil { - t.Fatal(err) - } - - tx, err := vm.ParseTx(rawTx.Bytes()) - if err != nil { - t.Fatal(err) - } - - utx, ok 
:= tx.(*UniqueTx) - if !ok { - t.Fatalf("wrong tx type") - } - - err = rawTx.Unsigned.Visit(&txSemanticVerify{ - tx: utx.Tx, - vm: vm, - }) - if err != nil { - t.Fatal(err) - } -} diff --git a/avalanchego/vms/avm/txs/base_tx.go b/avalanchego/vms/avm/txs/base_tx.go index 450962a4..617769d3 100644 --- a/avalanchego/vms/avm/txs/base_tx.go +++ b/avalanchego/vms/avm/txs/base_tx.go @@ -1,19 +1,19 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) var ( - _ UnsignedTx = &BaseTx{} - _ secp256k1fx.UnsignedTx = &BaseTx{} + _ UnsignedTx = (*BaseTx)(nil) + _ secp256k1fx.UnsignedTx = (*BaseTx)(nil) ) // BaseTx is the basis of all transactions. 
@@ -29,7 +29,7 @@ func (t *BaseTx) InitCtx(ctx *snow.Context) { } } -func (t *BaseTx) Initialize(bytes []byte) { +func (t *BaseTx) SetBytes(bytes []byte) { t.bytes = bytes } @@ -37,29 +37,12 @@ func (t *BaseTx) Bytes() []byte { return t.bytes } -func (t *BaseTx) SyntacticVerify( - ctx *snow.Context, - c codec.Manager, - txFeeAssetID ids.ID, - txFee uint64, - _ uint64, - _ int, -) error { - if t == nil { - return errNilTx +func (t *BaseTx) InputIDs() set.Set[ids.ID] { + inputIDs := set.NewSet[ids.ID](len(t.Ins)) + for _, in := range t.Ins { + inputIDs.Add(in.InputID()) } - - if err := t.BaseTx.Verify(ctx); err != nil { - return err - } - - return avax.VerifyTx( - txFee, - txFeeAssetID, - [][]*avax.TransferableInput{t.Ins}, - [][]*avax.TransferableOutput{t.Outs}, - c, - ) + return inputIDs } func (t *BaseTx) Visit(v Visitor) error { diff --git a/avalanchego/vms/avm/txs/base_tx_test.go b/avalanchego/vms/avm/txs/base_tx_test.go index 00f2172a..0259c3eb 100644 --- a/avalanchego/vms/avm/txs/base_tx_test.go +++ b/avalanchego/vms/avm/txs/base_tx_test.go @@ -1,23 +1,31 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( - "bytes" - "math" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var ( + chainID = ids.ID{5, 4, 3, 2, 1} + assetID = ids.ID{1, 2, 3} + keys = secp256k1.TestKeys() +) + func TestBaseTxSerialization(t *testing.T) { + require := require.New(t) + expected := []byte{ // Codec version: 0x00, 0x00, @@ -84,7 +92,7 @@ func TestBaseTxSerialization(t *testing.T) { } tx := &Tx{Unsigned: &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Outs: []*avax.TransferableOutput{{ Asset: avax.Asset{ID: assetID}, @@ -117,15 +125,16 @@ func TestBaseTxSerialization(t *testing.T) { Memo: []byte{0x00, 0x01, 0x02, 0x03}, }}} - c := setupCodec() - if err := tx.SignSECP256K1Fx(c, nil); err != nil { - t.Fatal(err) - } - require.Equal(t, tx.ID().String(), "zeqT8FTnRAxes7QQQYkaWhNkHavd9d6aCdH8TQu2Mx5KEydEz") + parser, err := NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + }) + require.NoError(err) + + require.NoError(parser.InitializeTx(tx)) + require.Equal(tx.ID().String(), "zeqT8FTnRAxes7QQQYkaWhNkHavd9d6aCdH8TQu2Mx5KEydEz") + result := tx.Bytes() - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } + require.Equal(expected, result) credBytes := []byte{ // type id @@ -176,169 +185,30 @@ func TestBaseTxSerialization(t *testing.T) { 0x5d, 0x73, 0x6d, 0x94, 0xfc, 0x80, 0xbc, 0x73, 0x5f, 0x51, 0xc8, 0x06, 0xd7, 0x43, 0x00, } - if err := tx.SignSECP256K1Fx(c, [][]*crypto.PrivateKeySECP256K1R{{keys[0], keys[0]}, {keys[0], keys[0]}}); err != nil { - t.Fatal(err) - } - 
require.Equal(t, tx.ID().String(), "QnTUuie2qe6BKyYrC2jqd73bJ828QNhYnZbdA2HWsnVRPjBfV") - result = tx.Bytes() + + err = tx.SignSECP256K1Fx( + parser.Codec(), + [][]*secp256k1.PrivateKey{ + {keys[0], keys[0]}, + {keys[0], keys[0]}, + }, + ) + require.NoError(err) + require.Equal(tx.ID().String(), "QnTUuie2qe6BKyYrC2jqd73bJ828QNhYnZbdA2HWsnVRPjBfV") // there are two credentials expected[len(expected)-1] = 0x02 expected = append(expected, credBytes...) - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } -} -func TestBaseTxGetters(t *testing.T) { - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }} - tx.Initialize(nil) - - if assets := tx.AssetIDs(); assets.Len() != 1 { - t.Fatalf("Wrong number of assets returned") - } else if !assets.Contains(assetID) { - t.Fatalf("Wrong asset returned") - } else if assets := tx.ConsumedAssetIDs(); assets.Len() != 1 { - t.Fatalf("Wrong number of consumed assets returned") - } else if !assets.Contains(assetID) { - t.Fatalf("Wrong consumed asset returned") - } -} - -func TestBaseTxSyntacticVerify(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: 
[]*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }} - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err != nil { - t.Fatal(err) - } -} - -func TestBaseTxSyntacticVerifyMemoTooLarge(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - Memo: make([]byte, avax.MaxMemoSize+1), - }} - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatal("should have failed because memo is too large") - } -} - -func TestBaseTxSyntacticVerifyNil(t *testing.T) { - ctx := 
NewContext(t) - c := setupCodec() - - tx := (*BaseTx)(nil) - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("Nil BaseTx should have erred") - } + result = tx.Bytes() + require.Equal(expected, result) } -func TestBaseTxSyntacticVerifyWrongNetworkID(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() +func TestBaseTxGetters(t *testing.T) { + require := require.New(t) tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID + 1, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Outs: []*avax.TransferableOutput{{ Asset: avax.Asset{ID: assetID}, @@ -352,55 +222,7 @@ func TestBaseTxSyntacticVerifyWrongNetworkID(t *testing.T) { }}, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }} - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("Wrong networkID should have erred") - } -} - -func TestBaseTxSyntacticVerifyWrongChainID(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID.Prefix(0), - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 
0xe2, 0xe1, 0xe0, - }, + TxID: ids.GenerateTestID(), OutputIndex: 1, }, Asset: avax.Asset{ID: assetID}, @@ -412,294 +234,20 @@ func TestBaseTxSyntacticVerifyWrongChainID(t *testing.T) { }, }}, }} - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("Wrong chain ID should have erred") - } -} -func TestBaseTxSyntacticVerifyInvalidOutput(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{nil}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }} - tx.Initialize(nil) + assets := tx.AssetIDs() + require.Len(assets, 1) + require.Contains(assets, assetID) - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("Invalid output should have erred") - } -} - -func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{ - { - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - { - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 1, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - 
TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }, - }, - }} - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("Unsorted outputs should have erred") - } -} - -func TestBaseTxSyntacticVerifyInvalidInput(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Ins: []*avax.TransferableInput{nil}, - }} - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("Invalid input should have erred") - } -} - -func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: 
&secp256k1fx.TransferInput{ - Amt: math.MaxUint64, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }, - { - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }, - }, - }} - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("Input overflow should have erred") - } -} - -func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{ - { - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 2, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - { - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }} - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("Output overflow should have erred") - } -} - -func 
TestBaseTxSyntacticVerifyInsufficientFunds(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }} - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("Insufficient funds should have erred") - } + consumedAssets := tx.ConsumedAssetIDs() + require.Len(consumedAssets, 1) + require.Contains(consumedAssets, assetID) } func TestBaseTxNotState(t *testing.T) { + require := require.New(t) + intf := interface{}(&BaseTx{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(ok, "should not be marked as state") } diff --git a/avalanchego/vms/avm/txs/codec.go b/avalanchego/vms/avm/txs/codec.go index 284d035f..7f485b9d 100644 --- a/avalanchego/vms/avm/txs/codec.go +++ b/avalanchego/vms/avm/txs/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -14,8 +14,8 @@ import ( ) var ( - _ codec.Registry = &codecRegistry{} - _ secp256k1fx.VM = &fxVM{} + _ codec.Registry = (*codecRegistry)(nil) + _ secp256k1fx.VM = (*fxVM)(nil) ) type codecRegistry struct { @@ -43,7 +43,18 @@ type fxVM struct { codecRegistry codec.Registry } -func (vm *fxVM) Clock() *mockable.Clock { return vm.clock } -func (vm *fxVM) CodecRegistry() codec.Registry { return vm.codecRegistry } -func (vm *fxVM) Logger() logging.Logger { return vm.log } -func (vm *fxVM) EthVerificationEnabled() bool { return false } +func (vm *fxVM) Clock() *mockable.Clock { + return vm.clock +} + +func (vm *fxVM) CodecRegistry() codec.Registry { + return vm.codecRegistry +} + +func (vm *fxVM) Logger() logging.Logger { + return vm.log +} + +func (vm *fxVM) EthVerificationEnabled() bool { + return false +} diff --git a/avalanchego/vms/avm/txs/create_asset_tx.go b/avalanchego/vms/avm/txs/create_asset_tx.go index ebeebb97..4a80d018 100644 --- a/avalanchego/vms/avm/txs/create_asset_tx.go +++ b/avalanchego/vms/avm/txs/create_asset_tx.go @@ -1,43 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( - "errors" - "fmt" - "strings" - "unicode" - - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -const ( - minNameLen = 1 - maxNameLen = 128 - minSymbolLen = 1 - maxSymbolLen = 4 - maxDenomination = 32 -) - var ( - errInitialStatesNotSortedUnique = errors.New("initial states not sorted and unique") - errNameTooShort = fmt.Errorf("name is too short, minimum size is %d", minNameLen) - errNameTooLong = fmt.Errorf("name is too long, maximum size is %d", maxNameLen) - errSymbolTooShort = fmt.Errorf("symbol is too short, minimum size is %d", minSymbolLen) - errSymbolTooLong = fmt.Errorf("symbol is too long, maximum size is %d", maxSymbolLen) - errNoFxs = errors.New("assets must support at least one Fx") - errIllegalNameCharacter = errors.New("asset's name must be made up of only letters and numbers") - errIllegalSymbolCharacter = errors.New("asset's symbol must be all upper case letters") - errUnexpectedWhitespace = errors.New("unexpected whitespace provided") - errDenominationTooLarge = errors.New("denomination is too large") - - _ UnsignedTx = &CreateAssetTx{} - _ secp256k1fx.UnsignedTx = &CreateAssetTx{} + _ UnsignedTx = (*CreateAssetTx)(nil) + _ secp256k1fx.UnsignedTx = (*CreateAssetTx)(nil) ) // CreateAssetTx is a transaction that creates a new asset. @@ -58,64 +31,10 @@ func (t *CreateAssetTx) InitCtx(ctx *snow.Context) { // InitialStates track which virtual machines, and the initial state of these // machines, this asset uses. The returned array should not be modified. 
-func (t *CreateAssetTx) InitialStates() []*InitialState { return t.States } - -func (t *CreateAssetTx) SyntacticVerify( - ctx *snow.Context, - c codec.Manager, - txFeeAssetID ids.ID, - _ uint64, - txFee uint64, - numFxs int, -) error { - switch { - case t == nil: - return errNilTx - case len(t.Name) < minNameLen: - return errNameTooShort - case len(t.Name) > maxNameLen: - return errNameTooLong - case len(t.Symbol) < minSymbolLen: - return errSymbolTooShort - case len(t.Symbol) > maxSymbolLen: - return errSymbolTooLong - case len(t.States) == 0: - return errNoFxs - case t.Denomination > maxDenomination: - return errDenominationTooLarge - case strings.TrimSpace(t.Name) != t.Name: - return errUnexpectedWhitespace - } - - for _, r := range t.Name { - if r > unicode.MaxASCII || !(unicode.IsLetter(r) || unicode.IsNumber(r) || r == ' ') { - return errIllegalNameCharacter - } - } - for _, r := range t.Symbol { - if r > unicode.MaxASCII || !unicode.IsUpper(r) { - return errIllegalSymbolCharacter - } - } - - if err := t.BaseTx.SyntacticVerify(ctx, c, txFeeAssetID, txFee, txFee, numFxs); err != nil { - return err - } - - for _, state := range t.States { - if err := state.Verify(c, numFxs); err != nil { - return err - } - } - if !utils.IsSortedAndUnique(innerSortInitialState(t.States)) { - return errInitialStatesNotSortedUnique - } - - return nil +func (t *CreateAssetTx) InitialStates() []*InitialState { + return t.States } -func (t *CreateAssetTx) Sort() { SortInitialStates(t.States) } - func (t *CreateAssetTx) Visit(v Visitor) error { return v.CreateAssetTx(t) } diff --git a/avalanchego/vms/avm/txs/create_asset_tx_test.go b/avalanchego/vms/avm/txs/create_asset_tx_test.go index 2f7c03cd..08d0c46f 100644 --- a/avalanchego/vms/avm/txs/create_asset_tx_test.go +++ b/avalanchego/vms/avm/txs/create_asset_tx_test.go @@ -1,15 +1,15 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package txs import ( - "bytes" "testing" - "github.com/ava-labs/avalanchego/codec" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -17,83 +17,9 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var ( - nameTooLong = "LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL" - symbolTooLong = "LLLLL" - illegalNameCharacter = "h8*32" - invalidASCIIStr = "ÉÎ" - invalidWhitespaceStr = " HAT" - denominationTooLarge = byte(maxDenomination + 1) -) - -func validCreateAssetTx(t *testing.T) (*CreateAssetTx, codec.Manager, *snow.Context) { - c := setupCodec() - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }}, - Name: "NormalName", - Symbol: "TICK", - Denomination: byte(2), - States: []*InitialState{ - { - FxIndex: 0, - Outs: []verify.State{ - &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: 
secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - }, - } - - unsignedBytes, err := c.Marshal(CodecVersion, tx) - if err != nil { - t.Fatal(err) - } - tx.Initialize(unsignedBytes) - - ctx := NewContext(t) - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err != nil { - t.Fatalf("Valid CreateAssetTx failed syntactic verification due to: %s", err) - } - return tx, c, ctx -} - func TestCreateAssetTxSerialization(t *testing.T) { + require := require.New(t) + expected := []byte{ // Codec version: 0x00, 0x00, @@ -267,18 +193,20 @@ func TestCreateAssetTxSerialization(t *testing.T) { }, }} - c := setupCodec() - if err := tx.SignSECP256K1Fx(c, nil); err != nil { - t.Fatal(err) - } + parser, err := NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + }) + require.NoError(err) + + require.NoError(parser.InitializeTx(tx)) result := tx.Bytes() - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } + require.Equal(expected, result) } func TestCreateAssetTxSerializationAgain(t *testing.T) { + require := require.New(t) + expected := []byte{ // Codec version: 0x00, 0x00, @@ -397,7 +325,7 @@ func TestCreateAssetTxSerializationAgain(t *testing.T) { unsignedTx := &CreateAssetTx{ BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Memo: []byte{0x00, 0x01, 0x02, 0x03}, }}, @@ -437,517 +365,17 @@ func TestCreateAssetTxSerializationAgain(t *testing.T) { parser, err := NewParser([]fxs.Fx{ &secp256k1fx.Fx{}, }) - if err != nil { - t.Fatal(err) - } - if err := parser.InitializeTx(tx); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(parser.InitializeTx(tx)) result := tx.Bytes() - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } -} - -func TestCreateAssetTxGetters(t *testing.T) { - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: 
avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY", - Symbol: "TOM", - Denomination: 0, - } - tx.Initialize(nil) -} - -func TestCreateAssetTxSyntacticVerify(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY", - Symbol: "TOM", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err != nil { - t.Fatal(err) - } -} - -func TestCreateAssetTxSyntacticVerifyNil(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := (*CreateAssetTx)(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Nil CreateAssetTx should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifyNameTooShort(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "", - Symbol: "TOM", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Too short name should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifyNameTooLong(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY WINSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS" + - "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS" + - "SSS", - Symbol: "TOM", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Too long name should have erred") - } -} - -func 
TestCreateAssetTxSyntacticVerifySymbolTooShort(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY", - Symbol: "", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Too short symbol should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifySymbolTooLong(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "TOM", - Symbol: "BRADY", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Too long symbol should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifyNoFxs(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY", - Symbol: "TOM", - Denomination: 0, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("No Fxs should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifyDenominationTooLong(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY", - Symbol: "TOM", - Denomination: denominationTooLarge, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Too large denomination should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifyNameWithWhitespace(t *testing.T) { - ctx := 
NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY ", - Symbol: "TOM", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Whitespace at the end of the name should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifyNameWithInvalidCharacter(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY!", - Symbol: "TOM", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Name with an invalid character should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifyNameWithUnicodeCharacter(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: illegalNameCharacter, - Symbol: "TOM", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Name with an invalid character should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifySymbolWithInvalidCharacter(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY", - Symbol: "TOM!", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Symbol with an invalid character should have erred") - } -} - -func 
TestCreateAssetTxSyntacticVerifyInvalidBaseTx(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID + 1, - BlockchainID: chainID, - }}, - Name: "BRADY", - Symbol: "TOM", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 0, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Invalid BaseTx should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifyInvalidInitialState(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY", - Symbol: "TOM", - Denomination: 0, - States: []*InitialState{{ - FxIndex: 1, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Invalid InitialState should have erred") - } -} - -func TestCreateAssetTxSyntacticVerifyUnsortedInitialStates(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &CreateAssetTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - }}, - Name: "BRADY", - Symbol: "TOM", - Denomination: 0, - States: []*InitialState{ - { - FxIndex: 1, - }, - { - FxIndex: 0, - }, - }, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 2); err == nil { - t.Fatalf("Unsorted InitialStates should have erred") - } + require.Equal(expected, result) } func TestCreateAssetTxNotState(t *testing.T) { - intf := interface{}(&CreateAssetTx{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } -} - -func TestCreateAssetTxSyntacticVerifyName(t *testing.T) { - tx, c, ctx := validCreateAssetTx(t) - - // String of Length 129 should fail SyntacticVerify - tx.Name = nameTooLong - - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { - 
t.Fatal("CreateAssetTx should have failed syntactic verification due to name too long") - } - - tx.Name = invalidWhitespaceStr - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid whitespace in name") - } - - tx.Name = invalidASCIIStr - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid ASCII character in name") - } -} - -func TestCreateAssetTxSyntacticVerifySymbol(t *testing.T) { - tx, c, ctx := validCreateAssetTx(t) - - tx.Symbol = symbolTooLong - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to symbol too long") - } - - tx.Symbol = " F" - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid whitespace in symbol") - } - - tx.Symbol = "É" - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid ASCII character in symbol") - } -} - -func TestCreateAssetTxSyntacticVerifyInvalidDenomination(t *testing.T) { - tx, c, ctx := validCreateAssetTx(t) - - tx.Denomination = byte(33) - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to denomination too large") - } -} - -func TestCreateAssetTxSyntacticVerifyInitialStates(t *testing.T) { - tx, c, ctx := validCreateAssetTx(t) - - tx.States = []*InitialState{} - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to no Initial States") - } - - tx.States = []*InitialState{ - { - FxIndex: 5, // Invalid FxIndex - Outs: []verify.State{ - &secp256k1fx.TransferOutput{ - Amt: 12345, 
- OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - } + require := require.New(t) - // NumFxs is 1, so FxIndex 5 should cause an error - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 1); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid Fx") - } - - uniqueStates := []*InitialState{ - { - FxIndex: 0, - Outs: []verify.State{ - &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - { - FxIndex: 1, - Outs: []verify.State{ - &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - { - FxIndex: 2, - Outs: []verify.State{ - &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - } - - SortInitialStates(uniqueStates) - - // Put states in unsorted order - tx.States = []*InitialState{ - uniqueStates[2], - uniqueStates[0], - } - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 3); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to non-sorted initial states") - } - - tx.States = []*InitialState{ - uniqueStates[0], - uniqueStates[0], - } - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 3); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to non-unique initial states") - } -} - -func TestCreateAssetTxSyntacticVerifyBaseTx(t *testing.T) { - tx, c, ctx := validCreateAssetTx(t) - var baseTx BaseTx - tx.BaseTx = baseTx - if err := tx.SyntacticVerify(ctx, c, assetID, 0, 0, 2); err == nil { - t.Fatal("CreateAssetTx should have failed syntactic verification due to invalid BaseTx (nil)") - } + intf := 
interface{}(&CreateAssetTx{}) + _, ok := intf.(verify.State) + require.False(ok, "should not be marked as state") } diff --git a/avalanchego/vms/avm/txs/executor/backend.go b/avalanchego/vms/avm/txs/executor/backend.go new file mode 100644 index 00000000..fbf4a756 --- /dev/null +++ b/avalanchego/vms/avm/txs/executor/backend.go @@ -0,0 +1,26 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package executor + +import ( + "reflect" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/vms/avm/fxs" +) + +type Backend struct { + Ctx *snow.Context + Config *config.Config + Fxs []*fxs.ParsedFx + TypeToFxIndex map[reflect.Type]int + Codec codec.Manager + // Note: FeeAssetID may be different than ctx.AVAXAssetID if this AVM is + // running in a subnet. + FeeAssetID ids.ID + Bootstrapped bool +} diff --git a/avalanchego/vms/avm/txs/executor/executor.go b/avalanchego/vms/avm/txs/executor/executor.go new file mode 100644 index 00000000..040b1d9c --- /dev/null +++ b/avalanchego/vms/avm/txs/executor/executor.go @@ -0,0 +1,147 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package executor + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" +) + +var _ txs.Visitor = (*Executor)(nil) + +type Executor struct { + Codec codec.Manager + State states.Chain // state will be modified + Tx *txs.Tx + Inputs set.Set[ids.ID] // imported inputs + AtomicRequests map[ids.ID]*atomic.Requests // may be nil +} + +func (e *Executor) BaseTx(tx *txs.BaseTx) error { + txID := e.Tx.ID() + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) + return nil +} + +func (e *Executor) CreateAssetTx(tx *txs.CreateAssetTx) error { + if err := e.BaseTx(&tx.BaseTx); err != nil { + return err + } + + txID := e.Tx.ID() + index := len(tx.Outs) + for _, state := range tx.States { + for _, out := range state.Outs { + e.State.AddUTXO(&avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(index), + }, + Asset: avax.Asset{ + ID: txID, + }, + Out: out, + }) + index++ + } + } + return nil +} + +func (e *Executor) OperationTx(tx *txs.OperationTx) error { + if err := e.BaseTx(&tx.BaseTx); err != nil { + return err + } + + txID := e.Tx.ID() + index := len(tx.Outs) + for _, op := range tx.Ops { + for _, utxoID := range op.UTXOIDs { + e.State.DeleteUTXO(utxoID.InputID()) + } + asset := op.AssetID() + for _, out := range op.Op.Outs() { + e.State.AddUTXO(&avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(index), + }, + Asset: avax.Asset{ID: asset}, + Out: out, + }) + index++ + } + } + return nil +} + +func (e *Executor) ImportTx(tx *txs.ImportTx) error { + if err := e.BaseTx(&tx.BaseTx); err != nil { + return err + } + + utxoIDs := make([][]byte, len(tx.ImportedIns)) + for i, in := range tx.ImportedIns { + utxoID := 
in.UTXOID.InputID() + + e.Inputs.Add(utxoID) + utxoIDs[i] = utxoID[:] + } + e.AtomicRequests = map[ids.ID]*atomic.Requests{ + tx.SourceChain: { + RemoveRequests: utxoIDs, + }, + } + return nil +} + +func (e *Executor) ExportTx(tx *txs.ExportTx) error { + if err := e.BaseTx(&tx.BaseTx); err != nil { + return err + } + + txID := e.Tx.ID() + index := len(tx.Outs) + elems := make([]*atomic.Element, len(tx.ExportedOuts)) + for i, out := range tx.ExportedOuts { + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: txID, + OutputIndex: uint32(index), + }, + Asset: avax.Asset{ID: out.AssetID()}, + Out: out.Out, + } + index++ + + utxoBytes, err := e.Codec.Marshal(txs.CodecVersion, utxo) + if err != nil { + return fmt.Errorf("failed to marshal UTXO: %w", err) + } + utxoID := utxo.InputID() + elem := &atomic.Element{ + Key: utxoID[:], + Value: utxoBytes, + } + if out, ok := utxo.Out.(avax.Addressable); ok { + elem.Traits = out.Addresses() + } + + elems[i] = elem + } + e.AtomicRequests = map[ids.ID]*atomic.Requests{ + tx.DestinationChain: { + PutRequests: elems, + }, + } + return nil +} diff --git a/avalanchego/vms/avm/txs/executor/semantic_verifier.go b/avalanchego/vms/avm/txs/executor/semantic_verifier.go new file mode 100644 index 00000000..fde1e631 --- /dev/null +++ b/avalanchego/vms/avm/txs/executor/semantic_verifier.go @@ -0,0 +1,252 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package executor + +import ( + "context" + "errors" + "reflect" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" +) + +var ( + _ txs.Visitor = (*SemanticVerifier)(nil) + + errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + errNotAnAsset = errors.New("not an asset") + errIncompatibleFx = errors.New("incompatible feature extension") + errUnknownFx = errors.New("unknown feature extension") +) + +type SemanticVerifier struct { + *Backend + State states.ReadOnlyChain + Tx *txs.Tx +} + +func (v *SemanticVerifier) BaseTx(tx *txs.BaseTx) error { + for i, in := range tx.Ins { + // Note: Verification of the length of [t.tx.Creds] happens during + // syntactic verification, which happens before semantic verification. + cred := v.Tx.Creds[i].Verifiable + if err := v.verifyTransfer(tx, in, cred); err != nil { + return err + } + } + + for _, out := range tx.Outs { + fxIndex, err := v.getFx(out.Out) + if err != nil { + return err + } + + assetID := out.AssetID() + if err := v.verifyFxUsage(fxIndex, assetID); err != nil { + return err + } + } + + return nil +} + +func (v *SemanticVerifier) CreateAssetTx(tx *txs.CreateAssetTx) error { + return v.BaseTx(&tx.BaseTx) +} + +func (v *SemanticVerifier) OperationTx(tx *txs.OperationTx) error { + if err := v.BaseTx(&tx.BaseTx); err != nil { + return err + } + + if !v.Bootstrapped || v.Tx.ID().String() == "MkvpJS13eCnEYeYi9B5zuWrU9goG9RBj7nr83U7BjrFV22a12" { + return nil + } + + offset := len(tx.Ins) + for i, op := range tx.Ops { + // Note: Verification of the length of [t.tx.Creds] happens during + // syntactic verification, which happens before semantic verification. 
+ cred := v.Tx.Creds[i+offset].Verifiable + if err := v.verifyOperation(tx, op, cred); err != nil { + return err + } + } + return nil +} + +func (v *SemanticVerifier) ImportTx(tx *txs.ImportTx) error { + if err := v.BaseTx(&tx.BaseTx); err != nil { + return err + } + + if !v.Bootstrapped { + return nil + } + + if err := verify.SameSubnet(context.TODO(), v.Ctx, tx.SourceChain); err != nil { + return err + } + + utxoIDs := make([][]byte, len(tx.ImportedIns)) + for i, in := range tx.ImportedIns { + inputID := in.UTXOID.InputID() + utxoIDs[i] = inputID[:] + } + + allUTXOBytes, err := v.Ctx.SharedMemory.Get(tx.SourceChain, utxoIDs) + if err != nil { + return err + } + + offset := len(tx.Ins) + for i, in := range tx.ImportedIns { + utxo := avax.UTXO{} + if _, err := v.Codec.Unmarshal(allUTXOBytes[i], &utxo); err != nil { + return err + } + + // Note: Verification of the length of [t.tx.Creds] happens during + // syntactic verification, which happens before semantic verification. + cred := v.Tx.Creds[i+offset].Verifiable + if err := v.verifyTransferOfUTXO(tx, in, cred, &utxo); err != nil { + return err + } + } + return nil +} + +func (v *SemanticVerifier) ExportTx(tx *txs.ExportTx) error { + if err := v.BaseTx(&tx.BaseTx); err != nil { + return err + } + + if v.Bootstrapped { + if err := verify.SameSubnet(context.TODO(), v.Ctx, tx.DestinationChain); err != nil { + return err + } + } + + for _, out := range tx.ExportedOuts { + fxIndex, err := v.getFx(out.Out) + if err != nil { + return err + } + + assetID := out.AssetID() + if err := v.verifyFxUsage(fxIndex, assetID); err != nil { + return err + } + } + return nil +} + +func (v *SemanticVerifier) verifyTransfer( + tx txs.UnsignedTx, + in *avax.TransferableInput, + cred verify.Verifiable, +) error { + utxo, err := v.State.GetUTXOFromID(&in.UTXOID) + if err != nil { + return err + } + return v.verifyTransferOfUTXO(tx, in, cred, utxo) +} + +func (v *SemanticVerifier) verifyTransferOfUTXO( + tx txs.UnsignedTx, + in 
*avax.TransferableInput, + cred verify.Verifiable, + utxo *avax.UTXO, +) error { + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if utxoAssetID != inAssetID { + return errAssetIDMismatch + } + + fxIndex, err := v.getFx(cred) + if err != nil { + return err + } + + if err := v.verifyFxUsage(fxIndex, inAssetID); err != nil { + return err + } + + fx := v.Fxs[fxIndex].Fx + return fx.VerifyTransfer(tx, in.In, cred, utxo.Out) +} + +func (v *SemanticVerifier) verifyOperation( + tx *txs.OperationTx, + op *txs.Operation, + cred verify.Verifiable, +) error { + var ( + opAssetID = op.AssetID() + numUTXOs = len(op.UTXOIDs) + utxos = make([]interface{}, numUTXOs) + ) + for i, utxoID := range op.UTXOIDs { + utxo, err := v.State.GetUTXOFromID(utxoID) + if err != nil { + return err + } + + utxoAssetID := utxo.AssetID() + if utxoAssetID != opAssetID { + return errAssetIDMismatch + } + utxos[i] = utxo.Out + } + + fxIndex, err := v.getFx(op.Op) + if err != nil { + return err + } + + if err := v.verifyFxUsage(fxIndex, opAssetID); err != nil { + return err + } + + fx := v.Fxs[fxIndex].Fx + return fx.VerifyOperation(tx, op.Op, cred, utxos) +} + +func (v *SemanticVerifier) verifyFxUsage( + fxID int, + assetID ids.ID, +) error { + tx, err := v.State.GetTx(assetID) + if err != nil { + return err + } + + createAssetTx, ok := tx.Unsigned.(*txs.CreateAssetTx) + if !ok { + return errNotAnAsset + } + + for _, state := range createAssetTx.States { + if state.FxIndex == uint32(fxID) { + return nil + } + } + + return errIncompatibleFx +} + +func (v *SemanticVerifier) getFx(val interface{}) (int, error) { + valType := reflect.TypeOf(val) + fx, exists := v.TypeToFxIndex[valType] + if !exists { + return 0, errUnknownFx + } + return fx, nil +} diff --git a/avalanchego/vms/avm/txs/executor/semantic_verifier_test.go b/avalanchego/vms/avm/txs/executor/semantic_verifier_test.go new file mode 100644 index 00000000..b5157fb9 --- /dev/null +++ 
b/avalanchego/vms/avm/txs/executor/semantic_verifier_test.go @@ -0,0 +1,894 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package executor + +import ( + "reflect" + "testing" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/states" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +func TestSemanticVerifierBaseTx(t *testing.T) { + ctx := newContext(t) + + typeToFxIndex := make(map[reflect.Type]int) + secpFx := &secp256k1fx.Fx{} + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + secpFx, + }, + ) + require.NoError(t, err) + + codec := parser.Codec() + txID := ids.GenerateTestID() + utxoID := avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + } + asset := avax.Asset{ + ID: ids.GenerateTestID(), + } + inputSigner := secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + } + fxInput := secp256k1fx.TransferInput{ + Amt: 12345, + Input: inputSigner, + } + input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &fxInput, + } + baseTx := txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{ + &input, + }, + }, + } + + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: []*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: secpFx, + }, + }, + TypeToFxIndex: typeToFxIndex, + Codec: codec, + FeeAssetID: ids.GenerateTestID(), + 
Bootstrapped: true, + } + require.NoError(t, secpFx.Bootstrapped()) + + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, + } + output := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &output, + } + unsignedCreateAssetTx := txs.CreateAssetTx{ + States: []*txs.InitialState{{ + FxIndex: 0, + }}, + } + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + + tests := []struct { + name string + stateFunc func(*gomock.Controller) states.Chain + txFunc func(*require.Assertions) *txs.Tx + err error + }{ + { + name: "valid", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &baseTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: nil, + }, + { + name: "assetID mismatch", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + utxo := utxo + utxo.Asset.ID = ids.GenerateTestID() + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &baseTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: errAssetIDMismatch, + }, + { + name: "not allowed input feature extension", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil + + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + + 
state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &baseTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: errIncompatibleFx, + }, + { + name: "invalid signature", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &baseTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[1]}, + }, + ) + require.NoError(err) + return tx + }, + err: secp256k1fx.ErrWrongSig, + }, + { + name: "missing UTXO", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + state.EXPECT().GetUTXOFromID(&utxoID).Return(nil, database.ErrNotFound) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &baseTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: database.ErrNotFound, + }, + { + name: "invalid UTXO amount", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + output := output + output.Amt-- + + utxo := utxo + utxo.Out = &output + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &baseTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: 
secp256k1fx.ErrMismatchedAmounts, + }, + { + name: "not allowed output feature extension", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil + + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + baseTx := baseTx + baseTx.Ins = nil + baseTx.Outs = []*avax.TransferableOutput{ + { + Asset: asset, + Out: &output, + }, + } + tx := &txs.Tx{ + Unsigned: &baseTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{}, + ) + require.NoError(err) + return tx + }, + err: errIncompatibleFx, + }, + { + name: "unknown asset", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &baseTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: database.ErrNotFound, + }, + { + name: "not an asset", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + tx := txs.Tx{ + Unsigned: &baseTx, + } + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&tx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &baseTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: errNotAnAsset, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() + + state := test.stateFunc(ctrl) + tx := test.txFunc(require) + + err = tx.Unsigned.Visit(&SemanticVerifier{ + Backend: backend, + State: state, + Tx: tx, + }) + require.ErrorIs(err, test.err) + }) + } +} + +func TestSemanticVerifierExportTx(t *testing.T) { + ctx := newContext(t) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + validatorState := validators.NewMockState(ctrl) + validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ctx.SubnetID, nil) + ctx.ValidatorState = validatorState + + typeToFxIndex := make(map[reflect.Type]int) + secpFx := &secp256k1fx.Fx{} + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + secpFx, + }, + ) + require.NoError(t, err) + + codec := parser.Codec() + txID := ids.GenerateTestID() + utxoID := avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + } + asset := avax.Asset{ + ID: ids.GenerateTestID(), + } + inputSigner := secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + } + fxInput := secp256k1fx.TransferInput{ + Amt: 12345, + Input: inputSigner, + } + input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &fxInput, + } + baseTx := txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{ + &input, + }, + }, + } + exportTx := txs.ExportTx{ + BaseTx: baseTx, + DestinationChain: ctx.CChainID, + } + + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: []*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: secpFx, + }, + }, + TypeToFxIndex: typeToFxIndex, + Codec: codec, + FeeAssetID: ids.GenerateTestID(), + Bootstrapped: true, + } + require.NoError(t, secpFx.Bootstrapped()) + + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, + } + output := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &output, + } + 
unsignedCreateAssetTx := txs.CreateAssetTx{ + States: []*txs.InitialState{{ + FxIndex: 0, + }}, + } + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + + tests := []struct { + name string + stateFunc func(*gomock.Controller) states.Chain + txFunc func(*require.Assertions) *txs.Tx + err error + }{ + { + name: "valid", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: nil, + }, + { + name: "assetID mismatch", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + utxo := utxo + utxo.Asset.ID = ids.GenerateTestID() + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: errAssetIDMismatch, + }, + { + name: "not allowed input feature extension", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil + + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + 
require.NoError(err) + return tx + }, + err: errIncompatibleFx, + }, + { + name: "invalid signature", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[1]}, + }, + ) + require.NoError(err) + return tx + }, + err: secp256k1fx.ErrWrongSig, + }, + { + name: "missing UTXO", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + state.EXPECT().GetUTXOFromID(&utxoID).Return(nil, database.ErrNotFound) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: database.ErrNotFound, + }, + { + name: "invalid UTXO amount", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + output := output + output.Amt-- + + utxo := utxo + utxo.Out = &output + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: secp256k1fx.ErrMismatchedAmounts, + }, + { + name: "not allowed output feature extension", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + unsignedCreateAssetTx := unsignedCreateAssetTx + unsignedCreateAssetTx.States = nil + + createAssetTx := txs.Tx{ + Unsigned: 
&unsignedCreateAssetTx, + } + + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + exportTx := exportTx + exportTx.Ins = nil + exportTx.ExportedOuts = []*avax.TransferableOutput{ + { + Asset: asset, + Out: &output, + }, + } + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{}, + ) + require.NoError(err) + return tx + }, + err: errIncompatibleFx, + }, + { + name: "unknown asset", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(nil, database.ErrNotFound) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: database.ErrNotFound, + }, + { + name: "not an asset", + stateFunc: func(ctrl *gomock.Controller) states.Chain { + state := states.NewMockChain(ctrl) + + tx := txs.Tx{ + Unsigned: &baseTx, + } + + state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&tx, nil) + + return state + }, + txFunc: func(require *require.Assertions) *txs.Tx { + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err := tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + return tx + }, + err: errNotAnAsset, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := test.stateFunc(ctrl) + tx := test.txFunc(require) + + err = tx.Unsigned.Visit(&SemanticVerifier{ + Backend: backend, + State: state, + Tx: tx, + }) + require.ErrorIs(err, test.err) + }) + } +} + +func 
TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { + require := require.New(t) + + ctx := newContext(t) + + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + validatorState := validators.NewMockState(ctrl) + validatorState.EXPECT().GetSubnetID(gomock.Any(), ctx.CChainID).AnyTimes().Return(ids.GenerateTestID(), nil) + ctx.ValidatorState = validatorState + + typeToFxIndex := make(map[reflect.Type]int) + secpFx := &secp256k1fx.Fx{} + parser, err := txs.NewCustomParser( + typeToFxIndex, + new(mockable.Clock), + logging.NoWarn{}, + []fxs.Fx{ + secpFx, + }, + ) + require.NoError(err) + + codec := parser.Codec() + txID := ids.GenerateTestID() + utxoID := avax.UTXOID{ + TxID: txID, + OutputIndex: 2, + } + asset := avax.Asset{ + ID: ids.GenerateTestID(), + } + inputSigner := secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + } + fxInput := secp256k1fx.TransferInput{ + Amt: 12345, + Input: inputSigner, + } + input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &fxInput, + } + baseTx := txs.BaseTx{ + BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{ + &input, + }, + }, + } + exportTx := txs.ExportTx{ + BaseTx: baseTx, + DestinationChain: ctx.CChainID, + } + + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: []*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: secpFx, + }, + }, + TypeToFxIndex: typeToFxIndex, + Codec: codec, + FeeAssetID: ids.GenerateTestID(), + Bootstrapped: true, + } + require.NoError(secpFx.Bootstrapped()) + + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + keys[0].Address(), + }, + } + output := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + utxo := avax.UTXO{ + UTXOID: utxoID, + Asset: asset, + Out: &output, + } + unsignedCreateAssetTx := txs.CreateAssetTx{ + States: []*txs.InitialState{{ + FxIndex: 0, + }}, + } + createAssetTx := txs.Tx{ + Unsigned: &unsignedCreateAssetTx, + } + + state := states.NewMockChain(ctrl) + + 
state.EXPECT().GetUTXOFromID(&utxoID).Return(&utxo, nil) + state.EXPECT().GetTx(asset.ID).Return(&createAssetTx, nil) + + tx := &txs.Tx{ + Unsigned: &exportTx, + } + err = tx.SignSECP256K1Fx( + codec, + [][]*secp256k1.PrivateKey{ + {keys[0]}, + }, + ) + require.NoError(err) + + err = tx.Unsigned.Visit(&SemanticVerifier{ + Backend: backend, + State: state, + Tx: tx, + }) + require.ErrorIs(err, verify.ErrMismatchedSubnetIDs) +} diff --git a/avalanchego/vms/avm/txs/executor/syntactic_verifier.go b/avalanchego/vms/avm/txs/executor/syntactic_verifier.go new file mode 100644 index 00000000..b7023887 --- /dev/null +++ b/avalanchego/vms/avm/txs/executor/syntactic_verifier.go @@ -0,0 +1,302 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package executor + +import ( + "errors" + "fmt" + "strings" + "unicode" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" +) + +const ( + minNameLen = 1 + maxNameLen = 128 + minSymbolLen = 1 + maxSymbolLen = 4 + maxDenomination = 32 +) + +var ( + _ txs.Visitor = (*SyntacticVerifier)(nil) + + errWrongNumberOfCredentials = errors.New("wrong number of credentials") + errInitialStatesNotSortedUnique = errors.New("initial states not sorted and unique") + errNameTooShort = fmt.Errorf("name is too short, minimum size is %d", minNameLen) + errNameTooLong = fmt.Errorf("name is too long, maximum size is %d", maxNameLen) + errSymbolTooShort = fmt.Errorf("symbol is too short, minimum size is %d", minSymbolLen) + errSymbolTooLong = fmt.Errorf("symbol is too long, maximum size is %d", maxSymbolLen) + errNoFxs = errors.New("assets must support at least one Fx") + errIllegalNameCharacter = errors.New("asset's name must be made up of only letters and numbers") + errIllegalSymbolCharacter = errors.New("asset's 
symbol must be all upper case letters") + errUnexpectedWhitespace = errors.New("unexpected whitespace provided") + errDenominationTooLarge = errors.New("denomination is too large") + errOperationsNotSortedUnique = errors.New("operations not sorted and unique") + errNoOperations = errors.New("an operationTx must have at least one operation") + errDoubleSpend = errors.New("inputs attempt to double spend an input") + errNoImportInputs = errors.New("no import inputs") + errNoExportOutputs = errors.New("no export outputs") +) + +type SyntacticVerifier struct { + *Backend + Tx *txs.Tx +} + +func (v *SyntacticVerifier) BaseTx(tx *txs.BaseTx) error { + if err := tx.BaseTx.Verify(v.Ctx); err != nil { + return err + } + + err := avax.VerifyTx( + v.Config.TxFee, + v.FeeAssetID, + [][]*avax.TransferableInput{tx.Ins}, + [][]*avax.TransferableOutput{tx.Outs}, + v.Codec, + ) + if err != nil { + return err + } + + for _, cred := range v.Tx.Creds { + if err := cred.Verify(); err != nil { + return err + } + } + + numCreds := len(v.Tx.Creds) + numInputs := len(tx.Ins) + if numCreds != numInputs { + return fmt.Errorf("%w: %d != %d", + errWrongNumberOfCredentials, + numCreds, + numInputs, + ) + } + + return nil +} + +func (v *SyntacticVerifier) CreateAssetTx(tx *txs.CreateAssetTx) error { + switch { + case len(tx.Name) < minNameLen: + return errNameTooShort + case len(tx.Name) > maxNameLen: + return errNameTooLong + case len(tx.Symbol) < minSymbolLen: + return errSymbolTooShort + case len(tx.Symbol) > maxSymbolLen: + return errSymbolTooLong + case len(tx.States) == 0: + return errNoFxs + case tx.Denomination > maxDenomination: + return errDenominationTooLarge + case strings.TrimSpace(tx.Name) != tx.Name: + return errUnexpectedWhitespace + } + + for _, r := range tx.Name { + if r > unicode.MaxASCII || !(unicode.IsLetter(r) || unicode.IsNumber(r) || r == ' ') { + return errIllegalNameCharacter + } + } + for _, r := range tx.Symbol { + if r > unicode.MaxASCII || !unicode.IsUpper(r) { + 
return errIllegalSymbolCharacter + } + } + + if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { + return err + } + + err := avax.VerifyTx( + v.Config.CreateAssetTxFee, + v.FeeAssetID, + [][]*avax.TransferableInput{tx.Ins}, + [][]*avax.TransferableOutput{tx.Outs}, + v.Codec, + ) + if err != nil { + return err + } + + for _, state := range tx.States { + if err := state.Verify(v.Codec, len(v.Fxs)); err != nil { + return err + } + } + if !utils.IsSortedAndUniqueSortable(tx.States) { + return errInitialStatesNotSortedUnique + } + + for _, cred := range v.Tx.Creds { + if err := cred.Verify(); err != nil { + return err + } + } + + numCreds := len(v.Tx.Creds) + numInputs := len(tx.Ins) + if numCreds != numInputs { + return fmt.Errorf("%w: %d != %d", + errWrongNumberOfCredentials, + numCreds, + numInputs, + ) + } + + return nil +} + +func (v *SyntacticVerifier) OperationTx(tx *txs.OperationTx) error { + if len(tx.Ops) == 0 { + return errNoOperations + } + + if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { + return err + } + + err := avax.VerifyTx( + v.Config.TxFee, + v.FeeAssetID, + [][]*avax.TransferableInput{tx.Ins}, + [][]*avax.TransferableOutput{tx.Outs}, + v.Codec, + ) + if err != nil { + return err + } + + inputs := set.NewSet[ids.ID](len(tx.Ins)) + for _, in := range tx.Ins { + inputs.Add(in.InputID()) + } + + for _, op := range tx.Ops { + if err := op.Verify(); err != nil { + return err + } + for _, utxoID := range op.UTXOIDs { + inputID := utxoID.InputID() + if inputs.Contains(inputID) { + return errDoubleSpend + } + inputs.Add(inputID) + } + } + if !txs.IsSortedAndUniqueOperations(tx.Ops, v.Codec) { + return errOperationsNotSortedUnique + } + + for _, cred := range v.Tx.Creds { + if err := cred.Verify(); err != nil { + return err + } + } + + numCreds := len(v.Tx.Creds) + numInputs := len(tx.Ins) + len(tx.Ops) + if numCreds != numInputs { + return fmt.Errorf("%w: %d != %d", + errWrongNumberOfCredentials, + numCreds, + numInputs, + ) + } + + return nil +} 
+ +func (v *SyntacticVerifier) ImportTx(tx *txs.ImportTx) error { + if len(tx.ImportedIns) == 0 { + return errNoImportInputs + } + + if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { + return err + } + + err := avax.VerifyTx( + v.Config.TxFee, + v.FeeAssetID, + [][]*avax.TransferableInput{ + tx.Ins, + tx.ImportedIns, + }, + [][]*avax.TransferableOutput{tx.Outs}, + v.Codec, + ) + if err != nil { + return err + } + + for _, cred := range v.Tx.Creds { + if err := cred.Verify(); err != nil { + return err + } + } + + numCreds := len(v.Tx.Creds) + numInputs := len(tx.Ins) + len(tx.ImportedIns) + if numCreds != numInputs { + return fmt.Errorf("%w: %d != %d", + errWrongNumberOfCredentials, + numCreds, + numInputs, + ) + } + + return nil +} + +func (v *SyntacticVerifier) ExportTx(tx *txs.ExportTx) error { + if len(tx.ExportedOuts) == 0 { + return errNoExportOutputs + } + + if err := tx.BaseTx.BaseTx.Verify(v.Ctx); err != nil { + return err + } + + err := avax.VerifyTx( + v.Config.TxFee, + v.FeeAssetID, + [][]*avax.TransferableInput{tx.Ins}, + [][]*avax.TransferableOutput{ + tx.Outs, + tx.ExportedOuts, + }, + v.Codec, + ) + if err != nil { + return err + } + + for _, cred := range v.Tx.Creds { + if err := cred.Verify(); err != nil { + return err + } + } + + numCreds := len(v.Tx.Creds) + numInputs := len(tx.Ins) + if numCreds != numInputs { + return fmt.Errorf("%w: %d != %d", + errWrongNumberOfCredentials, + numCreds, + numInputs, + ) + } + + return nil +} diff --git a/avalanchego/vms/avm/txs/executor/syntactic_verifier_test.go b/avalanchego/vms/avm/txs/executor/syntactic_verifier_test.go new file mode 100644 index 00000000..37dd48bf --- /dev/null +++ b/avalanchego/vms/avm/txs/executor/syntactic_verifier_test.go @@ -0,0 +1,2332 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package executor + +import ( + "strings" + "testing" + + stdmath "math" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var ( + keys = secp256k1.TestKeys() + feeConfig = config.Config{ + TxFee: 2, + CreateAssetTxFee: 3, + } +) + +func newContext(t testing.TB) *snow.Context { + require := require.New(t) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = constants.UnitTestID + ctx.ChainID = ids.GenerateTestID() + ctx.XChainID = ctx.ChainID + ctx.CChainID = ids.GenerateTestID() + + aliaser := ctx.BCLookup.(ids.Aliaser) + require.NoError(aliaser.Alias(ctx.XChainID, "X")) + require.NoError(aliaser.Alias(ctx.XChainID, ctx.XChainID.String())) + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + require.NoError(aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String())) + return ctx +} + +func TestSyntacticVerifierBaseTx(t *testing.T) { + ctx := newContext(t) + + fx := &secp256k1fx.Fx{} + parser, err := txs.NewParser([]fxs.Fx{ + fx, + }) + require.NoError(t, err) + + feeAssetID := ids.GenerateTestID() + asset := avax.Asset{ + ID: feeAssetID, + } + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + } + fxOutput := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + output := avax.TransferableOutput{ + Asset: asset, + Out: &fxOutput, + } + inputTxID := ids.GenerateTestID() + utxoID := avax.UTXOID{ + TxID: 
inputTxID, + OutputIndex: 0, + } + inputSigners := secp256k1fx.Input{ + SigIndices: []uint32{2}, + } + fxInput := secp256k1fx.TransferInput{ + Amt: 54321, + Input: inputSigners, + } + input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &fxInput, + } + baseTx := avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: ctx.ChainID, + Outs: []*avax.TransferableOutput{ + &output, + }, + Ins: []*avax.TransferableInput{ + &input, + }, + } + cred := fxs.FxCredential{ + Verifiable: &secp256k1fx.Credential{}, + } + creds := []*fxs.FxCredential{ + &cred, + } + + codec := parser.Codec() + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: []*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: fx, + }, + }, + Codec: codec, + FeeAssetID: feeAssetID, + } + + tests := []struct { + name string + txFunc func() *txs.Tx + err error + }{ + { + name: "valid", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: nil, + }, + { + name: "wrong networkID", + txFunc: func() *txs.Tx { + baseTx := baseTx + baseTx.NetworkID++ + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: avax.ErrWrongNetworkID, + }, + { + name: "wrong chainID", + txFunc: func() *txs.Tx { + baseTx := baseTx + baseTx.BlockchainID = ids.GenerateTestID() + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: avax.ErrWrongChainID, + }, + { + name: "memo too large", + txFunc: func() *txs.Tx { + baseTx := baseTx + baseTx.Memo = make([]byte, avax.MaxMemoSize+1) + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: avax.ErrMemoTooLarge, + }, + { + name: "invalid output", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + baseTx := baseTx + baseTx.Outs = []*avax.TransferableOutput{ + &output, + } + return &txs.Tx{ + Unsigned: 
&txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + baseTx := baseTx + baseTx.Outs = outputs + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: stdmath.MaxUint64, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: math.ErrOverflow, + }, 
+ { + name: "output overflow", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: stdmath.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + + baseTx := baseTx + baseTx.Outs = outputs + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: math.ErrOverflow, + }, + { + name: "insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + { + name: "invalid credential", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: []*fxs.FxCredential{{ + Verifiable: (*secp256k1fx.Credential)(nil), + }}, + } + }, + err: secp256k1fx.ErrNilCredential, + }, + { + name: "wrong number of credentials", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + } + }, + err: errWrongNumberOfCredentials, + }, + { + name: "barely sufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + baseTx := baseTx + baseTx.Ins = []*avax.TransferableInput{ + &input, + 
} + return &txs.Tx{ + Unsigned: &txs.BaseTx{BaseTx: baseTx}, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + tx := test.txFunc() + verifier := &SyntacticVerifier{ + Backend: backend, + Tx: tx, + } + err := tx.Unsigned.Visit(verifier) + require.ErrorIs(err, test.err) + }) + } +} + +func TestSyntacticVerifierCreateAssetTx(t *testing.T) { + ctx := newContext(t) + + fx := &secp256k1fx.Fx{} + parser, err := txs.NewParser([]fxs.Fx{ + fx, + }) + require.NoError(t, err) + + feeAssetID := ids.GenerateTestID() + asset := avax.Asset{ + ID: feeAssetID, + } + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + } + fxOutput := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + output := avax.TransferableOutput{ + Asset: asset, + Out: &fxOutput, + } + inputTxID := ids.GenerateTestID() + utxoID := avax.UTXOID{ + TxID: inputTxID, + OutputIndex: 0, + } + inputSigners := secp256k1fx.Input{ + SigIndices: []uint32{2}, + } + fxInput := secp256k1fx.TransferInput{ + Amt: 54321, + Input: inputSigners, + } + input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &fxInput, + } + baseTx := avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: ctx.ChainID, + Outs: []*avax.TransferableOutput{ + &output, + }, + Ins: []*avax.TransferableInput{ + &input, + }, + } + initialState := txs.InitialState{ + FxIndex: 0, + Outs: []verify.State{ + &fxOutput, + }, + } + tx := txs.CreateAssetTx{ + BaseTx: txs.BaseTx{BaseTx: baseTx}, + Name: "NormalName", + Symbol: "TICK", + Denomination: byte(2), + States: []*txs.InitialState{ + &initialState, + }, + } + cred := fxs.FxCredential{ + Verifiable: &secp256k1fx.Credential{}, + } + creds := []*fxs.FxCredential{ + &cred, + } + + codec := parser.Codec() + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: 
[]*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: fx, + }, + }, + Codec: codec, + FeeAssetID: feeAssetID, + } + + tests := []struct { + name string + txFunc func() *txs.Tx + err error + }{ + { + name: "valid", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "name too short", + txFunc: func() *txs.Tx { + tx := tx + tx.Name = "" + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errNameTooShort, + }, + { + name: "name too long", + txFunc: func() *txs.Tx { + tx := tx + tx.Name = strings.Repeat("X", maxNameLen+1) + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errNameTooLong, + }, + { + name: "symbol too short", + txFunc: func() *txs.Tx { + tx := tx + tx.Symbol = "" + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errSymbolTooShort, + }, + { + name: "symbol too long", + txFunc: func() *txs.Tx { + tx := tx + tx.Symbol = strings.Repeat("X", maxSymbolLen+1) + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errSymbolTooLong, + }, + { + name: "no feature extensions", + txFunc: func() *txs.Tx { + tx := tx + tx.States = nil + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errNoFxs, + }, + { + name: "denomination too large", + txFunc: func() *txs.Tx { + tx := tx + tx.Denomination = maxDenomination + 1 + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errDenominationTooLarge, + }, + { + name: "bounding whitespace in name", + txFunc: func() *txs.Tx { + tx := tx + tx.Name = " AVAX" + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errUnexpectedWhitespace, + }, + { + name: "illegal character in name", + txFunc: func() *txs.Tx { + tx := tx + tx.Name = "h8*32" + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errIllegalNameCharacter, + }, + { + name: "illegal character in ticker", + txFunc: func() *txs.Tx { + tx := tx + tx.Symbol = "H I" + return &txs.Tx{ + Unsigned: &tx, + Creds: 
creds, + } + }, + err: errIllegalSymbolCharacter, + }, + { + name: "wrong networkID", + txFunc: func() *txs.Tx { + tx := tx + tx.NetworkID++ + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrWrongNetworkID, + }, + { + name: "wrong chainID", + txFunc: func() *txs.Tx { + tx := tx + tx.BlockchainID = ids.GenerateTestID() + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrWrongChainID, + }, + { + name: "memo too large", + txFunc: func() *txs.Tx { + tx := tx + tx.Memo = make([]byte, avax.MaxMemoSize+1) + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrMemoTooLarge, + }, + { + name: "invalid output", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + tx := tx + tx.Outs = []*avax.TransferableOutput{ + &output, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + 
&input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: stdmath.MaxUint64, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(baseTx.Ins, make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: math.ErrOverflow, + }, + { + name: "output overflow", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: stdmath.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: math.ErrOverflow, + }, + { + name: "insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + { + name: "invalid nil state", + txFunc: func() *txs.Tx { + tx := tx + tx.States = []*txs.InitialState{ + nil, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrNilInitialState, + }, + { + name: "invalid fx", + txFunc: func() *txs.Tx { + initialState := initialState + initialState.FxIndex = 1 + + tx := tx + tx.States = []*txs.InitialState{ + &initialState, + } + 
return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrUnknownFx, + }, + { + name: "invalid nil state output", + txFunc: func() *txs.Tx { + initialState := initialState + initialState.Outs = []verify.State{ + nil, + } + + tx := tx + tx.States = []*txs.InitialState{ + &initialState, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrNilFxOutput, + }, + { + name: "invalid state output", + txFunc: func() *txs.Tx { + fxOutput := fxOutput + fxOutput.Amt = 0 + + initialState := initialState + initialState.Outs = []verify.State{ + &fxOutput, + } + + tx := tx + tx.States = []*txs.InitialState{ + &initialState, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted initial state", + txFunc: func() *txs.Tx { + fxOutput0 := fxOutput + + fxOutput1 := fxOutput + fxOutput1.Amt++ + + initialState := initialState + initialState.Outs = []verify.State{ + &fxOutput0, + &fxOutput1, + } + initialState.Sort(codec) + initialState.Outs[0], initialState.Outs[1] = initialState.Outs[1], initialState.Outs[0] + + tx := tx + tx.States = []*txs.InitialState{ + &initialState, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrOutputsNotSorted, + }, + { + name: "non-unique initial states", + txFunc: func() *txs.Tx { + tx := tx + tx.States = []*txs.InitialState{ + &initialState, + &initialState, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errInitialStatesNotSortedUnique, + }, + { + name: "invalid credential", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{{ + Verifiable: (*secp256k1fx.Credential)(nil), + }}, + } + }, + err: secp256k1fx.ErrNilCredential, + }, + { + name: "wrong number of credentials", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + } + }, + err: errWrongNumberOfCredentials, + }, + { + name: "barely sufficient funds", + txFunc: func() *txs.Tx { + input := 
input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.CreateAssetTxFee, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.CreateAssetTxFee - 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + tx := test.txFunc() + verifier := &SyntacticVerifier{ + Backend: backend, + Tx: tx, + } + err := tx.Unsigned.Visit(verifier) + require.ErrorIs(err, test.err) + }) + } +} + +func TestSyntacticVerifierOperationTx(t *testing.T) { + ctx := newContext(t) + + fx := &secp256k1fx.Fx{} + parser, err := txs.NewParser([]fxs.Fx{ + fx, + }) + require.NoError(t, err) + + feeAssetID := ids.GenerateTestID() + asset := avax.Asset{ + ID: feeAssetID, + } + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + } + fxOutput := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + output := avax.TransferableOutput{ + Asset: asset, + Out: &fxOutput, + } + inputTxID := ids.GenerateTestID() + utxoID := avax.UTXOID{ + TxID: inputTxID, + OutputIndex: 0, + } + inputSigners := secp256k1fx.Input{ + SigIndices: []uint32{2}, + } + fxInput := secp256k1fx.TransferInput{ + Amt: 54321, + Input: inputSigners, + } + input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &fxInput, + } + baseTx := avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: ctx.ChainID, + Ins: []*avax.TransferableInput{ + &input, + }, + Outs: []*avax.TransferableOutput{ + &output, + }, + 
} + opUTXOID := utxoID + opUTXOID.OutputIndex++ + fxOp := secp256k1fx.MintOperation{ + MintInput: inputSigners, + MintOutput: secp256k1fx.MintOutput{ + OutputOwners: outputOwners, + }, + TransferOutput: fxOutput, + } + op := txs.Operation{ + Asset: asset, + UTXOIDs: []*avax.UTXOID{ + &opUTXOID, + }, + Op: &fxOp, + } + tx := txs.OperationTx{ + BaseTx: txs.BaseTx{BaseTx: baseTx}, + Ops: []*txs.Operation{ + &op, + }, + } + cred := fxs.FxCredential{ + Verifiable: &secp256k1fx.Credential{}, + } + creds := []*fxs.FxCredential{ + &cred, + &cred, + } + + codec := parser.Codec() + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: []*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: fx, + }, + }, + Codec: codec, + FeeAssetID: feeAssetID, + } + + tests := []struct { + name string + txFunc func() *txs.Tx + err error + }{ + { + name: "valid", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "no operation", + txFunc: func() *txs.Tx { + tx := tx + tx.Ops = nil + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errNoOperations, + }, + { + name: "wrong networkID", + txFunc: func() *txs.Tx { + tx := tx + tx.NetworkID++ + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrWrongNetworkID, + }, + { + name: "wrong chainID", + txFunc: func() *txs.Tx { + tx := tx + tx.BlockchainID = ids.GenerateTestID() + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrWrongChainID, + }, + { + name: "memo too large", + txFunc: func() *txs.Tx { + tx := tx + tx.Memo = make([]byte, avax.MaxMemoSize+1) + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrMemoTooLarge, + }, + { + name: "invalid output", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + tx := tx + tx.Outs = []*avax.TransferableOutput{ + &output, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + 
} + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: stdmath.MaxUint64, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(tx.Ins, make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: math.ErrOverflow, + }, + { + name: "output overflow", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: stdmath.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := 
[]*avax.TransferableOutput{ + &output, + } + avax.SortTransferableOutputs(outputs, codec) + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: math.ErrOverflow, + }, + { + name: "insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + { + name: "invalid nil op", + txFunc: func() *txs.Tx { + tx := tx + tx.Ops = []*txs.Operation{ + nil, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrNilOperation, + }, + { + name: "invalid nil fx op", + txFunc: func() *txs.Tx { + op := op + op.Op = nil + + tx := tx + tx.Ops = []*txs.Operation{ + &op, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrNilFxOperation, + }, + { + name: "invalid duplicated op UTXOs", + txFunc: func() *txs.Tx { + op := op + op.UTXOIDs = []*avax.UTXOID{ + &opUTXOID, + &opUTXOID, + } + + tx := tx + tx.Ops = []*txs.Operation{ + &op, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: txs.ErrNotSortedAndUniqueUTXOIDs, + }, + { + name: "invalid duplicated UTXOs across ops", + txFunc: func() *txs.Tx { + newOp := op + op.Asset.ID = ids.GenerateTestID() + + tx := tx + tx.Ops = []*txs.Operation{ + &op, + &newOp, + } + txs.SortOperations(tx.Ops, codec) + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errDoubleSpend, + }, + { + name: "invalid duplicated op", + txFunc: func() *txs.Tx { + op := op + op.UTXOIDs = nil + + tx := tx + tx.Ops = []*txs.Operation{ + &op, + &op, + } + txs.SortOperations(tx.Ops, codec) + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errOperationsNotSortedUnique, + }, + { + name: "invalid credential", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + Creds: 
[]*fxs.FxCredential{{ + Verifiable: (*secp256k1fx.Credential)(nil), + }}, + } + }, + err: secp256k1fx.ErrNilCredential, + }, + { + name: "wrong number of credentials", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + } + }, + err: errWrongNumberOfCredentials, + }, + { + name: "barely sufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + tx := test.txFunc() + verifier := &SyntacticVerifier{ + Backend: backend, + Tx: tx, + } + err := tx.Unsigned.Visit(verifier) + require.ErrorIs(err, test.err) + }) + } +} + +func TestSyntacticVerifierImportTx(t *testing.T) { + ctx := newContext(t) + + fx := &secp256k1fx.Fx{} + parser, err := txs.NewParser([]fxs.Fx{ + fx, + }) + require.NoError(t, err) + + feeAssetID := ids.GenerateTestID() + asset := avax.Asset{ + ID: feeAssetID, + } + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + } + fxOutput := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + output := avax.TransferableOutput{ + Asset: asset, + Out: &fxOutput, + } + inputTxID := ids.GenerateTestID() + utxoID := avax.UTXOID{ + TxID: inputTxID, + OutputIndex: 0, + } + inputSigners := secp256k1fx.Input{ + SigIndices: []uint32{2}, + } + fxInput := 
secp256k1fx.TransferInput{ + Amt: 54321, + Input: inputSigners, + } + input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &fxInput, + } + baseTx := avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: ctx.ChainID, + Outs: []*avax.TransferableOutput{ + &output, + }, + } + tx := txs.ImportTx{ + BaseTx: txs.BaseTx{BaseTx: baseTx}, + SourceChain: ctx.CChainID, + ImportedIns: []*avax.TransferableInput{ + &input, + }, + } + cred := fxs.FxCredential{ + Verifiable: &secp256k1fx.Credential{}, + } + creds := []*fxs.FxCredential{ + &cred, + } + + codec := parser.Codec() + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: []*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: fx, + }, + }, + Codec: codec, + FeeAssetID: feeAssetID, + } + + tests := []struct { + name string + txFunc func() *txs.Tx + err error + }{ + { + name: "valid", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "no imported inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.ImportedIns = nil + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errNoImportInputs, + }, + { + name: "wrong networkID", + txFunc: func() *txs.Tx { + tx := tx + tx.NetworkID++ + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrWrongNetworkID, + }, + { + name: "wrong chainID", + txFunc: func() *txs.Tx { + tx := tx + tx.BlockchainID = ids.GenerateTestID() + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrWrongChainID, + }, + { + name: "memo too large", + txFunc: func() *txs.Tx { + tx := tx + tx.Memo = make([]byte, avax.MaxMemoSize+1) + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrMemoTooLarge, + }, + { + name: "invalid output", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + tx := tx + tx.Outs = []*avax.TransferableOutput{ + &output, + } + return 
&txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "duplicate imported inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.ImportedIns = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: stdmath.MaxUint64, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(tx.Ins, 
make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: math.ErrOverflow, + }, + { + name: "output overflow", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: stdmath.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output, + } + avax.SortTransferableOutputs(outputs, codec) + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: math.ErrOverflow, + }, + { + name: "insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + tx := tx + tx.ImportedIns = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + { + name: "invalid credential", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{{ + Verifiable: (*secp256k1fx.Credential)(nil), + }}, + } + }, + err: secp256k1fx.ErrNilCredential, + }, + { + name: "wrong number of credentials", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + } + }, + err: errWrongNumberOfCredentials, + }, + { + name: "barely sufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + tx := tx + tx.ImportedIns = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + tx := tx + tx.ImportedIns = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + } + 
for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + tx := test.txFunc() + verifier := &SyntacticVerifier{ + Backend: backend, + Tx: tx, + } + err := tx.Unsigned.Visit(verifier) + require.ErrorIs(err, test.err) + }) + } +} + +func TestSyntacticVerifierExportTx(t *testing.T) { + ctx := newContext(t) + + fx := &secp256k1fx.Fx{} + parser, err := txs.NewParser([]fxs.Fx{ + fx, + }) + require.NoError(t, err) + + feeAssetID := ids.GenerateTestID() + asset := avax.Asset{ + ID: feeAssetID, + } + outputOwners := secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + } + fxOutput := secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: outputOwners, + } + output := avax.TransferableOutput{ + Asset: asset, + Out: &fxOutput, + } + inputTxID := ids.GenerateTestID() + utxoID := avax.UTXOID{ + TxID: inputTxID, + OutputIndex: 0, + } + inputSigners := secp256k1fx.Input{ + SigIndices: []uint32{2}, + } + fxInput := secp256k1fx.TransferInput{ + Amt: 54321, + Input: inputSigners, + } + input := avax.TransferableInput{ + UTXOID: utxoID, + Asset: asset, + In: &fxInput, + } + baseTx := avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: ctx.ChainID, + Ins: []*avax.TransferableInput{ + &input, + }, + } + tx := txs.ExportTx{ + BaseTx: txs.BaseTx{BaseTx: baseTx}, + DestinationChain: ctx.CChainID, + ExportedOuts: []*avax.TransferableOutput{ + &output, + }, + } + cred := fxs.FxCredential{ + Verifiable: &secp256k1fx.Credential{}, + } + creds := []*fxs.FxCredential{ + &cred, + } + + codec := parser.Codec() + backend := &Backend{ + Ctx: ctx, + Config: &feeConfig, + Fxs: []*fxs.ParsedFx{ + { + ID: secp256k1fx.ID, + Fx: fx, + }, + }, + Codec: codec, + FeeAssetID: feeAssetID, + } + + tests := []struct { + name string + txFunc func() *txs.Tx + err error + }{ + { + name: "valid", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: 
"no exported outputs", + txFunc: func() *txs.Tx { + tx := tx + tx.ExportedOuts = nil + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: errNoExportOutputs, + }, + { + name: "wrong networkID", + txFunc: func() *txs.Tx { + tx := tx + tx.NetworkID++ + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrWrongNetworkID, + }, + { + name: "wrong chainID", + txFunc: func() *txs.Tx { + tx := tx + tx.BlockchainID = ids.GenerateTestID() + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrWrongChainID, + }, + { + name: "memo too large", + txFunc: func() *txs.Tx { + tx := tx + tx.Memo = make([]byte, avax.MaxMemoSize+1) + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrMemoTooLarge, + }, + { + name: "invalid output", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: 0, + OutputOwners: outputOwners, + } + + tx := tx + tx.Outs = []*avax.TransferableOutput{ + &output, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueOutput, + }, + { + name: "unsorted outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "unsorted exported outputs", + txFunc: func() *txs.Tx { + output0 := output + output0.Out = &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: outputOwners, + } + + output1 := output + output1.Out = &secp256k1fx.TransferOutput{ + Amt: 2, + OutputOwners: outputOwners, + } + + outputs := 
[]*avax.TransferableOutput{ + &output0, + &output1, + } + avax.SortTransferableOutputs(outputs, codec) + outputs[0], outputs[1] = outputs[1], outputs[0] + + tx := tx + tx.ExportedOuts = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrOutputsNotSorted, + }, + { + name: "invalid input", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: 0, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: secp256k1fx.ErrNoValueInput, + }, + { + name: "duplicate inputs", + txFunc: func() *txs.Tx { + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: avax.ErrInputsNotSortedUnique, + }, + { + name: "input overflow", + txFunc: func() *txs.Tx { + input0 := input + input0.In = &secp256k1fx.TransferInput{ + Amt: 1, + Input: inputSigners, + } + + input1 := input + input1.UTXOID.OutputIndex++ + input1.In = &secp256k1fx.TransferInput{ + Amt: stdmath.MaxUint64, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input0, + &input1, + } + avax.SortTransferableInputsWithSigners(tx.Ins, make([][]*secp256k1.PrivateKey, 2)) + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{ + &cred, + &cred, + }, + } + }, + err: math.ErrOverflow, + }, + { + name: "output overflow", + txFunc: func() *txs.Tx { + output := output + output.Out = &secp256k1fx.TransferOutput{ + Amt: stdmath.MaxUint64, + OutputOwners: outputOwners, + } + + outputs := []*avax.TransferableOutput{ + &output, + } + avax.SortTransferableOutputs(outputs, codec) + + tx := tx + tx.Outs = outputs + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: math.ErrOverflow, + }, + { + name: "insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + 
Amt: 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + { + name: "invalid credential", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + Creds: []*fxs.FxCredential{{ + Verifiable: (*secp256k1fx.Credential)(nil), + }}, + } + }, + err: secp256k1fx.ErrNilCredential, + }, + { + name: "wrong number of credentials", + txFunc: func() *txs.Tx { + return &txs.Tx{ + Unsigned: &tx, + } + }, + err: errWrongNumberOfCredentials, + }, + { + name: "barely sufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: nil, + }, + { + name: "barely insufficient funds", + txFunc: func() *txs.Tx { + input := input + input.In = &secp256k1fx.TransferInput{ + Amt: fxOutput.Amt + feeConfig.TxFee - 1, + Input: inputSigners, + } + + tx := tx + tx.Ins = []*avax.TransferableInput{ + &input, + } + return &txs.Tx{ + Unsigned: &tx, + Creds: creds, + } + }, + err: avax.ErrInsufficientFunds, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) + + tx := test.txFunc() + verifier := &SyntacticVerifier{ + Backend: backend, + Tx: tx, + } + err := tx.Unsigned.Visit(verifier) + require.ErrorIs(err, test.err) + }) + } +} diff --git a/avalanchego/vms/avm/txs/export_tx.go b/avalanchego/vms/avm/txs/export_tx.go index 5d9014fe..aec13141 100644 --- a/avalanchego/vms/avm/txs/export_tx.go +++ b/avalanchego/vms/avm/txs/export_tx.go @@ -1,12 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( - "errors" - - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -14,10 +11,8 @@ import ( ) var ( - errNoExportOutputs = errors.New("no export outputs") - - _ UnsignedTx = &ExportTx{} - _ secp256k1fx.UnsignedTx = &ExportTx{} + _ UnsignedTx = (*ExportTx)(nil) + _ secp256k1fx.UnsignedTx = (*ExportTx)(nil) ) // ExportTx is a transaction that exports an asset to another blockchain. @@ -38,41 +33,6 @@ func (t *ExportTx) InitCtx(ctx *snow.Context) { t.BaseTx.InitCtx(ctx) } -func (t *ExportTx) SyntacticVerify( - ctx *snow.Context, - c codec.Manager, - txFeeAssetID ids.ID, - txFee uint64, - _ uint64, - _ int, -) error { - switch { - case t == nil: - return errNilTx - case len(t.ExportedOuts) == 0: - return errNoExportOutputs - } - - // We don't call [t.BaseTx.SyntacticVerify] because the flow check performed - // here is more strict than the flow check performed in the [BaseTx]. - // Therefore, we avoid performing a useless flow check by performing the - // other verifications here. - if err := t.BaseTx.BaseTx.Verify(ctx); err != nil { - return err - } - - return avax.VerifyTx( - txFee, - txFeeAssetID, - [][]*avax.TransferableInput{t.Ins}, - [][]*avax.TransferableOutput{ - t.Outs, - t.ExportedOuts, - }, - c, - ) -} - func (t *ExportTx) Visit(v Visitor) error { return v.ExportTx(t) } diff --git a/avalanchego/vms/avm/txs/export_tx_test.go b/avalanchego/vms/avm/txs/export_tx_test.go index dd91b952..c02504e9 100644 --- a/avalanchego/vms/avm/txs/export_tx_test.go +++ b/avalanchego/vms/avm/txs/export_tx_test.go @@ -1,22 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( - "bytes" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestExportTxSerialization(t *testing.T) { + require := require.New(t) + expected := []byte{ // Codec version: 0x00, 0x00, @@ -106,15 +108,16 @@ func TestExportTxSerialization(t *testing.T) { }, }} - c := setupCodec() - if err := tx.SignSECP256K1Fx(c, nil); err != nil { - t.Fatal(err) - } - require.Equal(t, tx.ID().String(), "2PKJE4TrKYpgynBFCpNPpV3GHK7d9QTgrL5mpYG6abHKDvNBG3") + parser, err := NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + }) + require.NoError(err) + + require.NoError(parser.InitializeTx(tx)) + require.Equal(tx.ID().String(), "2PKJE4TrKYpgynBFCpNPpV3GHK7d9QTgrL5mpYG6abHKDvNBG3") + result := tx.Bytes() - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } + require.Equal(expected, result) credBytes := []byte{ // type id @@ -165,630 +168,27 @@ func TestExportTxSerialization(t *testing.T) { 0x8f, 0xe0, 0x2a, 0xf3, 0xcc, 0x31, 0x32, 0xef, 0xfe, 0x7d, 0x3d, 0x9f, 0x14, 0x94, 0x01, } - if err := tx.SignSECP256K1Fx(c, [][]*crypto.PrivateKeySECP256K1R{{keys[0], keys[0]}, {keys[0], keys[0]}}); err != nil { - t.Fatal(err) - } - require.Equal(t, tx.ID().String(), "2oG52e7Cb7XF1yUzv3pRFndAypgbpswWRcSAKD5SH5VgaiTm5D") - result = tx.Bytes() + err = tx.SignSECP256K1Fx( + parser.Codec(), + [][]*secp256k1.PrivateKey{ + {keys[0], keys[0]}, + {keys[0], keys[0]}, + }, + ) + require.NoError(err) + require.Equal(tx.ID().String(), "2oG52e7Cb7XF1yUzv3pRFndAypgbpswWRcSAKD5SH5VgaiTm5D") // there are two credentials expected[len(expected)-1] = 0x02 expected = append(expected, credBytes...) 
- if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } -} - -func TestExportTxSyntacticVerify(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err != nil { - t.Fatal(err) - } -} - -func TestExportTxSyntacticVerifyNil(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := (*ExportTx)(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to a nil ExportTx") - } -} - -func TestExportTxSyntacticVerifyWrongNetworkID(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID + 1, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: 
assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to a wrong network ID") - } -} - -func TestExportTxSyntacticVerifyWrongBlockchainID(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: ids.ID{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to wrong blockchain ID") - } -} - -func TestExportTxSyntacticVerifyInvalidMemo(t 
*testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - Memo: make([]byte, avax.MaxMemoSize+1), - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to memo field being too long") - } -} - -func TestExportTxSyntacticVerifyInvalidBaseOutput(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 10000, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: 
[]ids.ShortID{}, - }, - }, - }}, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 2345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to an invalid base output") - } -} - -func TestExportTxSyntacticVerifyUnsortedBaseOutputs(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - Outs: []*avax.TransferableOutput{ - { - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 10000, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - { - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 1111, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 1234, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := 
tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to unsorted base outputs") - } -} - -func TestExportTxSyntacticVerifyInvalidOutput(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to invalid output") - } -} - -func TestExportTxSyntacticVerifyUnsortedOutputs(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{ - { - 
Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 10000, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - { - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 2345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }, - }, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to unsorted outputs") - } -} - -func TestExportTxSyntacticVerifyInvalidInput(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }, - { - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 0, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }, - }, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - } - 
tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to invalid input") - } -} - -func TestExportTxSyntacticVerifyUnsortedInputs(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 1, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }, - { - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }, - }, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to unsorted inputs") - } -} - -func TestExportTxSyntacticVerifyInvalidFlowCheck(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ExportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: 
ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - }}, - DestinationChain: platformChainID, - ExportedOuts: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 123450, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to an invalid flow check") - } + result = tx.Bytes() + require.Equal(expected, result) } func TestExportTxNotState(t *testing.T) { + require := require.New(t) + intf := interface{}(&ExportTx{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(ok, "should not be marked as state") } diff --git a/avalanchego/vms/avm/txs/import_tx.go b/avalanchego/vms/avm/txs/import_tx.go index efdc892b..5076aa1f 100644 --- a/avalanchego/vms/avm/txs/import_tx.go +++ b/avalanchego/vms/avm/txs/import_tx.go @@ -1,23 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( - "errors" - - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) var ( - errNoImportInputs = errors.New("no import inputs") - - _ UnsignedTx = &ImportTx{} - _ secp256k1fx.UnsignedTx = &ImportTx{} + _ UnsignedTx = (*ImportTx)(nil) + _ secp256k1fx.UnsignedTx = (*ImportTx)(nil) ) // ImportTx is a transaction that imports an asset from another blockchain. @@ -41,8 +36,16 @@ func (t *ImportTx) InputUTXOs() []*avax.UTXOID { return utxos } +func (t *ImportTx) InputIDs() set.Set[ids.ID] { + inputs := t.BaseTx.InputIDs() + for _, in := range t.ImportedIns { + inputs.Add(in.InputID()) + } + return inputs +} + // ConsumedAssetIDs returns the IDs of the assets this transaction consumes -func (t *ImportTx) ConsumedAssetIDs() ids.Set { +func (t *ImportTx) ConsumedAssetIDs() set.Set[ids.ID] { assets := t.BaseTx.AssetIDs() for _, in := range t.ImportedIns { assets.Add(in.AssetID()) @@ -51,7 +54,7 @@ func (t *ImportTx) ConsumedAssetIDs() ids.Set { } // AssetIDs returns the IDs of the assets this transaction depends on -func (t *ImportTx) AssetIDs() ids.Set { +func (t *ImportTx) AssetIDs() set.Set[ids.ID] { assets := t.BaseTx.AssetIDs() for _, in := range t.ImportedIns { assets.Add(in.AssetID()) @@ -64,40 +67,6 @@ func (t *ImportTx) NumCredentials() int { return t.BaseTx.NumCredentials() + len(t.ImportedIns) } -// SyntacticVerify that this import transaction is well-formed. 
-func (t *ImportTx) SyntacticVerify( - ctx *snow.Context, - c codec.Manager, - txFeeAssetID ids.ID, - txFee uint64, - _ uint64, - numFxs int, -) error { - switch { - case t == nil: - return errNilTx - case len(t.ImportedIns) == 0: - return errNoImportInputs - } - - // We don't call [t.BaseTx.SyntacticVerify] because the flow check performed - // here is less strict than the flow check performed in the [BaseTx]. - if err := t.BaseTx.BaseTx.Verify(ctx); err != nil { - return err - } - - return avax.VerifyTx( - txFee, - txFeeAssetID, - [][]*avax.TransferableInput{ - t.Ins, - t.ImportedIns, - }, - [][]*avax.TransferableOutput{t.Outs}, - c, - ) -} - func (t *ImportTx) Visit(v Visitor) error { return v.ImportTx(t) } diff --git a/avalanchego/vms/avm/txs/import_tx_test.go b/avalanchego/vms/avm/txs/import_tx_test.go index 19cea0a0..47c1eb01 100644 --- a/avalanchego/vms/avm/txs/import_tx_test.go +++ b/avalanchego/vms/avm/txs/import_tx_test.go @@ -1,22 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( - "bytes" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestImportTxSerialization(t *testing.T) { + require := require.New(t) + expected := []byte{ // Codec version 0x00, 0x00, @@ -106,15 +108,16 @@ func TestImportTxSerialization(t *testing.T) { }}, }} - c := setupCodec() - if err := tx.SignSECP256K1Fx(c, nil); err != nil { - t.Fatal(err) - } - require.Equal(t, tx.ID().String(), "9wdPb5rsThXYLX4WxkNeyYrNMfDE5cuWLgifSjxKiA2dCmgCZ") + parser, err := NewParser([]fxs.Fx{ + &secp256k1fx.Fx{}, + }) + require.NoError(err) + + require.NoError(parser.InitializeTx(tx)) + require.Equal(tx.ID().String(), "9wdPb5rsThXYLX4WxkNeyYrNMfDE5cuWLgifSjxKiA2dCmgCZ") + result := tx.Bytes() - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } + require.Equal(expected, result) credBytes := []byte{ // type id @@ -165,116 +168,27 @@ func TestImportTxSerialization(t *testing.T) { 0x1f, 0x49, 0x9b, 0x0a, 0x4f, 0xbf, 0x95, 0xfc, 0x31, 0x39, 0x46, 0x4e, 0xa1, 0xaf, 0x00, } - if err := tx.SignSECP256K1Fx(c, [][]*crypto.PrivateKeySECP256K1R{{keys[0], keys[0]}, {keys[0], keys[0]}}); err != nil { - t.Fatal(err) - } - require.Equal(t, tx.ID().String(), "pCW7sVBytzdZ1WrqzGY1DvA2S9UaMr72xpUMxVyx1QHBARNYx") - result = tx.Bytes() + err = tx.SignSECP256K1Fx( + parser.Codec(), + [][]*secp256k1.PrivateKey{ + {keys[0], keys[0]}, + {keys[0], keys[0]}, + }, + ) + require.NoError(err) + require.Equal(tx.ID().String(), "pCW7sVBytzdZ1WrqzGY1DvA2S9UaMr72xpUMxVyx1QHBARNYx") // there are two credentials expected[len(expected)-1] = 0x02 expected = append(expected, credBytes...) 
- if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } -} - -func TestImportTxSyntacticVerify(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ImportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - }}, - SourceChain: platformChainID, - ImportedIns: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err != nil { - t.Fatal(err) - } -} - -func TestImportTxSyntacticVerifyInvalidMemo(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &ImportTx{ - BaseTx: BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Outs: []*avax.TransferableOutput{{ - Asset: avax.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, - }, - }}, - Memo: make([]byte, avax.MaxMemoSize+1), - }}, - SourceChain: platformChainID, - ImportedIns: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.ID{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 
0xe1, 0xe0, - }, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, - }, - }}, - } - tx.Initialize(nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 0); err == nil { - t.Fatalf("should have erred due to memo field being too long") - } + result = tx.Bytes() + require.Equal(expected, result) } func TestImportTxNotState(t *testing.T) { + require := require.New(t) + intf := interface{}(&ImportTx{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(ok, "should not be marked as state") } diff --git a/avalanchego/vms/avm/txs/initial_state.go b/avalanchego/vms/avm/txs/initial_state.go index 1794fc5d..ae20e6f1 100644 --- a/avalanchego/vms/avm/txs/initial_state.go +++ b/avalanchego/vms/avm/txs/initial_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -16,10 +16,12 @@ import ( ) var ( - errNilInitialState = errors.New("nil initial state is not valid") - errNilFxOutput = errors.New("nil feature extension output is not valid") - errOutputsNotSorted = errors.New("outputs not sorted") - errUnknownFx = errors.New("unknown feature extension") + ErrNilInitialState = errors.New("nil initial state is not valid") + ErrNilFxOutput = errors.New("nil feature extension output is not valid") + ErrOutputsNotSorted = errors.New("outputs not sorted") + ErrUnknownFx = errors.New("unknown feature extension") + + _ utils.Sortable[*InitialState] = (*InitialState)(nil) ) type InitialState struct { @@ -37,27 +39,33 @@ func (is *InitialState) InitCtx(ctx *snow.Context) { func (is *InitialState) Verify(c codec.Manager, numFxs int) error { switch { case is == nil: - return errNilInitialState + return ErrNilInitialState case is.FxIndex >= uint32(numFxs): - return errUnknownFx + return ErrUnknownFx } for _, out := range is.Outs { if out == nil { - return errNilFxOutput + return ErrNilFxOutput } if err := out.Verify(); err != nil { return err } } if !isSortedState(is.Outs, c) { - return errOutputsNotSorted + return ErrOutputsNotSorted } return nil } -func (is *InitialState) Sort(c codec.Manager) { sortState(is.Outs, c) } +func (is *InitialState) Less(other *InitialState) bool { + return is.FxIndex < other.FxIndex +} + +func (is *InitialState) Sort(c codec.Manager) { + sortState(is.Outs, c) +} type innerSortState struct { vers []verify.State @@ -78,8 +86,15 @@ func (vers *innerSortState) Less(i, j int) bool { } return bytes.Compare(iBytes, jBytes) == -1 } -func (vers *innerSortState) Len() int { return len(vers.vers) } -func (vers *innerSortState) Swap(i, j int) { v := vers.vers; v[j], v[i] = v[i], v[j] } + +func (vers *innerSortState) Len() int { + return len(vers.vers) +} + +func (vers *innerSortState) Swap(i, j int) { + v := vers.vers + v[j], v[i] = v[i], v[j] +} func sortState(vers []verify.State, c codec.Manager) { 
sort.Sort(&innerSortState{vers: vers, codec: c}) @@ -88,14 +103,3 @@ func sortState(vers []verify.State, c codec.Manager) { func isSortedState(vers []verify.State, c codec.Manager) bool { return sort.IsSorted(&innerSortState{vers: vers, codec: c}) } - -type innerSortInitialState []*InitialState - -func (iss innerSortInitialState) Less(i, j int) bool { return iss[i].FxIndex < iss[j].FxIndex } -func (iss innerSortInitialState) Len() int { return len(iss) } -func (iss innerSortInitialState) Swap(i, j int) { iss[j], iss[i] = iss[i], iss[j] } - -func SortInitialStates(iss []*InitialState) { sort.Sort(innerSortInitialState(iss)) } -func IsSortedAndUniqueInitialStates(iss []*InitialState) bool { - return utils.IsSortedAndUnique(innerSortInitialState(iss)) -} diff --git a/avalanchego/vms/avm/txs/initial_state_test.go b/avalanchego/vms/avm/txs/initial_state_test.go index 142ae2c5..1d294870 100644 --- a/avalanchego/vms/avm/txs/initial_state_test.go +++ b/avalanchego/vms/avm/txs/initial_state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" @@ -16,6 +18,8 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var errTest = errors.New("non-nil error") + func TestInitialStateVerifySerialization(t *testing.T) { c := linearcodec.NewDefault() if err := c.RegisterType(&secp256k1fx.TransferOutput{}); err != nil { @@ -142,7 +146,7 @@ func TestInitialStateVerifyInvalidOutput(t *testing.T) { is := InitialState{ FxIndex: 0, - Outs: []verify.State{&avax.TestVerifiable{Err: errors.New("")}}, + Outs: []verify.State{&avax.TestVerifiable{Err: errTest}}, } if err := is.Verify(m, numFxs); err == nil { t.Fatalf("Should have erred due to an invalid output") @@ -177,3 +181,15 @@ func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { t.Fatal(err) } } + +func TestInitialStateLess(t *testing.T) { + require := require.New(t) + + var is1, is2 InitialState + require.False(is1.Less(&is2)) + require.False(is2.Less(&is1)) + + is1.FxIndex = 1 + require.False(is1.Less(&is2)) + require.True(is2.Less(&is1)) +} diff --git a/avalanchego/vms/avm/txs/mempool/mempool.go b/avalanchego/vms/avm/txs/mempool/mempool.go new file mode 100644 index 00000000..a04f1061 --- /dev/null +++ b/avalanchego/vms/avm/txs/mempool/mempool.go @@ -0,0 +1,219 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package mempool + +import ( + "errors" + "fmt" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/cache" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/avm/txs" +) + +const ( + // MaxTxSize is the maximum number of bytes a transaction can use to be + // allowed into the mempool. + MaxTxSize = 64 * units.KiB + + // droppedTxIDsCacheSize is the maximum number of dropped txIDs to cache + droppedTxIDsCacheSize = 64 + + initialConsumedUTXOsSize = 512 + + // maxMempoolSize is the maximum number of bytes allowed in the mempool + maxMempoolSize = 64 * units.MiB +) + +var ( + _ Mempool = &mempool{} + + errDuplicateTx = errors.New("duplicate tx") + errTxTooLarge = errors.New("tx too large") + errMempoolFull = errors.New("mempool is full") + errConflictsWithOtherTx = errors.New("tx conflicts with other tx") +) + +// Mempool contains transactions that have not yet been put into a block. +type Mempool interface { + Add(tx *txs.Tx) error + Has(txID ids.ID) bool + Get(txID ids.ID) *txs.Tx + Remove(txs []*txs.Tx) + + // Peek returns the next first tx that was added to the mempool whose size + // is less than or equal to maxTxSize. + Peek(maxTxSize int) *txs.Tx + + // RequestBuildBlock notifies the consensus engine that a block should be + // built if there is at least one transaction in the mempool. + RequestBuildBlock() + + // Note: Dropped txs are added to droppedTxIDs but not evicted from + // unissued. This allows previously dropped txs to be possibly reissued. 
+ MarkDropped(txID ids.ID, reason error) + GetDropReason(txID ids.ID) error +} + +type mempool struct { + bytesAvailableMetric prometheus.Gauge + bytesAvailable int + + unissuedTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] + numTxs prometheus.Gauge + + toEngine chan<- common.Message + + // Key: Tx ID + // Value: Verification error + droppedTxIDs *cache.LRU[ids.ID, error] + + consumedUTXOs set.Set[ids.ID] +} + +func New( + namespace string, + registerer prometheus.Registerer, + toEngine chan<- common.Message, +) (Mempool, error) { + bytesAvailableMetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "bytes_available", + Help: "Number of bytes of space currently available in the mempool", + }) + if err := registerer.Register(bytesAvailableMetric); err != nil { + return nil, err + } + + numTxsMetric := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "count", + Help: "Number of transactions in the mempool", + }) + if err := registerer.Register(numTxsMetric); err != nil { + return nil, err + } + + bytesAvailableMetric.Set(maxMempoolSize) + return &mempool{ + bytesAvailableMetric: bytesAvailableMetric, + bytesAvailable: maxMempoolSize, + unissuedTxs: linkedhashmap.New[ids.ID, *txs.Tx](), + numTxs: numTxsMetric, + toEngine: toEngine, + droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, + consumedUTXOs: set.NewSet[ids.ID](initialConsumedUTXOsSize), + }, nil +} + +func (m *mempool) Add(tx *txs.Tx) error { + // Note: a previously dropped tx can be re-added + txID := tx.ID() + if m.Has(txID) { + return fmt.Errorf("%w: %s", errDuplicateTx, txID) + } + + txSize := len(tx.Bytes()) + if txSize > MaxTxSize { + return fmt.Errorf("%w: %s size (%d) > max size (%d)", + errTxTooLarge, + txID, + txSize, + MaxTxSize, + ) + } + if txSize > m.bytesAvailable { + return fmt.Errorf("%w: %s size (%d) > available space (%d)", + errMempoolFull, + txID, + txSize, + m.bytesAvailable, + ) + } + + inputs := 
tx.Unsigned.InputIDs() + if m.consumedUTXOs.Overlaps(inputs) { + return fmt.Errorf("%w: %s", errConflictsWithOtherTx, txID) + } + + m.bytesAvailable -= txSize + m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) + + m.unissuedTxs.Put(txID, tx) + m.numTxs.Inc() + + // Mark these UTXOs as consumed in the mempool + m.consumedUTXOs.Union(inputs) + + // An explicitly added tx must not be marked as dropped. + m.droppedTxIDs.Evict(txID) + return nil +} + +func (m *mempool) Has(txID ids.ID) bool { + return m.Get(txID) != nil +} + +func (m *mempool) Get(txID ids.ID) *txs.Tx { + unissuedTxs, _ := m.unissuedTxs.Get(txID) + return unissuedTxs +} + +func (m *mempool) Remove(txsToRemove []*txs.Tx) { + for _, tx := range txsToRemove { + txID := tx.ID() + if _, ok := m.unissuedTxs.Get(txID); !ok { + // If tx isn't in the mempool, there is nothing to do. + continue + } + + txBytes := tx.Bytes() + m.bytesAvailable += len(txBytes) + m.bytesAvailableMetric.Set(float64(m.bytesAvailable)) + + m.unissuedTxs.Delete(txID) + m.numTxs.Dec() + + inputs := tx.Unsigned.InputIDs() + m.consumedUTXOs.Difference(inputs) + } +} + +func (m *mempool) Peek(maxTxSize int) *txs.Tx { + txIter := m.unissuedTxs.NewIterator() + for txIter.Next() { + tx := txIter.Value() + txSize := len(tx.Bytes()) + if txSize <= maxTxSize { + return tx + } + } + return nil +} + +func (m *mempool) RequestBuildBlock() { + if m.unissuedTxs.Len() == 0 { + return + } + + select { + case m.toEngine <- common.PendingTxs: + default: + } +} + +func (m *mempool) MarkDropped(txID ids.ID, reason error) { + m.droppedTxIDs.Put(txID, reason) +} + +func (m *mempool) GetDropReason(txID ids.ID) error { + err, _ := m.droppedTxIDs.Get(txID) + return err +} diff --git a/avalanchego/vms/avm/txs/mempool/mempool_test.go b/avalanchego/vms/avm/txs/mempool/mempool_test.go new file mode 100644 index 00000000..d02abc48 --- /dev/null +++ b/avalanchego/vms/avm/txs/mempool/mempool_test.go @@ -0,0 +1,173 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package mempool + +import ( + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var ( + keys = secp256k1.TestKeys() + chainID = ids.ID{5, 4, 3, 2, 1} + assetID = ids.ID{1, 2, 3} +) + +// shows that valid tx is not added to mempool if this would exceed its maximum +// size +func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { + require := require.New(t) + + registerer := prometheus.NewRegistry() + mempoolIntf, err := New("mempool", registerer, nil) + require.NoError(err) + + mempool := mempoolIntf.(*mempool) + + testTxs := createTestTxs(2) + tx := testTxs[0] + + // shortcut to simulated almost filled mempool + mempool.bytesAvailable = len(tx.Bytes()) - 1 + + err = mempool.Add(tx) + require.ErrorIs(err, errMempoolFull) + + // shortcut to simulated almost filled mempool + mempool.bytesAvailable = len(tx.Bytes()) + + err = mempool.Add(tx) + require.NoError(err) +} + +func TestTxsInMempool(t *testing.T) { + require := require.New(t) + + registerer := prometheus.NewRegistry() + toEngine := make(chan common.Message, 100) + mempool, err := New("mempool", registerer, toEngine) + require.NoError(err) + + testTxs := createTestTxs(2) + + mempool.RequestBuildBlock() + select { + case <-toEngine: + t.Fatalf("should not have sent message to engine") + default: + } + + for _, tx := range testTxs { + txID := tx.ID() + // tx not already there + require.False(mempool.Has(txID)) + + // we can 
insert + require.NoError(mempool.Add(tx)) + + // we can get it + require.True(mempool.Has(txID)) + + retrieved := mempool.Get(txID) + require.True(retrieved != nil) + require.Equal(tx, retrieved) + + // tx exists in mempool + require.True(mempool.Has(txID)) + + // once removed it cannot be there + mempool.Remove([]*txs.Tx{tx}) + + require.False(mempool.Has(txID)) + require.Nil(mempool.Get(txID)) + + // we can reinsert it again to grow the mempool + require.NoError(mempool.Add(tx)) + } + + mempool.RequestBuildBlock() + select { + case <-toEngine: + default: + t.Fatalf("should have sent message to engine") + } + + mempool.Remove(testTxs) + + mempool.RequestBuildBlock() + select { + case <-toEngine: + t.Fatalf("should not have sent message to engine") + default: + } +} + +func createTestTxs(count int) []*txs.Tx { + testTxs := make([]*txs.Tx, 0, count) + addr := keys[0].PublicKey().Address() + for i := uint32(0); i < uint32(count); i++ { + tx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.ID{'t', 'x', 'I', 'D'}, + OutputIndex: i, + }, + Asset: avax.Asset{ID: assetID}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{i}, + }, + }, + }}, + Outs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + }}, + }}, + Name: "NormalName", + Symbol: "TICK", + Denomination: byte(2), + States: []*txs.InitialState{ + { + FxIndex: 0, + Outs: []verify.State{ + &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + }, + }, + }, + }} + tx.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) + testTxs = append(testTxs, tx) + } + 
return testTxs +} diff --git a/avalanchego/vms/avm/txs/mempool/mock_mempool.go b/avalanchego/vms/avm/txs/mempool/mock_mempool.go new file mode 100644 index 00000000..215e97f1 --- /dev/null +++ b/avalanchego/vms/avm/txs/mempool/mock_mempool.go @@ -0,0 +1,145 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/avm/txs/mempool (interfaces: Mempool) + +// Package mempool is a generated GoMock package. +package mempool + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + txs "github.com/ava-labs/avalanchego/vms/avm/txs" + gomock "github.com/golang/mock/gomock" +) + +// MockMempool is a mock of Mempool interface. +type MockMempool struct { + ctrl *gomock.Controller + recorder *MockMempoolMockRecorder +} + +// MockMempoolMockRecorder is the mock recorder for MockMempool. +type MockMempoolMockRecorder struct { + mock *MockMempool +} + +// NewMockMempool creates a new mock instance. +func NewMockMempool(ctrl *gomock.Controller) *MockMempool { + mock := &MockMempool{ctrl: ctrl} + mock.recorder = &MockMempoolMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockMempool) EXPECT() *MockMempoolMockRecorder { + return m.recorder +} + +// Add mocks base method. +func (m *MockMempool) Add(arg0 *txs.Tx) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Add", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Add indicates an expected call of Add. +func (mr *MockMempoolMockRecorder) Add(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Add", reflect.TypeOf((*MockMempool)(nil).Add), arg0) +} + +// Get mocks base method. 
+func (m *MockMempool) Get(arg0 ids.ID) *txs.Tx { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Get", arg0) + ret0, _ := ret[0].(*txs.Tx) + return ret0 +} + +// Get indicates an expected call of Get. +func (mr *MockMempoolMockRecorder) Get(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockMempool)(nil).Get), arg0) +} + +// GetDropReason mocks base method. +func (m *MockMempool) GetDropReason(arg0 ids.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDropReason", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// GetDropReason indicates an expected call of GetDropReason. +func (mr *MockMempoolMockRecorder) GetDropReason(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDropReason", reflect.TypeOf((*MockMempool)(nil).GetDropReason), arg0) +} + +// Has mocks base method. +func (m *MockMempool) Has(arg0 ids.ID) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Has", arg0) + ret0, _ := ret[0].(bool) + return ret0 +} + +// Has indicates an expected call of Has. +func (mr *MockMempoolMockRecorder) Has(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMempool)(nil).Has), arg0) +} + +// MarkDropped mocks base method. +func (m *MockMempool) MarkDropped(arg0 ids.ID, arg1 error) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "MarkDropped", arg0, arg1) +} + +// MarkDropped indicates an expected call of MarkDropped. +func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkDropped", reflect.TypeOf((*MockMempool)(nil).MarkDropped), arg0, arg1) +} + +// Peek mocks base method. 
+func (m *MockMempool) Peek(arg0 int) *txs.Tx { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Peek", arg0) + ret0, _ := ret[0].(*txs.Tx) + return ret0 +} + +// Peek indicates an expected call of Peek. +func (mr *MockMempoolMockRecorder) Peek(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Peek", reflect.TypeOf((*MockMempool)(nil).Peek), arg0) +} + +// Remove mocks base method. +func (m *MockMempool) Remove(arg0 []*txs.Tx) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "Remove", arg0) +} + +// Remove indicates an expected call of Remove. +func (mr *MockMempoolMockRecorder) Remove(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockMempool)(nil).Remove), arg0) +} + +// RequestBuildBlock mocks base method. +func (m *MockMempool) RequestBuildBlock() { + m.ctrl.T.Helper() + m.ctrl.Call(m, "RequestBuildBlock") +} + +// RequestBuildBlock indicates an expected call of RequestBuildBlock. +func (mr *MockMempoolMockRecorder) RequestBuildBlock() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RequestBuildBlock", reflect.TypeOf((*MockMempool)(nil).RequestBuildBlock)) +} diff --git a/avalanchego/vms/avm/txs/mock_unsigned_tx.go b/avalanchego/vms/avm/txs/mock_unsigned_tx.go new file mode 100644 index 00000000..25a7a8c4 --- /dev/null +++ b/avalanchego/vms/avm/txs/mock_unsigned_tx.go @@ -0,0 +1,163 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/avm/txs (interfaces: UnsignedTx) + +// Package txs is a generated GoMock package. 
+package txs + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + snow "github.com/ava-labs/avalanchego/snow" + set "github.com/ava-labs/avalanchego/utils/set" + avax "github.com/ava-labs/avalanchego/vms/components/avax" + gomock "github.com/golang/mock/gomock" +) + +// MockUnsignedTx is a mock of UnsignedTx interface. +type MockUnsignedTx struct { + ctrl *gomock.Controller + recorder *MockUnsignedTxMockRecorder +} + +// MockUnsignedTxMockRecorder is the mock recorder for MockUnsignedTx. +type MockUnsignedTxMockRecorder struct { + mock *MockUnsignedTx +} + +// NewMockUnsignedTx creates a new mock instance. +func NewMockUnsignedTx(ctrl *gomock.Controller) *MockUnsignedTx { + mock := &MockUnsignedTx{ctrl: ctrl} + mock.recorder = &MockUnsignedTxMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockUnsignedTx) EXPECT() *MockUnsignedTxMockRecorder { + return m.recorder +} + +// AssetIDs mocks base method. +func (m *MockUnsignedTx) AssetIDs() set.Set[ids.ID] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AssetIDs") + ret0, _ := ret[0].(set.Set[ids.ID]) + return ret0 +} + +// AssetIDs indicates an expected call of AssetIDs. +func (mr *MockUnsignedTxMockRecorder) AssetIDs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AssetIDs", reflect.TypeOf((*MockUnsignedTx)(nil).AssetIDs)) +} + +// Bytes mocks base method. +func (m *MockUnsignedTx) Bytes() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bytes") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Bytes indicates an expected call of Bytes. +func (mr *MockUnsignedTxMockRecorder) Bytes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bytes", reflect.TypeOf((*MockUnsignedTx)(nil).Bytes)) +} + +// ConsumedAssetIDs mocks base method. 
+func (m *MockUnsignedTx) ConsumedAssetIDs() set.Set[ids.ID] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ConsumedAssetIDs") + ret0, _ := ret[0].(set.Set[ids.ID]) + return ret0 +} + +// ConsumedAssetIDs indicates an expected call of ConsumedAssetIDs. +func (mr *MockUnsignedTxMockRecorder) ConsumedAssetIDs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ConsumedAssetIDs", reflect.TypeOf((*MockUnsignedTx)(nil).ConsumedAssetIDs)) +} + +// InitCtx mocks base method. +func (m *MockUnsignedTx) InitCtx(arg0 *snow.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "InitCtx", arg0) +} + +// InitCtx indicates an expected call of InitCtx. +func (mr *MockUnsignedTxMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), arg0) +} + +// InputIDs mocks base method. +func (m *MockUnsignedTx) InputIDs() set.Set[ids.ID] { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InputIDs") + ret0, _ := ret[0].(set.Set[ids.ID]) + return ret0 +} + +// InputIDs indicates an expected call of InputIDs. +func (mr *MockUnsignedTxMockRecorder) InputIDs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InputIDs", reflect.TypeOf((*MockUnsignedTx)(nil).InputIDs)) +} + +// InputUTXOs mocks base method. +func (m *MockUnsignedTx) InputUTXOs() []*avax.UTXOID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "InputUTXOs") + ret0, _ := ret[0].([]*avax.UTXOID) + return ret0 +} + +// InputUTXOs indicates an expected call of InputUTXOs. +func (mr *MockUnsignedTxMockRecorder) InputUTXOs() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InputUTXOs", reflect.TypeOf((*MockUnsignedTx)(nil).InputUTXOs)) +} + +// NumCredentials mocks base method. 
+func (m *MockUnsignedTx) NumCredentials() int { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NumCredentials") + ret0, _ := ret[0].(int) + return ret0 +} + +// NumCredentials indicates an expected call of NumCredentials. +func (mr *MockUnsignedTxMockRecorder) NumCredentials() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NumCredentials", reflect.TypeOf((*MockUnsignedTx)(nil).NumCredentials)) +} + +// SetBytes mocks base method. +func (m *MockUnsignedTx) SetBytes(arg0 []byte) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetBytes", arg0) +} + +// SetBytes indicates an expected call of SetBytes. +func (mr *MockUnsignedTxMockRecorder) SetBytes(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), arg0) +} + +// Visit mocks base method. +func (m *MockUnsignedTx) Visit(arg0 Visitor) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Visit", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Visit indicates an expected call of Visit. +func (mr *MockUnsignedTxMockRecorder) Visit(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), arg0) +} diff --git a/avalanchego/vms/avm/txs/operation.go b/avalanchego/vms/avm/txs/operation.go index 4b025e7a..5978ab47 100644 --- a/avalanchego/vms/avm/txs/operation.go +++ b/avalanchego/vms/avm/txs/operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -11,16 +11,16 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" ) var ( - errNilOperation = errors.New("nil operation is not valid") - errNilFxOperation = errors.New("nil fx operation is not valid") - errNotSortedAndUniqueUTXOIDs = errors.New("utxo IDs not sorted and unique") + ErrNilOperation = errors.New("nil operation is not valid") + ErrNilFxOperation = errors.New("nil fx operation is not valid") + ErrNotSortedAndUniqueUTXOIDs = errors.New("utxo IDs not sorted and unique") ) type Operation struct { @@ -30,14 +30,14 @@ type Operation struct { Op fxs.FxOperation `serialize:"true" json:"operation"` } -func (op *Operation) Verify(c codec.Manager) error { +func (op *Operation) Verify() error { switch { case op == nil: - return errNilOperation + return ErrNilOperation case op.Op == nil: - return errNilFxOperation - case !avax.IsSortedAndUniqueUTXOIDs(op.UTXOIDs): - return errNotSortedAndUniqueUTXOIDs + return ErrNilFxOperation + case !utils.IsSortedAndUniqueSortable(op.UTXOIDs): + return ErrNotSortedAndUniqueUTXOIDs default: return verify.All(&op.Asset, op.Op) } @@ -62,8 +62,15 @@ func (ops *innerSortOperation) Less(i, j int) bool { } return bytes.Compare(iBytes, jBytes) == -1 } -func (ops *innerSortOperation) Len() int { return len(ops.ops) } -func (ops *innerSortOperation) Swap(i, j int) { o := ops.ops; o[j], o[i] = o[i], o[j] } + +func (ops *innerSortOperation) Len() int { + return len(ops.ops) +} + +func (ops *innerSortOperation) Swap(i, j int) { + o := ops.ops + o[j], o[i] = o[i], o[j] +} func SortOperations(ops []*Operation, c codec.Manager) { sort.Sort(&innerSortOperation{ops: ops, codec: c}) @@ -75,7 +82,7 
@@ func IsSortedAndUniqueOperations(ops []*Operation, c codec.Manager) bool { type innerSortOperationsWithSigners struct { ops []*Operation - signers [][]*crypto.PrivateKeySECP256K1R + signers [][]*secp256k1.PrivateKey codec codec.Manager } @@ -93,12 +100,16 @@ func (ops *innerSortOperationsWithSigners) Less(i, j int) bool { } return bytes.Compare(iBytes, jBytes) == -1 } -func (ops *innerSortOperationsWithSigners) Len() int { return len(ops.ops) } + +func (ops *innerSortOperationsWithSigners) Len() int { + return len(ops.ops) +} + func (ops *innerSortOperationsWithSigners) Swap(i, j int) { ops.ops[j], ops.ops[i] = ops.ops[i], ops.ops[j] ops.signers[j], ops.signers[i] = ops.signers[i], ops.signers[j] } -func SortOperationsWithSigners(ops []*Operation, signers [][]*crypto.PrivateKeySECP256K1R, codec codec.Manager) { +func SortOperationsWithSigners(ops []*Operation, signers [][]*secp256k1.PrivateKey, codec codec.Manager) { sort.Sort(&innerSortOperationsWithSigners{ops: ops, signers: signers, codec: codec}) } diff --git a/avalanchego/vms/avm/txs/operation_test.go b/avalanchego/vms/avm/txs/operation_test.go index 02d119fe..55a06268 100644 --- a/avalanchego/vms/avm/txs/operation_test.go +++ b/avalanchego/vms/avm/txs/operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -20,45 +20,29 @@ type testOperable struct { Outputs []verify.State `serialize:"true"` } -func (o *testOperable) InitCtx(ctx *snow.Context) {} +func (*testOperable) InitCtx(*snow.Context) {} -func (o *testOperable) Outs() []verify.State { return o.Outputs } +func (o *testOperable) Outs() []verify.State { + return o.Outputs +} func TestOperationVerifyNil(t *testing.T) { - c := linearcodec.NewDefault() - m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } - op := (*Operation)(nil) - if err := op.Verify(m); err == nil { + if err := op.Verify(); err == nil { t.Fatalf("Should have erred due to nil operation") } } func TestOperationVerifyEmpty(t *testing.T) { - c := linearcodec.NewDefault() - m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } - op := &Operation{ Asset: avax.Asset{ID: ids.Empty}, } - if err := op.Verify(m); err == nil { + if err := op.Verify(); err == nil { t.Fatalf("Should have erred due to empty operation") } } func TestOperationVerifyUTXOIDsNotSorted(t *testing.T) { - c := linearcodec.NewDefault() - m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } - op := &Operation{ Asset: avax.Asset{ID: ids.Empty}, UTXOIDs: []*avax.UTXOID{ @@ -73,18 +57,12 @@ func TestOperationVerifyUTXOIDsNotSorted(t *testing.T) { }, Op: &testOperable{}, } - if err := op.Verify(m); err == nil { + if err := op.Verify(); err == nil { t.Fatalf("Should have erred due to unsorted utxoIDs") } } func TestOperationVerify(t *testing.T) { - c := linearcodec.NewDefault() - m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } - assetID := ids.GenerateTestID() op := &Operation{ Asset: avax.Asset{ID: assetID}, @@ -96,7 +74,7 @@ func TestOperationVerify(t *testing.T) { }, Op: &testOperable{}, } - if err := op.Verify(m); err != nil { + if err := op.Verify(); 
err != nil { t.Fatal(err) } } diff --git a/avalanchego/vms/avm/txs/operation_tx.go b/avalanchego/vms/avm/txs/operation_tx.go index 45239661..503ce742 100644 --- a/avalanchego/vms/avm/txs/operation_tx.go +++ b/avalanchego/vms/avm/txs/operation_tx.go @@ -1,25 +1,19 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( - "errors" - - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) var ( - errOperationsNotSortedUnique = errors.New("operations not sorted and unique") - errNoOperations = errors.New("an operationTx must have at least one operation") - errDoubleSpend = errors.New("inputs attempt to double spend an input") - - _ UnsignedTx = &OperationTx{} - _ secp256k1fx.UnsignedTx = &OperationTx{} + _ UnsignedTx = (*OperationTx)(nil) + _ secp256k1fx.UnsignedTx = (*OperationTx)(nil) ) // OperationTx is a transaction with no credentials. @@ -38,7 +32,9 @@ func (t *OperationTx) InitCtx(ctx *snow.Context) { // Operations track which ops this transaction is performing. The returned array // should not be modified. 
-func (t *OperationTx) Operations() []*Operation { return t.Ops } +func (t *OperationTx) Operations() []*Operation { + return t.Ops +} func (t *OperationTx) InputUTXOs() []*avax.UTXOID { utxos := t.BaseTx.InputUTXOs() @@ -48,8 +44,18 @@ func (t *OperationTx) InputUTXOs() []*avax.UTXOID { return utxos } +func (t *OperationTx) InputIDs() set.Set[ids.ID] { + inputs := t.BaseTx.InputIDs() + for _, op := range t.Ops { + for _, utxo := range op.UTXOIDs { + inputs.Add(utxo.InputID()) + } + } + return inputs +} + // ConsumedAssetIDs returns the IDs of the assets this transaction consumes -func (t *OperationTx) ConsumedAssetIDs() ids.Set { +func (t *OperationTx) ConsumedAssetIDs() set.Set[ids.ID] { assets := t.AssetIDs() for _, op := range t.Ops { if len(op.UTXOIDs) > 0 { @@ -60,7 +66,7 @@ func (t *OperationTx) ConsumedAssetIDs() ids.Set { } // AssetIDs returns the IDs of the assets this transaction depends on -func (t *OperationTx) AssetIDs() ids.Set { +func (t *OperationTx) AssetIDs() set.Set[ids.ID] { assets := t.BaseTx.AssetIDs() for _, op := range t.Ops { assets.Add(op.AssetID()) @@ -73,49 +79,6 @@ func (t *OperationTx) NumCredentials() int { return t.BaseTx.NumCredentials() + len(t.Ops) } -// SyntacticVerify that this transaction is well-formed. 
-func (t *OperationTx) SyntacticVerify( - ctx *snow.Context, - c codec.Manager, - txFeeAssetID ids.ID, - txFee uint64, - _ uint64, - numFxs int, -) error { - switch { - case t == nil: - return errNilTx - case len(t.Ops) == 0: - return errNoOperations - } - - if err := t.BaseTx.SyntacticVerify(ctx, c, txFeeAssetID, txFee, txFee, numFxs); err != nil { - return err - } - - inputs := ids.NewSet(len(t.Ins)) - for _, in := range t.Ins { - inputs.Add(in.InputID()) - } - - for _, op := range t.Ops { - if err := op.Verify(c); err != nil { - return err - } - for _, utxoID := range op.UTXOIDs { - inputID := utxoID.InputID() - if inputs.Contains(inputID) { - return errDoubleSpend - } - inputs.Add(inputID) - } - } - if !IsSortedAndUniqueOperations(t.Ops, c) { - return errOperationsNotSortedUnique - } - return nil -} - func (t *OperationTx) Visit(v Visitor) error { return v.OperationTx(t) } diff --git a/avalanchego/vms/avm/txs/parser.go b/avalanchego/vms/avm/txs/parser.go index b070250b..f5f16d59 100644 --- a/avalanchego/vms/avm/txs/parser.go +++ b/avalanchego/vms/avm/txs/parser.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -17,16 +17,20 @@ import ( "github.com/ava-labs/avalanchego/vms/avm/fxs" ) +// CodecVersion is the current default codec version const CodecVersion = 0 -var _ Parser = &parser{} +var _ Parser = (*parser)(nil) type Parser interface { Codec() codec.Manager GenesisCodec() codec.Manager - Parse(bytes []byte) (*Tx, error) - ParseGenesis(bytes []byte) (*Tx, error) + CodecRegistry() codec.Registry + GenesisCodecRegistry() codec.Registry + + ParseTx(bytes []byte) (*Tx, error) + ParseGenesisTx(bytes []byte) (*Tx, error) InitializeTx(tx *Tx) error InitializeGenesisTx(tx *Tx) error @@ -35,6 +39,8 @@ type Parser interface { type parser struct { cm codec.Manager gcm codec.Manager + c linearcodec.Codec + gc linearcodec.Codec } func NewParser(fxs []fxs.Fx) (Parser, error) { @@ -96,19 +102,46 @@ func NewCustomParser( return &parser{ cm: cm, gcm: gcm, + c: c, + gc: gc, }, nil } -func (p *parser) Codec() codec.Manager { return p.cm } -func (p *parser) GenesisCodec() codec.Manager { return p.gcm } -func (p *parser) Parse(bytes []byte) (*Tx, error) { return parse(p.cm, bytes) } -func (p *parser) ParseGenesis(bytes []byte) (*Tx, error) { return parse(p.gcm, bytes) } -func (p *parser) InitializeTx(tx *Tx) error { return initializeTx(p.cm, tx) } -func (p *parser) InitializeGenesisTx(tx *Tx) error { return initializeTx(p.gcm, tx) } +func (p *parser) Codec() codec.Manager { + return p.cm +} + +func (p *parser) GenesisCodec() codec.Manager { + return p.gcm +} + +func (p *parser) CodecRegistry() codec.Registry { + return p.c +} + +func (p *parser) GenesisCodecRegistry() codec.Registry { + return p.gc +} + +func (p *parser) ParseTx(bytes []byte) (*Tx, error) { + return parse(p.cm, bytes) +} + +func (p *parser) ParseGenesisTx(bytes []byte) (*Tx, error) { + return parse(p.gcm, bytes) +} + +func (p *parser) InitializeTx(tx *Tx) error { + return initializeTx(p.cm, tx) +} + +func (p *parser) InitializeGenesisTx(tx *Tx) error { + return initializeTx(p.gcm, tx) +} -func parse(cm 
codec.Manager, bytes []byte) (*Tx, error) { +func parse(cm codec.Manager, signedBytes []byte) (*Tx, error) { tx := &Tx{} - parsedVersion, err := cm.Unmarshal(bytes, tx) + parsedVersion, err := cm.Unmarshal(signedBytes, tx) if err != nil { return nil, err } @@ -116,23 +149,28 @@ func parse(cm codec.Manager, bytes []byte) (*Tx, error) { return nil, fmt.Errorf("expected codec version %d but got %d", CodecVersion, parsedVersion) } - unsignedBytes, err := cm.Marshal(CodecVersion, &tx.Unsigned) + unsignedBytesLen, err := cm.Size(CodecVersion, &tx.Unsigned) if err != nil { - return nil, err + return nil, fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) } - tx.Initialize(unsignedBytes, bytes) + + unsignedBytes := signedBytes[:unsignedBytesLen] + tx.SetBytes(unsignedBytes, signedBytes) return tx, nil } func initializeTx(cm codec.Manager, tx *Tx) error { - unsignedBytes, err := cm.Marshal(CodecVersion, tx.Unsigned) + signedBytes, err := cm.Marshal(CodecVersion, tx) if err != nil { - return err + return fmt.Errorf("problem creating transaction: %w", err) } - signedBytes, err := cm.Marshal(CodecVersion, &tx) + + unsignedBytesLen, err := cm.Size(CodecVersion, &tx.Unsigned) if err != nil { - return err + return fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) } - tx.Initialize(unsignedBytes, signedBytes) + + unsignedBytes := signedBytes[:unsignedBytesLen] + tx.SetBytes(unsignedBytes, signedBytes) return nil } diff --git a/avalanchego/vms/avm/txs/tx.go b/avalanchego/vms/avm/txs/tx.go index 2bb601fc..309d5974 100644 --- a/avalanchego/vms/avm/txs/tx.go +++ b/avalanchego/vms/avm/txs/tx.go @@ -1,17 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( - "errors" "fmt" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/nftfx" @@ -19,28 +19,21 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var errNilTx = errors.New("nil tx is not valid") - type UnsignedTx interface { snow.ContextInitializable - Initialize(unsignedBytes []byte) + SetBytes(unsignedBytes []byte) Bytes() []byte - ConsumedAssetIDs() ids.Set - AssetIDs() ids.Set + InputIDs() set.Set[ids.ID] + + ConsumedAssetIDs() set.Set[ids.ID] + AssetIDs() set.Set[ids.ID] NumCredentials() int + // TODO: deprecate after x-chain linearization InputUTXOs() []*avax.UTXOID - SyntacticVerify( - ctx *snow.Context, - c codec.Manager, - txFeeAssetID ids.ID, - txFee uint64, - creationTxFee uint64, - numFxs int, - ) error // Visit calls [visitor] with this transaction's concrete type Visit(visitor Visitor) error } @@ -58,17 +51,37 @@ type Tx struct { bytes []byte } -func (t *Tx) Initialize(unsignedBytes, signedBytes []byte) { +func (t *Tx) Initialize(c codec.Manager) error { + signedBytes, err := c.Marshal(CodecVersion, t) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + unsignedBytesLen, err := c.Size(CodecVersion, &t.Unsigned) + if err != nil { + return fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) + } + + unsignedBytes := signedBytes[:unsignedBytesLen] + t.SetBytes(unsignedBytes, signedBytes) + return nil +} + +func (t *Tx) SetBytes(unsignedBytes, signedBytes []byte) { t.id = hashing.ComputeHash256Array(signedBytes) t.bytes = signedBytes - t.Unsigned.Initialize(unsignedBytes) + 
t.Unsigned.SetBytes(unsignedBytes) } // ID returns the unique ID of this tx -func (t *Tx) ID() ids.ID { return t.id } +func (t *Tx) ID() ids.ID { + return t.id +} // Bytes returns the binary representation of this tx -func (t *Tx) Bytes() []byte { return t.bytes } +func (t *Tx) Bytes() []byte { + return t.bytes +} // UTXOs returns the UTXOs transaction is producing. func (t *Tx) UTXOs() []*avax.UTXO { @@ -79,39 +92,7 @@ func (t *Tx) UTXOs() []*avax.UTXO { return u.utxos } -// SyntacticVerify verifies that this transaction is well-formed. -func (t *Tx) SyntacticVerify( - ctx *snow.Context, - c codec.Manager, - txFeeAssetID ids.ID, - txFee uint64, - creationTxFee uint64, - numFxs int, -) error { - if t == nil || t.Unsigned == nil { - return errNilTx - } - - if err := t.Unsigned.SyntacticVerify(ctx, c, txFeeAssetID, txFee, creationTxFee, numFxs); err != nil { - return err - } - - for _, cred := range t.Creds { - if err := cred.Verify(); err != nil { - return err - } - } - - if numCreds := t.Unsigned.NumCredentials(); numCreds != len(t.Creds) { - return fmt.Errorf("tx has %d credentials but %d inputs. 
Should be same", - len(t.Creds), - numCreds, - ) - } - return nil -} - -func (t *Tx) SignSECP256K1Fx(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R) error { +func (t *Tx) SignSECP256K1Fx(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { unsignedBytes, err := c.Marshal(CodecVersion, &t.Unsigned) if err != nil { return fmt.Errorf("problem creating transaction: %w", err) @@ -120,7 +101,7 @@ func (t *Tx) SignSECP256K1Fx(c codec.Manager, signers [][]*crypto.PrivateKeySECP hash := hashing.ComputeHash256(unsignedBytes) for _, keys := range signers { cred := &secp256k1fx.Credential{ - Sigs: make([][crypto.SECP256K1RSigLen]byte, len(keys)), + Sigs: make([][secp256k1.SignatureLen]byte, len(keys)), } for i, key := range keys { sig, err := key.SignHash(hash) @@ -136,11 +117,11 @@ func (t *Tx) SignSECP256K1Fx(c codec.Manager, signers [][]*crypto.PrivateKeySECP if err != nil { return fmt.Errorf("problem creating transaction: %w", err) } - t.Initialize(unsignedBytes, signedBytes) + t.SetBytes(unsignedBytes, signedBytes) return nil } -func (t *Tx) SignPropertyFx(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R) error { +func (t *Tx) SignPropertyFx(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { unsignedBytes, err := c.Marshal(CodecVersion, &t.Unsigned) if err != nil { return fmt.Errorf("problem creating transaction: %w", err) @@ -149,7 +130,7 @@ func (t *Tx) SignPropertyFx(c codec.Manager, signers [][]*crypto.PrivateKeySECP2 hash := hashing.ComputeHash256(unsignedBytes) for _, keys := range signers { cred := &propertyfx.Credential{Credential: secp256k1fx.Credential{ - Sigs: make([][crypto.SECP256K1RSigLen]byte, len(keys)), + Sigs: make([][secp256k1.SignatureLen]byte, len(keys)), }} for i, key := range keys { sig, err := key.SignHash(hash) @@ -165,11 +146,11 @@ func (t *Tx) SignPropertyFx(c codec.Manager, signers [][]*crypto.PrivateKeySECP2 if err != nil { return fmt.Errorf("problem creating transaction: %w", err) } - 
t.Initialize(unsignedBytes, signedBytes) + t.SetBytes(unsignedBytes, signedBytes) return nil } -func (t *Tx) SignNFTFx(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R) error { +func (t *Tx) SignNFTFx(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { unsignedBytes, err := c.Marshal(CodecVersion, &t.Unsigned) if err != nil { return fmt.Errorf("problem creating transaction: %w", err) @@ -178,7 +159,7 @@ func (t *Tx) SignNFTFx(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R hash := hashing.ComputeHash256(unsignedBytes) for _, keys := range signers { cred := &nftfx.Credential{Credential: secp256k1fx.Credential{ - Sigs: make([][crypto.SECP256K1RSigLen]byte, len(keys)), + Sigs: make([][secp256k1.SignatureLen]byte, len(keys)), }} for i, key := range keys { sig, err := key.SignHash(hash) @@ -194,6 +175,6 @@ func (t *Tx) SignNFTFx(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R if err != nil { return fmt.Errorf("problem creating transaction: %w", err) } - t.Initialize(unsignedBytes, signedBytes) + t.SetBytes(unsignedBytes, signedBytes) return nil } diff --git a/avalanchego/vms/avm/txs/tx_test.go b/avalanchego/vms/avm/txs/tx_test.go deleted file mode 100644 index 462b3a4a..00000000 --- a/avalanchego/vms/avm/txs/tx_test.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package txs - -import ( - "errors" - "testing" - - "github.com/ava-labs/avalanchego/codec" - "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" - "github.com/ava-labs/avalanchego/vms/avm/fxs" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" -) - -var ( - networkID uint32 = 10 - chainID = ids.ID{5, 4, 3, 2, 1} - platformChainID = ids.Empty.Prefix(0) - - keys = crypto.BuildTestKeys() - - assetID = ids.ID{1, 2, 3} -) - -func setupCodec() codec.Manager { - c := linearcodec.NewDefault() - m := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&BaseTx{}), - c.RegisterType(&CreateAssetTx{}), - c.RegisterType(&OperationTx{}), - c.RegisterType(&ImportTx{}), - c.RegisterType(&ExportTx{}), - c.RegisterType(&secp256k1fx.TransferInput{}), - c.RegisterType(&secp256k1fx.MintOutput{}), - c.RegisterType(&secp256k1fx.TransferOutput{}), - c.RegisterType(&secp256k1fx.MintOperation{}), - c.RegisterType(&secp256k1fx.Credential{}), - m.RegisterCodec(CodecVersion, c), - ) - if errs.Errored() { - panic(errs.Err) - } - return m -} - -func NewContext(tb testing.TB) *snow.Context { - ctx := snow.DefaultContextTest() - ctx.NetworkID = networkID - ctx.ChainID = chainID - avaxAssetID, err := ids.FromString("2XGxUr7VF7j1iwUp2aiGe4b6Ue2yyNghNS1SuNTNmZ77dPpXFZ") - if err != nil { - tb.Fatal(err) - } - ctx.AVAXAssetID = avaxAssetID - ctx.XChainID = ids.Empty.Prefix(0) - aliaser := ctx.BCLookup.(ids.Aliaser) - - errs := wrappers.Errs{} - errs.Add( - aliaser.Alias(chainID, "X"), - aliaser.Alias(chainID, chainID.String()), - aliaser.Alias(platformChainID, "P"), - aliaser.Alias(platformChainID, platformChainID.String()), - ) - if errs.Errored() { - tb.Fatal(errs.Err) - } - return ctx -} 
- -func TestTxNil(t *testing.T) { - ctx := NewContext(t) - c := linearcodec.NewDefault() - m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } - - tx := (*Tx)(nil) - if err := tx.SyntacticVerify(ctx, m, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Should have erred due to nil tx") - } -} - -func TestTxEmpty(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - tx := &Tx{} - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Should have erred due to nil tx") - } -} - -func TestTxInvalidCredential(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &Tx{ - Unsigned: &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{{ - UTXOID: avax.UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAvax, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }}, - }}, - Creds: []*fxs.FxCredential{{Verifiable: &avax.TestVerifiable{Err: errors.New("")}}}, - } - tx.Initialize(nil, nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Tx should have failed due to an invalid credential") - } -} - -func TestTxInvalidUnsignedTx(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &Tx{ - Unsigned: &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAvax, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }, - { - UTXOID: avax.UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAvax, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, 
- }, - }, - }, - }, - }, - }}, - Creds: []*fxs.FxCredential{ - {Verifiable: &avax.TestVerifiable{}}, - {Verifiable: &avax.TestVerifiable{}}, - }, - } - tx.Initialize(nil, nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Tx should have failed due to an invalid unsigned tx") - } -} - -func TestTxInvalidNumberOfCredentials(t *testing.T) { - ctx := NewContext(t) - c := setupCodec() - - tx := &Tx{ - Unsigned: &BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, - BlockchainID: chainID, - Ins: []*avax.TransferableInput{ - { - UTXOID: avax.UTXOID{TxID: ids.Empty, OutputIndex: 0}, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAvax, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }, - { - UTXOID: avax.UTXOID{TxID: ids.Empty, OutputIndex: 1}, - Asset: avax.Asset{ID: assetID}, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAvax, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, - }, - }, - }, - }, - }}, - Creds: []*fxs.FxCredential{{Verifiable: &avax.TestVerifiable{}}}, - } - tx.Initialize(nil, nil) - - if err := tx.SyntacticVerify(ctx, c, ids.Empty, 0, 0, 1); err == nil { - t.Fatalf("Tx should have failed due to an invalid number of credentials") - } -} diff --git a/avalanchego/vms/avm/txs/visitor.go b/avalanchego/vms/avm/txs/visitor.go index 6a11c745..8de00c1b 100644 --- a/avalanchego/vms/avm/txs/visitor.go +++ b/avalanchego/vms/avm/txs/visitor.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import "github.com/ava-labs/avalanchego/vms/components/avax" -var _ Visitor = &utxoGetter{} +var _ Visitor = (*utxoGetter)(nil) // Allow vm to execute custom logic against the underlying transaction types. 
type Visitor interface { diff --git a/avalanchego/vms/avm/unique_tx.go b/avalanchego/vms/avm/unique_tx.go index a333edef..6a57a87d 100644 --- a/avalanchego/vms/avm/unique_tx.go +++ b/avalanchego/vms/avm/unique_tx.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( + "context" "errors" "fmt" @@ -13,21 +14,21 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/executor" "github.com/ava-labs/avalanchego/vms/components/avax" ) var ( - errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") - errWrongAssetID = errors.New("asset ID must be AVAX in the atomic tx") - errMissingUTXO = errors.New("missing utxo") - errUnknownTx = errors.New("transaction is unknown") - errRejectedTx = errors.New("transaction is rejected") + errMissingUTXO = errors.New("missing utxo") + errUnknownTx = errors.New("transaction is unknown") + errRejectedTx = errors.New("transaction is rejected") ) var ( - _ snowstorm.Tx = &UniqueTx{} - _ cache.Evictable = &UniqueTx{} + _ snowstorm.Tx = (*UniqueTx)(nil) + _ cache.Evictable[ids.ID] = (*UniqueTx)(nil) ) // UniqueTx provides a de-duplication service for txs. 
This only provides a @@ -54,7 +55,7 @@ type TxCachedState struct { } func (tx *UniqueTx) refresh() { - tx.vm.numTxRefreshes.Inc() + tx.vm.metrics.IncTxRefreshes() if tx.TxCachedState == nil { tx.TxCachedState = &TxCachedState{} @@ -65,7 +66,7 @@ func (tx *UniqueTx) refresh() { unique := tx.vm.DeduplicateTx(tx) prevTx := tx.Tx if unique == tx { - tx.vm.numTxRefreshMisses.Inc() + tx.vm.metrics.IncTxRefreshMisses() // If no one was in the cache, make sure that there wasn't an // intermediate object whose state I must reflect @@ -74,7 +75,7 @@ func (tx *UniqueTx) refresh() { } tx.unique = true } else { - tx.vm.numTxRefreshHits.Inc() + tx.vm.metrics.IncTxRefreshHits() // If someone is in the cache, they must be up to date @@ -103,114 +104,75 @@ func (tx *UniqueTx) Evict() { tx.deps = nil } -func (tx *UniqueTx) setStatus(status choices.Status) error { +func (tx *UniqueTx) setStatus(status choices.Status) { tx.refresh() - if tx.status == status { - return nil + if tx.status != status { + tx.status = status + tx.vm.state.AddStatus(tx.ID(), status) } - tx.status = status - return tx.vm.state.PutStatus(tx.ID(), status) } // ID returns the wrapped txID -func (tx *UniqueTx) ID() ids.ID { return tx.txID } -func (tx *UniqueTx) Key() interface{} { return tx.txID } +func (tx *UniqueTx) ID() ids.ID { + return tx.txID +} + +func (tx *UniqueTx) Key() ids.ID { + return tx.txID +} // Accept is called when the transaction was finalized as accepted by consensus -func (tx *UniqueTx) Accept() error { +func (tx *UniqueTx) Accept(context.Context) error { if s := tx.Status(); s != choices.Processing { return fmt.Errorf("transaction has invalid status: %s", s) } - txID := tx.ID() - defer tx.vm.db.Abort() - - // Fetch the input UTXOs - inputUTXOIDs := tx.InputUTXOs() - inputUTXOs := make([]*avax.UTXO, 0, len(inputUTXOIDs)) - for _, utxoID := range inputUTXOIDs { - // Don't bother fetching the input UTXO if its symbolic - if utxoID.Symbolic() { - continue - } - - utxo, err := 
tx.vm.getUTXO(utxoID) - if err != nil { - // should never happen because the UTXO was previously verified to - // exist - return fmt.Errorf("error finding UTXO %s: %w", utxoID, err) - } - inputUTXOs = append(inputUTXOs, utxo) - } - - outputUTXOs := tx.UTXOs() - // index input and output UTXOs - if err := tx.vm.addressTxsIndexer.Accept(tx.ID(), inputUTXOs, outputUTXOs); err != nil { - return fmt.Errorf("error indexing tx: %w", err) + if err := tx.vm.onAccept(tx.Tx); err != nil { + return err } - // Remove spent utxos - for _, utxo := range inputUTXOIDs { - if utxo.Symbolic() { - // If the UTXO is symbolic, it can't be spent - continue - } - utxoID := utxo.InputID() - if err := tx.vm.state.DeleteUTXO(utxoID); err != nil { - return fmt.Errorf("couldn't delete UTXO %s: %w", utxoID, err) - } + executor := &executor.Executor{ + Codec: tx.vm.txBackend.Codec, + State: tx.vm.state, + Tx: tx.Tx, } - // Add new utxos - for _, utxo := range outputUTXOs { - if err := tx.vm.state.PutUTXO(utxo); err != nil { - return fmt.Errorf("couldn't put UTXO %s: %w", utxo.InputID(), err) - } + err := tx.Tx.Unsigned.Visit(executor) + if err != nil { + return fmt.Errorf("error staging accepted state changes: %w", err) } - if err := tx.setStatus(choices.Accepted); err != nil { - return fmt.Errorf("couldn't set status of tx %s: %w", txID, err) - } + tx.setStatus(choices.Accepted) - commitBatch, err := tx.vm.db.CommitBatch() + commitBatch, err := tx.vm.state.CommitBatch() if err != nil { + txID := tx.ID() return fmt.Errorf("couldn't create commitBatch while processing tx %s: %w", txID, err) } - err = tx.Tx.Unsigned.Visit(&executeTx{ - tx: tx.Tx, - batch: commitBatch, - sharedMemory: tx.vm.ctx.SharedMemory, - parser: tx.vm.parser, - }) + defer tx.vm.state.Abort() + err = tx.vm.ctx.SharedMemory.Apply( + executor.AtomicRequests, + commitBatch, + ) if err != nil { - return fmt.Errorf("ExecuteWithSideEffects erred while processing tx %s: %w", txID, err) + txID := tx.ID() + return fmt.Errorf("error 
committing accepted state changes while processing tx %s: %w", txID, err) } - tx.vm.pubsub.Publish(NewPubSubFilterer(tx.Tx)) - tx.vm.walletService.decided(txID) - tx.deps = nil // Needed to prevent a memory leak - return nil + return tx.vm.metrics.MarkTxAccepted(tx.Tx) } // Reject is called when the transaction was finalized as rejected by consensus -func (tx *UniqueTx) Reject() error { - defer tx.vm.db.Abort() - - if err := tx.setStatus(choices.Rejected); err != nil { - tx.vm.ctx.Log.Error("failed to reject tx", - zap.Stringer("txID", tx.txID), - zap.Error(err), - ) - return err - } +func (tx *UniqueTx) Reject(context.Context) error { + tx.setStatus(choices.Rejected) txID := tx.ID() tx.vm.ctx.Log.Debug("rejecting tx", zap.Stringer("txID", txID), ) - if err := tx.vm.db.Commit(); err != nil { + if err := tx.vm.state.Commit(); err != nil { tx.vm.ctx.Log.Error("failed to commit reject", zap.Stringer("txID", tx.txID), zap.Error(err), @@ -221,7 +183,6 @@ func (tx *UniqueTx) Reject() error { tx.vm.walletService.decided(txID) tx.deps = nil // Needed to prevent a memory leak - return nil } @@ -238,7 +199,7 @@ func (tx *UniqueTx) Dependencies() ([]snowstorm.Tx, error) { return tx.deps, nil } - txIDs := ids.Set{} + txIDs := set.Set[ids.ID]{} for _, in := range tx.InputUTXOs() { if in.Symbolic() { continue @@ -283,12 +244,12 @@ func (tx *UniqueTx) InputIDs() []ids.ID { } // Whitelist is not supported by this transaction type, so [false] is returned. -func (tx *UniqueTx) HasWhitelist() bool { +func (*UniqueTx) HasWhitelist() bool { return false } // Whitelist is not supported by this transaction type, so [false] is returned. 
-func (tx *UniqueTx) Whitelist() (ids.Set, error) { +func (*UniqueTx) Whitelist(context.Context) (set.Set[ids.ID], error) { return nil, nil } @@ -332,7 +293,7 @@ func (tx *UniqueTx) verifyWithoutCacheWrites() error { } // Verify the validity of this transaction -func (tx *UniqueTx) Verify() error { +func (tx *UniqueTx) Verify(context.Context) error { if err := tx.verifyWithoutCacheWrites(); err != nil { return err } @@ -354,29 +315,26 @@ func (tx *UniqueTx) SyntacticVerify() error { } tx.verifiedTx = true - tx.validity = tx.Tx.SyntacticVerify( - tx.vm.ctx, - tx.vm.parser.Codec(), - tx.vm.feeAssetID, - tx.vm.TxFee, - tx.vm.CreateAssetTxFee, - len(tx.vm.fxs), - ) + tx.validity = tx.Tx.Unsigned.Visit(&executor.SyntacticVerifier{ + Backend: tx.vm.txBackend, + Tx: tx.Tx, + }) return tx.validity } // SemanticVerify the validity of this transaction func (tx *UniqueTx) SemanticVerify() error { - // SyntacticVerify sets the error on validity and is checked in the next - // statement - _ = tx.SyntacticVerify() + if err := tx.SyntacticVerify(); err != nil { + return err + } if tx.validity != nil || tx.verifiedState { return tx.validity } - return tx.Unsigned.Visit(&txSemanticVerify{ - tx: tx.Tx, - vm: tx.vm, + return tx.Unsigned.Visit(&executor.SemanticVerifier{ + Backend: tx.vm.txBackend, + State: tx.vm.dagState, + Tx: tx.Tx, }) } diff --git a/avalanchego/vms/avm/utxo/spender.go b/avalanchego/vms/avm/utxo/spender.go new file mode 100644 index 00000000..ed57549d --- /dev/null +++ b/avalanchego/vms/avm/utxo/spender.go @@ -0,0 +1,440 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package utxo + +import ( + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var ( + errSpendOverflow = errors.New("spent amount overflows uint64") + errInsufficientFunds = errors.New("insufficient funds") + errAddressesCantMintAsset = errors.New("provided addresses don't have the authority to mint the provided asset") +) + +type Spender interface { + // Spend the provided amount while deducting the provided fee. + // Arguments: + // - [utxos] contains assets ID and amount to be spend for each assestID + // - [kc] are the owners of the funds + // - [amounts] is the amount of funds that are available to be spent for each assetID + // Returns: + // - [amountsSpent] the amount of funds that are spent + // - [inputs] the inputs that should be consumed to fund the outputs + // - [signers] the proof of ownership of the funds being moved + Spend( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + amounts map[ids.ID]uint64, + ) ( + map[ids.ID]uint64, // amountsSpent + []*avax.TransferableInput, // inputs + [][]*secp256k1.PrivateKey, // signers + error, + ) + + SpendNFT( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + assetID ids.ID, + groupID uint32, + to ids.ShortID, + ) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, + ) + + SpendAll( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + ) ( + map[ids.ID]uint64, + []*avax.TransferableInput, + [][]*secp256k1.PrivateKey, + error, + ) + + Mint( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + amounts map[ids.ID]uint64, + to ids.ShortID, + ) ( + []*txs.Operation, + 
[][]*secp256k1.PrivateKey, + error, + ) + + MintNFT( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + assetID ids.ID, + payload []byte, + to ids.ShortID, + ) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, + ) +} + +func NewSpender( + clk *mockable.Clock, + codec codec.Manager, +) Spender { + return &spender{ + clock: clk, + codec: codec, + } +} + +type spender struct { + clock *mockable.Clock + codec codec.Manager +} + +func (s *spender) Spend( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + amounts map[ids.ID]uint64, +) ( + map[ids.ID]uint64, // amountsSpent + []*avax.TransferableInput, // inputs + [][]*secp256k1.PrivateKey, // signers + error, +) { + amountsSpent := make(map[ids.ID]uint64, len(amounts)) + time := s.clock.Unix() + + ins := []*avax.TransferableInput{} + keys := [][]*secp256k1.PrivateKey{} + for _, utxo := range utxos { + assetID := utxo.AssetID() + amount := amounts[assetID] + amountSpent := amountsSpent[assetID] + + if amountSpent >= amount { + // we already have enough inputs allocated to this asset + continue + } + + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + // this utxo can't be spent with the current keys right now + continue + } + input, ok := inputIntf.(avax.TransferableIn) + if !ok { + // this input doesn't have an amount, so I don't care about it here + continue + } + newAmountSpent, err := math.Add64(amountSpent, input.Amount()) + if err != nil { + // there was an error calculating the consumed amount, just error + return nil, nil, nil, errSpendOverflow + } + amountsSpent[assetID] = newAmountSpent + + // add the new input to the array + ins = append(ins, &avax.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: avax.Asset{ID: assetID}, + In: input, + }) + // add the required keys to the array + keys = append(keys, signers) + } + + for asset, amount := range amounts { + if amountsSpent[asset] < amount { + return nil, nil, nil, fmt.Errorf("want to spend %d of asset %s but only have %d", + 
amount, + asset, + amountsSpent[asset], + ) + } + } + + avax.SortTransferableInputsWithSigners(ins, keys) + return amountsSpent, ins, keys, nil +} + +func (s *spender) SpendNFT( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + assetID ids.ID, + groupID uint32, + to ids.ShortID, +) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, +) { + time := s.clock.Unix() + + ops := []*txs.Operation{} + keys := [][]*secp256k1.PrivateKey{} + + for _, utxo := range utxos { + // makes sure that the variable isn't overwritten with the next iteration + utxo := utxo + + if len(ops) > 0 { + // we have already been able to create the operation needed + break + } + + if utxo.AssetID() != assetID { + // wrong asset ID + continue + } + out, ok := utxo.Out.(*nftfx.TransferOutput) + if !ok { + // wrong output type + continue + } + if out.GroupID != groupID { + // wrong group id + continue + } + indices, signers, ok := kc.Match(&out.OutputOwners, time) + if !ok { + // unable to spend the output + continue + } + + // add the new operation to the array + ops = append(ops, &txs.Operation{ + Asset: utxo.Asset, + UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, + Op: &nftfx.TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: indices, + }, + Output: nftfx.TransferOutput{ + GroupID: out.GroupID, + Payload: out.Payload, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }, + }) + // add the required keys to the array + keys = append(keys, signers) + } + + if len(ops) == 0 { + return nil, nil, errInsufficientFunds + } + + txs.SortOperationsWithSigners(ops, keys, s.codec) + return ops, keys, nil +} + +func (s *spender) SpendAll( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, +) ( + map[ids.ID]uint64, + []*avax.TransferableInput, + [][]*secp256k1.PrivateKey, + error, +) { + amountsSpent := make(map[ids.ID]uint64) + time := s.clock.Unix() + + ins := []*avax.TransferableInput{} + keys := [][]*secp256k1.PrivateKey{} + for _, utxo := range utxos 
{ + assetID := utxo.AssetID() + amountSpent := amountsSpent[assetID] + + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + // this utxo can't be spent with the current keys right now + continue + } + input, ok := inputIntf.(avax.TransferableIn) + if !ok { + // this input doesn't have an amount, so I don't care about it here + continue + } + newAmountSpent, err := math.Add64(amountSpent, input.Amount()) + if err != nil { + // there was an error calculating the consumed amount, just error + return nil, nil, nil, errSpendOverflow + } + amountsSpent[assetID] = newAmountSpent + + // add the new input to the array + ins = append(ins, &avax.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: avax.Asset{ID: assetID}, + In: input, + }) + // add the required keys to the array + keys = append(keys, signers) + } + + avax.SortTransferableInputsWithSigners(ins, keys) + return amountsSpent, ins, keys, nil +} + +func (s *spender) Mint( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + amounts map[ids.ID]uint64, + to ids.ShortID, +) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, +) { + time := s.clock.Unix() + + ops := []*txs.Operation{} + keys := [][]*secp256k1.PrivateKey{} + + for _, utxo := range utxos { + // makes sure that the variable isn't overwritten with the next iteration + utxo := utxo + + assetID := utxo.AssetID() + amount := amounts[assetID] + if amount == 0 { + continue + } + + out, ok := utxo.Out.(*secp256k1fx.MintOutput) + if !ok { + continue + } + + inIntf, signers, err := kc.Spend(out, time) + if err != nil { + continue + } + + in, ok := inIntf.(*secp256k1fx.Input) + if !ok { + continue + } + + // add the operation to the array + ops = append(ops, &txs.Operation{ + Asset: utxo.Asset, + UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, + Op: &secp256k1fx.MintOperation{ + MintInput: *in, + MintOutput: *out, + TransferOutput: secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: 
[]ids.ShortID{to}, + }, + }, + }, + }) + // add the required keys to the array + keys = append(keys, signers) + + // remove the asset from the required amounts to mint + delete(amounts, assetID) + } + + for _, amount := range amounts { + if amount > 0 { + return nil, nil, errAddressesCantMintAsset + } + } + + txs.SortOperationsWithSigners(ops, keys, s.codec) + return ops, keys, nil +} + +func (s *spender) MintNFT( + utxos []*avax.UTXO, + kc *secp256k1fx.Keychain, + assetID ids.ID, + payload []byte, + to ids.ShortID, +) ( + []*txs.Operation, + [][]*secp256k1.PrivateKey, + error, +) { + time := s.clock.Unix() + + ops := []*txs.Operation{} + keys := [][]*secp256k1.PrivateKey{} + + for _, utxo := range utxos { + // makes sure that the variable isn't overwritten with the next iteration + utxo := utxo + + if len(ops) > 0 { + // we have already been able to create the operation needed + break + } + + if utxo.AssetID() != assetID { + // wrong asset id + continue + } + out, ok := utxo.Out.(*nftfx.MintOutput) + if !ok { + // wrong output type + continue + } + + indices, signers, ok := kc.Match(&out.OutputOwners, time) + if !ok { + // unable to spend the output + continue + } + + // add the operation to the array + ops = append(ops, &txs.Operation{ + Asset: avax.Asset{ID: assetID}, + UTXOIDs: []*avax.UTXOID{ + &utxo.UTXOID, + }, + Op: &nftfx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: indices, + }, + GroupID: out.GroupID, + Payload: payload, + Outputs: []*secp256k1fx.OutputOwners{{ + Threshold: 1, + Addrs: []ids.ShortID{to}, + }}, + }, + }) + // add the required keys to the array + keys = append(keys, signers) + } + + if len(ops) == 0 { + return nil, nil, errAddressesCantMintAsset + } + + txs.SortOperationsWithSigners(ops, keys, s.codec) + return ops, keys, nil +} diff --git a/avalanchego/vms/avm/vm.go b/avalanchego/vms/avm/vm.go index 87f7ff35..f3cab9ce 100644 --- a/avalanchego/vms/avm/vm.go +++ b/avalanchego/vms/avm/vm.go @@ -1,10 +1,10 @@ -// Copyright (C) 
2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( - "container/list" + "context" "errors" "fmt" "reflect" @@ -26,25 +26,34 @@ import ( "github.com/ava-labs/avalanchego/pubsub" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto" "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/avm/blocks" + "github.com/ava-labs/avalanchego/vms/avm/config" + "github.com/ava-labs/avalanchego/vms/avm/metrics" + "github.com/ava-labs/avalanchego/vms/avm/network" "github.com/ava-labs/avalanchego/vms/avm/states" "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/avm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/avm/utxo" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/index" "github.com/ava-labs/avalanchego/vms/components/keystore" - "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - safemath "github.com/ava-labs/avalanchego/utils/math" + blockbuilder "github.com/ava-labs/avalanchego/vms/avm/blocks/builder" + blockexecutor "github.com/ava-labs/avalanchego/vms/avm/blocks/executor" extensions "github.com/ava-labs/avalanchego/vms/avm/fxs" 
+ txexecutor "github.com/ava-labs/avalanchego/vms/avm/txs/executor" ) const ( @@ -59,17 +68,21 @@ var ( errUnknownFx = errors.New("unknown feature extension") errGenesisAssetMustHaveState = errors.New("genesis asset must have non-empty state") errBootstrapping = errors.New("chain is currently bootstrapping") - errInsufficientFunds = errors.New("insufficient funds") - _ vertex.DAGVM = &VM{} + _ vertex.LinearizableVMWithEngine = (*VM)(nil) ) type VM struct { - Factory - metrics + network.Atomic + + config.Config + + metrics metrics.Metrics + avax.AddressManager avax.AtomicUTXOManager ids.Aliaser + utxo.Spender // Contains information of where this VM is executing ctx *snow.Context @@ -77,10 +90,14 @@ type VM struct { // Used to check local time clock mockable.Clock - parser txs.Parser + registerer prometheus.Registerer + + parser blocks.Parser pubsub *pubsub.Server + appSender common.AppSender + // State management state states.State @@ -91,7 +108,7 @@ type VM struct { feeAssetID ids.ID // Asset ID --> Bit set with fx IDs the asset supports - assetToFxCache *cache.LRU + assetToFxCache *cache.LRU[ids.ID, set.Bits64] // Transaction issuing timer *timer.Timer @@ -109,20 +126,28 @@ type VM struct { addressTxsIndexer index.AddressTxsIndexer - uniqueTxs cache.Deduplicator + uniqueTxs cache.Deduplicator[ids.ID, *UniqueTx] + + txBackend *txexecutor.Backend + dagState *dagState + + // These values are only initialized after the chain has been linearized. 
+ blockbuilder.Builder + chainManager blockexecutor.Manager + network network.Network } -func (vm *VM) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { +func (*VM) Connected(context.Context, ids.NodeID, *version.Application) error { return nil } -func (vm *VM) Disconnected(nodeID ids.NodeID) error { +func (*VM) Disconnected(context.Context, ids.NodeID) error { return nil } /* ****************************************************************************** - ******************************** Avalanche API ******************************* + ********************************* Common VM ********************************** ****************************************************************************** */ @@ -132,15 +157,19 @@ type Config struct { } func (vm *VM) Initialize( + _ context.Context, ctx *snow.Context, dbManager manager.Manager, genesisBytes []byte, - upgradeBytes []byte, + _ []byte, configBytes []byte, toEngine chan<- common.Message, fxs []*common.Fx, - _ common.AppSender, + appSender common.AppSender, ) error { + noopMessageHandler := common.NewNoOpAppHandler(ctx.Log) + vm.Atomic = network.NewAtomic(noopMessageHandler) + avmConfig := Config{} if len(configBytes) > 0 { if err := stdjson.Unmarshal(configBytes, &avmConfig); err != nil { @@ -155,22 +184,27 @@ func (vm *VM) Initialize( if err := ctx.Metrics.Register(registerer); err != nil { return err } + vm.registerer = registerer - err := vm.metrics.Initialize("", registerer) + // Initialize metrics as soon as possible + var err error + vm.metrics, err = metrics.New("", registerer) if err != nil { - return err + return fmt.Errorf("failed to initialize metrics: %w", err) } + vm.AddressManager = avax.NewAddressManager(ctx) vm.Aliaser = ids.NewAliaser() db := dbManager.Current().Database vm.ctx = ctx vm.toEngine = toEngine + vm.appSender = appSender vm.baseDB = db vm.db = versiondb.New(db) - vm.assetToFxCache = &cache.LRU{Size: assetToFxCacheSize} + vm.assetToFxCache = &cache.LRU[ids.ID, 
set.Bits64]{Size: assetToFxCacheSize} - vm.pubsub = pubsub.New(ctx.NetworkID, ctx.Log) + vm.pubsub = pubsub.New(ctx.Log) typedFxs := make([]extensions.Fx, len(fxs)) vm.fxs = make([]*extensions.ParsedFx, len(fxs)) @@ -190,7 +224,7 @@ func (vm *VM) Initialize( } vm.typeToFxIndex = map[reflect.Type]int{} - vm.parser, err = txs.NewCustomParser( + vm.parser, err = blocks.NewCustomParser( vm.typeToFxIndex, &vm.clock, ctx.Log, @@ -200,9 +234,11 @@ func (vm *VM) Initialize( return err } - vm.AtomicUTXOManager = avax.NewAtomicUTXOManager(ctx.SharedMemory, vm.parser.Codec()) + codec := vm.parser.Codec() + vm.AtomicUTXOManager = avax.NewAtomicUTXOManager(ctx.SharedMemory, codec) + vm.Spender = utxo.NewSpender(&vm.clock, codec) - state, err := states.New(vm.db, vm.parser, registerer) + state, err := states.New(vm.db, vm.parser, vm.registerer) if err != nil { return err } @@ -222,17 +258,16 @@ func (vm *VM) Initialize( go ctx.Log.RecoverAndPanic(vm.timer.Dispatch) vm.batchTimeout = batchTimeout - vm.uniqueTxs = &cache.EvictableLRU{ + vm.uniqueTxs = &cache.EvictableLRU[ids.ID, *UniqueTx]{ Size: txDeduplicatorSize, } vm.walletService.vm = vm - vm.walletService.pendingTxMap = make(map[ids.ID]*list.Element) - vm.walletService.pendingTxOrdering = list.New() + vm.walletService.pendingTxs = linkedhashmap.New[ids.ID, *txs.Tx]() // use no op impl when disabled in config if avmConfig.IndexTransactions { - vm.ctx.Log.Info("address transaction indexing is enabled") - vm.addressTxsIndexer, err = index.NewIndexer(vm.db, vm.ctx.Log, "", registerer, avmConfig.IndexAllowIncomplete) + vm.ctx.Log.Warn("deprecated address transaction indexing is enabled") + vm.addressTxsIndexer, err = index.NewIndexer(vm.db, vm.ctx.Log, "", vm.registerer, avmConfig.IndexAllowIncomplete) if err != nil { return fmt.Errorf("failed to initialize address transaction indexer: %w", err) } @@ -243,11 +278,27 @@ func (vm *VM) Initialize( return fmt.Errorf("failed to initialize disabled indexer: %w", err) } } - return 
vm.db.Commit() + + vm.txBackend = &txexecutor.Backend{ + Ctx: ctx, + Config: &vm.Config, + Fxs: vm.fxs, + TypeToFxIndex: vm.typeToFxIndex, + Codec: vm.parser.Codec(), + FeeAssetID: vm.feeAssetID, + Bootstrapped: false, + } + vm.dagState = &dagState{ + Chain: vm.state, + vm: vm, + } + + return vm.state.Commit() } // onBootstrapStarted is called by the consensus engine when it starts bootstrapping this chain func (vm *VM) onBootstrapStarted() error { + vm.txBackend.Bootstrapped = false for _, fx := range vm.fxs { if err := fx.Fx.Bootstrapping(); err != nil { return err @@ -257,16 +308,31 @@ func (vm *VM) onBootstrapStarted() error { } func (vm *VM) onNormalOperationsStarted() error { + vm.txBackend.Bootstrapped = true for _, fx := range vm.fxs { if err := fx.Fx.Bootstrapped(); err != nil { return err } } + + txID, err := ids.FromString("2JPwx3rbUy877CWYhtXpfPVS5tD8KfnbiF5pxMRu6jCaq5dnME") + if err != nil { + return err + } + utxoID := avax.UTXOID{ + TxID: txID, + OutputIndex: 192, + } + vm.state.DeleteUTXO(utxoID.InputID()) + if err := vm.state.Commit(); err != nil { + return err + } + vm.bootstrapped = true return nil } -func (vm *VM) SetState(state snow.State) error { +func (vm *VM) SetState(_ context.Context, state snow.State) error { switch state { case snow.Bootstrapping: return vm.onBootstrapStarted() @@ -277,7 +343,7 @@ func (vm *VM) SetState(state snow.State) error { } } -func (vm *VM) Shutdown() error { +func (vm *VM) Shutdown(context.Context) error { if vm.timer == nil { return nil } @@ -288,21 +354,26 @@ func (vm *VM) Shutdown() error { vm.timer.Stop() vm.ctx.Lock.Lock() - return vm.baseDB.Close() + errs := wrappers.Errs{} + errs.Add( + vm.state.Close(), + vm.baseDB.Close(), + ) + return errs.Err } -func (vm *VM) Version() (string, error) { +func (*VM) Version(context.Context) (string, error) { return version.Current.String(), nil } -func (vm *VM) CreateHandlers() (map[string]*common.HTTPHandler, error) { +func (vm *VM) CreateHandlers(context.Context) 
(map[string]*common.HTTPHandler, error) { codec := json.NewCodec() rpcServer := rpc.NewServer() rpcServer.RegisterCodec(codec, "application/json") rpcServer.RegisterCodec(codec, "application/json;charset=UTF-8") - rpcServer.RegisterInterceptFunc(vm.metrics.apiRequestMetric.InterceptRequest) - rpcServer.RegisterAfterFunc(vm.metrics.apiRequestMetric.AfterRequest) + rpcServer.RegisterInterceptFunc(vm.metrics.InterceptRequest) + rpcServer.RegisterAfterFunc(vm.metrics.AfterRequest) // name this service "avm" if err := rpcServer.RegisterService(&Service{vm: vm}, "avm"); err != nil { return nil, err @@ -311,8 +382,8 @@ func (vm *VM) CreateHandlers() (map[string]*common.HTTPHandler, error) { walletServer := rpc.NewServer() walletServer.RegisterCodec(codec, "application/json") walletServer.RegisterCodec(codec, "application/json;charset=UTF-8") - walletServer.RegisterInterceptFunc(vm.metrics.apiRequestMetric.InterceptRequest) - walletServer.RegisterAfterFunc(vm.metrics.apiRequestMetric.AfterRequest) + walletServer.RegisterInterceptFunc(vm.metrics.InterceptRequest) + walletServer.RegisterAfterFunc(vm.metrics.AfterRequest) // name this service "wallet" err := walletServer.RegisterService(&vm.walletService, "wallet") @@ -323,7 +394,7 @@ func (vm *VM) CreateHandlers() (map[string]*common.HTTPHandler, error) { }, err } -func (vm *VM) CreateStaticHandlers() (map[string]*common.HTTPHandler, error) { +func (*VM) CreateStaticHandlers(context.Context) (map[string]*common.HTTPHandler, error) { newServer := rpc.NewServer() codec := json.NewCodec() newServer.RegisterCodec(codec, "application/json") @@ -336,7 +407,85 @@ func (vm *VM) CreateStaticHandlers() (map[string]*common.HTTPHandler, error) { }, newServer.RegisterService(staticService, "avm") } -func (vm *VM) PendingTxs() []snowstorm.Tx { +/* + ****************************************************************************** + ********************************** Chain VM ********************************** + 
****************************************************************************** + */ + +func (vm *VM) GetBlock(_ context.Context, blkID ids.ID) (snowman.Block, error) { + return vm.chainManager.GetBlock(blkID) +} + +func (vm *VM) ParseBlock(_ context.Context, blkBytes []byte) (snowman.Block, error) { + blk, err := vm.parser.ParseBlock(blkBytes) + if err != nil { + return nil, err + } + return vm.chainManager.NewBlock(blk), nil +} + +func (vm *VM) SetPreference(_ context.Context, blkID ids.ID) error { + vm.chainManager.SetPreference(blkID) + return nil +} + +func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { + return vm.chainManager.LastAccepted(), nil +} + +/* + ****************************************************************************** + *********************************** DAG VM *********************************** + ****************************************************************************** + */ + +func (vm *VM) Linearize(_ context.Context, stopVertexID ids.ID, toEngine chan<- common.Message) error { + time := version.GetCortinaTime(vm.ctx.NetworkID) + err := vm.state.InitializeChainState(stopVertexID, time) + if err != nil { + return err + } + + mempool, err := mempool.New("mempool", vm.registerer, toEngine) + if err != nil { + return fmt.Errorf("failed to create mempool: %w", err) + } + + vm.chainManager = blockexecutor.NewManager( + mempool, + vm.metrics, + &chainState{ + State: vm.state, + }, + vm.txBackend, + &vm.clock, + vm.onAccept, + ) + + vm.Builder = blockbuilder.New( + vm.txBackend, + vm.chainManager, + &vm.clock, + mempool, + ) + + vm.network = network.New( + vm.ctx, + vm.parser, + vm.chainManager, + mempool, + vm.appSender, + ) + + // Note: It's important only to switch the networking stack after the full + // chainVM has been initialized. Traffic will immediately start being + // handled asynchronously. 
+ vm.Atomic.Set(vm.network) + return nil +} + +func (vm *VM) PendingTxs(context.Context) []snowstorm.Tx { vm.timer.Cancel() txs := vm.txs @@ -344,11 +493,11 @@ func (vm *VM) PendingTxs() []snowstorm.Tx { return txs } -func (vm *VM) ParseTx(b []byte) (snowstorm.Tx, error) { +func (vm *VM) ParseTx(_ context.Context, b []byte) (snowstorm.Tx, error) { return vm.parseTx(b) } -func (vm *VM) GetTx(txID ids.ID) (snowstorm.Tx, error) { +func (vm *VM) GetTx(_ context.Context, txID ids.ID) (snowstorm.Tx, error) { tx := &UniqueTx{ vm: vm, txID: txID, @@ -372,6 +521,29 @@ func (vm *VM) IssueTx(b []byte) (ids.ID, error) { if !vm.bootstrapped { return ids.ID{}, errBootstrapping } + + // If the chain has been linearized, issue the tx to the network. + if vm.Builder != nil { + tx, err := vm.parser.ParseTx(b) + if err != nil { + vm.ctx.Log.Debug("failed to parse tx", + zap.Error(err), + ) + return ids.ID{}, err + } + + err = vm.network.IssueTx(context.TODO(), tx) + if err != nil { + vm.ctx.Log.Debug("failed to add tx to mempool", + zap.Error(err), + ) + return ids.ID{}, err + } + + return tx.ID(), nil + } + + // TODO: After the chain is linearized, remove the following code. tx, err := vm.parseTx(b) if err != nil { return ids.ID{}, err @@ -383,6 +555,7 @@ func (vm *VM) IssueTx(b []byte) (ids.ID, error) { return tx.ID(), nil } +// TODO: After the chain is linearized, remove this. 
func (vm *VM) issueStopVertex() error { select { case vm.toEngine <- common.StopVertex: @@ -437,10 +610,10 @@ func (vm *VM) initGenesis(genesisBytes []byte) error { return errGenesisAssetMustHaveState } - tx := txs.Tx{ + tx := &txs.Tx{ Unsigned: &genesisTx.CreateAssetTx, } - if err := vm.parser.InitializeGenesisTx(&tx); err != nil { + if err := vm.parser.InitializeGenesisTx(tx); err != nil { return err } @@ -450,9 +623,7 @@ func (vm *VM) initGenesis(genesisBytes []byte) error { } if !stateInitialized { - if err := vm.initState(tx); err != nil { - return err - } + vm.initState(tx) } if index == 0 { vm.ctx.Log.Info("fee asset is established", @@ -470,27 +641,20 @@ func (vm *VM) initGenesis(genesisBytes []byte) error { return nil } -func (vm *VM) initState(tx txs.Tx) error { +func (vm *VM) initState(tx *txs.Tx) { txID := tx.ID() vm.ctx.Log.Info("initializing genesis asset", zap.Stringer("txID", txID), ) - if err := vm.state.PutTx(txID, &tx); err != nil { - return err - } - if err := vm.state.PutStatus(txID, choices.Accepted); err != nil { - return err - } + vm.state.AddTx(tx) + vm.state.AddStatus(txID, choices.Accepted) for _, utxo := range tx.UTXOs() { - if err := vm.state.PutUTXO(utxo); err != nil { - return err - } + vm.state.AddUTXO(utxo) } - return nil } func (vm *VM) parseTx(bytes []byte) (*UniqueTx, error) { - rawTx, err := vm.parser.Parse(bytes) + rawTx, err := vm.parser.ParseTx(bytes) if err != nil { return nil, err } @@ -507,13 +671,9 @@ func (vm *VM) parseTx(bytes []byte) (*UniqueTx, error) { } if tx.Status() == choices.Unknown { - if err := vm.state.PutTx(tx.ID(), tx.Tx); err != nil { - return nil, err - } - if err := tx.setStatus(choices.Processing); err != nil { - return nil, err - } - return tx, vm.db.Commit() + vm.state.AddTx(tx.Tx) + tx.setStatus(choices.Processing) + return tx, vm.state.Commit() } return tx, nil @@ -529,130 +689,6 @@ func (vm *VM) issueTx(tx snowstorm.Tx) { } } -func (vm *VM) getUTXO(utxoID *avax.UTXOID) (*avax.UTXO, error) { - 
inputID := utxoID.InputID() - utxo, err := vm.state.GetUTXO(inputID) - if err == nil { - return utxo, nil - } - - inputTx, inputIndex := utxoID.InputSource() - parent := UniqueTx{ - vm: vm, - txID: inputTx, - } - - if err := parent.verifyWithoutCacheWrites(); err != nil { - return nil, errMissingUTXO - } else if status := parent.Status(); status.Decided() { - return nil, errMissingUTXO - } - - parentUTXOs := parent.UTXOs() - if uint32(len(parentUTXOs)) <= inputIndex || int(inputIndex) < 0 { - return nil, errInvalidUTXO - } - return parentUTXOs[int(inputIndex)], nil -} - -func (vm *VM) getFx(val interface{}) (int, error) { - valType := reflect.TypeOf(val) - fx, exists := vm.typeToFxIndex[valType] - if !exists { - return 0, errUnknownFx - } - return fx, nil -} - -func (vm *VM) verifyFxUsage(fxID int, assetID ids.ID) bool { - // Check cache to see whether this asset supports this fx - fxIDsIntf, assetInCache := vm.assetToFxCache.Get(assetID) - if assetInCache { - return fxIDsIntf.(ids.BitSet64).Contains(uint(fxID)) - } - // Caches doesn't say whether this asset support this fx. - // Get the tx that created the asset and check. 
- tx := &UniqueTx{ - vm: vm, - txID: assetID, - } - if status := tx.Status(); !status.Fetched() { - return false - } - createAssetTx, ok := tx.Unsigned.(*txs.CreateAssetTx) - if !ok { - // This transaction was not an asset creation tx - return false - } - fxIDs := ids.BitSet64(0) - for _, state := range createAssetTx.States { - if state.FxIndex == uint32(fxID) { - // Cache that this asset supports this fx - fxIDs.Add(uint(fxID)) - } - } - vm.assetToFxCache.Put(assetID, fxIDs) - return fxIDs.Contains(uint(fxID)) -} - -func (vm *VM) verifyTransferOfUTXO(utx txs.UnsignedTx, in *avax.TransferableInput, cred verify.Verifiable, utxo *avax.UTXO) error { - fxIndex, err := vm.getFx(cred) - if err != nil { - return err - } - fx := vm.fxs[fxIndex].Fx - - utxoAssetID := utxo.AssetID() - inAssetID := in.AssetID() - if utxoAssetID != inAssetID { - return errAssetIDMismatch - } - - if !vm.verifyFxUsage(fxIndex, inAssetID) { - return errIncompatibleFx - } - - return fx.VerifyTransfer(utx, in.In, cred, utxo.Out) -} - -func (vm *VM) verifyTransfer(tx txs.UnsignedTx, in *avax.TransferableInput, cred verify.Verifiable) error { - utxo, err := vm.getUTXO(&in.UTXOID) - if err != nil { - return err - } - return vm.verifyTransferOfUTXO(tx, in, cred, utxo) -} - -func (vm *VM) verifyOperation(tx *txs.OperationTx, op *txs.Operation, cred verify.Verifiable) error { - opAssetID := op.AssetID() - - numUTXOs := len(op.UTXOIDs) - utxos := make([]interface{}, numUTXOs) - for i, utxoID := range op.UTXOIDs { - utxo, err := vm.getUTXO(utxoID) - if err != nil { - return err - } - - utxoAssetID := utxo.AssetID() - if utxoAssetID != opAssetID { - return errAssetIDMismatch - } - utxos[i] = utxo.Out - } - - fxIndex, err := vm.getFx(op.Op) - if err != nil { - return err - } - fx := vm.fxs[fxIndex].Fx - - if !vm.verifyFxUsage(fxIndex, opAssetID) { - return errIncompatibleFx - } - return fx.VerifyOperation(tx, op.Op, cred, utxos) -} - // LoadUser returns: // 1) The UTXOs that reference one or more addresses 
controlled by the given user // 2) A keychain that contains this user's keys @@ -661,7 +697,7 @@ func (vm *VM) verifyOperation(tx *txs.OperationTx, op *txs.Operation, cred verif func (vm *VM) LoadUser( username string, password string, - addrsToUse ids.ShortSet, + addrsToUse set.Set[ids.ShortID], ) ( []*avax.UTXO, *secp256k1fx.Keychain, @@ -688,339 +724,6 @@ func (vm *VM) LoadUser( return utxos, kc, user.Close() } -func (vm *VM) Spend( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, - amounts map[ids.ID]uint64, -) ( - map[ids.ID]uint64, - []*avax.TransferableInput, - [][]*crypto.PrivateKeySECP256K1R, - error, -) { - amountsSpent := make(map[ids.ID]uint64, len(amounts)) - time := vm.clock.Unix() - - ins := []*avax.TransferableInput{} - keys := [][]*crypto.PrivateKeySECP256K1R{} - for _, utxo := range utxos { - assetID := utxo.AssetID() - amount := amounts[assetID] - amountSpent := amountsSpent[assetID] - - if amountSpent >= amount { - // we already have enough inputs allocated to this asset - continue - } - - inputIntf, signers, err := kc.Spend(utxo.Out, time) - if err != nil { - // this utxo can't be spent with the current keys right now - continue - } - input, ok := inputIntf.(avax.TransferableIn) - if !ok { - // this input doesn't have an amount, so I don't care about it here - continue - } - newAmountSpent, err := safemath.Add64(amountSpent, input.Amount()) - if err != nil { - // there was an error calculating the consumed amount, just error - return nil, nil, nil, errSpendOverflow - } - amountsSpent[assetID] = newAmountSpent - - // add the new input to the array - ins = append(ins, &avax.TransferableInput{ - UTXOID: utxo.UTXOID, - Asset: avax.Asset{ID: assetID}, - In: input, - }) - // add the required keys to the array - keys = append(keys, signers) - } - - for asset, amount := range amounts { - if amountsSpent[asset] < amount { - return nil, nil, nil, fmt.Errorf("want to spend %d of asset %s but only have %d", - amount, - asset, - amountsSpent[asset], - ) - 
} - } - - avax.SortTransferableInputsWithSigners(ins, keys) - return amountsSpent, ins, keys, nil -} - -func (vm *VM) SpendNFT( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, - assetID ids.ID, - groupID uint32, - to ids.ShortID, -) ( - []*txs.Operation, - [][]*crypto.PrivateKeySECP256K1R, - error, -) { - time := vm.clock.Unix() - - ops := []*txs.Operation{} - keys := [][]*crypto.PrivateKeySECP256K1R{} - - for _, utxo := range utxos { - // makes sure that the variable isn't overwritten with the next iteration - utxo := utxo - - if len(ops) > 0 { - // we have already been able to create the operation needed - break - } - - if utxo.AssetID() != assetID { - // wrong asset ID - continue - } - out, ok := utxo.Out.(*nftfx.TransferOutput) - if !ok { - // wrong output type - continue - } - if out.GroupID != groupID { - // wrong group id - continue - } - indices, signers, ok := kc.Match(&out.OutputOwners, time) - if !ok { - // unable to spend the output - continue - } - - // add the new operation to the array - ops = append(ops, &txs.Operation{ - Asset: utxo.Asset, - UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, - Op: &nftfx.TransferOperation{ - Input: secp256k1fx.Input{ - SigIndices: indices, - }, - Output: nftfx.TransferOutput{ - GroupID: out.GroupID, - Payload: out.Payload, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{to}, - }, - }, - }, - }) - // add the required keys to the array - keys = append(keys, signers) - } - - if len(ops) == 0 { - return nil, nil, errInsufficientFunds - } - - txs.SortOperationsWithSigners(ops, keys, vm.parser.Codec()) - return ops, keys, nil -} - -func (vm *VM) SpendAll( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, -) ( - map[ids.ID]uint64, - []*avax.TransferableInput, - [][]*crypto.PrivateKeySECP256K1R, - error, -) { - amountsSpent := make(map[ids.ID]uint64) - time := vm.clock.Unix() - - ins := []*avax.TransferableInput{} - keys := [][]*crypto.PrivateKeySECP256K1R{} - for _, utxo := range utxos { - assetID 
:= utxo.AssetID() - amountSpent := amountsSpent[assetID] - - inputIntf, signers, err := kc.Spend(utxo.Out, time) - if err != nil { - // this utxo can't be spent with the current keys right now - continue - } - input, ok := inputIntf.(avax.TransferableIn) - if !ok { - // this input doesn't have an amount, so I don't care about it here - continue - } - newAmountSpent, err := safemath.Add64(amountSpent, input.Amount()) - if err != nil { - // there was an error calculating the consumed amount, just error - return nil, nil, nil, errSpendOverflow - } - amountsSpent[assetID] = newAmountSpent - - // add the new input to the array - ins = append(ins, &avax.TransferableInput{ - UTXOID: utxo.UTXOID, - Asset: avax.Asset{ID: assetID}, - In: input, - }) - // add the required keys to the array - keys = append(keys, signers) - } - - avax.SortTransferableInputsWithSigners(ins, keys) - return amountsSpent, ins, keys, nil -} - -func (vm *VM) Mint( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, - amounts map[ids.ID]uint64, - to ids.ShortID, -) ( - []*txs.Operation, - [][]*crypto.PrivateKeySECP256K1R, - error, -) { - time := vm.clock.Unix() - - ops := []*txs.Operation{} - keys := [][]*crypto.PrivateKeySECP256K1R{} - - for _, utxo := range utxos { - // makes sure that the variable isn't overwritten with the next iteration - utxo := utxo - - assetID := utxo.AssetID() - amount := amounts[assetID] - if amount == 0 { - continue - } - - out, ok := utxo.Out.(*secp256k1fx.MintOutput) - if !ok { - continue - } - - inIntf, signers, err := kc.Spend(out, time) - if err != nil { - continue - } - - in, ok := inIntf.(*secp256k1fx.Input) - if !ok { - continue - } - - // add the operation to the array - ops = append(ops, &txs.Operation{ - Asset: utxo.Asset, - UTXOIDs: []*avax.UTXOID{&utxo.UTXOID}, - Op: &secp256k1fx.MintOperation{ - MintInput: *in, - MintOutput: *out, - TransferOutput: secp256k1fx.TransferOutput{ - Amt: amount, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: 
[]ids.ShortID{to}, - }, - }, - }, - }) - // add the required keys to the array - keys = append(keys, signers) - - // remove the asset from the required amounts to mint - delete(amounts, assetID) - } - - for _, amount := range amounts { - if amount > 0 { - return nil, nil, errAddressesCantMintAsset - } - } - - txs.SortOperationsWithSigners(ops, keys, vm.parser.Codec()) - return ops, keys, nil -} - -func (vm *VM) MintNFT( - utxos []*avax.UTXO, - kc *secp256k1fx.Keychain, - assetID ids.ID, - payload []byte, - to ids.ShortID, -) ( - []*txs.Operation, - [][]*crypto.PrivateKeySECP256K1R, - error, -) { - time := vm.clock.Unix() - - ops := []*txs.Operation{} - keys := [][]*crypto.PrivateKeySECP256K1R{} - - for _, utxo := range utxos { - // makes sure that the variable isn't overwritten with the next iteration - utxo := utxo - - if len(ops) > 0 { - // we have already been able to create the operation needed - break - } - - if utxo.AssetID() != assetID { - // wrong asset id - continue - } - out, ok := utxo.Out.(*nftfx.MintOutput) - if !ok { - // wrong output type - continue - } - - indices, signers, ok := kc.Match(&out.OutputOwners, time) - if !ok { - // unable to spend the output - continue - } - - // add the operation to the array - ops = append(ops, &txs.Operation{ - Asset: avax.Asset{ID: assetID}, - UTXOIDs: []*avax.UTXOID{ - &utxo.UTXOID, - }, - Op: &nftfx.MintOperation{ - MintInput: secp256k1fx.Input{ - SigIndices: indices, - }, - GroupID: out.GroupID, - Payload: payload, - Outputs: []*secp256k1fx.OutputOwners{{ - Threshold: 1, - Addrs: []ids.ShortID{to}, - }}, - }, - }) - // add the required keys to the array - keys = append(keys, signers) - } - - if len(ops) == 0 { - return nil, nil, errAddressesCantMintAsset - } - - txs.SortOperationsWithSigners(ops, keys, vm.parser.Codec()) - return ops, keys, nil -} - // selectChangeAddr returns the change address to be used for [kc] when [changeAddr] is given // as the optional change address argument func (vm *VM) 
selectChangeAddr(defaultAddr ids.ShortID, changeAddr string) (ids.ShortID, error) { @@ -1046,27 +749,50 @@ func (vm *VM) lookupAssetID(asset string) (ids.ID, error) { return ids.ID{}, fmt.Errorf("asset '%s' not found", asset) } -// This VM doesn't (currently) have any app-specific messages -func (vm *VM) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - return nil -} +// Invariant: onAccept is called when [tx] is being marked as accepted, but +// before its state changes are applied. +// Invariant: any error returned by onAccept should be considered fatal. +// TODO: Remove [onAccept] once the deprecated APIs this powers are removed. +func (vm *VM) onAccept(tx *txs.Tx) error { + // Fetch the input UTXOs + txID := tx.ID() + inputUTXOIDs := tx.Unsigned.InputUTXOs() + inputUTXOs := make([]*avax.UTXO, 0, len(inputUTXOIDs)) + for _, utxoID := range inputUTXOIDs { + // Don't bother fetching the input UTXO if its symbolic + if utxoID.Symbolic() { + continue + } -// This VM doesn't (currently) have any app-specific messages -func (vm *VM) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { - return nil -} + utxo, err := vm.state.GetUTXOFromID(utxoID) + if err == database.ErrNotFound { + vm.ctx.Log.Debug("dropping utxo from index", + zap.Stringer("txID", txID), + zap.Stringer("utxoTxID", utxoID.TxID), + zap.Uint32("utxoOutputIndex", utxoID.OutputIndex), + ) + continue + } + if err != nil { + // should never happen because the UTXO was previously verified to + // exist + return fmt.Errorf("error finding UTXO %s: %w", utxoID, err) + } + inputUTXOs = append(inputUTXOs, utxo) + } -// This VM doesn't (currently) have any app-specific messages -func (vm *VM) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { - return nil -} + outputUTXOs := tx.UTXOs() + // index input and output UTXOs + if err := vm.addressTxsIndexer.Accept(txID, inputUTXOs, outputUTXOs); err != nil { + return fmt.Errorf("error indexing 
tx: %w", err) + } -// This VM doesn't (currently) have any app-specific messages -func (vm *VM) AppGossip(nodeID ids.NodeID, msg []byte) error { + vm.pubsub.Publish(NewPubSubFilterer(tx)) + vm.walletService.decided(txID) return nil } // UniqueTx de-duplicates the transaction. func (vm *VM) DeduplicateTx(tx *UniqueTx) *UniqueTx { - return vm.uniqueTxs.Deduplicate(tx).(*UniqueTx) + return vm.uniqueTxs.Deduplicate(tx) } diff --git a/avalanchego/vms/avm/vm_benchmark_test.go b/avalanchego/vms/avm/vm_benchmark_test.go index 4a24a920..09dfff61 100644 --- a/avalanchego/vms/avm/vm_benchmark_test.go +++ b/avalanchego/vms/avm/vm_benchmark_test.go @@ -1,14 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm import ( + "context" "fmt" "math/rand" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/keystore" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -20,7 +24,7 @@ func BenchmarkLoadUser(b *testing.B) { _, _, vm, _ := GenesisVM(nil) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { b.Fatal(err) } ctx.Lock.Unlock() @@ -38,7 +42,7 @@ func BenchmarkLoadUser(b *testing.B) { b.ResetTimer() - fromAddrs := ids.ShortSet{} + fromAddrs := set.Set[ids.ShortID]{} for n := 0; n < b.N; n++ { addrIndex := n % numKeys fromAddrs.Clear() @@ -68,7 +72,7 @@ func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { _, _, vm, _ := GenesisVM(b) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { b.Fatal(err) } ctx.Lock.Unlock() @@ -94,31 +98,20 @@ func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { }, } - if 
err := vm.state.PutUTXO(utxo); err != nil { - b.Fatal(err) - } + vm.state.AddUTXO(utxo) } + require.NoError(b, vm.state.Commit()) - addrsSet := ids.ShortSet{} + addrsSet := set.Set[ids.ShortID]{} addrsSet.Add(addr) - var ( - err error - notPaginatedUTXOs []*avax.UTXO - ) - b.ResetTimer() for i := 0; i < b.N; i++ { // Fetch all UTXOs older version - notPaginatedUTXOs, err = avax.GetAllUTXOs(vm.state, addrsSet) - if err != nil { - b.Fatal(err) - } - - if len(notPaginatedUTXOs) != utxoCount { - b.Fatalf("Wrong number of utxos. Expected (%d) returned (%d)", utxoCount, len(notPaginatedUTXOs)) - } + notPaginatedUTXOs, err := avax.GetAllUTXOs(vm.state, addrsSet) + require.NoError(b, err) + require.Len(b, notPaginatedUTXOs, utxoCount) } } diff --git a/avalanchego/vms/avm/vm_regression_test.go b/avalanchego/vms/avm/vm_regression_test.go new file mode 100644 index 00000000..de148c05 --- /dev/null +++ b/avalanchego/vms/avm/vm_regression_test.go @@ -0,0 +1,151 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/nftfx" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +func TestVerifyFxUsage(t *testing.T) { + require := require.New(t) + vm := &VM{} + ctx := NewContext(t) + ctx.Lock.Lock() + defer func() { + require.NoError(vm.Shutdown(context.Background())) + ctx.Lock.Unlock() + }() + + genesisBytes := BuildGenesisTest(t) + issuer := make(chan common.Message, 1) + err := vm.Initialize( + context.Background(), + ctx, + manager.NewMemDB(version.Semantic1_0_0), + genesisBytes, + nil, + nil, + issuer, + []*common.Fx{ + { + ID: ids.Empty.Prefix(0), + Fx: &secp256k1fx.Fx{}, + }, + { + ID: ids.Empty.Prefix(1), + Fx: &nftfx.Fx{}, + }, + }, + nil, + ) + require.NoError(err) + vm.batchTimeout = 0 + + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + + createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + }}, + Name: "Team Rocket", + Symbol: "TR", + Denomination: 0, + States: []*txs.InitialState{ + { + FxIndex: 0, + Outs: []verify.State{ + &secp256k1fx.TransferOutput{ + Amt: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + { + FxIndex: 1, + Outs: []verify.State{ + 
&nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }, + }, + }} + require.NoError(vm.parser.InitializeTx(createAssetTx)) + + _, err = vm.IssueTx(createAssetTx.Bytes()) + require.NoError(err) + + mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + }}, + Ops: []*txs.Operation{{ + Asset: avax.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*avax.UTXOID{{ + TxID: createAssetTx.ID(), + OutputIndex: 1, + }}, + Op: &nftfx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + GroupID: 1, + Payload: []byte{'h', 'e', 'l', 'l', 'o'}, + Outputs: []*secp256k1fx.OutputOwners{{}}, + }, + }}, + }} + require.NoError(mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + + _, err = vm.IssueTx(mintNFTTx.Bytes()) + require.NoError(err) + + spendTx := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: constants.UnitTestID, + BlockchainID: chainID, + Ins: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: createAssetTx.ID(), + OutputIndex: 0, + }, + Asset: avax.Asset{ID: createAssetTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }}, + }}} + require.NoError(spendTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) + + _, err = vm.IssueTx(spendTx.Bytes()) + require.NoError(err) +} diff --git a/avalanchego/vms/avm/vm_test.go b/avalanchego/vms/avm/vm_test.go index 2784d642..f8ee1c4f 100644 --- a/avalanchego/vms/avm/vm_test.go +++ b/avalanchego/vms/avm/vm_test.go @@ -1,14 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( "bytes" + "context" "errors" "math" "testing" - "time" stdjson "encoding/json" @@ -18,21 +18,26 @@ import ( "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/database/mockdb" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" + "github.com/ava-labs/avalanchego/vms/avm/metrics" "github.com/ava-labs/avalanchego/vms/avm/states" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -43,13 +48,11 @@ import ( ) var ( - networkID uint32 = 10 - chainID = ids.ID{5, 4, 3, 2, 1} - testTxFee = uint64(1000) - testBanffTime = time.Date(10000, time.December, 1, 0, 0, 0, 0, time.UTC) - startBalance = uint64(50000) + chainID = ids.ID{5, 4, 3, 2, 1} + testTxFee = uint64(1000) + startBalance = uint64(50000) - keys []*crypto.PrivateKeySECP256K1R + keys []*secp256k1.PrivateKey addrs []ids.ShortID // addrs[i] corresponds to keys[i] assetID = ids.ID{1, 2, 3} @@ -57,10 +60,12 @@ var ( password = "StrnasfqewiurPasswdn56d" // 
#nosec G101 feeAssetName = "TEST" otherAssetName = "OTHER" + + errMissing = errors.New("missing") ) func init() { - factory := crypto.FactorySECP256K1R{} + factory := secp256k1.Factory{} for _, key := range []string{ "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", @@ -69,33 +74,22 @@ func init() { } { keyBytes, _ := cb58.Decode(key) pk, _ := factory.ToPrivateKey(keyBytes) - keys = append(keys, pk.(*crypto.PrivateKeySECP256K1R)) + keys = append(keys, pk) addrs = append(addrs, pk.PublicKey().Address()) } } -type snLookup struct { - chainsToSubnet map[ids.ID]ids.ID -} - -func (sn *snLookup) SubnetID(chainID ids.ID) (ids.ID, error) { - subnetID, ok := sn.chainsToSubnet[chainID] - if !ok { - return ids.ID{}, errors.New("") - } - return subnetID, nil -} - func NewContext(tb testing.TB) *snow.Context { genesisBytes := BuildGenesisTest(tb) tx := GetAVAXTxFromGenesisTest(genesisBytes, tb) ctx := snow.DefaultContextTest() - ctx.NetworkID = networkID + ctx.NetworkID = constants.UnitTestID ctx.ChainID = chainID ctx.AVAXAssetID = tx.ID() ctx.XChainID = ids.Empty.Prefix(0) + ctx.CChainID = ids.Empty.Prefix(1) aliaser := ctx.BCLookup.(ids.Aliaser) errs := wrappers.Errs{} @@ -109,18 +103,25 @@ func NewContext(tb testing.TB) *snow.Context { tb.Fatal(errs.Err) } - sn := &snLookup{ - chainsToSubnet: make(map[ids.ID]ids.ID), + ctx.ValidatorState = &validators.TestState{ + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: ctx.SubnetID, + chainID: ctx.SubnetID, + }[chainID] + if !ok { + return ids.Empty, errMissing + } + return subnetID, nil + }, } - sn.chainsToSubnet[chainID] = ctx.SubnetID - sn.chainsToSubnet[constants.PlatformChainID] = ctx.SubnetID - ctx.SNLookup = sn return ctx } // Returns: -// 1) tx in genesis that creates asset -// 2) the index of the output +// +// 1. tx in genesis that creates asset +// 2. 
the index of the output func GetCreateTxFromGenesisTest(tb testing.TB, genesisBytes []byte, assetName string) *txs.Tx { parser, err := txs.NewParser([]fxs.Fx{ &secp256k1fx.Fx{}, @@ -302,16 +303,16 @@ func GenesisVMWithArgs(tb testing.TB, additionalFxs []*common.Fx, args *BuildGen ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) issuer := make(chan common.Message, 1) - vm := &VM{Factory: Factory{ + vm := &VM{Config: config.Config{ TxFee: testTxFee, CreateAssetTxFee: testTxFee, - BanffTime: testBanffTime, }} configBytes, err := stdjson.Marshal(Config{IndexTransactions: true}) if err != nil { tb.Fatal("should not have caused error in creating avm config bytes") } err = vm.Initialize( + context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), genesisBytes, @@ -338,11 +339,11 @@ func GenesisVMWithArgs(tb testing.TB, additionalFxs []*common.Fx, args *BuildGen } vm.batchTimeout = 0 - if err := vm.SetState(snow.Bootstrapping); err != nil { + if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { tb.Fatal(err) } - if err := vm.SetState(snow.NormalOp); err != nil { + if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { tb.Fatal(err) } @@ -358,7 +359,7 @@ func NewTxWithAsset(t *testing.T, genesisBytes []byte, vm *VM, assetName string) newTx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ @@ -377,7 +378,7 @@ func NewTxWithAsset(t *testing.T, genesisBytes []byte, vm *VM, assetName string) }}, }, }} - if err := newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { + if err := newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { t.Fatal(err) } return newTx @@ -391,7 +392,7 @@ func setupIssueTx(t testing.TB) (chan common.Message, *VM, *snow.Context, []*txs key := keys[0] firstTx := 
&txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ @@ -420,13 +421,13 @@ func setupIssueTx(t testing.TB) (chan common.Message, *VM, *snow.Context, []*txs }}, }, }} - if err := firstTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := firstTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } secondTx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ @@ -455,7 +456,7 @@ func setupIssueTx(t testing.TB) (chan common.Message, *VM, *snow.Context, []*txs }}, }, }} - if err := secondTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := secondTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } return issuer, vm, ctx, []*txs.Tx{avaxTx, firstTx, secondTx} @@ -466,13 +467,14 @@ func TestInvalidGenesis(t *testing.T) { ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() }() err := vm.Initialize( + context.Background(), ctx, // context manager.NewMemDB(version.Semantic1_0_0), // dbManager nil, // genesisState @@ -492,7 +494,7 @@ func TestInvalidFx(t *testing.T) { ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -500,6 +502,7 @@ func TestInvalidFx(t *testing.T) { genesisBytes := BuildGenesisTest(t) err := vm.Initialize( + context.Background(), ctx, // context manager.NewMemDB(version.Semantic1_0_0), // dbManager genesisBytes, // genesisState @@ 
-521,7 +524,7 @@ func TestFxInitializationFailure(t *testing.T) { ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -529,6 +532,7 @@ func TestFxInitializationFailure(t *testing.T) { genesisBytes := BuildGenesisTest(t) err := vm.Initialize( + context.Background(), ctx, // context manager.NewMemDB(version.Semantic1_0_0), // dbManager genesisBytes, // genesisState @@ -554,7 +558,7 @@ func TestIssueTx(t *testing.T) { genesisBytes, issuer, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -577,7 +581,8 @@ func TestIssueTx(t *testing.T) { } ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 1 { + txs := vm.PendingTxs(context.Background()) + if len(txs) != 1 { t.Fatalf("Should have returned %d tx(s)", 1) } } @@ -587,7 +592,7 @@ func TestIssueTx(t *testing.T) { func TestIssueDependentTx(t *testing.T) { issuer, vm, ctx, txs := setupIssueTx(t) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -611,7 +616,8 @@ func TestIssueDependentTx(t *testing.T) { } ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 2 { + pendingTxs := vm.PendingTxs(context.Background()) + if len(pendingTxs) != 2 { t.Fatalf("Should have returned %d tx(s)", 2) } } @@ -622,7 +628,7 @@ func TestIssueNFT(t *testing.T) { ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -631,6 +637,7 @@ func TestIssueNFT(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) err := vm.Initialize( + context.Background(), ctx, manager.NewMemDB(version.Semantic1_0_0), 
genesisBytes, @@ -654,19 +661,19 @@ func TestIssueNFT(t *testing.T) { } vm.batchTimeout = 0 - err = vm.SetState(snow.Bootstrapping) + err = vm.SetState(context.Background(), snow.Bootstrapping) if err != nil { t.Fatal(err) } - err = vm.SetState(snow.NormalOp) + err = vm.SetState(context.Background(), snow.NormalOp) if err != nil { t.Fatal(err) } createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, }}, Name: "Team Rocket", @@ -696,13 +703,13 @@ func TestIssueNFT(t *testing.T) { t.Fatal(err) } - if _, err = vm.IssueTx(createAssetTx.Bytes()); err != nil { + if _, err := vm.IssueTx(createAssetTx.Bytes()); err != nil { t.Fatal(err) } mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, }}, Ops: []*txs.Operation{{ @@ -721,18 +728,18 @@ func TestIssueNFT(t *testing.T) { }, }}, }} - if err := mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}}); err != nil { + if err := mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { t.Fatal(err) } - if _, err = vm.IssueTx(mintNFTTx.Bytes()); err != nil { + if _, err := vm.IssueTx(mintNFTTx.Bytes()); err != nil { t.Fatal(err) } transferNFTTx := &txs.Tx{ Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, }}, Ops: []*txs.Operation{{ @@ -759,7 +766,7 @@ func TestIssueNFT(t *testing.T) { t.Fatal(err) } - if _, err = vm.IssueTx(transferNFTTx.Bytes()); err != nil { + if _, err := vm.IssueTx(transferNFTTx.Bytes()); err != nil { t.Fatal(err) } } @@ -770,7 +777,7 @@ func TestIssueProperty(t *testing.T) { ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); 
err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -779,6 +786,7 @@ func TestIssueProperty(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) err := vm.Initialize( + context.Background(), ctx, manager.NewMemDB(version.Semantic1_0_0), genesisBytes, @@ -806,19 +814,19 @@ func TestIssueProperty(t *testing.T) { } vm.batchTimeout = 0 - err = vm.SetState(snow.Bootstrapping) + err = vm.SetState(context.Background(), snow.Bootstrapping) if err != nil { t.Fatal(err) } - err = vm.SetState(snow.NormalOp) + err = vm.SetState(context.Background(), snow.NormalOp) if err != nil { t.Fatal(err) } createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, }}, Name: "Team Rocket", @@ -840,13 +848,13 @@ func TestIssueProperty(t *testing.T) { t.Fatal(err) } - if _, err = vm.IssueTx(createAssetTx.Bytes()); err != nil { + if _, err := vm.IssueTx(createAssetTx.Bytes()); err != nil { t.Fatal(err) } mintPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, }}, Ops: []*txs.Operation{{ @@ -871,20 +879,20 @@ func TestIssueProperty(t *testing.T) { }} codec := vm.parser.Codec() - err = mintPropertyTx.SignPropertyFx(codec, [][]*crypto.PrivateKeySECP256K1R{ + err = mintPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }) if err != nil { t.Fatal(err) } - if _, err = vm.IssueTx(mintPropertyTx.Bytes()); err != nil { + if _, err := vm.IssueTx(mintPropertyTx.Bytes()); err != nil { t.Fatal(err) } burnPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, }}, Ops: []*txs.Operation{{ @@ -897,14 +905,14 @@ func TestIssueProperty(t *testing.T) { }}, }} - err = burnPropertyTx.SignPropertyFx(codec, 
[][]*crypto.PrivateKeySECP256K1R{ + err = burnPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ {}, }) if err != nil { t.Fatal(err) } - if _, err = vm.IssueTx(burnPropertyTx.Bytes()); err != nil { + if _, err := vm.IssueTx(burnPropertyTx.Bytes()); err != nil { t.Fatal(err) } } @@ -970,7 +978,7 @@ func TestIssueTxWithFeeAsset(t *testing.T) { genesisBytes, issuer, vm, _ := setupTxFeeAssets(t) ctx := vm.ctx defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) require.NoError(t, err) ctx.Lock.Unlock() }() @@ -987,15 +995,16 @@ func TestIssueTxWithFeeAsset(t *testing.T) { require.Equal(t, msg, common.PendingTxs) ctx.Lock.Lock() - require.Len(t, vm.PendingTxs(), 1) - t.Log(vm.PendingTxs()) + txs := vm.PendingTxs(context.Background()) + require.Len(t, txs, 1) + t.Log(txs) } func TestIssueTxWithAnotherAsset(t *testing.T) { genesisBytes, issuer, vm, _ := setupTxFeeAssets(t) ctx := vm.ctx defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) require.NoError(t, err) ctx.Lock.Unlock() }() @@ -1006,7 +1015,7 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { newTx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: []*avax.TransferableInput{ // fee asset @@ -1044,7 +1053,7 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { }, }, }} - if err := newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0]}, {keys[0]}}); err != nil { + if err := newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}, {keys[0]}}); err != nil { t.Fatal(err) } @@ -1058,13 +1067,14 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { require.Equal(t, msg, common.PendingTxs) ctx.Lock.Lock() - require.Len(t, vm.PendingTxs(), 1) + txs := vm.PendingTxs(context.Background()) + require.Len(t, txs, 1) } func TestVMFormat(t *testing.T) { _, _, vm, _ := GenesisVM(t) defer func() { - if err := vm.Shutdown(); err != 
nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -1093,7 +1103,7 @@ func TestTxCached(t *testing.T) { genesisBytes, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -1102,34 +1112,32 @@ func TestTxCached(t *testing.T) { newTx := NewTx(t, genesisBytes, vm) txBytes := newTx.Bytes() - _, err := vm.ParseTx(txBytes) + _, err := vm.ParseTx(context.Background(), txBytes) require.NoError(t, err) - db := mockdb.New() - called := new(bool) - db.OnGet = func([]byte) ([]byte, error) { - *called = true - return nil, errors.New("") - } - registerer := prometheus.NewRegistry() - err = vm.metrics.Initialize("", registerer) + vm.metrics, err = metrics.New("", registerer) + require.NoError(t, err) + + db := memdb.New() + vdb := versiondb.New(db) + vm.state, err = states.New(vdb, vm.parser, registerer) require.NoError(t, err) - vm.state, err = states.New(prefixdb.New([]byte("tx"), db), vm.parser, registerer) + _, err = vm.ParseTx(context.Background(), txBytes) require.NoError(t, err) - _, err = vm.ParseTx(txBytes) + count, err := database.Count(vdb) require.NoError(t, err) - require.False(t, *called, "shouldn't have called the DB") + require.Zero(t, count) } func TestTxNotCached(t *testing.T) { genesisBytes, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -1138,54 +1146,51 @@ func TestTxNotCached(t *testing.T) { newTx := NewTx(t, genesisBytes, vm) txBytes := newTx.Bytes() - _, err := vm.ParseTx(txBytes) + _, err := vm.ParseTx(context.Background(), txBytes) require.NoError(t, err) - db := mockdb.New() - called := new(bool) - db.OnGet = func([]byte) ([]byte, error) { - *called = true - return nil, errors.New("") - } - db.OnPut = func([]byte, []byte) 
error { return nil } - registerer := prometheus.NewRegistry() require.NoError(t, err) - err = vm.metrics.Initialize("", registerer) + vm.metrics, err = metrics.New("", registerer) require.NoError(t, err) - vm.state, err = states.New(db, vm.parser, registerer) + db := memdb.New() + vdb := versiondb.New(db) + vm.state, err = states.New(vdb, vm.parser, registerer) require.NoError(t, err) vm.uniqueTxs.Flush() - _, err = vm.ParseTx(txBytes) + _, err = vm.ParseTx(context.Background(), txBytes) + require.NoError(t, err) + + count, err := database.Count(vdb) require.NoError(t, err) - require.True(t, *called, "should have called the DB") + require.NotZero(t, count) } func TestTxVerifyAfterIssueTx(t *testing.T) { issuer, vm, ctx, issueTxs := setupIssueTx(t) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() }() firstTx := issueTxs[1] secondTx := issueTxs[2] - parsedSecondTx, err := vm.ParseTx(secondTx.Bytes()) + parsedSecondTx, err := vm.ParseTx(context.Background(), secondTx.Bytes()) if err != nil { t.Fatal(err) } - if err := parsedSecondTx.Verify(); err != nil { + if err := parsedSecondTx.Verify(context.Background()); err != nil { t.Fatal(err) } if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { t.Fatal(err) } - if err := parsedSecondTx.Accept(); err != nil { + if err := parsedSecondTx.Accept(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -1196,13 +1201,13 @@ func TestTxVerifyAfterIssueTx(t *testing.T) { } ctx.Lock.Lock() - txs := vm.PendingTxs() + txs := vm.PendingTxs(context.Background()) if len(txs) != 1 { t.Fatalf("Should have returned %d tx(s)", 1) } parsedFirstTx := txs[0] - if err := parsedFirstTx.Verify(); err == nil { + if err := parsedFirstTx.Verify(context.Background()); err == nil { t.Fatalf("Should have erred due to a missing UTXO") } } @@ -1210,7 +1215,7 @@ func TestTxVerifyAfterIssueTx(t *testing.T) { func TestTxVerifyAfterGet(t 
*testing.T) { _, vm, ctx, issueTxs := setupIssueTx(t) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -1218,24 +1223,24 @@ func TestTxVerifyAfterGet(t *testing.T) { firstTx := issueTxs[1] secondTx := issueTxs[2] - parsedSecondTx, err := vm.ParseTx(secondTx.Bytes()) + parsedSecondTx, err := vm.ParseTx(context.Background(), secondTx.Bytes()) if err != nil { t.Fatal(err) } - if err := parsedSecondTx.Verify(); err != nil { + if err := parsedSecondTx.Verify(context.Background()); err != nil { t.Fatal(err) } if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { t.Fatal(err) } - parsedFirstTx, err := vm.GetTx(firstTx.ID()) + parsedFirstTx, err := vm.GetTx(context.Background(), firstTx.ID()) if err != nil { t.Fatal(err) } - if err := parsedSecondTx.Accept(); err != nil { + if err := parsedSecondTx.Accept(context.Background()); err != nil { t.Fatal(err) } - if err := parsedFirstTx.Verify(); err == nil { + if err := parsedFirstTx.Verify(context.Background()); err == nil { t.Fatalf("Should have erred due to a missing UTXO") } } @@ -1243,7 +1248,7 @@ func TestTxVerifyAfterGet(t *testing.T) { func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { _, vm, ctx, issueTxs := setupIssueTx(t) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() @@ -1253,7 +1258,7 @@ func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { secondTx := issueTxs[2] key := keys[0] firstTxDescendant := &txs.Tx{Unsigned: &txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ @@ -1281,15 +1286,15 @@ func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { }, }}, }}} - if err := firstTxDescendant.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := 
firstTxDescendant.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } - parsedSecondTx, err := vm.ParseTx(secondTx.Bytes()) + parsedSecondTx, err := vm.ParseTx(context.Background(), secondTx.Bytes()) if err != nil { t.Fatal(err) } - if err := parsedSecondTx.Verify(); err != nil { + if err := parsedSecondTx.Verify(context.Background()); err != nil { t.Fatal(err) } if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { @@ -1298,14 +1303,14 @@ func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { if _, err := vm.IssueTx(firstTxDescendant.Bytes()); err != nil { t.Fatal(err) } - parsedFirstTx, err := vm.GetTx(firstTx.ID()) + parsedFirstTx, err := vm.GetTx(context.Background(), firstTx.ID()) if err != nil { t.Fatal(err) } - if err := parsedSecondTx.Accept(); err != nil { + if err := parsedSecondTx.Accept(context.Background()); err != nil { t.Fatal(err) } - if err := parsedFirstTx.Verify(); err == nil { + if err := parsedFirstTx.Verify(context.Background()); err == nil { t.Fatalf("Should have erred due to a missing UTXO") } } @@ -1459,7 +1464,7 @@ func TestImportTxSerialization(t *testing.T) { 0x1f, 0x49, 0x9b, 0x0a, 0x4f, 0xbf, 0x95, 0xfc, 0x31, 0x39, 0x46, 0x4e, 0xa1, 0xaf, 0x00, } - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{keys[0], keys[0]}, {keys[0], keys[0]}}); err != nil { + if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0], keys[0]}, {keys[0], keys[0]}}); err != nil { t.Fatal(err) } require.Equal(t, tx.ID().String(), "pCW7sVBytzdZ1WrqzGY1DvA2S9UaMr72xpUMxVyx1QHBARNYx") @@ -1501,6 +1506,7 @@ func TestIssueImportTx(t *testing.T) { require.NoError(t, err) vm := &VM{} err = vm.Initialize( + context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), genesisBytes, @@ -1518,11 +1524,11 @@ func TestIssueImportTx(t *testing.T) { } vm.batchTimeout = 0 - if err = vm.SetState(snow.Bootstrapping); err != nil { + if err := 
vm.SetState(context.Background(), snow.Bootstrapping); err != nil { t.Fatal(err) } - err = vm.SetState(snow.NormalOp) + err = vm.SetState(context.Background(), snow.NormalOp) if err != nil { t.Fatal(err) } @@ -1541,7 +1547,7 @@ func TestIssueImportTx(t *testing.T) { txAssetID := avax.Asset{ID: avaxID} tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Outs: []*avax.TransferableOutput{{ Asset: txAssetID, @@ -1566,7 +1572,7 @@ func TestIssueImportTx(t *testing.T) { }, }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } @@ -1617,23 +1623,23 @@ func TestIssueImportTx(t *testing.T) { ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() }() - txs := vm.PendingTxs() + txs := vm.PendingTxs(context.Background()) if len(txs) != 1 { t.Fatalf("Should have returned %d tx(s)", 1) } parsedTx := txs[0] - if err := parsedTx.Verify(); err != nil { + if err := parsedTx.Verify(context.Background()); err != nil { t.Fatal("Failed verify", err) } - if err := parsedTx.Accept(); err != nil { + if err := parsedTx.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -1663,12 +1669,13 @@ func TestForceAcceptImportTx(t *testing.T) { vm := &VM{} ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() }() err := vm.Initialize( + context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), genesisBytes, @@ -1686,11 +1693,11 @@ func TestForceAcceptImportTx(t *testing.T) { } vm.batchTimeout = 0 - if err = vm.SetState(snow.Bootstrapping); err != nil { + if err := 
vm.SetState(context.Background(), snow.Bootstrapping); err != nil { t.Fatal(err) } - err = vm.SetState(snow.NormalOp) + err = vm.SetState(context.Background(), snow.NormalOp) if err != nil { t.Fatal(err) } @@ -1710,7 +1717,7 @@ func TestForceAcceptImportTx(t *testing.T) { tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, }}, SourceChain: constants.PlatformChainID, @@ -1724,20 +1731,20 @@ func TestForceAcceptImportTx(t *testing.T) { }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } - parsedTx, err := vm.ParseTx(tx.Bytes()) + parsedTx, err := vm.ParseTx(context.Background(), tx.Bytes()) if err != nil { t.Fatal(err) } - if err := parsedTx.Verify(); err == nil { + if err := parsedTx.Verify(context.Background()); err == nil { t.Fatalf("Should have failed verification") } - if err := parsedTx.Accept(); err != nil { + if err := parsedTx.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -1773,6 +1780,7 @@ func TestIssueExportTx(t *testing.T) { ctx.Lock.Lock() vm := &VM{} if err := vm.Initialize( + context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), genesisBytes, @@ -1788,11 +1796,11 @@ func TestIssueExportTx(t *testing.T) { } vm.batchTimeout = 0 - if err := vm.SetState(snow.Bootstrapping); err != nil { + if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { t.Fatal(err) } - if err := vm.SetState(snow.NormalOp); err != nil { + if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { t.Fatal(err) } @@ -1800,7 +1808,7 @@ func TestIssueExportTx(t *testing.T) { tx := &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: 
[]*avax.TransferableInput{{ UTXOID: avax.UTXOID{ @@ -1826,7 +1834,7 @@ func TestIssueExportTx(t *testing.T) { }, }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } @@ -1843,21 +1851,21 @@ func TestIssueExportTx(t *testing.T) { ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() }() - txs := vm.PendingTxs() + txs := vm.PendingTxs(context.Background()) if len(txs) != 1 { t.Fatalf("Should have returned %d tx(s)", 1) } parsedTx := txs[0] - if err := parsedTx.Verify(); err != nil { + if err := parsedTx.Verify(context.Background()); err != nil { t.Fatal(err) - } else if err := parsedTx.Accept(); err != nil { + } else if err := parsedTx.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -1904,6 +1912,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { require.NoError(t, err) vm := &VM{} err = vm.Initialize( + context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), genesisBytes, @@ -1921,11 +1930,11 @@ func TestClearForceAcceptedExportTx(t *testing.T) { } vm.batchTimeout = 0 - if err = vm.SetState(snow.Bootstrapping); err != nil { + if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { t.Fatal(err) } - err = vm.SetState(snow.NormalOp) + err = vm.SetState(context.Background(), snow.NormalOp) if err != nil { t.Fatal(err) } @@ -1935,7 +1944,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { assetID := avax.Asset{ID: avaxID} tx := &txs.Tx{Unsigned: &txs.ExportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - NetworkID: networkID, + NetworkID: constants.UnitTestID, BlockchainID: chainID, Ins: []*avax.TransferableInput{{ UTXOID: avax.UTXOID{ @@ -1961,7 +1970,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { }, }}, }} - if err := 
tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*crypto.PrivateKeySECP256K1R{{key}}); err != nil { + if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { t.Fatal(err) } @@ -1978,19 +1987,19 @@ func TestClearForceAcceptedExportTx(t *testing.T) { ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } ctx.Lock.Unlock() }() - txs := vm.PendingTxs() + txs := vm.PendingTxs(context.Background()) if len(txs) != 1 { t.Fatalf("Should have returned %d tx(s)", 1) } parsedTx := txs[0] - if err := parsedTx.Verify(); err != nil { + if err := parsedTx.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -2005,7 +2014,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { t.Fatal(err) } - if err := parsedTx.Accept(); err != nil { + if err := parsedTx.Accept(context.Background()); err != nil { t.Fatal(err) } diff --git a/avalanchego/vms/avm/wallet_client.go b/avalanchego/vms/avm/wallet_client.go index 5c4e732d..c74918e6 100644 --- a/avalanchego/vms/avm/wallet_client.go +++ b/avalanchego/vms/avm/wallet_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avm @@ -15,13 +15,16 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ WalletClient = &client{} +var _ WalletClient = (*client)(nil) // interface of an AVM wallet client for interacting with avm managed wallet on [chain] type WalletClient interface { // IssueTx issues a transaction to a node and returns the TxID IssueTx(ctx context.Context, tx []byte, options ...rpc.Option) (ids.ID, error) // Send [amount] of [assetID] to address [to] + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. 
Send( ctx context.Context, user api.UserPass, @@ -34,6 +37,9 @@ type WalletClient interface { options ...rpc.Option, ) (ids.ID, error) // SendMultiple sends a transaction from [user] funding all [outputs] + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/x.Wallet` utility. SendMultiple( ctx context.Context, user api.UserPass, @@ -51,6 +57,9 @@ type walletClient struct { } // NewWalletClient returns an AVM wallet client for interacting with avm managed wallet on [chain] +// +// Deprecated: Transactions should be issued using the +// `avalanchego/wallet/chain/x.Wallet` utility. func NewWalletClient(uri, chain string) WalletClient { path := fmt.Sprintf( "%s/ext/%s/%s/wallet", @@ -59,7 +68,7 @@ func NewWalletClient(uri, chain string) WalletClient { chain, ) return &walletClient{ - requester: rpc.NewEndpointRequester(path, "wallet"), + requester: rpc.NewEndpointRequester(path), } } @@ -69,7 +78,7 @@ func (c *walletClient) IssueTx(ctx context.Context, txBytes []byte, options ...r return ids.ID{}, err } res := &api.JSONTxID{} - err = c.requester.SendRequest(ctx, "issueTx", &api.FormattedTx{ + err = c.requester.SendRequest(ctx, "wallet.issueTx", &api.FormattedTx{ Tx: txStr, Encoding: formatting.Hex, }, res, options...) 
@@ -100,7 +109,7 @@ func (c *walletClient) Send( options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "send", &SendArgs{ + err := c.requester.SendRequest(ctx, "wallet.send", &SendArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -132,7 +141,7 @@ func (c *walletClient) SendMultiple( serviceOutputs[i].AssetID = output.AssetID serviceOutputs[i].To = output.To.String() } - err := c.requester.SendRequest(ctx, "sendMultiple", &SendMultipleArgs{ + err := c.requester.SendRequest(ctx, "wallet.sendMultiple", &SendMultipleArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, diff --git a/avalanchego/vms/avm/wallet_service.go b/avalanchego/vms/avm/wallet_service.go index 9b6c2dc3..d470cae6 100644 --- a/avalanchego/vms/avm/wallet_service.go +++ b/avalanchego/vms/avm/wallet_service.go @@ -1,16 +1,20 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( - "container/list" "fmt" "net/http" + "go.uber.org/zap" + + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/avm/txs" @@ -19,23 +23,16 @@ import ( ) type WalletService struct { - vm *VM - - pendingTxMap map[ids.ID]*list.Element - pendingTxOrdering *list.List + vm *VM + pendingTxs linkedhashmap.LinkedHashmap[ids.ID, *txs.Tx] } func (w *WalletService) decided(txID ids.ID) { - e, ok := w.pendingTxMap[txID] - if !ok { - return - } - delete(w.pendingTxMap, txID) - w.pendingTxOrdering.Remove(e) + w.pendingTxs.Delete(txID) } func (w *WalletService) issue(txBytes []byte) (ids.ID, error) { - tx, err := w.vm.parser.Parse(txBytes) + tx, err := w.vm.parser.ParseTx(txBytes) if err != nil { return ids.ID{}, err } @@ -45,11 +42,10 @@ func (w *WalletService) issue(txBytes []byte) (ids.ID, error) { return ids.ID{}, err } - if _, dup := w.pendingTxMap[txID]; dup { - return txID, nil + if _, ok := w.pendingTxs.Get(txID); !ok { + w.pendingTxs.Put(txID, tx) } - w.pendingTxMap[txID] = w.pendingTxOrdering.PushBack(tx) return txID, nil } @@ -59,8 +55,10 @@ func (w *WalletService) update(utxos []*avax.UTXO) ([]*avax.UTXO, error) { utxoMap[utxo.InputID()] = utxo } - for e := w.pendingTxOrdering.Front(); e != nil; e = e.Next() { - tx := e.Value.(*txs.Tx) + iter := w.pendingTxs.NewIterator() + + for iter.Next() { + tx := iter.Value() for _, inputUTXO := range tx.Unsigned.InputUTXOs() { if inputUTXO.Symbolic() { continue @@ -77,18 +75,14 @@ func (w *WalletService) update(utxos []*avax.UTXO) ([]*avax.UTXO, error) { } } - newUTXOs := make([]*avax.UTXO, len(utxoMap)) - i := 0 - for _, utxo := range utxoMap { - newUTXOs[i] = utxo - i++ - } - return newUTXOs, nil + return maps.Values(utxoMap), 
nil } // IssueTx attempts to issue a transaction into consensus -func (w *WalletService) IssueTx(r *http.Request, args *api.FormattedTx, reply *api.JSONTxID) error { - w.vm.ctx.Log.Debug("AVM Wallet: IssueTx called", +func (w *WalletService) IssueTx(_ *http.Request, args *api.FormattedTx, reply *api.JSONTxID) error { + w.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "wallet"), + zap.String("method", "issueTx"), logging.UserString("tx", args.Tx), ) @@ -111,8 +105,10 @@ func (w *WalletService) Send(r *http.Request, args *SendArgs, reply *api.JSONTxI } // SendMultiple sends a transaction with multiple outputs. -func (w *WalletService) SendMultiple(r *http.Request, args *SendMultipleArgs, reply *api.JSONTxIDChangeAddr) error { - w.vm.ctx.Log.Debug("AVM Wallet: SendMultiple", +func (w *WalletService) SendMultiple(_ *http.Request, args *SendMultipleArgs, reply *api.JSONTxIDChangeAddr) error { + w.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "wallet"), + zap.String("method", "sendMultiple"), logging.UserString("username", args.Username), ) @@ -198,10 +194,7 @@ func (w *WalletService) SendMultiple(r *http.Request, args *SendMultipleArgs, re }) } - amountsWithFee := make(map[ids.ID]uint64, len(amounts)+1) - for assetKey, amount := range amounts { - amountsWithFee[assetKey] = amount - } + amountsWithFee := maps.Clone(amounts) amountWithFee, err := math.Add64(amounts[w.vm.feeAssetID], w.vm.TxFee) if err != nil { diff --git a/avalanchego/vms/avm/wallet_service_test.go b/avalanchego/vms/avm/wallet_service_test.go index a400f7c9..23713ff9 100644 --- a/avalanchego/vms/avm/wallet_service_test.go +++ b/avalanchego/vms/avm/wallet_service_test.go @@ -1,15 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avm import ( - "container/list" + "context" "testing" "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/linkedhashmap" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/keystore" ) @@ -32,7 +33,10 @@ func setupWS(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *WalletService, *atom genesisTx = GetCreateTxFromGenesisTest(t, genesisBytes, feeAssetName) } - ws := &WalletService{vm: vm, pendingTxMap: make(map[ids.ID]*list.Element), pendingTxOrdering: list.New()} + ws := &WalletService{ + vm: vm, + pendingTxs: linkedhashmap.New[ids.ID, *txs.Tx](), + } return genesisBytes, vm, ws, m, genesisTx } @@ -65,7 +69,7 @@ func TestWalletService_SendMultiple(t *testing.T) { t.Run(tc.name, func(t *testing.T) { _, vm, ws, _, genesisTx := setupWSWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } vm.ctx.Lock.Unlock() @@ -123,7 +127,7 @@ func TestWalletService_SendMultiple(t *testing.T) { t.Fatal("Transaction ID returned by SendMultiple does not match the transaction found in vm's pending transactions") } - if _, err = vm.GetTx(reply.TxID); err != nil { + if _, err := vm.GetTx(context.Background(), reply.TxID); err != nil { t.Fatalf("Failed to retrieve created transaction: %s", err) } }) diff --git a/avalanchego/vms/components/avax/addresses.go b/avalanchego/vms/components/avax/addresses.go index db1f97b0..400000f2 100644 --- a/avalanchego/vms/components/avax/addresses.go +++ b/avalanchego/vms/components/avax/addresses.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -10,9 +10,10 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/set" ) -var _ AddressManager = &addressManager{} +var _ AddressManager = (*addressManager)(nil) type AddressManager interface { // ParseLocalAddress takes in an address for this chain and produces the ID @@ -96,8 +97,8 @@ func (a *addressManager) FormatAddress(chainID ids.ID, addr ids.ShortID) (string return address.Format(chainIDAlias, hrp, addr.Bytes()) } -func ParseLocalAddresses(a AddressManager, addrStrs []string) (ids.ShortSet, error) { - addrs := make(ids.ShortSet, len(addrStrs)) +func ParseLocalAddresses(a AddressManager, addrStrs []string) (set.Set[ids.ShortID], error) { + addrs := make(set.Set[ids.ShortID], len(addrStrs)) for _, addrStr := range addrStrs { addr, err := a.ParseLocalAddress(addrStr) if err != nil { @@ -125,8 +126,8 @@ func ParseServiceAddress(a AddressManager, addrStr string) (ids.ShortID, error) } // ParseServiceAddress get addresses IDs from addresses strings, being them either localized or not -func ParseServiceAddresses(a AddressManager, addrStrs []string) (ids.ShortSet, error) { - addrs := ids.NewShortSet(len(addrStrs)) +func ParseServiceAddresses(a AddressManager, addrStrs []string) (set.Set[ids.ShortID], error) { + addrs := set.NewSet[ids.ShortID](len(addrStrs)) for _, addrStr := range addrStrs { addr, err := ParseServiceAddress(a, addrStr) if err != nil { diff --git a/avalanchego/vms/components/avax/asset.go b/avalanchego/vms/components/avax/asset.go index 975c1e3c..90a3eef6 100644 --- a/avalanchego/vms/components/avax/asset.go +++ b/avalanchego/vms/components/avax/asset.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -14,7 +14,7 @@ var ( errNilAssetID = errors.New("nil asset ID is not valid") errEmptyAssetID = errors.New("empty asset ID is not valid") - _ verify.Verifiable = &Asset{} + _ verify.Verifiable = (*Asset)(nil) ) type Asset struct { @@ -22,7 +22,9 @@ type Asset struct { } // AssetID returns the ID of the contained asset -func (asset *Asset) AssetID() ids.ID { return asset.ID } +func (asset *Asset) AssetID() ids.ID { + return asset.ID +} func (asset *Asset) Verify() error { switch { diff --git a/avalanchego/vms/components/avax/asset_test.go b/avalanchego/vms/components/avax/asset_test.go index 37ab7a98..b1744f62 100644 --- a/avalanchego/vms/components/avax/asset_test.go +++ b/avalanchego/vms/components/avax/asset_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/avalanchego/vms/components/avax/atomic_utxos.go b/avalanchego/vms/components/avax/atomic_utxos.go index b1293606..20b22420 100644 --- a/avalanchego/vms/components/avax/atomic_utxos.go +++ b/avalanchego/vms/components/avax/atomic_utxos.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -9,9 +9,10 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" ) -var _ AtomicUTXOManager = &atomicUTXOManager{} +var _ AtomicUTXOManager = (*atomicUTXOManager)(nil) type AtomicUTXOManager interface { // GetAtomicUTXOs returns exported UTXOs such that at least one of the @@ -26,7 +27,7 @@ type AtomicUTXOManager interface { // * Any error that may have occurred upstream. 
GetAtomicUTXOs( chainID ids.ID, - addrs ids.ShortSet, + addrs set.Set[ids.ShortID], startAddr ids.ShortID, startUTXOID ids.ID, limit int, @@ -47,7 +48,7 @@ func NewAtomicUTXOManager(sm atomic.SharedMemory, codec codec.Manager) AtomicUTX func (a *atomicUTXOManager) GetAtomicUTXOs( chainID ids.ID, - addrs ids.ShortSet, + addrs set.Set[ids.ShortID], startAddr ids.ShortID, startUTXOID ids.ID, limit int, diff --git a/avalanchego/vms/components/avax/base_tx.go b/avalanchego/vms/components/avax/base_tx.go index 491d2afc..d176d7f0 100644 --- a/avalanchego/vms/components/avax/base_tx.go +++ b/avalanchego/vms/components/avax/base_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/types" ) @@ -16,10 +17,10 @@ import ( const MaxMemoSize = 256 var ( + ErrNilTx = errors.New("nil tx is not valid") ErrWrongNetworkID = errors.New("tx has wrong network ID") - - errNilTx = errors.New("nil tx is not valid") - errWrongChainID = errors.New("tx has wrong chain ID") + ErrWrongChainID = errors.New("tx has wrong chain ID") + ErrMemoTooLarge = errors.New("memo exceeds maximum length") ) // BaseTx is the basis of all standard transactions. 
@@ -41,8 +42,8 @@ func (t *BaseTx) InputUTXOs() []*UTXOID { } // ConsumedAssetIDs returns the IDs of the assets this transaction consumes -func (t *BaseTx) ConsumedAssetIDs() ids.Set { - assets := ids.Set{} +func (t *BaseTx) ConsumedAssetIDs() set.Set[ids.ID] { + assets := set.Set[ids.ID]{} for _, in := range t.Ins { assets.Add(in.AssetID()) } @@ -50,23 +51,31 @@ func (t *BaseTx) ConsumedAssetIDs() ids.Set { } // AssetIDs returns the IDs of the assets this transaction depends on -func (t *BaseTx) AssetIDs() ids.Set { return t.ConsumedAssetIDs() } +func (t *BaseTx) AssetIDs() set.Set[ids.ID] { + return t.ConsumedAssetIDs() +} // NumCredentials returns the number of expected credentials -func (t *BaseTx) NumCredentials() int { return len(t.Ins) } +func (t *BaseTx) NumCredentials() int { + return len(t.Ins) +} // Verify ensures that transaction metadata is valid func (t *BaseTx) Verify(ctx *snow.Context) error { switch { case t == nil: - return errNilTx + return ErrNilTx case t.NetworkID != ctx.NetworkID: return ErrWrongNetworkID case t.BlockchainID != ctx.ChainID: - return errWrongChainID + return ErrWrongChainID case len(t.Memo) > MaxMemoSize: - return fmt.Errorf("memo length, %d, exceeds maximum memo length, %d", - len(t.Memo), MaxMemoSize) + return fmt.Errorf( + "%w: %d > %d", + ErrMemoTooLarge, + len(t.Memo), + MaxMemoSize, + ) default: return nil } diff --git a/avalanchego/vms/components/avax/flow_checker.go b/avalanchego/vms/components/avax/flow_checker.go index 448fead0..b0ed8c86 100644 --- a/avalanchego/vms/components/avax/flow_checker.go +++ b/avalanchego/vms/components/avax/flow_checker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) -var errInsufficientFunds = errors.New("insufficient funds") +var ErrInsufficientFunds = errors.New("insufficient funds") type FlowChecker struct { consumed, produced map[ids.ID]uint64 @@ -25,9 +25,13 @@ func NewFlowChecker() *FlowChecker { } } -func (fc *FlowChecker) Consume(assetID ids.ID, amount uint64) { fc.add(fc.consumed, assetID, amount) } +func (fc *FlowChecker) Consume(assetID ids.ID, amount uint64) { + fc.add(fc.consumed, assetID, amount) +} -func (fc *FlowChecker) Produce(assetID ids.ID, amount uint64) { fc.add(fc.produced, assetID, amount) } +func (fc *FlowChecker) Produce(assetID ids.ID, amount uint64) { + fc.add(fc.produced, assetID, amount) +} func (fc *FlowChecker) add(value map[ids.ID]uint64, assetID ids.ID, amount uint64) { var err error @@ -40,7 +44,7 @@ func (fc *FlowChecker) Verify() error { for assetID, producedAssetAmount := range fc.produced { consumedAssetAmount := fc.consumed[assetID] if producedAssetAmount > consumedAssetAmount { - fc.errs.Add(errInsufficientFunds) + fc.errs.Add(ErrInsufficientFunds) break } } diff --git a/avalanchego/vms/components/avax/metadata.go b/avalanchego/vms/components/avax/metadata.go index 6dcb9db3..f03389c4 100644 --- a/avalanchego/vms/components/avax/metadata.go +++ b/avalanchego/vms/components/avax/metadata.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -15,7 +15,7 @@ var ( errNilMetadata = errors.New("nil metadata is not valid") errMetadataNotInitialize = errors.New("metadata was never initialized and is not valid") - _ verify.Verifiable = &Metadata{} + _ verify.Verifiable = (*Metadata)(nil) ) // TODO: Delete this once the downstream dependencies have been updated. 
@@ -33,13 +33,19 @@ func (md *Metadata) Initialize(unsignedBytes, bytes []byte) { } // ID returns the unique ID of this data -func (md *Metadata) ID() ids.ID { return md.id } +func (md *Metadata) ID() ids.ID { + return md.id +} // UnsignedBytes returns the unsigned binary representation of this data -func (md *Metadata) Bytes() []byte { return md.unsignedBytes } +func (md *Metadata) Bytes() []byte { + return md.unsignedBytes +} // Bytes returns the binary representation of this data -func (md *Metadata) SignedBytes() []byte { return md.bytes } +func (md *Metadata) SignedBytes() []byte { + return md.bytes +} func (md *Metadata) Verify() error { switch { diff --git a/avalanchego/vms/components/avax/metadata_test.go b/avalanchego/vms/components/avax/metadata_test.go index 2e9c43fd..4d14cb6e 100644 --- a/avalanchego/vms/components/avax/metadata_test.go +++ b/avalanchego/vms/components/avax/metadata_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax diff --git a/avalanchego/vms/components/avax/mock_transferable_in.go b/avalanchego/vms/components/avax/mock_transferable_in.go new file mode 100644 index 00000000..5d1e6fc7 --- /dev/null +++ b/avalanchego/vms/components/avax/mock_transferable_in.go @@ -0,0 +1,93 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/components/avax (interfaces: TransferableIn) + +// Package avax is a generated GoMock package. +package avax + +import ( + reflect "reflect" + + snow "github.com/ava-labs/avalanchego/snow" + gomock "github.com/golang/mock/gomock" +) + +// MockTransferableIn is a mock of TransferableIn interface. 
+type MockTransferableIn struct { + ctrl *gomock.Controller + recorder *MockTransferableInMockRecorder +} + +// MockTransferableInMockRecorder is the mock recorder for MockTransferableIn. +type MockTransferableInMockRecorder struct { + mock *MockTransferableIn +} + +// NewMockTransferableIn creates a new mock instance. +func NewMockTransferableIn(ctrl *gomock.Controller) *MockTransferableIn { + mock := &MockTransferableIn{ctrl: ctrl} + mock.recorder = &MockTransferableInMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockTransferableIn) EXPECT() *MockTransferableInMockRecorder { + return m.recorder +} + +// Amount mocks base method. +func (m *MockTransferableIn) Amount() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Amount") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Amount indicates an expected call of Amount. +func (mr *MockTransferableInMockRecorder) Amount() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Amount", reflect.TypeOf((*MockTransferableIn)(nil).Amount)) +} + +// Cost mocks base method. +func (m *MockTransferableIn) Cost() (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Cost") + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Cost indicates an expected call of Cost. +func (mr *MockTransferableInMockRecorder) Cost() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cost", reflect.TypeOf((*MockTransferableIn)(nil).Cost)) +} + +// InitCtx mocks base method. +func (m *MockTransferableIn) InitCtx(arg0 *snow.Context) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "InitCtx", arg0) +} + +// InitCtx indicates an expected call of InitCtx. 
+func (mr *MockTransferableInMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockTransferableIn)(nil).InitCtx), arg0) +} + +// Verify mocks base method. +func (m *MockTransferableIn) Verify() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Verify") + ret0, _ := ret[0].(error) + return ret0 +} + +// Verify indicates an expected call of Verify. +func (mr *MockTransferableInMockRecorder) Verify() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockTransferableIn)(nil).Verify)) +} diff --git a/avalanchego/vms/components/avax/mock_transferable_out.go b/avalanchego/vms/components/avax/mock_transferable_out.go index a604af14..bb3ad3ae 100644 --- a/avalanchego/vms/components/avax/mock_transferable_out.go +++ b/avalanchego/vms/components/avax/mock_transferable_out.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/components/avax (interfaces: TransferableOut) diff --git a/avalanchego/vms/components/avax/singleton_state.go b/avalanchego/vms/components/avax/singleton_state.go deleted file mode 100644 index 36113089..00000000 --- a/avalanchego/vms/components/avax/singleton_state.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avax - -import ( - "github.com/ava-labs/avalanchego/database" -) - -const ( - IsInitializedKey byte = iota -) - -var ( - isInitializedKey = []byte{IsInitializedKey} - _ SingletonState = &singletonState{} -) - -// SingletonState is a thin wrapper around a database to provide, caching, -// serialization, and de-serialization of singletons. 
-type SingletonState interface { - IsInitialized() (bool, error) - SetInitialized() error -} - -type singletonState struct { - singletonDB database.Database -} - -func NewSingletonState(db database.Database) SingletonState { - return &singletonState{ - singletonDB: db, - } -} - -func (s *singletonState) IsInitialized() (bool, error) { - return s.singletonDB.Has(isInitializedKey) -} - -func (s *singletonState) SetInitialized() error { - return s.singletonDB.Put(isInitializedKey, nil) -} diff --git a/avalanchego/vms/components/avax/singleton_state_test.go b/avalanchego/vms/components/avax/singleton_state_test.go deleted file mode 100644 index 5b8e97dc..00000000 --- a/avalanchego/vms/components/avax/singleton_state_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avax - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database/memdb" -) - -func TestSingletonState(t *testing.T) { - require := require.New(t) - - db := memdb.New() - s := NewSingletonState(db) - - isInitialized, err := s.IsInitialized() - require.NoError(err) - require.False(isInitialized) - - err = s.SetInitialized() - require.NoError(err) - - isInitialized, err = s.IsInitialized() - require.NoError(err) - require.True(isInitialized) -} diff --git a/avalanchego/vms/components/avax/state.go b/avalanchego/vms/components/avax/state.go index 181850a3..ab0d42ab 100644 --- a/avalanchego/vms/components/avax/state.go +++ b/avalanchego/vms/components/avax/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax diff --git a/avalanchego/vms/components/avax/status_state.go b/avalanchego/vms/components/avax/status_state.go deleted file mode 100644 index acbc8474..00000000 --- a/avalanchego/vms/components/avax/status_state.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avax - -import ( - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/cache/metercacher" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" -) - -const ( - statusCacheSize = 8192 -) - -// StatusState is a thin wrapper around a database to provide, caching, -// serialization, and de-serialization for statuses. -type StatusState interface { - // Status returns a status from storage. - GetStatus(id ids.ID) (choices.Status, error) - - // PutStatus saves a status in storage. - PutStatus(id ids.ID, status choices.Status) error - - // DeleteStatus removes a status from storage. - DeleteStatus(id ids.ID) error -} - -type statusState struct { - // ID -> Status of thing with that ID, or nil if StatusState doesn't have - // that status. 
- statusCache cache.Cacher - statusDB database.Database -} - -func NewStatusState(db database.Database) StatusState { - return &statusState{ - statusCache: &cache.LRU{Size: statusCacheSize}, - statusDB: db, - } -} - -func NewMeteredStatusState(db database.Database, metrics prometheus.Registerer) (StatusState, error) { - cache, err := metercacher.New( - "status_cache", - metrics, - &cache.LRU{Size: statusCacheSize}, - ) - return &statusState{ - statusCache: cache, - statusDB: db, - }, err -} - -func (s *statusState) GetStatus(id ids.ID) (choices.Status, error) { - if statusIntf, found := s.statusCache.Get(id); found { - if statusIntf == nil { - return choices.Unknown, database.ErrNotFound - } - return statusIntf.(choices.Status), nil - } - - val, err := database.GetUInt32(s.statusDB, id[:]) - if err == database.ErrNotFound { - s.statusCache.Put(id, nil) - return choices.Unknown, database.ErrNotFound - } - if err != nil { - return choices.Unknown, err - } - - status := choices.Status(val) - if err := status.Valid(); err != nil { - return choices.Unknown, err - } - - s.statusCache.Put(id, status) - return status, nil -} - -func (s *statusState) PutStatus(id ids.ID, status choices.Status) error { - s.statusCache.Put(id, status) - return database.PutUInt32(s.statusDB, id[:], uint32(status)) -} - -func (s *statusState) DeleteStatus(id ids.ID) error { - s.statusCache.Put(id, nil) - return s.statusDB.Delete(id[:]) -} diff --git a/avalanchego/vms/components/avax/status_state_test.go b/avalanchego/vms/components/avax/status_state_test.go deleted file mode 100644 index 955cb837..00000000 --- a/avalanchego/vms/components/avax/status_state_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avax - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" -) - -func TestStatusState(t *testing.T) { - require := require.New(t) - id0 := ids.GenerateTestID() - - db := memdb.New() - s := NewStatusState(db) - - _, err := s.GetStatus(id0) - require.Equal(database.ErrNotFound, err) - - _, err = s.GetStatus(id0) - require.Equal(database.ErrNotFound, err) - - err = s.PutStatus(id0, choices.Accepted) - require.NoError(err) - - status, err := s.GetStatus(id0) - require.NoError(err) - require.Equal(choices.Accepted, status) - - err = s.DeleteStatus(id0) - require.NoError(err) - - _, err = s.GetStatus(id0) - require.Equal(database.ErrNotFound, err) - - err = s.PutStatus(id0, choices.Accepted) - require.NoError(err) - - s = NewStatusState(db) - - status, err = s.GetStatus(id0) - require.NoError(err) - require.Equal(choices.Accepted, status) -} diff --git a/avalanchego/vms/components/avax/test_verifiable.go b/avalanchego/vms/components/avax/test_verifiable.go index 499b25d6..0e9eb877 100644 --- a/avalanchego/vms/components/avax/test_verifiable.go +++ b/avalanchego/vms/components/avax/test_verifiable.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -7,10 +7,15 @@ import "github.com/ava-labs/avalanchego/snow" type TestVerifiable struct{ Err error } -func (v *TestVerifiable) InitCtx(ctx *snow.Context) {} -func (v *TestVerifiable) Verify() error { return v.Err } +func (*TestVerifiable) InitCtx(*snow.Context) {} -func (v *TestVerifiable) VerifyState() error { return v.Err } +func (v *TestVerifiable) Verify() error { + return v.Err +} + +func (v *TestVerifiable) VerifyState() error { + return v.Err +} type TestTransferable struct { TestVerifiable @@ -18,13 +23,15 @@ type TestTransferable struct { Val uint64 `serialize:"true"` } -func (t *TestTransferable) InitCtx(*snow.Context) { - // no op -} +func (*TestTransferable) InitCtx(*snow.Context) {} -func (t *TestTransferable) Amount() uint64 { return t.Val } +func (t *TestTransferable) Amount() uint64 { + return t.Val +} -func (t *TestTransferable) Cost() (uint64, error) { return 0, nil } +func (*TestTransferable) Cost() (uint64, error) { + return 0, nil +} type TestAddressable struct { TestTransferable `serialize:"true"` @@ -32,4 +39,6 @@ type TestAddressable struct { Addrs [][]byte `serialize:"true"` } -func (a *TestAddressable) Addresses() [][]byte { return a.Addrs } +func (a *TestAddressable) Addresses() [][]byte { + return a.Addrs +} diff --git a/avalanchego/vms/components/avax/transferables.go b/avalanchego/vms/components/avax/transferables.go index 1ae44030..fee08be1 100644 --- a/avalanchego/vms/components/avax/transferables.go +++ b/avalanchego/vms/components/avax/transferables.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -12,21 +12,22 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/verify" ) var ( - errNilTransferableOutput = errors.New("nil transferable output is not valid") - errNilTransferableFxOutput = errors.New("nil transferable feature extension output is not valid") - errOutputsNotSorted = errors.New("outputs not sorted") + ErrNilTransferableOutput = errors.New("nil transferable output is not valid") + ErrNilTransferableFxOutput = errors.New("nil transferable feature extension output is not valid") + ErrOutputsNotSorted = errors.New("outputs not sorted") - errNilTransferableInput = errors.New("nil transferable input is not valid") - errNilTransferableFxInput = errors.New("nil transferable feature extension input is not valid") - errInputsNotSortedUnique = errors.New("inputs not sorted and unique") + ErrNilTransferableInput = errors.New("nil transferable input is not valid") + ErrNilTransferableFxInput = errors.New("nil transferable feature extension input is not valid") + ErrInputsNotSortedUnique = errors.New("inputs not sorted and unique") - _ verify.Verifiable = &TransferableOutput{} - _ verify.Verifiable = &TransferableInput{} + _ verify.Verifiable = (*TransferableOutput)(nil) + _ verify.Verifiable = (*TransferableInput)(nil) + _ utils.Sortable[*TransferableInput] = (*TransferableInput)(nil) ) // Amounter is a data structure that has an amount of something associated with it @@ -72,14 +73,16 @@ func (out *TransferableOutput) InitCtx(ctx *snow.Context) { } // Output returns the feature extension output that this Output is using. 
-func (out *TransferableOutput) Output() TransferableOut { return out.Out } +func (out *TransferableOutput) Output() TransferableOut { + return out.Out +} func (out *TransferableOutput) Verify() error { switch { case out == nil: - return errNilTransferableOutput + return ErrNilTransferableOutput case out.Out == nil: - return errNilTransferableFxOutput + return ErrNilTransferableFxOutput default: return verify.All(&out.Asset, out.Out) } @@ -114,8 +117,15 @@ func (outs *innerSortTransferableOutputs) Less(i, j int) bool { } return bytes.Compare(iBytes, jBytes) == -1 } -func (outs *innerSortTransferableOutputs) Len() int { return len(outs.outs) } -func (outs *innerSortTransferableOutputs) Swap(i, j int) { o := outs.outs; o[j], o[i] = o[i], o[j] } + +func (outs *innerSortTransferableOutputs) Len() int { + return len(outs.outs) +} + +func (outs *innerSortTransferableOutputs) Swap(i, j int) { + o := outs.outs + o[j], o[i] = o[i], o[j] +} // SortTransferableOutputs sorts output objects func SortTransferableOutputs(outs []*TransferableOutput, c codec.Manager) { @@ -136,46 +146,28 @@ type TransferableInput struct { } // Input returns the feature extension input that this Input is using. 
-func (in *TransferableInput) Input() TransferableIn { return in.In } +func (in *TransferableInput) Input() TransferableIn { + return in.In +} func (in *TransferableInput) Verify() error { switch { case in == nil: - return errNilTransferableInput + return ErrNilTransferableInput case in.In == nil: - return errNilTransferableFxInput + return ErrNilTransferableFxInput default: return verify.All(&in.UTXOID, &in.Asset, in.In) } } -type innerSortTransferableInputs []*TransferableInput - -func (ins innerSortTransferableInputs) Less(i, j int) bool { - iID, iIndex := ins[i].InputSource() - jID, jIndex := ins[j].InputSource() - - switch bytes.Compare(iID[:], jID[:]) { - case -1: - return true - case 0: - return iIndex < jIndex - default: - return false - } -} -func (ins innerSortTransferableInputs) Len() int { return len(ins) } -func (ins innerSortTransferableInputs) Swap(i, j int) { ins[j], ins[i] = ins[i], ins[j] } - -func SortTransferableInputs(ins []*TransferableInput) { sort.Sort(innerSortTransferableInputs(ins)) } - -func IsSortedAndUniqueTransferableInputs(ins []*TransferableInput) bool { - return utils.IsSortedAndUnique(innerSortTransferableInputs(ins)) +func (in *TransferableInput) Less(other *TransferableInput) bool { + return in.UTXOID.Less(&other.UTXOID) } type innerSortTransferableInputsWithSigners struct { ins []*TransferableInput - signers [][]*crypto.PrivateKeySECP256K1R + signers [][]*secp256k1.PrivateKey } func (ins *innerSortTransferableInputsWithSigners) Less(i, j int) bool { @@ -191,7 +183,11 @@ func (ins *innerSortTransferableInputsWithSigners) Less(i, j int) bool { return false } } -func (ins *innerSortTransferableInputsWithSigners) Len() int { return len(ins.ins) } + +func (ins *innerSortTransferableInputsWithSigners) Len() int { + return len(ins.ins) +} + func (ins *innerSortTransferableInputsWithSigners) Swap(i, j int) { ins.ins[j], ins.ins[i] = ins.ins[i], ins.ins[j] ins.signers[j], ins.signers[i] = ins.signers[i], ins.signers[j] @@ -199,16 
+195,10 @@ func (ins *innerSortTransferableInputsWithSigners) Swap(i, j int) { // SortTransferableInputsWithSigners sorts the inputs and signers based on the // input's utxo ID -func SortTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) { +func SortTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*secp256k1.PrivateKey) { sort.Sort(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) } -// IsSortedAndUniqueTransferableInputsWithSigners returns true if the inputs are -// sorted and unique -func IsSortedAndUniqueTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) bool { - return utils.IsSortedAndUnique(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) -} - // VerifyTx verifies that the inputs and outputs flowcheck, including a fee. // Additionally, this verifies that the inputs and outputs are sorted. func VerifyTx( @@ -231,7 +221,7 @@ func VerifyTx( fc.Produce(out.AssetID(), out.Output().Amount()) } if !IsSortedTransferableOutputs(outs, c) { - return errOutputsNotSorted + return ErrOutputsNotSorted } } @@ -243,8 +233,8 @@ func VerifyTx( } fc.Consume(in.AssetID(), in.Input().Amount()) } - if !IsSortedAndUniqueTransferableInputs(ins) { - return errInputsNotSortedUnique + if !utils.IsSortedAndUniqueSortable(ins) { + return ErrInputsNotSortedUnique } } diff --git a/avalanchego/vms/components/avax/transferables_test.go b/avalanchego/vms/components/avax/transferables_test.go index 23b79415..589fb0e0 100644 --- a/avalanchego/vms/components/avax/transferables_test.go +++ b/avalanchego/vms/components/avax/transferables_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -244,11 +245,11 @@ func TestTransferableInputSorting(t *testing.T) { }, } - if IsSortedAndUniqueTransferableInputs(ins) { + if utils.IsSortedAndUniqueSortable(ins) { t.Fatalf("Shouldn't be sorted") } - SortTransferableInputs(ins) - if !IsSortedAndUniqueTransferableInputs(ins) { + utils.Sort(ins) + if !utils.IsSortedAndUniqueSortable(ins) { t.Fatalf("Should be sorted") } @@ -261,7 +262,7 @@ func TestTransferableInputSorting(t *testing.T) { In: &TestTransferable{}, }) - if IsSortedAndUniqueTransferableInputs(ins) { + if utils.IsSortedAndUniqueSortable(ins) { t.Fatalf("Shouldn't be unique") } } diff --git a/avalanchego/vms/components/avax/utxo.go b/avalanchego/vms/components/avax/utxo.go index 6e1c90d1..afea6891 100644 --- a/avalanchego/vms/components/avax/utxo.go +++ b/avalanchego/vms/components/avax/utxo.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -13,7 +13,7 @@ var ( errNilUTXO = errors.New("nil utxo is not valid") errEmptyUTXO = errors.New("empty utxo is not valid") - _ verify.Verifiable = &UTXO{} + _ verify.Verifiable = (*UTXO)(nil) ) type UTXO struct { diff --git a/avalanchego/vms/components/avax/utxo_fetching.go b/avalanchego/vms/components/avax/utxo_fetching.go index efc0afd1..18525131 100644 --- a/avalanchego/vms/components/avax/utxo_fetching.go +++ b/avalanchego/vms/components/avax/utxo_fetching.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -9,12 +9,14 @@ import ( "math" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/set" safemath "github.com/ava-labs/avalanchego/utils/math" ) // GetBalance returns the current balance of [addrs] -func GetBalance(db UTXOReader, addrs ids.ShortSet) (uint64, error) { +func GetBalance(db UTXOReader, addrs set.Set[ids.ShortID]) (uint64, error) { utxos, err := GetAllUTXOs(db, addrs) if err != nil { return 0, fmt.Errorf("couldn't get UTXOs: %w", err) @@ -22,7 +24,8 @@ func GetBalance(db UTXOReader, addrs ids.ShortSet) (uint64, error) { balance := uint64(0) for _, utxo := range utxos { if out, ok := utxo.Out.(Amounter); ok { - if balance, err = safemath.Add64(out.Amount(), balance); err != nil { + balance, err = safemath.Add64(out.Amount(), balance) + if err != nil { return 0, err } } @@ -30,7 +33,7 @@ func GetBalance(db UTXOReader, addrs ids.ShortSet) (uint64, error) { return balance, nil } -func GetAllUTXOs(db UTXOReader, addrs ids.ShortSet) ([]*UTXO, error) { +func GetAllUTXOs(db UTXOReader, addrs set.Set[ids.ShortID]) ([]*UTXO, error) { utxos, _, _, err := GetPaginatedUTXOs( db, addrs, @@ -57,17 +60,18 @@ func GetAllUTXOs(db UTXOReader, addrs ids.ShortSet) ([]*UTXO, error) { // * The ID of the last UTXO fetched func GetPaginatedUTXOs( db UTXOReader, - addrs ids.ShortSet, + addrs set.Set[ids.ShortID], lastAddr ids.ShortID, lastUTXOID ids.ID, limit int, ) ([]*UTXO, ids.ShortID, ids.ID, error) { var ( utxos []*UTXO - seen ids.Set // IDs of UTXOs already in the list - searchSize = limit // the limit diminishes which can impact the expected return - addrsList = addrs.SortedList() // enforces the same ordering for pagination + seen set.Set[ids.ID] // IDs of UTXOs already in the list + searchSize = limit // the limit diminishes which can impact the expected return + addrsList = addrs.List() ) + utils.Sort(addrsList) // enforces the same ordering for pagination for _, addr := range addrsList 
{ start := ids.Empty if comp := bytes.Compare(addr.Bytes(), lastAddr.Bytes()); comp == -1 { // Skip addresses before [startAddr] diff --git a/avalanchego/vms/components/avax/utxo_fetching_test.go b/avalanchego/vms/components/avax/utxo_fetching_test.go index 978984cb..2af04f53 100644 --- a/avalanchego/vms/components/avax/utxo_fetching_test.go +++ b/avalanchego/vms/components/avax/utxo_fetching_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -22,7 +23,7 @@ func TestFetchUTXOs(t *testing.T) { txID := ids.GenerateTestID() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() - addrs := ids.ShortSet{} + addrs := set.Set[ids.ShortID]{} addrs.Add(addr) utxo := &UTXO{ UTXOID: UTXOID{ @@ -75,7 +76,7 @@ func TestGetPaginatedUTXOs(t *testing.T) { addr0 := ids.GenerateTestShortID() addr1 := ids.GenerateTestShortID() addr2 := ids.GenerateTestShortID() - addrs := ids.ShortSet{} + addrs := set.Set[ids.ShortID]{} addrs.Add(addr0, addr1) c := linearcodec.NewDefault() diff --git a/avalanchego/vms/components/avax/utxo_handler.go b/avalanchego/vms/components/avax/utxo_handler.go new file mode 100644 index 00000000..782d8592 --- /dev/null +++ b/avalanchego/vms/components/avax/utxo_handler.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avax + +import "github.com/ava-labs/avalanchego/ids" + +// Removes the UTXOs consumed by [ins] from the UTXO set +func Consume(utxoDB UTXODeleter, ins []*TransferableInput) { + for _, input := range ins { + utxoDB.DeleteUTXO(input.InputID()) + } +} + +// Adds the UTXOs created by [outs] to the UTXO set. +// [txID] is the ID of the tx that created [outs]. +func Produce( + utxoDB UTXOAdder, + txID ids.ID, + outs []*TransferableOutput, +) { + for index, out := range outs { + utxoDB.AddUTXO(&UTXO{ + UTXOID: UTXOID{ + TxID: txID, + OutputIndex: uint32(index), + }, + Asset: out.Asset, + Out: out.Output(), + }) + } +} diff --git a/avalanchego/vms/components/avax/utxo_id.go b/avalanchego/vms/components/avax/utxo_id.go index b39177f7..57a4c3c9 100644 --- a/avalanchego/vms/components/avax/utxo_id.go +++ b/avalanchego/vms/components/avax/utxo_id.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -7,7 +7,8 @@ import ( "bytes" "errors" "fmt" - "sort" + "strconv" + "strings" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" @@ -15,9 +16,13 @@ import ( ) var ( - errNilUTXOID = errors.New("nil utxo ID is not valid") + errNilUTXOID = errors.New("nil utxo ID is not valid") + errMalformedUTXOIDString = errors.New("unexpected number of tokens in string") + errFailedDecodingUTXOIDTxID = errors.New("failed decoding UTXOID TxID") + errFailedDecodingUTXOIDIndex = errors.New("failed decoding UTXOID index") - _ verify.Verifiable = &UTXOID{} + _ verify.Verifiable = (*UTXOID)(nil) + _ utils.Sortable[*UTXOID] = (*UTXOID)(nil) ) type UTXOID struct { @@ -32,7 +37,9 @@ type UTXOID struct { } // InputSource returns the source of the UTXO that this input is spending -func (utxo *UTXOID) InputSource() (ids.ID, uint32) { return utxo.TxID, utxo.OutputIndex } +func (utxo *UTXOID) InputSource() (ids.ID, uint32) { + return utxo.TxID, utxo.OutputIndex +} // InputID returns a unique ID of the UTXO that this input is spending func (utxo *UTXOID) InputID() ids.ID { @@ -44,12 +51,37 @@ func (utxo *UTXOID) InputID() ids.ID { // Symbolic returns if this is the ID of a UTXO in the DB, or if it is a // symbolic input -func (utxo *UTXOID) Symbolic() bool { return utxo.Symbol } +func (utxo *UTXOID) Symbolic() bool { + return utxo.Symbol +} func (utxo *UTXOID) String() string { return fmt.Sprintf("%s:%d", utxo.TxID, utxo.OutputIndex) } +// UTXOIDFromString attempts to parse a string into a UTXOID +func UTXOIDFromString(s string) (*UTXOID, error) { + ss := strings.Split(s, ":") + if len(ss) != 2 { + return nil, errMalformedUTXOIDString + } + + txID, err := ids.FromString(ss[0]) + if err != nil { + return nil, fmt.Errorf("%w: %v", errFailedDecodingUTXOIDTxID, err) + } + + idx, err := strconv.ParseUint(ss[1], 10, 32) + if err != nil { + return nil, fmt.Errorf("%w: %v", errFailedDecodingUTXOIDIndex, err) + } + + return &UTXOID{ + TxID: txID, + OutputIndex: 
uint32(idx), + }, nil +} + func (utxo *UTXOID) Verify() error { switch { case utxo == nil: @@ -59,26 +91,16 @@ func (utxo *UTXOID) Verify() error { } } -type innerSortUTXOIDs []*UTXOID - -func (utxos innerSortUTXOIDs) Less(i, j int) bool { - iID, iIndex := utxos[i].InputSource() - jID, jIndex := utxos[j].InputSource() +func (utxo *UTXOID) Less(other *UTXOID) bool { + utxoID, utxoIndex := utxo.InputSource() + otherID, otherIndex := other.InputSource() - switch bytes.Compare(iID[:], jID[:]) { + switch bytes.Compare(utxoID[:], otherID[:]) { case -1: return true case 0: - return iIndex < jIndex + return utxoIndex < otherIndex default: return false } } -func (utxos innerSortUTXOIDs) Len() int { return len(utxos) } -func (utxos innerSortUTXOIDs) Swap(i, j int) { utxos[j], utxos[i] = utxos[i], utxos[j] } - -func SortUTXOIDs(utxos []*UTXOID) { sort.Sort(innerSortUTXOIDs(utxos)) } - -func IsSortedAndUniqueUTXOIDs(utxos []*UTXOID) bool { - return utils.IsSortedAndUnique(innerSortUTXOIDs(utxos)) -} diff --git a/avalanchego/vms/components/avax/utxo_id_test.go b/avalanchego/vms/components/avax/utxo_id_test.go index 99a5e8b9..a35ac023 100644 --- a/avalanchego/vms/components/avax/utxo_id_test.go +++ b/avalanchego/vms/components/avax/utxo_id_test.go @@ -1,11 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax import ( + "math" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" @@ -58,3 +61,155 @@ func TestUTXOID(t *testing.T) { t.Fatalf("Parsing returned the wrong UTXO ID") } } + +func TestUTXOIDLess(t *testing.T) { + type test struct { + name string + id1 UTXOID + id2 UTXOID + expected bool + } + tests := []test{ + { + name: "same", + id1: UTXOID{}, + id2: UTXOID{}, + expected: false, + }, + { + name: "first id smaller", + id1: UTXOID{}, + id2: UTXOID{ + TxID: ids.ID{1}, + }, + expected: true, + }, + { + name: "first id larger", + id1: UTXOID{ + TxID: ids.ID{1}, + }, + id2: UTXOID{}, + expected: false, + }, + { + name: "first index smaller", + id1: UTXOID{}, + id2: UTXOID{ + OutputIndex: 1, + }, + expected: true, + }, + { + name: "first index larger", + id1: UTXOID{ + OutputIndex: 1, + }, + id2: UTXOID{}, + expected: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + require.Equal(tt.expected, tt.id1.Less(&tt.id2)) + }) + } +} + +func TestUTXOIDFromString(t *testing.T) { + tests := []struct { + description string + utxoID *UTXOID + expectedStr string + parseErr error + }{ + { + description: "empty utxoID", + utxoID: &UTXOID{}, + expectedStr: "11111111111111111111111111111111LpoYY:0", + parseErr: nil, + }, + { + description: "a random utxoID", + utxoID: &UTXOID{ + TxID: ids.Empty.Prefix(2022), + OutputIndex: 2022, + }, + expectedStr: "PkHybKBmFvBkfumRvJZToECJp4oCiziLu95p86rU1THx1WAqa:2022", + parseErr: nil, + }, + { + description: "max output index utxoID", + utxoID: &UTXOID{ + TxID: ids.Empty.Prefix(1789), + OutputIndex: math.MaxUint32, + }, + expectedStr: "Y3sXNphGY121uVzj37rA8ooUAHrfuDZahzLrTq6UauAZTEqoX:4294967295", + parseErr: nil, + }, + { + description: "not enough tokens", + utxoID: &UTXOID{}, + expectedStr: "11111111111111111111111111111111LpoYY", + parseErr: 
errMalformedUTXOIDString, + }, + { + description: "not enough tokens", + utxoID: &UTXOID{}, + expectedStr: "11111111111111111111111111111111LpoYY:10:10", + parseErr: errMalformedUTXOIDString, + }, + { + description: "missing TxID", + utxoID: &UTXOID{}, + expectedStr: ":2022", + parseErr: errFailedDecodingUTXOIDTxID, + }, + { + description: "non TxID", + utxoID: &UTXOID{}, + expectedStr: "11:NOT_AN_INDEX", + parseErr: errFailedDecodingUTXOIDTxID, + }, + { + description: "missing index", + utxoID: &UTXOID{}, + expectedStr: "11111111111111111111111111111111LpoYY:", + parseErr: errFailedDecodingUTXOIDIndex, + }, + { + description: "non index", + utxoID: &UTXOID{}, + expectedStr: "11111111111111111111111111111111LpoYY:NOT_AN_INDEX", + parseErr: errFailedDecodingUTXOIDIndex, + }, + { + description: "negative index", + utxoID: &UTXOID{}, + expectedStr: "11111111111111111111111111111111LpoYY:-1", + parseErr: errFailedDecodingUTXOIDIndex, + }, + { + description: "index too large", + utxoID: &UTXOID{}, + expectedStr: "11111111111111111111111111111111LpoYY:4294967296", + parseErr: errFailedDecodingUTXOIDIndex, + }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + require := require.New(t) + + retrievedUTXOID, err := UTXOIDFromString(test.expectedStr) + require.ErrorIs(err, test.parseErr) + + if err == nil { + require.Equal(test.utxoID.InputID(), retrievedUTXOID.InputID()) + require.Equal(test.utxoID, retrievedUTXOID) + require.Equal(test.utxoID.String(), retrievedUTXOID.String()) + } + }) + } +} diff --git a/avalanchego/vms/components/avax/utxo_state.go b/avalanchego/vms/components/avax/utxo_state.go index f8a7aed1..beb1846e 100644 --- a/avalanchego/vms/components/avax/utxo_state.go +++ b/avalanchego/vms/components/avax/utxo_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax @@ -49,6 +49,14 @@ type UTXOGetter interface { GetUTXO(utxoID ids.ID) (*UTXO, error) } +type UTXOAdder interface { + AddUTXO(utxo *UTXO) +} + +type UTXODeleter interface { + DeleteUTXO(utxoID ids.ID) +} + // UTXOWriter is a thin wrapper around a database to provide storage and // deletion of UTXOs. type UTXOWriter interface { @@ -63,39 +71,39 @@ type utxoState struct { codec codec.Manager // UTXO ID -> *UTXO. If the *UTXO is nil the UTXO doesn't exist - utxoCache cache.Cacher + utxoCache cache.Cacher[ids.ID, *UTXO] utxoDB database.Database indexDB database.Database - indexCache cache.Cacher + indexCache cache.Cacher[string, linkeddb.LinkedDB] } func NewUTXOState(db database.Database, codec codec.Manager) UTXOState { return &utxoState{ codec: codec, - utxoCache: &cache.LRU{Size: utxoCacheSize}, + utxoCache: &cache.LRU[ids.ID, *UTXO]{Size: utxoCacheSize}, utxoDB: prefixdb.New(utxoPrefix, db), indexDB: prefixdb.New(indexPrefix, db), - indexCache: &cache.LRU{Size: indexCacheSize}, + indexCache: &cache.LRU[string, linkeddb.LinkedDB]{Size: indexCacheSize}, } } func NewMeteredUTXOState(db database.Database, codec codec.Manager, metrics prometheus.Registerer) (UTXOState, error) { - utxoCache, err := metercacher.New( + utxoCache, err := metercacher.New[ids.ID, *UTXO]( "utxo_cache", metrics, - &cache.LRU{Size: utxoCacheSize}, + &cache.LRU[ids.ID, *UTXO]{Size: utxoCacheSize}, ) if err != nil { return nil, err } - indexCache, err := metercacher.New( + indexCache, err := metercacher.New[string, linkeddb.LinkedDB]( "index_cache", metrics, - &cache.LRU{ + &cache.LRU[string, linkeddb.LinkedDB]{ Size: indexCacheSize, }, ) @@ -111,11 +119,11 @@ func NewMeteredUTXOState(db database.Database, codec codec.Manager, metrics prom } func (s *utxoState) GetUTXO(utxoID ids.ID) (*UTXO, error) { - if utxoIntf, found := s.utxoCache.Get(utxoID); found { - if utxoIntf == nil { + if utxo, found := s.utxoCache.Get(utxoID); found { + if utxo == nil { return nil, database.ErrNotFound } - 
return utxoIntf.(*UTXO), nil + return utxo, nil } bytes, err := s.utxoDB.Get(utxoID[:]) @@ -166,6 +174,9 @@ func (s *utxoState) PutUTXO(utxo *UTXO) error { func (s *utxoState) DeleteUTXO(utxoID ids.ID) error { utxo, err := s.GetUTXO(utxoID) + if err == database.ErrNotFound { + return nil + } if err != nil { return err } @@ -214,7 +225,7 @@ func (s *utxoState) UTXOIDs(addr []byte, start ids.ID, limit int) ([]ids.ID, err func (s *utxoState) getIndexDB(addr []byte) linkeddb.LinkedDB { addrStr := string(addr) if indexList, exists := s.indexCache.Get(addrStr); exists { - return indexList.(linkeddb.LinkedDB) + return indexList } indexDB := prefixdb.NewNested(addr, s.indexDB) diff --git a/avalanchego/vms/components/avax/utxo_state_test.go b/avalanchego/vms/components/avax/utxo_state_test.go index d9a6c6dc..993fab76 100644 --- a/avalanchego/vms/components/avax/utxo_state_test.go +++ b/avalanchego/vms/components/avax/utxo_state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package avax @@ -64,7 +64,7 @@ func TestUTXOState(t *testing.T) { require.Equal(database.ErrNotFound, err) err = s.DeleteUTXO(utxoID) - require.Equal(database.ErrNotFound, err) + require.NoError(err) err = s.PutUTXO(utxo) require.NoError(err) diff --git a/avalanchego/vms/components/avax/utxo_test.go b/avalanchego/vms/components/avax/utxo_test.go index 600408bb..a872e673 100644 --- a/avalanchego/vms/components/avax/utxo_test.go +++ b/avalanchego/vms/components/avax/utxo_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package avax diff --git a/avalanchego/vms/components/chain/block.go b/avalanchego/vms/components/chain/block.go index 8f946a93..d03659ed 100644 --- a/avalanchego/vms/components/chain/block.go +++ b/avalanchego/vms/components/chain/block.go @@ -1,10 +1,22 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain import ( + "context" + "errors" + "fmt" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +var ( + _ snowman.Block = (*BlockWrapper)(nil) + _ block.WithVerifyContext = (*BlockWrapper)(nil) + + errExpectedBlockWithVerifyContext = errors.New("expected block.WithVerifyContext") ) // BlockWrapper wraps a snowman Block while adding a smart caching layer to improve @@ -20,8 +32,50 @@ type BlockWrapper struct { // Note: it is guaranteed that if a block passes verification it will be added to // consensus and eventually be decided ie. either Accept/Reject will be called // on [bw] removing it from [verifiedBlocks]. -func (bw *BlockWrapper) Verify() error { - if err := bw.Block.Verify(); err != nil { +func (bw *BlockWrapper) Verify(ctx context.Context) error { + if err := bw.Block.Verify(ctx); err != nil { + // Note: we cannot cache blocks failing verification in case + // the error is temporary and the block could become valid in + // the future. + return err + } + + blkID := bw.ID() + bw.state.unverifiedBlocks.Evict(blkID) + bw.state.verifiedBlocks[blkID] = bw + return nil +} + +// ShouldVerifyWithContext checks if the underlying block should be verified +// with a block context. If the underlying block does not implement the +// block.WithVerifyContext interface, returns false without an error. Does not +// touch any block cache. 
+func (bw *BlockWrapper) ShouldVerifyWithContext(ctx context.Context) (bool, error) { + blkWithCtx, ok := bw.Block.(block.WithVerifyContext) + if !ok { + return false, nil + } + return blkWithCtx.ShouldVerifyWithContext(ctx) +} + +// VerifyWithContext verifies the underlying block with the given block context, +// evicts from the unverified block cache and if the block passes verification, +// adds it to [cache.verifiedBlocks]. +// Note: it is guaranteed that if a block passes verification it will be added +// to consensus and eventually be decided ie. either Accept/Reject will be +// called on [bw] removing it from [verifiedBlocks]. +// +// Note: If the underlying block does not implement the block.WithVerifyContext +// interface, an error is always returned because ShouldVerifyWithContext will +// always return false in this case and VerifyWithContext should never be +// called. +func (bw *BlockWrapper) VerifyWithContext(ctx context.Context, blockCtx *block.Context) error { + blkWithCtx, ok := bw.Block.(block.WithVerifyContext) + if !ok { + return fmt.Errorf("%w but got %T", errExpectedBlockWithVerifyContext, bw.Block) + } + + if err := blkWithCtx.VerifyWithContext(ctx, blockCtx); err != nil { // Note: we cannot cache blocks failing verification in case // the error is temporary and the block could become valid in // the future. @@ -36,20 +90,20 @@ func (bw *BlockWrapper) Verify() error { // Accept accepts the underlying block, removes it from verifiedBlocks, caches it as a decided // block, and updates the last accepted block. -func (bw *BlockWrapper) Accept() error { +func (bw *BlockWrapper) Accept(ctx context.Context) error { blkID := bw.ID() delete(bw.state.verifiedBlocks, blkID) bw.state.decidedBlocks.Put(blkID, bw) bw.state.lastAcceptedBlock = bw - return bw.Block.Accept() + return bw.Block.Accept(ctx) } // Reject rejects the underlying block, removes it from processing blocks, and caches it as a // decided block. 
-func (bw *BlockWrapper) Reject() error { +func (bw *BlockWrapper) Reject(ctx context.Context) error { blkID := bw.ID() delete(bw.state.verifiedBlocks, blkID) bw.state.decidedBlocks.Put(blkID, bw) - return bw.Block.Reject() + return bw.Block.Reject(ctx) } diff --git a/avalanchego/vms/components/chain/state.go b/avalanchego/vms/components/chain/state.go index d2ca6f5d..fe499194 100644 --- a/avalanchego/vms/components/chain/state.go +++ b/avalanchego/vms/components/chain/state.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain import ( + "context" "fmt" "github.com/prometheus/client_golang/prometheus" @@ -14,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) // State implements an efficient caching layer used to wrap a VM @@ -21,33 +23,34 @@ import ( type State struct { // getBlock retrieves a block from the VM's storage. If getBlock returns // a nil error, then the returned block must not have the status Unknown - getBlock func(ids.ID) (snowman.Block, error) + getBlock func(context.Context, ids.ID) (snowman.Block, error) // unmarshals [b] into a block - unmarshalBlock func([]byte) (snowman.Block, error) + unmarshalBlock func(context.Context, []byte) (snowman.Block, error) + batchedUnmarshalBlock func(context.Context, [][]byte) ([]snowman.Block, error) // buildBlock attempts to build a block on top of the currently preferred block // buildBlock should always return a block with status Processing since it should never // create an unknown block, and building on top of the preferred block should never yield // a block that has already been decided. 
- buildBlock func() (snowman.Block, error) + buildBlock func(context.Context) (snowman.Block, error) + + // If nil, [BuildBlockWithContext] returns [BuildBlock]. + buildBlockWithContext func(context.Context, *block.Context) (snowman.Block, error) // getStatus returns the status of the block - getStatus func(snowman.Block) (choices.Status, error) + getStatus func(context.Context, snowman.Block) (choices.Status, error) // verifiedBlocks is a map of blocks that have been verified and are // therefore currently in consensus. verifiedBlocks map[ids.ID]*BlockWrapper // decidedBlocks is an LRU cache of decided blocks. - // Every value in [decidedBlocks] is a (*BlockWrapper) - decidedBlocks cache.Cacher + decidedBlocks cache.Cacher[ids.ID, *BlockWrapper] // unverifiedBlocks is an LRU cache of blocks with status processing // that have not yet passed verification. - // Every value in [unverifiedBlocks] is a (*BlockWrapper) - unverifiedBlocks cache.Cacher + unverifiedBlocks cache.Cacher[ids.ID, *BlockWrapper] // missingBlocks is an LRU cache of missing blocks - // Every value in [missingBlocks] is an empty struct. - missingBlocks cache.Cacher + missingBlocks cache.Cacher[ids.ID, struct{}] // string([byte repr. 
of block]) --> the block's ID - bytesToIDCache cache.Cacher + bytesToIDCache cache.Cacher[string, ids.ID] lastAcceptedBlock *BlockWrapper } @@ -56,11 +59,13 @@ type Config struct { // Cache configuration: DecidedCacheSize, MissingCacheSize, UnverifiedCacheSize, BytesToIDCacheSize int - LastAcceptedBlock snowman.Block - GetBlock func(ids.ID) (snowman.Block, error) - UnmarshalBlock func([]byte) (snowman.Block, error) - BuildBlock func() (snowman.Block, error) - GetBlockIDAtHeight func(uint64) (ids.ID, error) + LastAcceptedBlock snowman.Block + GetBlock func(context.Context, ids.ID) (snowman.Block, error) + UnmarshalBlock func(context.Context, []byte) (snowman.Block, error) + BatchedUnmarshalBlock func(context.Context, [][]byte) ([]snowman.Block, error) + BuildBlock func(context.Context) (snowman.Block, error) + BuildBlockWithContext func(context.Context, *block.Context) (snowman.Block, error) + GetBlockIDAtHeight func(context.Context, uint64) (ids.ID, error) } // Block is an interface wrapping the normal snowman.Block interface to be used in @@ -75,8 +80,8 @@ type Block interface { // passed in from the VM that gets the block ID at a specific height. It is assumed that for any height // less than or equal to the last accepted block, getBlockIDAtHeight returns the accepted blockID at // the requested height. 
-func produceGetStatus(s *State, getBlockIDAtHeight func(uint64) (ids.ID, error)) func(snowman.Block) (choices.Status, error) { - return func(blk snowman.Block) (choices.Status, error) { +func produceGetStatus(s *State, getBlockIDAtHeight func(context.Context, uint64) (ids.ID, error)) func(context.Context, snowman.Block) (choices.Status, error) { + return func(ctx context.Context, blk snowman.Block) (choices.Status, error) { internalBlk, ok := blk.(Block) if !ok { return choices.Unknown, fmt.Errorf("expected block to match chain Block interface but found block of type %T", blk) @@ -88,7 +93,7 @@ func produceGetStatus(s *State, getBlockIDAtHeight func(uint64) (ids.ID, error)) return choices.Processing, nil } - acceptedID, err := getBlockIDAtHeight(blkHeight) + acceptedID, err := getBlockIDAtHeight(ctx, blkHeight) switch err { case nil: if acceptedID == blk.ID() { @@ -113,9 +118,13 @@ func (s *State) initialize(config *Config) { s.verifiedBlocks = make(map[ids.ID]*BlockWrapper) s.getBlock = config.GetBlock s.buildBlock = config.BuildBlock + s.buildBlockWithContext = config.BuildBlockWithContext s.unmarshalBlock = config.UnmarshalBlock + s.batchedUnmarshalBlock = config.BatchedUnmarshalBlock if config.GetBlockIDAtHeight == nil { - s.getStatus = func(blk snowman.Block) (choices.Status, error) { return blk.Status(), nil } + s.getStatus = func(_ context.Context, blk snowman.Block) (choices.Status, error) { + return blk.Status(), nil + } } else { s.getStatus = produceGetStatus(s, config.GetBlockIDAtHeight) } @@ -129,10 +138,10 @@ func (s *State) initialize(config *Config) { func NewState(config *Config) *State { c := &State{ verifiedBlocks: make(map[ids.ID]*BlockWrapper), - decidedBlocks: &cache.LRU{Size: config.DecidedCacheSize}, - missingBlocks: &cache.LRU{Size: config.MissingCacheSize}, - unverifiedBlocks: &cache.LRU{Size: config.UnverifiedCacheSize}, - bytesToIDCache: &cache.LRU{Size: config.BytesToIDCacheSize}, + decidedBlocks: &cache.LRU[ids.ID, *BlockWrapper]{Size: 
config.DecidedCacheSize}, + missingBlocks: &cache.LRU[ids.ID, struct{}]{Size: config.MissingCacheSize}, + unverifiedBlocks: &cache.LRU[ids.ID, *BlockWrapper]{Size: config.UnverifiedCacheSize}, + bytesToIDCache: &cache.LRU[string, ids.ID]{Size: config.BytesToIDCacheSize}, } c.initialize(config) return c @@ -142,34 +151,34 @@ func NewMeteredState( registerer prometheus.Registerer, config *Config, ) (*State, error) { - decidedCache, err := metercacher.New( + decidedCache, err := metercacher.New[ids.ID, *BlockWrapper]( "decided_cache", registerer, - &cache.LRU{Size: config.DecidedCacheSize}, + &cache.LRU[ids.ID, *BlockWrapper]{Size: config.DecidedCacheSize}, ) if err != nil { return nil, err } - missingCache, err := metercacher.New( + missingCache, err := metercacher.New[ids.ID, struct{}]( "missing_cache", registerer, - &cache.LRU{Size: config.MissingCacheSize}, + &cache.LRU[ids.ID, struct{}]{Size: config.MissingCacheSize}, ) if err != nil { return nil, err } - unverifiedCache, err := metercacher.New( + unverifiedCache, err := metercacher.New[ids.ID, *BlockWrapper]( "unverified_cache", registerer, - &cache.LRU{Size: config.UnverifiedCacheSize}, + &cache.LRU[ids.ID, *BlockWrapper]{Size: config.UnverifiedCacheSize}, ) if err != nil { return nil, err } - bytesToIDCache, err := metercacher.New( + bytesToIDCache, err := metercacher.New[string, ids.ID]( "bytes_to_id_cache", registerer, - &cache.LRU{Size: config.BytesToIDCacheSize}, + &cache.LRU[string, ids.ID]{Size: config.BytesToIDCacheSize}, ) if err != nil { return nil, err @@ -221,7 +230,7 @@ func (s *State) Flush() { } // GetBlock returns the BlockWrapper as snowman.Block corresponding to [blkID] -func (s *State) GetBlock(blkID ids.ID) (snowman.Block, error) { +func (s *State) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { if blk, ok := s.getCachedBlock(blkID); ok { return blk, nil } @@ -230,7 +239,7 @@ func (s *State) GetBlock(blkID ids.ID) (snowman.Block, error) { return nil, database.ErrNotFound 
} - blk, err := s.getBlock(blkID) + blk, err := s.getBlock(ctx, blkID) // If getBlock returns [database.ErrNotFound], State considers // this a cacheable miss. if err == database.ErrNotFound { @@ -242,7 +251,7 @@ func (s *State) GetBlock(blkID ids.ID) (snowman.Block, error) { // Since this block is not in consensus, addBlockOutsideConsensus // is called to add [blk] to the correct cache. - return s.addBlockOutsideConsensus(blk) + return s.addBlockOutsideConsensus(ctx, blk) } // getCachedBlock checks the caches for [blkID] by priority. Returning @@ -253,19 +262,19 @@ func (s *State) getCachedBlock(blkID ids.ID) (snowman.Block, bool) { } if blk, ok := s.decidedBlocks.Get(blkID); ok { - return blk.(snowman.Block), true + return blk, true } if blk, ok := s.unverifiedBlocks.Get(blkID); ok { - return blk.(snowman.Block), true + return blk, true } return nil, false } // GetBlockInternal returns the internal representation of [blkID] -func (s *State) GetBlockInternal(blkID ids.ID) (snowman.Block, error) { - wrappedBlk, err := s.GetBlock(blkID) +func (s *State) GetBlockInternal(ctx context.Context, blkID ids.ID) (snowman.Block, error) { + wrappedBlk, err := s.GetBlock(ctx, blkID) if err != nil { return nil, err } @@ -273,22 +282,21 @@ func (s *State) GetBlockInternal(blkID ids.ID) (snowman.Block, error) { return wrappedBlk.(*BlockWrapper).Block, nil } -// ParseBlock attempts to parse [b] into an internal Block and adds it to the appropriate -// caching layer if successful. -func (s *State) ParseBlock(b []byte) (snowman.Block, error) { +// ParseBlock attempts to parse [b] into an internal Block and adds it to the +// appropriate caching layer if successful. +func (s *State) ParseBlock(ctx context.Context, b []byte) (snowman.Block, error) { // See if we've cached this block's ID by its byte repr. 
- blkIDIntf, blkIDCached := s.bytesToIDCache.Get(string(b)) + cachedBlkID, blkIDCached := s.bytesToIDCache.Get(string(b)) if blkIDCached { - blkID := blkIDIntf.(ids.ID) // See if we have this block cached - if cachedBlk, ok := s.getCachedBlock(blkID); ok { + if cachedBlk, ok := s.getCachedBlock(cachedBlkID); ok { return cachedBlk, nil } } // We don't have this block cached by its byte repr. // Parse the block from bytes - blk, err := s.unmarshalBlock(b) + blk, err := s.unmarshalBlock(ctx, b) if err != nil { return nil, err } @@ -309,17 +317,118 @@ func (s *State) ParseBlock(b []byte) (snowman.Block, error) { // Since this block is not in consensus, addBlockOutsideConsensus // is called to add [blk] to the correct cache. - return s.addBlockOutsideConsensus(blk) + return s.addBlockOutsideConsensus(ctx, blk) +} + +// BatchedParseBlock implements part of the block.BatchedChainVM interface. In +// addition to performing all the caching as the ParseBlock function, it +// performs at most one call to the underlying VM if [batchedUnmarshalBlock] was +// provided. +func (s *State) BatchedParseBlock(ctx context.Context, blksBytes [][]byte) ([]snowman.Block, error) { + blks := make([]snowman.Block, len(blksBytes)) + idWasCached := make([]bool, len(blksBytes)) + unparsedBlksBytes := make([][]byte, 0, len(blksBytes)) + for i, blkBytes := range blksBytes { + // See if we've cached this block's ID by its byte repr. 
+ blkID, blkIDCached := s.bytesToIDCache.Get(string(blkBytes)) + idWasCached[i] = blkIDCached + if !blkIDCached { + unparsedBlksBytes = append(unparsedBlksBytes, blkBytes) + continue + } + + // See if we have this block cached + if cachedBlk, ok := s.getCachedBlock(blkID); ok { + blks[i] = cachedBlk + } else { + unparsedBlksBytes = append(unparsedBlksBytes, blkBytes) + } + } + + if len(unparsedBlksBytes) == 0 { + return blks, nil + } + + var ( + parsedBlks []snowman.Block + err error + ) + if s.batchedUnmarshalBlock != nil { + parsedBlks, err = s.batchedUnmarshalBlock(ctx, unparsedBlksBytes) + if err != nil { + return nil, err + } + } else { + parsedBlks = make([]snowman.Block, len(unparsedBlksBytes)) + for i, blkBytes := range unparsedBlksBytes { + parsedBlks[i], err = s.unmarshalBlock(ctx, blkBytes) + if err != nil { + return nil, err + } + } + } + + i := 0 + for _, blk := range parsedBlks { + for ; ; i++ { + if blks[i] == nil { + break + } + } + + blkID := blk.ID() + if !idWasCached[i] { + blkBytes := blk.Bytes() + blkBytesStr := string(blkBytes) + s.bytesToIDCache.Put(blkBytesStr, blkID) + + // Check for an existing block, so we can return a unique block + // if processing or simply allow this block to be immediately + // garbage collected if it is already cached. + if cachedBlk, ok := s.getCachedBlock(blkID); ok { + blks[i] = cachedBlk + continue + } + } + + s.missingBlocks.Evict(blkID) + wrappedBlk, err := s.addBlockOutsideConsensus(ctx, blk) + if err != nil { + return nil, err + } + blks[i] = wrappedBlk + } + return blks, nil +} + +// BuildBlockWithContext attempts to build a new internal Block, wraps it, and +// adds it to the appropriate caching layer if successful. +// If [s.buildBlockWithContext] is nil, returns [BuildBlock]. 
+func (s *State) BuildBlockWithContext(ctx context.Context, blockCtx *block.Context) (snowman.Block, error) { + if s.buildBlockWithContext == nil { + return s.BuildBlock(ctx) + } + + blk, err := s.buildBlockWithContext(ctx, blockCtx) + if err != nil { + return nil, err + } + + return s.deduplicate(ctx, blk) } // BuildBlock attempts to build a new internal Block, wraps it, and adds it // to the appropriate caching layer if successful. -func (s *State) BuildBlock() (snowman.Block, error) { - blk, err := s.buildBlock() +func (s *State) BuildBlock(ctx context.Context) (snowman.Block, error) { + blk, err := s.buildBlock(ctx) if err != nil { return nil, err } + return s.deduplicate(ctx, blk) +} + +func (s *State) deduplicate(ctx context.Context, blk snowman.Block) (snowman.Block, error) { blkID := blk.ID() // Defensive: buildBlock should not return a block that has already been verified. // If it does, make sure to return the existing reference to the block. @@ -331,7 +440,7 @@ func (s *State) BuildBlock() (snowman.Block, error) { s.missingBlocks.Evict(blkID) // wrap the returned block and add it to the correct cache - return s.addBlockOutsideConsensus(blk) + return s.addBlockOutsideConsensus(ctx, blk) } // addBlockOutsideConsensus adds [blk] to the correct cache and returns @@ -339,14 +448,14 @@ func (s *State) BuildBlock() (snowman.Block, error) { // assumes [blk] is a known, non-wrapped block that is not currently // in consensus. [blk] could be either decided or a block that has not yet // been verified and added to consensus. 
-func (s *State) addBlockOutsideConsensus(blk snowman.Block) (snowman.Block, error) { +func (s *State) addBlockOutsideConsensus(ctx context.Context, blk snowman.Block) (snowman.Block, error) { wrappedBlk := &BlockWrapper{ Block: blk, state: s, } blkID := blk.ID() - status, err := s.getStatus(blk) + status, err := s.getStatus(ctx, blk) if err != nil { return nil, fmt.Errorf("could not get block status for %s due to %w", blkID, err) } @@ -362,7 +471,7 @@ func (s *State) addBlockOutsideConsensus(blk snowman.Block) (snowman.Block, erro return wrappedBlk, nil } -func (s *State) LastAccepted() (ids.ID, error) { +func (s *State) LastAccepted(context.Context) (ids.ID, error) { return s.lastAcceptedBlock.ID(), nil } @@ -375,3 +484,9 @@ func (s *State) LastAcceptedBlock() *BlockWrapper { func (s *State) LastAcceptedBlockInternal() snowman.Block { return s.LastAcceptedBlock().Block } + +// IsProcessing returns whether [blkID] is processing in consensus +func (s *State) IsProcessing(blkID ids.ID) bool { + _, ok := s.verifiedBlocks[blkID] + return ok +} diff --git a/avalanchego/vms/components/chain/state_test.go b/avalanchego/vms/components/chain/state_test.go index c665d782..448fcb44 100644 --- a/avalanchego/vms/components/chain/state_test.go +++ b/avalanchego/vms/components/chain/state_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package chain import ( "bytes" + "context" "errors" "fmt" "testing" @@ -20,14 +21,23 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" ) -var _ Block = &TestBlock{} +var ( + _ Block = (*TestBlock)(nil) + + errCantBuildBlock = errors.New("can't build new block") + errVerify = errors.New("verify failed") + errAccept = errors.New("accept failed") + errReject = errors.New("reject failed") +) type TestBlock struct { *snowman.TestBlock } // SetStatus sets the status of the Block. 
-func (b *TestBlock) SetStatus(status choices.Status) { b.TestBlock.TestDecidable.StatusV = status } +func (b *TestBlock) SetStatus(status choices.Status) { + b.TestBlock.TestDecidable.StatusV = status +} // NewTestBlock returns a new test block with height, bytes, and ID derived from [i] // and using [parentID] as the parent block ID @@ -60,7 +70,11 @@ func NewTestBlocks(numBlocks uint64) []*TestBlock { return blks } -func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) (func(id ids.ID) (snowman.Block, error), func(b []byte) (snowman.Block, error), func(height uint64) (ids.ID, error)) { +func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) ( + func(ctx context.Context, blkID ids.ID) (snowman.Block, error), + func(ctx context.Context, b []byte) (snowman.Block, error), + func(ctx context.Context, height uint64) (ids.ID, error), +) { blkMap := make(map[ids.ID]*TestBlock) blkByteMap := make(map[byte]*TestBlock) for _, blk := range blks { @@ -72,7 +86,7 @@ func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) (func(id ids.ID) blkByteMap[blkBytes[0]] = blk } - getBlock := func(id ids.ID) (snowman.Block, error) { + getBlock := func(_ context.Context, id ids.ID) (snowman.Block, error) { blk, ok := blkMap[id] if !ok || !blk.Status().Fetched() { return nil, database.ErrNotFound @@ -81,7 +95,7 @@ func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) (func(id ids.ID) return blk, nil } - parseBlk := func(b []byte) (snowman.Block, error) { + parseBlk := func(_ context.Context, b []byte) (snowman.Block, error) { if len(b) != 1 { return nil, fmt.Errorf("expected block bytes to be length 1, but found %d", len(b)) } @@ -97,7 +111,7 @@ func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) (func(id ids.ID) return blk, nil } - getAcceptedBlockIDAtHeight := func(height uint64) (ids.ID, error) { + getAcceptedBlockIDAtHeight := func(_ context.Context, height uint64) (ids.ID, error) { for _, blk := range blks { if blk.Height() != height { 
continue @@ -114,8 +128,8 @@ func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) (func(id ids.ID) return getBlock, parseBlk, getAcceptedBlockIDAtHeight } -func cantBuildBlock() (snowman.Block, error) { - return nil, errors.New("can't build new block") +func cantBuildBlock(context.Context) (snowman.Block, error) { + return nil, errCantBuildBlock } // checkProcessingBlock checks that [blk] is of the correct type and is @@ -125,7 +139,7 @@ func checkProcessingBlock(t *testing.T, s *State, blk snowman.Block) { t.Fatalf("Expected block to be of type (*BlockWrapper)") } - parsedBlk, err := s.ParseBlock(blk.Bytes()) + parsedBlk, err := s.ParseBlock(context.Background(), blk.Bytes()) if err != nil { t.Fatalf("Failed to parse verified block due to %s", err) } @@ -142,7 +156,7 @@ func checkProcessingBlock(t *testing.T, s *State, blk snowman.Block) { t.Fatalf("Expected parsed block to return a uniquified block") } - getBlk, err := s.GetBlock(blk.ID()) + getBlk, err := s.GetBlock(context.Background(), blk.ID()) if err != nil { t.Fatalf("Unexpected error during GetBlock for processing block %s", err) } @@ -158,7 +172,7 @@ func checkDecidedBlock(t *testing.T, s *State, blk snowman.Block, expectedStatus t.Fatalf("Expected block to be of type (*BlockWrapper)") } - parsedBlk, err := s.ParseBlock(blk.Bytes()) + parsedBlk, err := s.ParseBlock(context.Background(), blk.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing decided block %s", err) } @@ -176,7 +190,7 @@ func checkDecidedBlock(t *testing.T, s *State, blk snowman.Block, expectedStatus t.Fatalf("Expected parsed block to have been cached, but retrieved non-unique decided block") } - getBlk, err := s.GetBlock(blk.ID()) + getBlk, err := s.GetBlock(context.Background(), blk.ID()) if err != nil { t.Fatalf("Unexpected error during GetBlock for decided block %s", err) } @@ -241,7 +255,7 @@ func TestState(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - lastAccepted, err := chainState.LastAccepted() + 
lastAccepted, err := chainState.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -249,32 +263,32 @@ func TestState(t *testing.T) { t.Fatal("Expected last accepted block to be the genesis block") } - wrappedGenesisBlk, err := chainState.GetBlock(genesisBlock.ID()) + wrappedGenesisBlk, err := chainState.GetBlock(context.Background(), genesisBlock.ID()) if err != nil { t.Fatalf("Failed to get genesis block due to: %s", err) } // Check that a cache miss on a block is handled correctly - if _, err := chainState.GetBlock(blk1.ID()); err == nil { + if _, err := chainState.GetBlock(context.Background(), blk1.ID()); err == nil { t.Fatal("expected GetBlock to return an error for blk1 before it's been parsed") } - if _, err := chainState.GetBlock(blk1.ID()); err == nil { + if _, err := chainState.GetBlock(context.Background(), blk1.ID()); err == nil { t.Fatal("expected GetBlock to return an error for blk1 before it's been parsed") } // Parse and verify blk1 and blk2 - parsedBlk1, err := chainState.ParseBlock(blk1.Bytes()) + parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) if err != nil { t.Fatal("Failed to parse blk1 due to: %w", err) } - if err := parsedBlk1.Verify(); err != nil { + if err := parsedBlk1.Verify(context.Background()); err != nil { t.Fatal("Parsed blk1 failed verification unexpectedly due to %w", err) } - parsedBlk2, err := chainState.ParseBlock(blk2.Bytes()) + parsedBlk2, err := chainState.ParseBlock(context.Background(), blk2.Bytes()) if err != nil { t.Fatalf("Failed to parse blk2 due to: %s", err) } - if err := parsedBlk2.Verify(); err != nil { + if err := parsedBlk2.Verify(context.Background()); err != nil { t.Fatalf("Parsed blk2 failed verification unexpectedly due to %s", err) } @@ -283,11 +297,11 @@ func TestState(t *testing.T) { t.Fatalf("Expected chain state to have 2 processing blocks, but found: %d", numProcessing) } - parsedBlk3, err := chainState.ParseBlock(blk3.Bytes()) + parsedBlk3, err := 
chainState.ParseBlock(context.Background(), blk3.Bytes()) if err != nil { t.Fatalf("Failed to parse blk3 due to %s", err) } - getBlk3, err := chainState.GetBlock(blk3.ID()) + getBlk3, err := chainState.GetBlock(context.Background(), blk3.ID()) if err != nil { t.Fatalf("Failed to get blk3 due to %s", err) } @@ -299,7 +313,7 @@ func TestState(t *testing.T) { t.Fatalf("Expected State to have 2 processing blocks, but found: %d", numProcessing) } - if err := parsedBlk3.Verify(); err != nil { + if err := parsedBlk3.Verify(context.Background()); err != nil { t.Fatalf("Parsed blk3 failed verification unexpectedly due to %s", err) } // Check that blk3 has been added to processing blocks. @@ -308,13 +322,13 @@ func TestState(t *testing.T) { } // Decide the blocks and ensure they are removed from the processing blocks map - if err := parsedBlk1.Accept(); err != nil { + if err := parsedBlk1.Accept(context.Background()); err != nil { t.Fatal(err) } - if err := parsedBlk2.Accept(); err != nil { + if err := parsedBlk2.Accept(context.Background()); err != nil { t.Fatal(err) } - if err := parsedBlk3.Reject(); err != nil { + if err := parsedBlk3.Reject(context.Background()); err != nil { t.Fatal(err) } @@ -323,7 +337,7 @@ func TestState(t *testing.T) { } // Check that the last accepted block was updated correctly - lastAcceptedID, err := chainState.LastAccepted() + lastAcceptedID, err := chainState.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -349,7 +363,7 @@ func TestBuildBlock(t *testing.T) { blk1 := testBlks[1] getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) - buildBlock := func() (snowman.Block, error) { + buildBlock := func(context.Context) (snowman.Block, error) { // Once the block is built, mark it as processing blk1.SetStatus(choices.Processing) return blk1, nil @@ -367,20 +381,20 @@ func TestBuildBlock(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - builtBlk, err := chainState.BuildBlock() + builtBlk, 
err := chainState.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } require.Len(t, chainState.verifiedBlocks, 0) - if err := builtBlk.Verify(); err != nil { + if err := builtBlk.Verify(context.Background()); err != nil { t.Fatalf("Built block failed verification due to %s", err) } require.Len(t, chainState.verifiedBlocks, 1) checkProcessingBlock(t, chainState, builtBlk) - if err := builtBlk.Accept(); err != nil { + if err := builtBlk.Accept(context.Background()); err != nil { t.Fatalf("Unexpected error while accepting built block %s", err) } @@ -392,11 +406,11 @@ func TestStateDecideBlock(t *testing.T) { genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) badAcceptBlk := testBlks[1] - badAcceptBlk.AcceptV = errors.New("this block should fail on Accept") + badAcceptBlk.AcceptV = errAccept badVerifyBlk := testBlks[2] - badVerifyBlk.VerifyV = errors.New("this block should fail verification") + badVerifyBlk.VerifyV = errVerify badRejectBlk := testBlks[3] - badRejectBlk.RejectV = errors.New("this block should fail on reject") + badRejectBlk.RejectV = errReject getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) chainState := NewState(&Config{ DecidedCacheSize: 2, @@ -411,36 +425,36 @@ func TestStateDecideBlock(t *testing.T) { }) // Parse badVerifyBlk (which should fail verification) - badBlk, err := chainState.ParseBlock(badVerifyBlk.Bytes()) + badBlk, err := chainState.ParseBlock(context.Background(), badVerifyBlk.Bytes()) if err != nil { t.Fatal(err) } - if err := badBlk.Verify(); err == nil { + if err := badBlk.Verify(context.Background()); err == nil { t.Fatal("Bad block should have failed verification") } // Ensure a block that fails verification is not marked as processing require.Len(t, chainState.verifiedBlocks, 0) // Ensure that an error during block acceptance is propagated correctly - badBlk, err = chainState.ParseBlock(badAcceptBlk.Bytes()) + badBlk, err = 
chainState.ParseBlock(context.Background(), badAcceptBlk.Bytes()) if err != nil { t.Fatal(err) } - if err := badBlk.Verify(); err != nil { + if err := badBlk.Verify(context.Background()); err != nil { t.Fatal(err) } require.Len(t, chainState.verifiedBlocks, 1) - if err := badBlk.Accept(); err == nil { + if err := badBlk.Accept(context.Background()); err == nil { t.Fatal("Block should have errored on Accept") } // Ensure that an error during block reject is propagated correctly - badBlk, err = chainState.ParseBlock(badRejectBlk.Bytes()) + badBlk, err = chainState.ParseBlock(context.Background(), badRejectBlk.Bytes()) if err != nil { t.Fatal(err) } - if err := badBlk.Verify(); err != nil { + if err := badBlk.Verify(context.Background()); err != nil { t.Fatal(err) } // Note: an error during block Accept/Reject is fatal, so it is undefined whether @@ -450,7 +464,7 @@ func TestStateDecideBlock(t *testing.T) { t.Fatalf("Expected number of processing blocks to be either 1 or 2, but found %d", numProcessing) } - if err := badBlk.Reject(); err == nil { + if err := badBlk.Reject(context.Background()); err == nil { t.Fatal("Block should have errored on Reject") } } @@ -475,31 +489,31 @@ func TestStateParent(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - parsedBlk2, err := chainState.ParseBlock(blk2.Bytes()) + parsedBlk2, err := chainState.ParseBlock(context.Background(), blk2.Bytes()) if err != nil { t.Fatal(err) } missingBlk1ID := parsedBlk2.Parent() - if _, err := chainState.GetBlock(missingBlk1ID); err == nil { + if _, err := chainState.GetBlock(context.Background(), missingBlk1ID); err == nil { t.Fatalf("Expected parent of blk2 to be not found") } - parsedBlk1, err := chainState.ParseBlock(blk1.Bytes()) + parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) if err != nil { t.Fatal(err) } genesisBlkParentID := parsedBlk1.Parent() - genesisBlkParent, err := chainState.GetBlock(genesisBlkParentID) + genesisBlkParent, err := 
chainState.GetBlock(context.Background(), genesisBlkParentID) if err != nil { t.Fatal(err) } checkAcceptedBlock(t, chainState, genesisBlkParent, true) parentBlk1ID := parsedBlk2.Parent() - parentBlk1, err := chainState.GetBlock(parentBlk1ID) + parentBlk1, err := chainState.GetBlock(context.Background(), parentBlk1ID) if err != nil { t.Fatal(err) } @@ -532,7 +546,7 @@ func TestGetBlockInternal(t *testing.T) { t.Fatalf("Expected LastAcceptedBlockInternal to be blk %s, but found %s", genesisBlock.ID(), genesisBlockInternal.ID()) } - blk, err := chainState.GetBlockInternal(genesisBlock.ID()) + blk, err := chainState.GetBlockInternal(context.Background(), genesisBlock.ID()) if err != nil { t.Fatal(err) } @@ -552,8 +566,8 @@ func TestGetBlockError(t *testing.T) { blk1 := testBlks[1] getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) - wrappedGetBlock := func(id ids.ID) (snowman.Block, error) { - blk, err := getBlock(id) + wrappedGetBlock := func(ctx context.Context, id ids.ID) (snowman.Block, error) { + blk, err := getBlock(ctx, id) if err != nil { return nil, fmt.Errorf("wrapping error to prevent caching miss: %w", err) } @@ -571,7 +585,7 @@ func TestGetBlockError(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - _, err := chainState.GetBlock(blk1.ID()) + _, err := chainState.GetBlock(context.Background(), blk1.ID()) if err == nil { t.Fatal("Expected GetBlock to return an error for unknown block") } @@ -579,7 +593,7 @@ func TestGetBlockError(t *testing.T) { // Update the status to Processing, so that it will be returned by the internal get block // function. 
blk1.SetStatus(choices.Processing) - blk, err := chainState.GetBlock(blk1.ID()) + blk, err := chainState.GetBlock(context.Background(), blk1.ID()) if err != nil { t.Fatal(err) } @@ -607,7 +621,7 @@ func TestParseBlockError(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - blk, err := chainState.ParseBlock([]byte{255}) + blk, err := chainState.ParseBlock(context.Background(), []byte{255}) if err == nil { t.Fatalf("Expected ParseBlock to return an error parsing an invalid block but found block of type %T", blk) } @@ -631,7 +645,7 @@ func TestBuildBlockError(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - blk, err := chainState.BuildBlock() + blk, err := chainState.BuildBlock(context.Background()) if err == nil { t.Fatalf("Expected BuildBlock to return an error but found block of type %T", blk) } @@ -675,9 +689,9 @@ func TestStateBytesToIDCache(t *testing.T) { blk2 := testBlks[2] getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) - buildBlock := func() (snowman.Block, error) { + buildBlock := func(context.Context) (snowman.Block, error) { t.Fatal("shouldn't have been called") - return nil, errors.New("") + return nil, nil } chainState := NewState(&Config{ @@ -693,13 +707,13 @@ func TestStateBytesToIDCache(t *testing.T) { }) // Shouldn't have blk1 ID to start with - _, err := chainState.GetBlock(blk1.ID()) + _, err := chainState.GetBlock(context.Background(), blk1.ID()) require.Error(t, err) _, ok := chainState.bytesToIDCache.Get(string(blk1.Bytes())) require.False(t, ok) // Parse blk1 from bytes - _, err = chainState.ParseBlock(blk1.Bytes()) + _, err = chainState.ParseBlock(context.Background(), blk1.Bytes()) require.NoError(t, err) // blk1 should be in cache now @@ -707,7 +721,7 @@ func TestStateBytesToIDCache(t *testing.T) { require.True(t, ok) // Parse another block - _, err = chainState.ParseBlock(blk2.Bytes()) + _, err = chainState.ParseBlock(context.Background(), blk2.Bytes()) require.NoError(t, err) // 
Should have bumped blk1 from cache @@ -760,7 +774,7 @@ func TestSetLastAcceptedBlock(t *testing.T) { BuildBlock: cantBuildBlock, GetBlockIDAtHeight: getCanonicalBlockID, }) - lastAcceptedID, err := chainState.LastAccepted() + lastAcceptedID, err := chainState.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -772,36 +786,36 @@ func TestSetLastAcceptedBlock(t *testing.T) { if err := chainState.SetLastAcceptedBlock(postSetBlk1); err != nil { t.Fatal(err) } - lastAcceptedID, err = chainState.LastAccepted() + lastAcceptedID, err = chainState.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } if lastAcceptedID != postSetBlk1.ID() { t.Fatal("Expected last accepted block to be postSetBlk1") } - if lastAcceptedID = chainState.LastAcceptedBlock().ID(); lastAcceptedID != postSetBlk1.ID() { + if lastAcceptedID := chainState.LastAcceptedBlock().ID(); lastAcceptedID != postSetBlk1.ID() { t.Fatal("Expected last accepted block to be postSetBlk1") } // ensure further blocks can be accepted - parsedpostSetBlk2, err := chainState.ParseBlock(postSetBlk2.Bytes()) + parsedpostSetBlk2, err := chainState.ParseBlock(context.Background(), postSetBlk2.Bytes()) if err != nil { t.Fatal("Failed to parse postSetBlk2 due to: %w", err) } - if err := parsedpostSetBlk2.Verify(); err != nil { + if err := parsedpostSetBlk2.Verify(context.Background()); err != nil { t.Fatal("Parsed postSetBlk2 failed verification unexpectedly due to %w", err) } - if err := parsedpostSetBlk2.Accept(); err != nil { + if err := parsedpostSetBlk2.Accept(context.Background()); err != nil { t.Fatal(err) } - lastAcceptedID, err = chainState.LastAccepted() + lastAcceptedID, err = chainState.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } if lastAcceptedID != postSetBlk2.ID() { t.Fatal("Expected last accepted block to be postSetBlk2") } - if lastAcceptedID = chainState.LastAcceptedBlock().ID(); lastAcceptedID != postSetBlk2.ID() { + if lastAcceptedID := 
chainState.LastAcceptedBlock().ID(); lastAcceptedID != postSetBlk2.ID() { t.Fatal("Expected last accepted block to be postSetBlk2") } @@ -816,7 +830,7 @@ func TestSetLastAcceptedBlockWithProcessingBlocksErrors(t *testing.T) { resetBlk := testBlks[4] getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) - buildBlock := func() (snowman.Block, error) { + buildBlock := func(context.Context) (snowman.Block, error) { // Once the block is built, mark it as processing blk1.SetStatus(choices.Processing) return blk1, nil @@ -834,13 +848,13 @@ func TestSetLastAcceptedBlockWithProcessingBlocksErrors(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - builtBlk, err := chainState.BuildBlock() + builtBlk, err := chainState.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } require.Len(t, chainState.verifiedBlocks, 0) - if err := builtBlk.Verify(); err != nil { + if err := builtBlk.Verify(context.Background()); err != nil { t.Fatalf("Built block failed verification due to %s", err) } require.Len(t, chainState.verifiedBlocks, 1) @@ -871,7 +885,7 @@ func TestStateParseTransitivelyAcceptedBlock(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - parsedBlk1, err := chainState.ParseBlock(blk1.Bytes()) + parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) if err != nil { t.Fatalf("Failed to parse blk1 due to: %s", err) } @@ -880,3 +894,44 @@ func TestStateParseTransitivelyAcceptedBlock(t *testing.T) { t.Fatalf("Parsed blk1 reported incorrect height. 
Expected %d got %d", blk1.Height(), parsedBlk1.Height()) } } + +func TestIsProcessing(t *testing.T) { + testBlks := NewTestBlocks(2) + genesisBlock := testBlks[0] + genesisBlock.SetStatus(choices.Accepted) + blk1 := testBlks[1] + + getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) + chainState := NewState(&Config{ + DecidedCacheSize: 2, + MissingCacheSize: 2, + UnverifiedCacheSize: 2, + BytesToIDCacheSize: 2, + LastAcceptedBlock: genesisBlock, + GetBlock: getBlock, + UnmarshalBlock: parseBlock, + BuildBlock: cantBuildBlock, + GetBlockIDAtHeight: getCanonicalBlockID, + }) + + // Parse blk1 + parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) + require.NoError(t, err) + + // Check that it is not processing in consensus + require.False(t, chainState.IsProcessing(parsedBlk1.ID())) + + // Verify blk1 + err = parsedBlk1.Verify(context.Background()) + require.NoError(t, err) + + // Check that it is processing in consensus + require.True(t, chainState.IsProcessing(parsedBlk1.ID())) + + // Accept blk1 + err = parsedBlk1.Accept(context.Background()) + require.NoError(t, err) + + // Check that it is no longer processing in consensus + require.False(t, chainState.IsProcessing(parsedBlk1.ID())) +} diff --git a/avalanchego/vms/components/index/index.go b/avalanchego/vms/components/index/index.go index eceeddeb..0597f836 100644 --- a/avalanchego/vms/components/index/index.go +++ b/avalanchego/vms/components/index/index.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package index @@ -17,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" ) @@ -27,8 +28,8 @@ var ( errIndexingRequiredFromGenesis = errors.New("running would create incomplete index. Allow incomplete indices or re-sync from genesis with indexing enabled") errCausesIncompleteIndex = errors.New("running would create incomplete index. Allow incomplete indices or enable indexing") - _ AddressTxsIndexer = &indexer{} - _ AddressTxsIndexer = &noIndexer{} + _ AddressTxsIndexer = (*indexer)(nil) + _ AddressTxsIndexer = (*noIndexer)(nil) ) // AddressTxsIndexer maintains information about which transactions changed @@ -105,7 +106,7 @@ func (i *indexer) Accept(txID ids.ID, inputUTXOs []*avax.UTXO, outputUTXOs []*av // Address -> AssetID --> exists if the address's balance // of the asset is changed by processing tx [txID] // we do this step separately to simplify the write process later - balanceChanges := make(map[string]map[ids.ID]struct{}) + balanceChanges := map[string]set.Set[ids.ID]{} for _, utxo := range utxos { out, ok := utxo.Out.(avax.Addressable) if !ok { @@ -120,10 +121,10 @@ func (i *indexer) Accept(txID ids.ID, inputUTXOs []*avax.UTXO, outputUTXOs []*av addressChanges, exists := balanceChanges[address] if !exists { - addressChanges = make(map[ids.ID]struct{}) + addressChanges = set.Set[ids.ID]{} balanceChanges[address] = addressChanges } - addressChanges[utxo.AssetID()] = struct{}{} + addressChanges.Add(utxo.AssetID()) } } @@ -251,10 +252,10 @@ func NewNoIndexer(db database.Database, allowIncomplete bool) (AddressTxsIndexer return &noIndexer{}, checkIndexStatus(db, false, allowIncomplete) } -func (i *noIndexer) Accept(ids.ID, []*avax.UTXO, []*avax.UTXO) error { +func (*noIndexer) Accept(ids.ID, []*avax.UTXO, 
[]*avax.UTXO) error { return nil } -func (i *noIndexer) Read([]byte, ids.ID, uint64, uint64) ([]ids.ID, error) { +func (*noIndexer) Read([]byte, ids.ID, uint64, uint64) ([]ids.ID, error) { return nil, nil } diff --git a/avalanchego/vms/components/index/metrics.go b/avalanchego/vms/components/index/metrics.go index 4eb23264..181f9528 100644 --- a/avalanchego/vms/components/index/metrics.go +++ b/avalanchego/vms/components/index/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package index diff --git a/avalanchego/vms/components/keystore/codec.go b/avalanchego/vms/components/keystore/codec.go index fcebcaf8..6e547c9e 100644 --- a/avalanchego/vms/components/keystore/codec.go +++ b/avalanchego/vms/components/keystore/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore diff --git a/avalanchego/vms/components/keystore/user.go b/avalanchego/vms/components/keystore/user.go index 9224cf89..17c95c94 100644 --- a/avalanchego/vms/components/keystore/user.go +++ b/avalanchego/vms/components/keystore/user.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package keystore @@ -11,7 +11,8 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/encdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -25,7 +26,7 @@ var ( errMaxAddresses = fmt.Errorf("keystore user has reached its limit of %d addresses", maxKeystoreAddresses) - _ User = &user{} + _ User = (*user)(nil) ) type User interface { @@ -35,14 +36,14 @@ type User interface { GetAddresses() ([]ids.ShortID, error) // PutKeys persists [privKeys] - PutKeys(privKeys ...*crypto.PrivateKeySECP256K1R) error + PutKeys(privKeys ...*secp256k1.PrivateKey) error // GetKey returns the private key that controls the given address - GetKey(address ids.ShortID) (*crypto.PrivateKeySECP256K1R, error) + GetKey(address ids.ShortID) (*secp256k1.PrivateKey, error) } type user struct { - factory crypto.FactorySECP256K1R + factory secp256k1.Factory db *encdb.Database } @@ -76,8 +77,8 @@ func (u *user) GetAddresses() ([]ids.ShortID, error) { return addresses, err } -func (u *user) PutKeys(privKeys ...*crypto.PrivateKeySECP256K1R) error { - toStore := make([]*crypto.PrivateKeySECP256K1R, 0, len(privKeys)) +func (u *user) PutKeys(privKeys ...*secp256k1.PrivateKey) error { + toStore := make([]*secp256k1.PrivateKey, 0, len(privKeys)) for _, privKey := range privKeys { address := privKey.PublicKey().Address() // address the privKey controls hasAddress, err := u.db.Has(address.Bytes()) @@ -119,26 +120,20 @@ func (u *user) PutKeys(privKeys ...*crypto.PrivateKeySECP256K1R) error { return u.db.Put(addressesKey, addressBytes) } -func (u *user) GetKey(address ids.ShortID) (*crypto.PrivateKeySECP256K1R, error) { +func (u *user) GetKey(address ids.ShortID) (*secp256k1.PrivateKey, error) { bytes, err := u.db.Get(address.Bytes()) if err != nil { return nil, err } - 
skIntf, err := u.factory.ToPrivateKey(bytes) - if err != nil { - return nil, err - } - sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) - if !ok { - return nil, fmt.Errorf("expected private key to be type *crypto.PrivateKeySECP256K1R but is type %T", skIntf) - } - return sk, nil + return u.factory.ToPrivateKey(bytes) } -func (u *user) Close() error { return u.db.Close() } +func (u *user) Close() error { + return u.db.Close() +} // Create and store a new key that will be controlled by this user. -func NewKey(u User) (*crypto.PrivateKeySECP256K1R, error) { +func NewKey(u User) (*secp256k1.PrivateKey, error) { keys, err := NewKeys(u, 1) if err != nil { return nil, err @@ -147,19 +142,15 @@ func NewKey(u User) (*crypto.PrivateKeySECP256K1R, error) { } // Create and store [numKeys] new keys that will be controlled by this user. -func NewKeys(u User, numKeys int) ([]*crypto.PrivateKeySECP256K1R, error) { - factory := crypto.FactorySECP256K1R{} +func NewKeys(u User, numKeys int) ([]*secp256k1.PrivateKey, error) { + factory := secp256k1.Factory{} - keys := make([]*crypto.PrivateKeySECP256K1R, numKeys) + keys := make([]*secp256k1.PrivateKey, numKeys) for i := range keys { - skIntf, err := factory.NewPrivateKey() + sk, err := factory.NewPrivateKey() if err != nil { return nil, err } - sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) - if !ok { - return nil, fmt.Errorf("expected private key to be type *crypto.PrivateKeySECP256K1R but is type %T", skIntf) - } keys[i] = sk } return keys, u.PutKeys(keys...) @@ -170,7 +161,7 @@ func NewKeys(u User, numKeys int) ([]*crypto.PrivateKeySECP256K1R, error) { // is missing, it will be ignored. // If [addresses] is empty, then it will create a keychain using every address // in the provided [user]. 
-func GetKeychain(u User, addresses ids.ShortSet) (*secp256k1fx.Keychain, error) { +func GetKeychain(u User, addresses set.Set[ids.ShortID]) (*secp256k1fx.Keychain, error) { addrsList := addresses.List() if len(addrsList) == 0 { var err error diff --git a/avalanchego/vms/components/keystore/user_test.go b/avalanchego/vms/components/keystore/user_test.go index 11f64ea8..38c40b78 100644 --- a/avalanchego/vms/components/keystore/user_test.go +++ b/avalanchego/vms/components/keystore/user_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package keystore @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/database/encdb" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) // Test user password, must meet minimum complexity/length requirements @@ -37,11 +37,11 @@ func TestUserClosedDB(t *testing.T) { _, err = GetKeychain(u, nil) require.Error(err, "closed db should have caused an error") - factory := crypto.FactorySECP256K1R{} + factory := secp256k1.Factory{} sk, err := factory.NewPrivateKey() require.NoError(err) - err = u.PutKeys(sk.(*crypto.PrivateKeySECP256K1R)) + err = u.PutKeys(sk) require.Error(err, "closed db should have caused an error") } @@ -57,15 +57,15 @@ func TestUser(t *testing.T) { require.NoError(err) require.Empty(addresses, "new user shouldn't have address") - factory := crypto.FactorySECP256K1R{} + factory := secp256k1.Factory{} sk, err := factory.NewPrivateKey() require.NoError(err) - err = u.PutKeys(sk.(*crypto.PrivateKeySECP256K1R)) + err = u.PutKeys(sk) require.NoError(err) // Putting the same key multiple times should be a noop - err = u.PutKeys(sk.(*crypto.PrivateKeySECP256K1R)) + err = u.PutKeys(sk) require.NoError(err) addr := 
sk.PublicKey().Address() diff --git a/avalanchego/vms/platformvm/message/codec.go b/avalanchego/vms/components/message/codec.go similarity index 77% rename from avalanchego/vms/platformvm/message/codec.go rename to avalanchego/vms/components/message/codec.go index f0c69d6e..d41de9b2 100644 --- a/avalanchego/vms/platformvm/message/codec.go +++ b/avalanchego/vms/components/message/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -11,9 +11,9 @@ import ( ) const ( - codecVersion uint16 = 0 - maxMessageSize = 512 * units.KiB - maxSliceLen = maxMessageSize + codecVersion = 0 + maxMessageSize = 512 * units.KiB + maxSliceLen = maxMessageSize ) // Codec does serialization and deserialization diff --git a/avalanchego/vms/platformvm/message/handler.go b/avalanchego/vms/components/message/handler.go similarity index 89% rename from avalanchego/vms/platformvm/message/handler.go rename to avalanchego/vms/components/message/handler.go index 925e4682..afe12351 100644 --- a/avalanchego/vms/platformvm/message/handler.go +++ b/avalanchego/vms/components/message/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message diff --git a/avalanchego/vms/platformvm/message/handler_test.go b/avalanchego/vms/components/message/handler_test.go similarity index 85% rename from avalanchego/vms/platformvm/message/handler_test.go rename to avalanchego/vms/components/message/handler_test.go index 1407d2ce..cd6c5173 100644 --- a/avalanchego/vms/platformvm/message/handler_test.go +++ b/avalanchego/vms/components/message/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package message @@ -33,12 +33,10 @@ func TestHandleTx(t *testing.T) { } func TestNoopHandler(t *testing.T) { - require := require.New(t) - handler := NoopHandler{ Log: logging.NoLog{}, } err := handler.HandleTx(ids.EmptyNodeID, 0, nil) - require.NoError(err) + require.NoError(t, err) } diff --git a/avalanchego/vms/platformvm/message/message.go b/avalanchego/vms/components/message/message.go similarity index 64% rename from avalanchego/vms/platformvm/message/message.go rename to avalanchego/vms/components/message/message.go index 121a61d5..02e05401 100644 --- a/avalanchego/vms/platformvm/message/message.go +++ b/avalanchego/vms/components/message/message.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package message @@ -10,9 +10,9 @@ import ( ) var ( - _ Message = &Tx{} + _ Message = (*Tx)(nil) - errUnexpectedCodecVersion = errors.New("unexpected codec version") + ErrUnexpectedCodecVersion = errors.New("unexpected codec version") ) type Message interface { @@ -30,17 +30,12 @@ type Message interface { type message []byte -func (m *message) initialize(bytes []byte) { *m = bytes } -func (m *message) Bytes() []byte { return *m } - -type Tx struct { - message - - Tx []byte `serialize:"true"` +func (m *message) initialize(bytes []byte) { + *m = bytes } -func (msg *Tx) Handle(handler Handler, nodeID ids.NodeID, requestID uint32) error { - return handler.HandleTx(nodeID, requestID, msg) +func (m *message) Bytes() []byte { + return *m } func Parse(bytes []byte) (Message, error) { @@ -50,7 +45,7 @@ func Parse(bytes []byte) (Message, error) { return nil, err } if version != codecVersion { - return nil, errUnexpectedCodecVersion + return nil, ErrUnexpectedCodecVersion } msg.initialize(bytes) return msg, nil diff --git 
a/avalanchego/vms/components/message/message_test.go b/avalanchego/vms/components/message/message_test.go new file mode 100644 index 00000000..dbaf1543 --- /dev/null +++ b/avalanchego/vms/components/message/message_test.go @@ -0,0 +1,19 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/units" +) + +func TestParseGibberish(t *testing.T) { + randomBytes := utils.RandomBytes(256 * units.KiB) + _, err := Parse(randomBytes) + require.Error(t, err) +} diff --git a/avalanchego/vms/components/message/tx.go b/avalanchego/vms/components/message/tx.go new file mode 100644 index 00000000..c930d3f9 --- /dev/null +++ b/avalanchego/vms/components/message/tx.go @@ -0,0 +1,20 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package message + +import ( + "github.com/ava-labs/avalanchego/ids" +) + +var _ Message = (*Tx)(nil) + +type Tx struct { + message + + Tx []byte `serialize:"true"` +} + +func (msg *Tx) Handle(handler Handler, nodeID ids.NodeID, requestID uint32) error { + return handler.HandleTx(nodeID, requestID, msg) +} diff --git a/avalanchego/vms/platformvm/message/message_test.go b/avalanchego/vms/components/message/tx_test.go similarity index 73% rename from avalanchego/vms/platformvm/message/message_test.go rename to avalanchego/vms/components/message/tx_test.go index 1ffe71b4..58a06e1b 100644 --- a/avalanchego/vms/platformvm/message/message_test.go +++ b/avalanchego/vms/components/message/tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package message @@ -32,11 +32,3 @@ func TestTx(t *testing.T) { require.Equal(tx, parsedMsg.Tx) } - -func TestParseGibberish(t *testing.T) { - require := require.New(t) - - randomBytes := utils.RandomBytes(256 * units.KiB) - _, err := Parse(randomBytes) - require.Error(err) -} diff --git a/avalanchego/vms/components/state/builtin.go b/avalanchego/vms/components/state/builtin.go deleted file mode 100644 index 53bc80b5..00000000 --- a/avalanchego/vms/components/state/builtin.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "errors" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -var ( - errNotIDType = errors.New("expected ids.ID but got unexpected type") - errNotStatusType = errors.New("expected choices.Status but got unexpected type") - errNotTimeType = errors.New("expected time.Time but got unexpected type") -) - -func marshalID(idIntf interface{}) ([]byte, error) { - if id, ok := idIntf.(ids.ID); ok { - return id[:], nil - } - return nil, errNotIDType -} - -func unmarshalID(bytes []byte) (interface{}, error) { - return ids.ToID(bytes) -} - -func marshalStatus(statusIntf interface{}) ([]byte, error) { - if status, ok := statusIntf.(choices.Status); ok { - return status.Bytes(), nil - } - return nil, errNotStatusType -} - -func unmarshalStatus(bytes []byte) (interface{}, error) { - p := wrappers.Packer{Bytes: bytes} - status := choices.Status(p.UnpackInt()) - if err := status.Valid(); err != nil { - return nil, err - } - return status, p.Err -} - -func marshalTime(timeIntf interface{}) ([]byte, error) { - if t, ok := timeIntf.(time.Time); ok { - p := wrappers.Packer{MaxSize: wrappers.LongLen} - p.PackLong(uint64(t.Unix())) - return p.Bytes, p.Err - } - return nil, errNotTimeType -} - -func unmarshalTime(bytes []byte) (interface{}, 
error) { - p := wrappers.Packer{Bytes: bytes} - unixTime := p.UnpackLong() - return time.Unix(int64(unixTime), 0), nil -} diff --git a/avalanchego/vms/components/state/state.go b/avalanchego/vms/components/state/state.go deleted file mode 100644 index cb927a8b..00000000 --- a/avalanchego/vms/components/state/state.go +++ /dev/null @@ -1,243 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "errors" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/cache" - "github.com/ava-labs/avalanchego/database" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -const cacheSize = 1000 - -var ( - errWrongType = errors.New("value in the database was the wrong type") - - _ State = &state{} -) - -// State is a key-value store where every value is associated with a "type ID". -// Every different type of value must have its own type ID. -// -// For example, if you're storing blocks, accounts and addresses, each of those types -// must have their own type ID. -// -// Each type ID is associated with a function that specifies how to unmarshal bytes -// to a struct/value of a given type. -// -// State has built-in support for putting and getting choices.Status and ids.ID -// To put/get any other type, you must first register that type using RegisterType -type State interface { - // In [db], add a key-value pair. - // [value] will be converted to bytes by calling Bytes() on it. - // [typeID] must have already been registered using RegisterType. - // If [value] is nil, the value associated with [key] and [typeID] is deleted (if it exists). 
- Put(db database.Database, typeID uint64, key ids.ID, value interface{}) error - - // From [db], get the value of type [typeID] whose key is [key] - // Returns database.ErrNotFound if the entry doesn't exist - Get(db database.Database, typeID uint64, key ids.ID) (interface{}, error) - - // Return whether [key] exists in [db] for type [typeID] - Has(db database.Database, typeID uint64, key ids.ID) (bool, error) - - // PutStatus associates [key] with [status] in [db] - PutStatus(db database.Database, key ids.ID, status choices.Status) error - - // GetStatus gets the status associated with [key] in [db] - GetStatus(db database.Database, key ids.ID) choices.Status - - // PutID associates [key] with [ID] in [db] - PutID(db database.Database, key ids.ID, ID ids.ID) error - - // GetID gets the ID associated with [key] in [db] - GetID(db database.Database, key ids.ID) (ids.ID, error) - - // PutTime associates [key] with [time] in [db] - PutTime(db database.Database, key ids.ID, time time.Time) error - - // GetTime gets the time associated with [key] in [db] - GetTime(db database.Database, key ids.ID) (time.Time, error) - - // Register a new type. - // When values that were Put with [typeID] are retrieved from the database, - // they will be unmarshaled from bytes using [unmarshal]. 
- // Returns an error if there is already a type with ID [typeID] - RegisterType(typeID uint64, - marshal func(interface{}) ([]byte, error), - unmarshal func([]byte) (interface{}, error), - ) error -} - -type state struct { - // Keys: Type ID - // Values: Function that unmarshals values - // that were Put with that type ID - unmarshallers map[uint64]func([]byte) (interface{}, error) - - // Keys: Type ID - // Values: Function that marshals values - // that were Put with that type ID - marshallers map[uint64]func(interface{}) ([]byte, error) - - // Keys: Type ID - // Values: Cache that stores uniqueIDs for values that were put with that type ID - // (Saves us from having to re-compute uniqueIDs) - uniqueIDCaches map[uint64]*cache.LRU -} - -func (s *state) RegisterType( - typeID uint64, - marshal func(interface{}) ([]byte, error), - unmarshal func([]byte) (interface{}, error), -) error { - if _, exists := s.unmarshallers[typeID]; exists { - return fmt.Errorf("there is already a type with ID %d", typeID) - } - s.marshallers[typeID] = marshal - s.unmarshallers[typeID] = unmarshal - return nil -} - -func (s *state) Put(db database.Database, typeID uint64, key ids.ID, value interface{}) error { - marshaller, exists := s.marshallers[typeID] - if !exists { - return fmt.Errorf("typeID %d has not been registered", typeID) - } - // Get the unique ID of thie key/typeID pair - uID := s.uniqueID(key, typeID) - if value == nil { - return db.Delete(uID[:]) - } - // Put the byte repr. 
of the value in the database - valueBytes, err := marshaller(value) - if err != nil { - return err - } - return db.Put(uID[:], valueBytes) -} - -func (s *state) Has(db database.Database, typeID uint64, key ids.ID) (bool, error) { - key = s.uniqueID(key, typeID) - return db.Has(key[:]) -} - -func (s *state) Get(db database.Database, typeID uint64, key ids.ID) (interface{}, error) { - unmarshal, exists := s.unmarshallers[typeID] - if !exists { - return nil, fmt.Errorf("typeID %d has not been registered", typeID) - } - - // The unique ID of this key/typeID pair - uID := s.uniqueID(key, typeID) - - // Get the value from the database - valueBytes, err := db.Get(uID[:]) - if err != nil { - return nil, err - } - - // Unmarshal the value from bytes and return it - return unmarshal(valueBytes) -} - -// PutStatus associates [key] with [status] in [db] -func (s *state) PutStatus(db database.Database, key ids.ID, status choices.Status) error { - return s.Put(db, StatusTypeID, key, status) -} - -// GetStatus gets the status associated with [key] in [db] -// Return choices.Processing if can't get the status from database -func (s *state) GetStatus(db database.Database, key ids.ID) choices.Status { - statusInterface, err := s.Get(db, StatusTypeID, key) - if err != nil { - return choices.Processing - } - - status, ok := statusInterface.(choices.Status) - if !ok || status.Valid() != nil { - return choices.Processing - } - - return status -} - -// PutID associates [key] with [ID] in [db] -func (s *state) PutID(db database.Database, key ids.ID, id ids.ID) error { - return s.Put(db, IDTypeID, key, id) -} - -// GetID gets the ID associated with [key] in [db] -func (s *state) GetID(db database.Database, key ids.ID) (ids.ID, error) { - IDInterface, err := s.Get(db, IDTypeID, key) - if err != nil { - return ids.ID{}, err - } - - if ID, ok := IDInterface.(ids.ID); ok { - return ID, nil - } - - return ids.ID{}, errWrongType -} - -// PutTime associates [key] with [time] in [db] -func (s 
*state) PutTime(db database.Database, key ids.ID, time time.Time) error { - return s.Put(db, TimeTypeID, key, time) -} - -// GetTime gets the time associated with [key] in [db] -func (s *state) GetTime(db database.Database, key ids.ID) (time.Time, error) { - timeInterface, err := s.Get(db, TimeTypeID, key) - if err != nil { - return time.Time{}, err - } - - if time, ok := timeInterface.(time.Time); ok { - return time, nil - } - - return time.Time{}, errWrongType -} - -// Prefix [ID] with [typeID] to prevent key collisions in the database -func (s *state) uniqueID(id ids.ID, typeID uint64) ids.ID { - uIDCache, cacheExists := s.uniqueIDCaches[typeID] - if cacheExists { - if uID, uIDExists := uIDCache.Get(id); uIDExists { // Get the uniqueID associated with [typeID] and [ID] - return uID.(ids.ID) - } - } else { - s.uniqueIDCaches[typeID] = &cache.LRU{Size: cacheSize} - } - uID := id.Prefix(typeID) - s.uniqueIDCaches[typeID].Put(id, uID) - return uID -} - -// NewState returns a new State -func NewState() (State, error) { - state := &state{ - marshallers: make(map[uint64]func(interface{}) ([]byte, error)), - unmarshallers: make(map[uint64]func([]byte) (interface{}, error)), - uniqueIDCaches: make(map[uint64]*cache.LRU), - } - - // Register ID, Status and time.Time so they can be put/get without client code - // having to register them - - errs := wrappers.Errs{} - errs.Add( - state.RegisterType(IDTypeID, marshalID, unmarshalID), - state.RegisterType(StatusTypeID, marshalStatus, unmarshalStatus), - state.RegisterType(TimeTypeID, marshalTime, unmarshalTime), - ) - return state, errs.Err -} diff --git a/avalanchego/vms/components/state/state_test.go b/avalanchego/vms/components/state/state_test.go deleted file mode 100644 index 1230d1ae..00000000 --- a/avalanchego/vms/components/state/state_test.go +++ /dev/null @@ -1,509 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package state - -import ( - "testing" - - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/hashing" - "github.com/ava-labs/avalanchego/utils/wrappers" -) - -// toy example of a block, just used for testing -type block struct { - parentID ids.ID - value uint64 -} - -const blockSize = 40 // hashing.HashLen (32) + length of uin64 (8) - -func (b *block) Bytes() ([]byte, error) { - p := wrappers.Packer{Bytes: make([]byte, blockSize)} - p.PackFixedBytes(b.parentID[:]) - p.PackLong(b.value) - return p.Bytes, p.Err -} - -func marshalBlock(blk interface{}) ([]byte, error) { - return blk.(*block).Bytes() -} - -func unmarshalBlock(bytes []byte) (interface{}, error) { - p := wrappers.Packer{Bytes: bytes} - - parentID, err := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) - if err != nil { - return nil, err - } - - value := p.UnpackLong() - - if p.Errored() { - return nil, p.Err - } - - return &block{ - parentID: parentID, - value: value, - }, nil -} - -// toy example of an account, just used for testing -type account struct { - id ids.ID - balance uint64 - nonce uint64 -} - -const accountSize = 32 + 8 + 8 - -func (acc *account) Bytes() ([]byte, error) { - p := wrappers.Packer{Bytes: make([]byte, accountSize)} - p.PackFixedBytes(acc.id[:]) - p.PackLong(acc.balance) - p.PackLong(acc.nonce) - return p.Bytes, p.Err -} - -func marshalAccount(acct interface{}) ([]byte, error) { - return acct.(*account).Bytes() -} - -func unmarshalAccount(bytes []byte) (interface{}, error) { - p := wrappers.Packer{Bytes: bytes} - - id, err := ids.ToID(p.UnpackFixedBytes(hashing.HashLen)) - if err != nil { - return nil, err - } - - balance := p.UnpackLong() - nonce := p.UnpackLong() - - if p.Errored() { - return nil, p.Err - } - - return &account{ - id: id, - balance: balance, - nonce: nonce, - }, nil -} - -// Ensure there is an error if someone tries to do a put without registering the type -func TestPutUnregistered(t 
*testing.T) { - // make a state and a database - state, err := NewState() - if err != nil { - t.Fatal(err) - } - db := memdb.New() - - // make an account - acc1 := &account{ - id: ids.ID{1, 2, 3}, - balance: 1, - nonce: 2, - } - - if err := state.Put(db, 1, ids.ID{1, 2, 3}, acc1); err == nil { - t.Fatal("should have failed because type ID is unregistred") - } - - // register type - if err := state.RegisterType(1, marshalAccount, unmarshalAccount); err != nil { - t.Fatal(err) - } - - // should not error now - if err := state.Put(db, 1, ids.ID{1, 2, 3}, acc1); err != nil { - t.Fatal(err) - } - - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure there is an error if someone tries to get the value associated with a -// key that doesn't exist -func TestKeyDoesNotExist(t *testing.T) { - // make a state and a database - state, err := NewState() - if err != nil { - t.Fatal(err) - } - db := memdb.New() - - if _, err := state.Get(db, 1, ids.ID{1, 2, 3}); err == nil { - t.Fatal("should have failed because no such key or typeID exists") - } - - // register type with ID 1 - typeID := uint64(1) - if err := state.RegisterType(typeID, marshalAccount, unmarshalAccount); err != nil { - t.Fatal(err) - } - - // Should still fail because there is no value with this key - if _, err := state.Get(db, typeID, ids.ID{1, 2, 3}); err == nil { - t.Fatal("should have failed because no such key exists") - } - - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure there is an error if someone tries to register a type ID that already exists -func TestRegisterExistingTypeID(t *testing.T) { - // make a state and a database - state, err := NewState() - if err != nil { - t.Fatal(err) - } - db := memdb.New() - - // register type with ID 1 - typeID := uint64(1) - if err := state.RegisterType(typeID, marshalBlock, unmarshalBlock); err != nil { - t.Fatal(err) - } - - // try to register the same type ID - if err := state.RegisterType(typeID, marshalAccount, 
unmarshalAccount); err == nil { - t.Fatal("Should have errored because typeID already registered") - } - - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure there is an error when someone tries to get a value using the wrong typeID -func TestGetWrongTypeID(t *testing.T) { - // make a state and a database - state, err := NewState() - if err != nil { - t.Fatal(err) - } - db := memdb.New() - - // register type with ID 1 - blockTypeID := uint64(1) - if err := state.RegisterType(blockTypeID, marshalBlock, unmarshalBlock); err != nil { - t.Fatal(err) - } - - // make and put a block - block := &block{ - parentID: ids.ID{4, 5, 6}, - value: 5, - } - blockID := ids.ID{1, 2, 3} - if err = state.Put(db, blockTypeID, blockID, block); err != nil { - t.Fatal(err) - } - - // try to get it using the right key but wrong typeID - if _, err := state.Get(db, 2, blockID); err == nil { - t.Fatal("should have failed because type ID is wrong") - } - - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure that there is no error when someone puts two values with the same -// key but different type IDs -func TestSameKeyDifferentTypeID(t *testing.T) { - // make a state and a database - state, err := NewState() - if err != nil { - t.Fatal(err) - } - db := memdb.New() - - // register block type with ID 1 - blockTypeID := uint64(1) - if err := state.RegisterType(blockTypeID, marshalBlock, unmarshalBlock); err != nil { - t.Fatal(err) - } - - // register account type with ID 2 - accountTypeID := uint64(2) - if err := state.RegisterType(accountTypeID, marshalAccount, unmarshalAccount); err != nil { - t.Fatal(err) - } - - sharedKey := ids.ID{1, 2, 3} - - // make an account - acc := &account{ - id: ids.ID{1, 2, 3}, - balance: 1, - nonce: 2, - } - - // put it using sharedKey - if err = state.Put(db, accountTypeID, sharedKey, acc); err != nil { - t.Fatal(err) - } - - // make a block - block1 := &block{ - parentID: ids.ID{4, 5, 6}, - value: 5, - } - - // put it using 
sharedKey - if err = state.Put(db, blockTypeID, sharedKey, block1); err != nil { - t.Fatal(err) - } - - // ensure the account is still there and correct - accInterface, err := state.Get(db, accountTypeID, sharedKey) - if err != nil { - t.Fatal(err) - } - accFromState, ok := accInterface.(*account) - switch { - case !ok: - t.Fatal("should have been type *account") - case accFromState.balance != acc.balance: - t.Fatal("balances should be same") - case accFromState.id != acc.id: - t.Fatal("ids should be the same") - case accFromState.nonce != acc.nonce: - t.Fatal("nonces should be same") - } - - // ensure the block is still there and correct - blockInterface, err := state.Get(db, blockTypeID, sharedKey) - if err != nil { - t.Fatal(err) - } - - blockFromState, ok := blockInterface.(*block) - switch { - case !ok: - t.Fatal("should have been type *block") - case blockFromState.parentID != block1.parentID: - t.Fatal("parentIDs should be same") - case blockFromState.value != block1.value: - t.Fatal("values should be same") - } - - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Ensure that overwriting a value works -func TestOverwrite(t *testing.T) { - // make a state and a database - state, err := NewState() - if err != nil { - t.Fatal(err) - } - db := memdb.New() - - // register block type with ID 1 - blockTypeID := uint64(1) - if err := state.RegisterType(blockTypeID, marshalBlock, unmarshalBlock); err != nil { - t.Fatal(err) - } - - // make a block - block1 := &block{ - parentID: ids.ID{4, 5, 6}, - value: 5, - } - - key := ids.ID{1, 2, 3} - - // put it - if err = state.Put(db, blockTypeID, key, block1); err != nil { - t.Fatal(err) - } - - // make another block - block2 := &block{ - parentID: ids.ID{100, 200, 1}, - value: 6, - } - - // put it with the same key - if err = state.Put(db, blockTypeID, key, block2); err != nil { - t.Fatal(err) - } - - // ensure the first value was over-written - // get it and make sure it's right - blockInterface, err := 
state.Get(db, blockTypeID, key) - if err != nil { - t.Fatal(err) - } - - blockFromState, ok := blockInterface.(*block) - switch { - case !ok: - t.Fatal("should have been type *block") - case blockFromState.parentID != block2.parentID: - t.Fatal("parentIDs should be same") - case blockFromState.value != block2.value: - t.Fatal("values should be same") - } - - if err := db.Close(); err != nil { - t.Fatal(err) - } -} - -// Put 4 values, 2 of one type and 2 of another -func TestHappyPath(t *testing.T) { - // make a state and a database - state, err := NewState() - if err != nil { - t.Fatal(err) - } - db := memdb.New() - - accountTypeID := uint64(1) - - // register type account - if err := state.RegisterType(accountTypeID, marshalAccount, unmarshalAccount); err != nil { - t.Fatal(err) - } - - // make an account - acc1 := &account{ - id: ids.ID{1, 2, 3}, - balance: 1, - nonce: 2, - } - - // put it - if err = state.Put(db, accountTypeID, acc1.id, acc1); err != nil { - t.Fatal(err) - } - - // get it and make sure it's right - acc1Interface, err := state.Get(db, accountTypeID, acc1.id) - if err != nil { - t.Fatal(err) - } - - acc1FromState, ok := acc1Interface.(*account) - switch { - case !ok: - t.Fatal("should have been type *account") - case acc1FromState.balance != acc1.balance: - t.Fatal("balances should be same") - case acc1FromState.id != acc1.id: - t.Fatal("ids should be the same") - case acc1FromState.nonce != acc1.nonce: - t.Fatal("nonces should be same") - } - - // make another account - acc2 := &account{ - id: ids.ID{9, 2, 1}, - balance: 7, - nonce: 44, - } - - // put it - if err = state.Put(db, accountTypeID, acc2.id, acc2); err != nil { - t.Fatal(err) - } - - // get it and make sure it's right - acc2Interface, err := state.Get(db, accountTypeID, acc2.id) - if err != nil { - t.Fatal(err) - } - - acc2FromState, ok := acc2Interface.(*account) - switch { - case !ok: - t.Fatal("should have been type *account") - case acc2FromState.balance != acc2.balance: - 
t.Fatal("balances should be same") - case acc2FromState.id != acc2.id: - t.Fatal("ids should be the same") - case acc2FromState.nonce != acc2.nonce: - t.Fatal("nonces should be same") - } - - // register type block - blockTypeID := uint64(2) - if err := state.RegisterType(blockTypeID, marshalBlock, unmarshalBlock); err != nil { - t.Fatal(err) - } - - // make a block - block1ID := ids.ID{9, 9, 9} - block1 := &block{ - parentID: ids.ID{4, 5, 6}, - value: 5, - } - - // put it - if err = state.Put(db, blockTypeID, block1ID, block1); err != nil { - t.Fatal(err) - } - - // get it and make sure it's right - block1Interface, err := state.Get(db, blockTypeID, block1ID) - if err != nil { - t.Fatal(err) - } - - block1FromState, ok := block1Interface.(*block) - switch { - case !ok: - t.Fatal("should have been type *block") - case block1FromState.parentID != block1.parentID: - t.Fatal("parentIDs should be same") - case block1FromState.value != block1.value: - t.Fatal("values should be same") - } - - // make another block - block2ID := ids.ID{1, 2, 3, 4, 5, 6, 7, 8, 9} - block2 := &block{ - parentID: ids.ID{10, 1, 2}, - value: 67, - } - - // put it - if err = state.Put(db, blockTypeID, block2ID, block2); err != nil { - t.Fatal(err) - } - - // get it and make sure it's right - block2Interface, err := state.Get(db, blockTypeID, block2ID) - if err != nil { - t.Fatal(err) - } - - block2FromState, ok := block2Interface.(*block) - switch { - case !ok: - t.Fatal("should have been type *block") - case block2FromState.parentID != block2.parentID: - t.Fatal("parentIDs should be same") - case block2FromState.value != block2.value: - t.Fatal("values should be same") - } - - if err := db.Close(); err != nil { - t.Fatal(err) - } -} diff --git a/avalanchego/vms/components/state/types.go b/avalanchego/vms/components/state/types.go deleted file mode 100644 index 330b1cf8..00000000 --- a/avalanchego/vms/components/state/types.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2019-2022, Ava 
Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "math" -) - -const ( - // IDTypeID is the type ID for ids.ID - IDTypeID uint64 = math.MaxUint64 - iota - // StatusTypeID is the type ID for choices.Status - StatusTypeID - // TimeTypeID is the type ID for time - TimeTypeID - // BlockTypeID is the type ID of blocks in state - BlockTypeID -) diff --git a/avalanchego/vms/components/verify/mock_verifiable.go b/avalanchego/vms/components/verify/mock_verifiable.go index 9390c21a..531b6ea3 100644 --- a/avalanchego/vms/components/verify/mock_verifiable.go +++ b/avalanchego/vms/components/verify/mock_verifiable.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/components/verify (interfaces: Verifiable) diff --git a/avalanchego/vms/components/verify/subnet.go b/avalanchego/vms/components/verify/subnet.go index ac0c386e..a1030164 100644 --- a/avalanchego/vms/components/verify/subnet.go +++ b/avalanchego/vms/components/verify/subnet.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package verify import ( + "context" "errors" "fmt" @@ -12,24 +13,24 @@ import ( ) var ( - errSameChainID = errors.New("same chainID") - errMismatchedSubnetIDs = errors.New("mismatched subnetIDs") + ErrSameChainID = errors.New("same chainID") + ErrMismatchedSubnetIDs = errors.New("mismatched subnetIDs") ) // SameSubnet verifies that the provided [ctx] was provided to a chain in the // same subnet as [peerChainID], but not the same chain. If this verification // fails, a non-nil error will be returned. 
-func SameSubnet(ctx *snow.Context, peerChainID ids.ID) error { - if peerChainID == ctx.ChainID { - return errSameChainID +func SameSubnet(ctx context.Context, chainCtx *snow.Context, peerChainID ids.ID) error { + if peerChainID == chainCtx.ChainID { + return ErrSameChainID } - subnetID, err := ctx.SNLookup.SubnetID(peerChainID) + subnetID, err := chainCtx.ValidatorState.GetSubnetID(ctx, peerChainID) if err != nil { return fmt.Errorf("failed to get subnet of %q: %w", peerChainID, err) } - if ctx.SubnetID != subnetID { - return fmt.Errorf("%w; expected %q got %q", errMismatchedSubnetIDs, ctx.SubnetID, subnetID) + if chainCtx.SubnetID != subnetID { + return fmt.Errorf("%w; expected %q got %q", ErrMismatchedSubnetIDs, chainCtx.SubnetID, subnetID) } return nil } diff --git a/avalanchego/vms/components/verify/subnet_test.go b/avalanchego/vms/components/verify/subnet_test.go index 0bfaafe7..a159d226 100644 --- a/avalanchego/vms/components/verify/subnet_test.go +++ b/avalanchego/vms/components/verify/subnet_test.go @@ -1,96 +1,100 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package verify import ( + "context" "errors" "testing" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/validators" ) var errMissing = errors.New("missing") -type snLookup struct { - chainsToSubnet map[ids.ID]ids.ID -} - -func (sn *snLookup) SubnetID(chainID ids.ID) (ids.ID, error) { - subnetID, ok := sn.chainsToSubnet[chainID] - if !ok { - return ids.ID{}, errMissing - } - return subnetID, nil -} - func TestSameSubnet(t *testing.T) { - subnet0 := ids.GenerateTestID() - subnet1 := ids.GenerateTestID() - chain0 := ids.GenerateTestID() - chain1 := ids.GenerateTestID() + subnetID0 := ids.GenerateTestID() + subnetID1 := ids.GenerateTestID() + chainID0 := ids.GenerateTestID() + chainID1 := ids.GenerateTestID() tests := []struct { name string - ctx *snow.Context + ctxF func(*gomock.Controller) *snow.Context chainID ids.ID result error }{ { name: "same chain", - ctx: &snow.Context{ - SubnetID: subnet0, - ChainID: chain0, - SNLookup: &snLookup{}, + ctxF: func(ctrl *gomock.Controller) *snow.Context { + state := validators.NewMockState(ctrl) + return &snow.Context{ + SubnetID: subnetID0, + ChainID: chainID0, + ValidatorState: state, + } }, - chainID: chain0, - result: errSameChainID, + chainID: chainID0, + result: ErrSameChainID, }, { name: "unknown chain", - ctx: &snow.Context{ - SubnetID: subnet0, - ChainID: chain0, - SNLookup: &snLookup{}, + ctxF: func(ctrl *gomock.Controller) *snow.Context { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), chainID1).Return(subnetID1, errMissing) + return &snow.Context{ + SubnetID: subnetID0, + ChainID: chainID0, + ValidatorState: state, + } }, - chainID: chain1, + chainID: chainID1, result: errMissing, }, { name: "wrong subnet", - ctx: &snow.Context{ - SubnetID: subnet0, - ChainID: chain0, - SNLookup: &snLookup{ - chainsToSubnet: map[ids.ID]ids.ID{ - chain1: 
subnet1, - }, - }, + ctxF: func(ctrl *gomock.Controller) *snow.Context { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), chainID1).Return(subnetID1, nil) + return &snow.Context{ + SubnetID: subnetID0, + ChainID: chainID0, + ValidatorState: state, + } }, - chainID: chain1, - result: errMismatchedSubnetIDs, + chainID: chainID1, + result: ErrMismatchedSubnetIDs, }, { name: "same subnet", - ctx: &snow.Context{ - SubnetID: subnet0, - ChainID: chain0, - SNLookup: &snLookup{ - chainsToSubnet: map[ids.ID]ids.ID{ - chain1: subnet0, - }, - }, + ctxF: func(ctrl *gomock.Controller) *snow.Context { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), chainID1).Return(subnetID0, nil) + return &snow.Context{ + SubnetID: subnetID0, + ChainID: chainID0, + ValidatorState: state, + } }, - chainID: chain1, + chainID: chainID1, result: nil, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - result := SameSubnet(test.ctx, test.chainID) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + ctx := test.ctxF(ctrl) + + result := SameSubnet(context.Background(), ctx, test.chainID) require.ErrorIs(t, result, test.result) }) } diff --git a/avalanchego/vms/components/verify/verification.go b/avalanchego/vms/components/verify/verification.go index 0ccef28c..b615d70b 100644 --- a/avalanchego/vms/components/verify/verification.go +++ b/avalanchego/vms/components/verify/verification.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package verify diff --git a/avalanchego/vms/components/verify/verification_test.go b/avalanchego/vms/components/verify/verification_test.go index c3fac1b4..fe854e16 100644 --- a/avalanchego/vms/components/verify/verification_test.go +++ b/avalanchego/vms/components/verify/verification_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package verify @@ -12,7 +12,9 @@ var errTest = errors.New("non-nil error") type testVerifiable struct{ err error } -func (v testVerifiable) Verify() error { return v.err } +func (v testVerifiable) Verify() error { + return v.err +} func TestAllNil(t *testing.T) { err := All( diff --git a/avalanchego/vms/manager.go b/avalanchego/vms/manager.go index d11dd2cb..d1041d3a 100644 --- a/avalanchego/vms/manager.go +++ b/avalanchego/vms/manager.go @@ -1,38 +1,43 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package vms import ( + "context" "errors" "fmt" + "sync" + + "golang.org/x/exp/maps" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" ) var ( ErrNotFound = errors.New("not found") - _ Manager = &manager{} + _ Manager = (*manager)(nil) ) // A Factory creates new instances of a VM type Factory interface { - New(*snow.Context) (interface{}, error) + New(logging.Logger) (interface{}, error) } // Manager tracks a collection of VM factories, their aliases, and their // versions. // It has the following functionality: -// 1) Register a VM factory. To register a VM is to associate its ID with a -// VMFactory which, when New() is called upon it, creates a new instance of -// that VM. -// 2) Get a VM factory. 
Given the ID of a VM that has been registered, return -// the factory that the ID is associated with. -// 3) Manage the aliases of VMs -// 3) Manage the versions of VMs +// +// 1. Register a VM factory. To register a VM is to associate its ID with a +// VMFactory which, when New() is called upon it, creates a new instance of +// that VM. +// 2. Get a VM factory. Given the ID of a VM that has been registered, return +// the factory that the ID is associated with. +// 3. Manage the aliases of VMs +// 4. Manage the versions of VMs type Manager interface { ids.Aliaser @@ -42,7 +47,7 @@ type Manager interface { // Map [vmID] to [factory]. [factory] creates new instances of the vm whose // ID is [vmID] - RegisterFactory(vmID ids.ID, factory Factory) error + RegisterFactory(ctx context.Context, vmID ids.ID, factory Factory) error // ListFactories returns all the IDs that have had factories registered. ListFactories() ([]ids.ID, error) @@ -57,6 +62,10 @@ type manager struct { // alias of the VM. That is, [vmID].String() is an alias for [vmID]. 
ids.Aliaser + log logging.Logger + + lock sync.RWMutex + // Key: A VM's ID // Value: A factory that creates new instances of that VM factories map[ids.ID]Factory @@ -67,22 +76,29 @@ type manager struct { } // NewManager returns an instance of a VM manager -func NewManager() Manager { +func NewManager(log logging.Logger, aliaser ids.Aliaser) Manager { return &manager{ - Aliaser: ids.NewAliaser(), + Aliaser: aliaser, + log: log, factories: make(map[ids.ID]Factory), versions: make(map[ids.ID]string), } } func (m *manager) GetFactory(vmID ids.ID) (Factory, error) { + m.lock.RLock() + defer m.lock.RUnlock() + if factory, ok := m.factories[vmID]; ok { return factory, nil } return nil, fmt.Errorf("%q was %w", vmID, ErrNotFound) } -func (m *manager) RegisterFactory(vmID ids.ID, factory Factory) error { +func (m *manager) RegisterFactory(ctx context.Context, vmID ids.ID, factory Factory) error { + m.lock.Lock() + defer m.lock.Unlock() + if _, exists := m.factories[vmID]; exists { return fmt.Errorf("%q was already registered as a vm", vmID) } @@ -92,7 +108,7 @@ func (m *manager) RegisterFactory(vmID ids.ID, factory Factory) error { m.factories[vmID] = factory - vm, err := factory.New(nil) + vm, err := factory.New(m.log) if err != nil { return err } @@ -102,26 +118,28 @@ func (m *manager) RegisterFactory(vmID ids.ID, factory Factory) error { return nil } - version, err := commonVM.Version() + version, err := commonVM.Version(ctx) if err != nil { // Drop the shutdown error to surface the original error - _ = commonVM.Shutdown() + _ = commonVM.Shutdown(ctx) return err } m.versions[vmID] = version - return commonVM.Shutdown() + return commonVM.Shutdown(ctx) } func (m *manager) ListFactories() ([]ids.ID, error) { - vmIDs := make([]ids.ID, 0, len(m.factories)) - for vmID := range m.factories { - vmIDs = append(vmIDs, vmID) - } - return vmIDs, nil + m.lock.RLock() + defer m.lock.RUnlock() + + return maps.Keys(m.factories), nil } func (m *manager) Versions() (map[string]string, 
error) { + m.lock.RLock() + defer m.lock.RUnlock() + versions := make(map[string]string, len(m.versions)) for vmID, version := range m.versions { alias, err := m.PrimaryAlias(vmID) diff --git a/avalanchego/vms/metervm/batched_vm.go b/avalanchego/vms/metervm/batched_vm.go index ddcc386c..dad17637 100644 --- a/avalanchego/vms/metervm/batched_vm.go +++ b/avalanchego/vms/metervm/batched_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm import ( + "context" "time" "github.com/ava-labs/avalanchego/ids" @@ -12,17 +13,19 @@ import ( ) func (vm *blockVM) GetAncestors( + ctx context.Context, blkID ids.ID, maxBlocksNum int, maxBlocksSize int, maxBlocksRetrivalTime time.Duration, ) ([][]byte, error) { - if vm.bVM == nil { + if vm.batchedVM == nil { return nil, block.ErrRemoteVMNotImplemented } start := vm.clock.Time() - ancestors, err := vm.bVM.GetAncestors( + ancestors, err := vm.batchedVM.GetAncestors( + ctx, blkID, maxBlocksNum, maxBlocksSize, @@ -33,13 +36,13 @@ func (vm *blockVM) GetAncestors( return ancestors, err } -func (vm *blockVM) BatchedParseBlock(blks [][]byte) ([]snowman.Block, error) { - if vm.bVM == nil { +func (vm *blockVM) BatchedParseBlock(ctx context.Context, blks [][]byte) ([]snowman.Block, error) { + if vm.batchedVM == nil { return nil, block.ErrRemoteVMNotImplemented } start := vm.clock.Time() - blocks, err := vm.bVM.BatchedParseBlock(blks) + blocks, err := vm.batchedVM.BatchedParseBlock(ctx, blks) end := vm.clock.Time() vm.blockMetrics.batchedParseBlock.Observe(float64(end.Sub(start))) diff --git a/avalanchego/vms/metervm/block.go b/avalanchego/vms/metervm/block.go index ade13ca8..17ffffd5 100644 --- a/avalanchego/vms/metervm/block.go +++ b/avalanchego/vms/metervm/block.go @@ -1,13 +1,23 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm -import "github.com/ava-labs/avalanchego/snow/consensus/snowman" +import ( + "context" + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) var ( - _ snowman.Block = &meterBlock{} - _ snowman.OracleBlock = &meterBlock{} + _ snowman.Block = (*meterBlock)(nil) + _ snowman.OracleBlock = (*meterBlock)(nil) + _ block.WithVerifyContext = (*meterBlock)(nil) + + errExpectedBlockWithVerifyContext = errors.New("expected block.WithVerifyContext") ) type meterBlock struct { @@ -16,9 +26,9 @@ type meterBlock struct { vm *blockVM } -func (mb *meterBlock) Verify() error { +func (mb *meterBlock) Verify(ctx context.Context) error { start := mb.vm.clock.Time() - err := mb.Block.Verify() + err := mb.Block.Verify(ctx) end := mb.vm.clock.Time() duration := float64(end.Sub(start)) if err != nil { @@ -29,31 +39,31 @@ func (mb *meterBlock) Verify() error { return err } -func (mb *meterBlock) Accept() error { +func (mb *meterBlock) Accept(ctx context.Context) error { start := mb.vm.clock.Time() - err := mb.Block.Accept() + err := mb.Block.Accept(ctx) end := mb.vm.clock.Time() duration := float64(end.Sub(start)) mb.vm.blockMetrics.accept.Observe(duration) return err } -func (mb *meterBlock) Reject() error { +func (mb *meterBlock) Reject(ctx context.Context) error { start := mb.vm.clock.Time() - err := mb.Block.Reject() + err := mb.Block.Reject(ctx) end := mb.vm.clock.Time() duration := float64(end.Sub(start)) mb.vm.blockMetrics.reject.Observe(duration) return err } -func (mb *meterBlock) Options() ([2]snowman.Block, error) { +func (mb *meterBlock) Options(ctx context.Context) ([2]snowman.Block, error) { oracleBlock, ok := mb.Block.(snowman.OracleBlock) if !ok { return [2]snowman.Block{}, snowman.ErrNotOracle } - blks, err := oracleBlock.Options() + blks, err := 
oracleBlock.Options(ctx) if err != nil { return [2]snowman.Block{}, err } @@ -68,3 +78,35 @@ func (mb *meterBlock) Options() ([2]snowman.Block, error) { }, }, nil } + +func (mb *meterBlock) ShouldVerifyWithContext(ctx context.Context) (bool, error) { + blkWithCtx, ok := mb.Block.(block.WithVerifyContext) + if !ok { + return false, nil + } + + start := mb.vm.clock.Time() + shouldVerify, err := blkWithCtx.ShouldVerifyWithContext(ctx) + end := mb.vm.clock.Time() + duration := float64(end.Sub(start)) + mb.vm.blockMetrics.shouldVerifyWithContext.Observe(duration) + return shouldVerify, err +} + +func (mb *meterBlock) VerifyWithContext(ctx context.Context, blockCtx *block.Context) error { + blkWithCtx, ok := mb.Block.(block.WithVerifyContext) + if !ok { + return fmt.Errorf("%w but got %T", errExpectedBlockWithVerifyContext, mb.Block) + } + + start := mb.vm.clock.Time() + err := blkWithCtx.VerifyWithContext(ctx, blockCtx) + end := mb.vm.clock.Time() + duration := float64(end.Sub(start)) + if err != nil { + mb.vm.blockMetrics.verifyWithContextErr.Observe(duration) + } else { + mb.vm.verifyWithContext.Observe(duration) + } + return err +} diff --git a/avalanchego/vms/metervm/block_metrics.go b/avalanchego/vms/metervm/block_metrics.go index 514a5ea6..10233b72 100644 --- a/avalanchego/vms/metervm/block_metrics.go +++ b/avalanchego/vms/metervm/block_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metervm @@ -23,6 +23,13 @@ type blockMetrics struct { verifyErr, accept, reject, + // Block verification with context metrics + shouldVerifyWithContext, + verifyWithContext, + verifyWithContextErr, + // Block building with context metrics + buildBlockWithContext, + buildBlockWithContextErr, // Batched metrics getAncestors, batchedParseBlock, @@ -40,6 +47,7 @@ type blockMetrics struct { } func (m *blockMetrics) Initialize( + supportsBlockBuildingWithContext bool, supportsBatchedFetching bool, supportsHeightIndexing bool, supportsStateSync bool, @@ -59,7 +67,14 @@ func (m *blockMetrics) Initialize( m.verifyErr = newAverager(namespace, "verify_err", reg, &errs) m.accept = newAverager(namespace, "accept", reg, &errs) m.reject = newAverager(namespace, "reject", reg, &errs) + m.shouldVerifyWithContext = newAverager(namespace, "should_verify_with_context", reg, &errs) + m.verifyWithContext = newAverager(namespace, "verify_with_context", reg, &errs) + m.verifyWithContextErr = newAverager(namespace, "verify_with_context_err", reg, &errs) + if supportsBlockBuildingWithContext { + m.buildBlockWithContext = newAverager(namespace, "build_block_with_context", reg, &errs) + m.buildBlockWithContextErr = newAverager(namespace, "build_block_with_context_err", reg, &errs) + } if supportsBatchedFetching { m.getAncestors = newAverager(namespace, "get_ancestors", reg, &errs) m.batchedParseBlock = newAverager(namespace, "batched_parse_block", reg, &errs) diff --git a/avalanchego/vms/metervm/block_vm.go b/avalanchego/vms/metervm/block_vm.go index 102e5437..62f2b4f8 100644 --- a/avalanchego/vms/metervm/block_vm.go +++ b/avalanchego/vms/metervm/block_vm.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metervm import ( + "context" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/api/metrics" @@ -17,36 +19,41 @@ import ( ) var ( - _ block.ChainVM = &blockVM{} - _ block.BatchedChainVM = &blockVM{} - _ block.HeightIndexedChainVM = &blockVM{} - _ block.StateSyncableVM = &blockVM{} + _ block.ChainVM = (*blockVM)(nil) + _ block.BuildBlockWithContextChainVM = (*blockVM)(nil) + _ block.BatchedChainVM = (*blockVM)(nil) + _ block.HeightIndexedChainVM = (*blockVM)(nil) + _ block.StateSyncableVM = (*blockVM)(nil) ) type blockVM struct { block.ChainVM - bVM block.BatchedChainVM - hVM block.HeightIndexedChainVM - ssVM block.StateSyncableVM + buildBlockVM block.BuildBlockWithContextChainVM + batchedVM block.BatchedChainVM + hVM block.HeightIndexedChainVM + ssVM block.StateSyncableVM blockMetrics clock mockable.Clock } func NewBlockVM(vm block.ChainVM) block.ChainVM { - bVM, _ := vm.(block.BatchedChainVM) + buildBlockVM, _ := vm.(block.BuildBlockWithContextChainVM) + batchedVM, _ := vm.(block.BatchedChainVM) hVM, _ := vm.(block.HeightIndexedChainVM) ssVM, _ := vm.(block.StateSyncableVM) return &blockVM{ - ChainVM: vm, - bVM: bVM, - hVM: hVM, - ssVM: ssVM, + ChainVM: vm, + buildBlockVM: buildBlockVM, + batchedVM: batchedVM, + hVM: hVM, + ssVM: ssVM, } } func (vm *blockVM) Initialize( - ctx *snow.Context, + ctx context.Context, + chainCtx *snow.Context, db manager.Manager, genesisBytes, upgradeBytes, @@ -57,7 +64,8 @@ func (vm *blockVM) Initialize( ) error { registerer := prometheus.NewRegistry() err := vm.blockMetrics.Initialize( - vm.bVM != nil, + vm.buildBlockVM != nil, + vm.batchedVM != nil, vm.hVM != nil, vm.ssVM != nil, "", @@ -75,17 +83,17 @@ func (vm *blockVM) Initialize( if err := multiGatherer.Register("", optionalGatherer); err != nil { return err } - if err := ctx.Metrics.Register(multiGatherer); err != nil { + if err := chainCtx.Metrics.Register(multiGatherer); err != nil { return err } - ctx.Metrics = optionalGatherer + 
chainCtx.Metrics = optionalGatherer - return vm.ChainVM.Initialize(ctx, db, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) + return vm.ChainVM.Initialize(ctx, chainCtx, db, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) } -func (vm *blockVM) BuildBlock() (snowman.Block, error) { +func (vm *blockVM) BuildBlock(ctx context.Context) (snowman.Block, error) { start := vm.clock.Time() - blk, err := vm.ChainVM.BuildBlock() + blk, err := vm.ChainVM.BuildBlock(ctx) end := vm.clock.Time() duration := float64(end.Sub(start)) if err != nil { @@ -99,9 +107,9 @@ func (vm *blockVM) BuildBlock() (snowman.Block, error) { }, nil } -func (vm *blockVM) ParseBlock(b []byte) (snowman.Block, error) { +func (vm *blockVM) ParseBlock(ctx context.Context, b []byte) (snowman.Block, error) { start := vm.clock.Time() - blk, err := vm.ChainVM.ParseBlock(b) + blk, err := vm.ChainVM.ParseBlock(ctx, b) end := vm.clock.Time() duration := float64(end.Sub(start)) if err != nil { @@ -115,9 +123,9 @@ func (vm *blockVM) ParseBlock(b []byte) (snowman.Block, error) { }, nil } -func (vm *blockVM) GetBlock(id ids.ID) (snowman.Block, error) { +func (vm *blockVM) GetBlock(ctx context.Context, id ids.ID) (snowman.Block, error) { start := vm.clock.Time() - blk, err := vm.ChainVM.GetBlock(id) + blk, err := vm.ChainVM.GetBlock(ctx, id) end := vm.clock.Time() duration := float64(end.Sub(start)) if err != nil { @@ -131,17 +139,17 @@ func (vm *blockVM) GetBlock(id ids.ID) (snowman.Block, error) { }, nil } -func (vm *blockVM) SetPreference(id ids.ID) error { +func (vm *blockVM) SetPreference(ctx context.Context, id ids.ID) error { start := vm.clock.Time() - err := vm.ChainVM.SetPreference(id) + err := vm.ChainVM.SetPreference(ctx, id) end := vm.clock.Time() vm.blockMetrics.setPreference.Observe(float64(end.Sub(start))) return err } -func (vm *blockVM) LastAccepted() (ids.ID, error) { +func (vm *blockVM) LastAccepted(ctx context.Context) (ids.ID, error) { start := vm.clock.Time() 
- lastAcceptedID, err := vm.ChainVM.LastAccepted() + lastAcceptedID, err := vm.ChainVM.LastAccepted(ctx) end := vm.clock.Time() vm.blockMetrics.lastAccepted.Observe(float64(end.Sub(start))) return lastAcceptedID, err diff --git a/avalanchego/vms/metervm/build_block_with_context_vm.go b/avalanchego/vms/metervm/build_block_with_context_vm.go new file mode 100644 index 00000000..141d68e0 --- /dev/null +++ b/avalanchego/vms/metervm/build_block_with_context_vm.go @@ -0,0 +1,31 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package metervm + +import ( + "context" + + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +func (vm *blockVM) BuildBlockWithContext(ctx context.Context, blockCtx *block.Context) (snowman.Block, error) { + if vm.buildBlockVM == nil { + return vm.BuildBlock(ctx) + } + + start := vm.clock.Time() + blk, err := vm.buildBlockVM.BuildBlockWithContext(ctx, blockCtx) + end := vm.clock.Time() + duration := float64(end.Sub(start)) + if err != nil { + vm.blockMetrics.buildBlockWithContextErr.Observe(duration) + return nil, err + } + vm.blockMetrics.buildBlockWithContext.Observe(duration) + return &meterBlock{ + Block: blk, + vm: vm, + }, nil +} diff --git a/avalanchego/vms/metervm/height_indexed_vm.go b/avalanchego/vms/metervm/height_indexed_vm.go index 2e96e023..f13c337c 100644 --- a/avalanchego/vms/metervm/height_indexed_vm.go +++ b/avalanchego/vms/metervm/height_indexed_vm.go @@ -1,32 +1,34 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metervm import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) -func (vm *blockVM) VerifyHeightIndex() error { +func (vm *blockVM) VerifyHeightIndex(ctx context.Context) error { if vm.hVM == nil { return block.ErrHeightIndexedVMNotImplemented } start := vm.clock.Time() - err := vm.hVM.VerifyHeightIndex() + err := vm.hVM.VerifyHeightIndex(ctx) end := vm.clock.Time() vm.blockMetrics.verifyHeightIndex.Observe(float64(end.Sub(start))) return err } -func (vm *blockVM) GetBlockIDAtHeight(height uint64) (ids.ID, error) { +func (vm *blockVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { if vm.hVM == nil { return ids.Empty, block.ErrHeightIndexedVMNotImplemented } start := vm.clock.Time() - blockID, err := vm.hVM.GetBlockIDAtHeight(height) + blockID, err := vm.hVM.GetBlockIDAtHeight(ctx, height) end := vm.clock.Time() vm.blockMetrics.getBlockIDAtHeight.Observe(float64(end.Sub(start))) return blockID, err diff --git a/avalanchego/vms/metervm/metrics.go b/avalanchego/vms/metervm/metrics.go index b5db8d75..eb2c2b40 100644 --- a/avalanchego/vms/metervm/metrics.go +++ b/avalanchego/vms/metervm/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/avalanchego/vms/metervm/state_syncable_vm.go b/avalanchego/vms/metervm/state_syncable_vm.go index 20522d1f..bcb27d68 100644 --- a/avalanchego/vms/metervm/state_syncable_vm.go +++ b/avalanchego/vms/metervm/state_syncable_vm.go @@ -1,55 +1,57 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metervm import ( + "context" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) -func (vm *blockVM) StateSyncEnabled() (bool, error) { +func (vm *blockVM) StateSyncEnabled(ctx context.Context) (bool, error) { if vm.ssVM == nil { return false, nil } start := vm.clock.Time() - enabled, err := vm.ssVM.StateSyncEnabled() + enabled, err := vm.ssVM.StateSyncEnabled(ctx) end := vm.clock.Time() vm.blockMetrics.stateSyncEnabled.Observe(float64(end.Sub(start))) return enabled, err } -func (vm *blockVM) GetOngoingSyncStateSummary() (block.StateSummary, error) { +func (vm *blockVM) GetOngoingSyncStateSummary(ctx context.Context) (block.StateSummary, error) { if vm.ssVM == nil { return nil, block.ErrStateSyncableVMNotImplemented } start := vm.clock.Time() - summary, err := vm.ssVM.GetOngoingSyncStateSummary() + summary, err := vm.ssVM.GetOngoingSyncStateSummary(ctx) end := vm.clock.Time() vm.blockMetrics.getOngoingSyncStateSummary.Observe(float64(end.Sub(start))) return summary, err } -func (vm *blockVM) GetLastStateSummary() (block.StateSummary, error) { +func (vm *blockVM) GetLastStateSummary(ctx context.Context) (block.StateSummary, error) { if vm.ssVM == nil { return nil, block.ErrStateSyncableVMNotImplemented } start := vm.clock.Time() - summary, err := vm.ssVM.GetLastStateSummary() + summary, err := vm.ssVM.GetLastStateSummary(ctx) end := vm.clock.Time() vm.blockMetrics.getLastStateSummary.Observe(float64(end.Sub(start))) return summary, err } -func (vm *blockVM) ParseStateSummary(summaryBytes []byte) (block.StateSummary, error) { +func (vm *blockVM) ParseStateSummary(ctx context.Context, summaryBytes []byte) (block.StateSummary, error) { if vm.ssVM == nil { return nil, block.ErrStateSyncableVMNotImplemented } start := vm.clock.Time() - summary, err := vm.ssVM.ParseStateSummary(summaryBytes) + summary, err := vm.ssVM.ParseStateSummary(ctx, summaryBytes) end := vm.clock.Time() duration := float64(end.Sub(start)) if err != nil { @@ -60,13 +62,13 @@ func 
(vm *blockVM) ParseStateSummary(summaryBytes []byte) (block.StateSummary, e return summary, nil } -func (vm *blockVM) GetStateSummary(height uint64) (block.StateSummary, error) { +func (vm *blockVM) GetStateSummary(ctx context.Context, height uint64) (block.StateSummary, error) { if vm.ssVM == nil { return nil, block.ErrStateSyncableVMNotImplemented } start := vm.clock.Time() - summary, err := vm.ssVM.GetStateSummary(height) + summary, err := vm.ssVM.GetStateSummary(ctx, height) end := vm.clock.Time() duration := float64(end.Sub(start)) if err != nil { diff --git a/avalanchego/vms/metervm/vertex_metrics.go b/avalanchego/vms/metervm/vertex_metrics.go index 07afc37b..e377dee2 100644 --- a/avalanchego/vms/metervm/vertex_metrics.go +++ b/avalanchego/vms/metervm/vertex_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metervm diff --git a/avalanchego/vms/metervm/vertex_vm.go b/avalanchego/vms/metervm/vertex_vm.go index 59995e1e..08268beb 100644 --- a/avalanchego/vms/metervm/vertex_vm.go +++ b/avalanchego/vms/metervm/vertex_vm.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metervm import ( + "context" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/api/metrics" @@ -17,24 +19,25 @@ import ( ) var ( - _ vertex.DAGVM = &vertexVM{} - _ snowstorm.Tx = &meterTx{} + _ vertex.LinearizableVMWithEngine = (*vertexVM)(nil) + _ snowstorm.Tx = (*meterTx)(nil) ) -func NewVertexVM(vm vertex.DAGVM) vertex.DAGVM { +func NewVertexVM(vm vertex.LinearizableVMWithEngine) vertex.LinearizableVMWithEngine { return &vertexVM{ - DAGVM: vm, + LinearizableVMWithEngine: vm, } } type vertexVM struct { - vertex.DAGVM + vertex.LinearizableVMWithEngine vertexMetrics clock mockable.Clock } func (vm *vertexVM) Initialize( - ctx *snow.Context, + ctx context.Context, + chainCtx *snow.Context, db manager.Manager, genesisBytes, upgradeBytes, @@ -56,25 +59,35 @@ func (vm *vertexVM) Initialize( if err := multiGatherer.Register("", optionalGatherer); err != nil { return err } - if err := ctx.Metrics.Register(multiGatherer); err != nil { + if err := chainCtx.Metrics.Register(multiGatherer); err != nil { return err } - ctx.Metrics = optionalGatherer - - return vm.DAGVM.Initialize(ctx, db, genesisBytes, upgradeBytes, configBytes, toEngine, fxs, appSender) + chainCtx.Metrics = optionalGatherer + + return vm.LinearizableVMWithEngine.Initialize( + ctx, + chainCtx, + db, + genesisBytes, + upgradeBytes, + configBytes, + toEngine, + fxs, + appSender, + ) } -func (vm *vertexVM) PendingTxs() []snowstorm.Tx { +func (vm *vertexVM) PendingTxs(ctx context.Context) []snowstorm.Tx { start := vm.clock.Time() - txs := vm.DAGVM.PendingTxs() + txs := vm.LinearizableVMWithEngine.PendingTxs(ctx) end := vm.clock.Time() vm.vertexMetrics.pending.Observe(float64(end.Sub(start))) return txs } -func (vm *vertexVM) ParseTx(b []byte) (snowstorm.Tx, error) { +func (vm *vertexVM) ParseTx(ctx context.Context, b []byte) (snowstorm.Tx, error) { start := vm.clock.Time() - tx, err := vm.DAGVM.ParseTx(b) + tx, err := vm.LinearizableVMWithEngine.ParseTx(ctx, b) end := 
vm.clock.Time() duration := float64(end.Sub(start)) if err != nil { @@ -88,9 +101,9 @@ func (vm *vertexVM) ParseTx(b []byte) (snowstorm.Tx, error) { }, nil } -func (vm *vertexVM) GetTx(txID ids.ID) (snowstorm.Tx, error) { +func (vm *vertexVM) GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) { start := vm.clock.Time() - tx, err := vm.DAGVM.GetTx(txID) + tx, err := vm.LinearizableVMWithEngine.GetTx(ctx, txID) end := vm.clock.Time() duration := float64(end.Sub(start)) if err != nil { @@ -110,9 +123,9 @@ type meterTx struct { vm *vertexVM } -func (mtx *meterTx) Verify() error { +func (mtx *meterTx) Verify(ctx context.Context) error { start := mtx.vm.clock.Time() - err := mtx.Tx.Verify() + err := mtx.Tx.Verify(ctx) end := mtx.vm.clock.Time() duration := float64(end.Sub(start)) if err != nil { @@ -123,17 +136,17 @@ func (mtx *meterTx) Verify() error { return err } -func (mtx *meterTx) Accept() error { +func (mtx *meterTx) Accept(ctx context.Context) error { start := mtx.vm.clock.Time() - err := mtx.Tx.Accept() + err := mtx.Tx.Accept(ctx) end := mtx.vm.clock.Time() mtx.vm.vertexMetrics.accept.Observe(float64(end.Sub(start))) return err } -func (mtx *meterTx) Reject() error { +func (mtx *meterTx) Reject(ctx context.Context) error { start := mtx.vm.clock.Time() - err := mtx.Tx.Reject() + err := mtx.Tx.Reject(ctx) end := mtx.vm.clock.Time() mtx.vm.vertexMetrics.reject.Observe(float64(end.Sub(start))) return err diff --git a/avalanchego/vms/mock_manager.go b/avalanchego/vms/mock_manager.go index 536c872a..021ca596 100644 --- a/avalanchego/vms/mock_manager.go +++ b/avalanchego/vms/mock_manager.go @@ -1,14 +1,18 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: vms/manager.go +// Source: github.com/ava-labs/avalanchego/vms (interfaces: Factory,Manager) // Package vms is a generated GoMock package. 
package vms import ( + context "context" reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" - snow "github.com/ava-labs/avalanchego/snow" + logging "github.com/ava-labs/avalanchego/utils/logging" gomock "github.com/golang/mock/gomock" ) @@ -36,7 +40,7 @@ func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { } // New mocks base method. -func (m *MockFactory) New(arg0 *snow.Context) (interface{}, error) { +func (m *MockFactory) New(arg0 logging.Logger) (interface{}, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "New", arg0) ret0, _ := ret[0].(interface{}) @@ -74,47 +78,47 @@ func (m *MockManager) EXPECT() *MockManagerMockRecorder { } // Alias mocks base method. -func (m *MockManager) Alias(id ids.ID, alias string) error { +func (m *MockManager) Alias(arg0 ids.ID, arg1 string) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Alias", id, alias) + ret := m.ctrl.Call(m, "Alias", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } // Alias indicates an expected call of Alias. -func (mr *MockManagerMockRecorder) Alias(id, alias interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Alias(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Alias", reflect.TypeOf((*MockManager)(nil).Alias), id, alias) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Alias", reflect.TypeOf((*MockManager)(nil).Alias), arg0, arg1) } // Aliases mocks base method. -func (m *MockManager) Aliases(id ids.ID) ([]string, error) { +func (m *MockManager) Aliases(arg0 ids.ID) ([]string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Aliases", id) + ret := m.ctrl.Call(m, "Aliases", arg0) ret0, _ := ret[0].([]string) ret1, _ := ret[1].(error) return ret0, ret1 } // Aliases indicates an expected call of Aliases. 
-func (mr *MockManagerMockRecorder) Aliases(id interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Aliases(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aliases", reflect.TypeOf((*MockManager)(nil).Aliases), id) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Aliases", reflect.TypeOf((*MockManager)(nil).Aliases), arg0) } // GetFactory mocks base method. -func (m *MockManager) GetFactory(vmID ids.ID) (Factory, error) { +func (m *MockManager) GetFactory(arg0 ids.ID) (Factory, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetFactory", vmID) + ret := m.ctrl.Call(m, "GetFactory", arg0) ret0, _ := ret[0].(Factory) ret1, _ := ret[1].(error) return ret0, ret1 } // GetFactory indicates an expected call of GetFactory. -func (mr *MockManagerMockRecorder) GetFactory(vmID interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) GetFactory(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFactory", reflect.TypeOf((*MockManager)(nil).GetFactory), vmID) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetFactory", reflect.TypeOf((*MockManager)(nil).GetFactory), arg0) } // ListFactories mocks base method. @@ -133,73 +137,73 @@ func (mr *MockManagerMockRecorder) ListFactories() *gomock.Call { } // Lookup mocks base method. -func (m *MockManager) Lookup(alias string) (ids.ID, error) { +func (m *MockManager) Lookup(arg0 string) (ids.ID, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Lookup", alias) + ret := m.ctrl.Call(m, "Lookup", arg0) ret0, _ := ret[0].(ids.ID) ret1, _ := ret[1].(error) return ret0, ret1 } // Lookup indicates an expected call of Lookup. 
-func (mr *MockManagerMockRecorder) Lookup(alias interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) Lookup(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lookup", reflect.TypeOf((*MockManager)(nil).Lookup), alias) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Lookup", reflect.TypeOf((*MockManager)(nil).Lookup), arg0) } // PrimaryAlias mocks base method. -func (m *MockManager) PrimaryAlias(id ids.ID) (string, error) { +func (m *MockManager) PrimaryAlias(arg0 ids.ID) (string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrimaryAlias", id) + ret := m.ctrl.Call(m, "PrimaryAlias", arg0) ret0, _ := ret[0].(string) ret1, _ := ret[1].(error) return ret0, ret1 } // PrimaryAlias indicates an expected call of PrimaryAlias. -func (mr *MockManagerMockRecorder) PrimaryAlias(id interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) PrimaryAlias(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAlias", reflect.TypeOf((*MockManager)(nil).PrimaryAlias), id) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAlias", reflect.TypeOf((*MockManager)(nil).PrimaryAlias), arg0) } // PrimaryAliasOrDefault mocks base method. -func (m *MockManager) PrimaryAliasOrDefault(id ids.ID) string { +func (m *MockManager) PrimaryAliasOrDefault(arg0 ids.ID) string { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PrimaryAliasOrDefault", id) + ret := m.ctrl.Call(m, "PrimaryAliasOrDefault", arg0) ret0, _ := ret[0].(string) return ret0 } // PrimaryAliasOrDefault indicates an expected call of PrimaryAliasOrDefault. 
-func (mr *MockManagerMockRecorder) PrimaryAliasOrDefault(id interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) PrimaryAliasOrDefault(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAliasOrDefault", reflect.TypeOf((*MockManager)(nil).PrimaryAliasOrDefault), id) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PrimaryAliasOrDefault", reflect.TypeOf((*MockManager)(nil).PrimaryAliasOrDefault), arg0) } // RegisterFactory mocks base method. -func (m *MockManager) RegisterFactory(vmID ids.ID, factory Factory) error { +func (m *MockManager) RegisterFactory(arg0 context.Context, arg1 ids.ID, arg2 Factory) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterFactory", vmID, factory) + ret := m.ctrl.Call(m, "RegisterFactory", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // RegisterFactory indicates an expected call of RegisterFactory. -func (mr *MockManagerMockRecorder) RegisterFactory(vmID, factory interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RegisterFactory(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterFactory", reflect.TypeOf((*MockManager)(nil).RegisterFactory), vmID, factory) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterFactory", reflect.TypeOf((*MockManager)(nil).RegisterFactory), arg0, arg1, arg2) } // RemoveAliases mocks base method. -func (m *MockManager) RemoveAliases(id ids.ID) { +func (m *MockManager) RemoveAliases(arg0 ids.ID) { m.ctrl.T.Helper() - m.ctrl.Call(m, "RemoveAliases", id) + m.ctrl.Call(m, "RemoveAliases", arg0) } // RemoveAliases indicates an expected call of RemoveAliases. 
-func (mr *MockManagerMockRecorder) RemoveAliases(id interface{}) *gomock.Call { +func (mr *MockManagerMockRecorder) RemoveAliases(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAliases", reflect.TypeOf((*MockManager)(nil).RemoveAliases), id) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveAliases", reflect.TypeOf((*MockManager)(nil).RemoveAliases), arg0) } // Versions mocks base method. diff --git a/avalanchego/vms/nftfx/credential.go b/avalanchego/vms/nftfx/credential.go index e98c7915..56f3ec0b 100644 --- a/avalanchego/vms/nftfx/credential.go +++ b/avalanchego/vms/nftfx/credential.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/avalanchego/vms/nftfx/credential_test.go b/avalanchego/vms/nftfx/credential_test.go index 73e03413..e27d441b 100644 --- a/avalanchego/vms/nftfx/credential_test.go +++ b/avalanchego/vms/nftfx/credential_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/avalanchego/vms/nftfx/factory.go b/avalanchego/vms/nftfx/factory.go index ddcd585b..e52d629f 100644 --- a/avalanchego/vms/nftfx/factory.go +++ b/avalanchego/vms/nftfx/factory.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" ) var ( - _ vms.Factory = &Factory{} + _ vms.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'n', 'f', 't', 'f', 'x'} @@ -18,4 +18,6 @@ var ( type Factory struct{} -func (f *Factory) New(*snow.Context) (interface{}, error) { return &Fx{}, nil } +func (*Factory) New(logging.Logger) (interface{}, error) { + return &Fx{}, nil +} diff --git a/avalanchego/vms/nftfx/factory_test.go b/avalanchego/vms/nftfx/factory_test.go index 82dbafa7..83aa31d1 100644 --- a/avalanchego/vms/nftfx/factory_test.go +++ b/avalanchego/vms/nftfx/factory_test.go @@ -1,15 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx import ( "testing" + + "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { factory := Factory{} - if fx, err := factory.New(nil); err != nil { + if fx, err := factory.New(logging.NoLog{}); err != nil { t.Fatal(err) } else if fx == nil { t.Fatalf("Factory.New returned nil") diff --git a/avalanchego/vms/nftfx/fx.go b/avalanchego/vms/nftfx/fx.go index d61ee231..d11e47e4 100644 --- a/avalanchego/vms/nftfx/fx.go +++ b/avalanchego/vms/nftfx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx @@ -107,4 +107,6 @@ func (fx *Fx) VerifyTransferOperation(tx secp256k1fx.UnsignedTx, op *TransferOpe } } -func (fx *Fx) VerifyTransfer(_, _, _, _ interface{}) error { return errCantTransfer } +func (*Fx) VerifyTransfer(_, _, _, _ interface{}) error { + return errCantTransfer +} diff --git a/avalanchego/vms/nftfx/fx_test.go b/avalanchego/vms/nftfx/fx_test.go index 22cae043..cb464dd1 100644 --- a/avalanchego/vms/nftfx/fx_test.go +++ b/avalanchego/vms/nftfx/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -17,7 +17,7 @@ import ( var ( txBytes = []byte{0, 1, 2, 3, 4, 5} - sigBytes = [crypto.SECP256K1RSigLen]byte{ + sigBytes = [secp256k1.SignatureLen]byte{ 0x0e, 0x33, 0x4e, 0xbc, 0x67, 0xa7, 0x3f, 0xe8, 0x24, 0x33, 0xac, 0xa3, 0x47, 0x88, 0xa6, 0x3d, 0x58, 0xe5, 0x8e, 0xf0, 0x3a, 0xd5, 0x84, 0xf1, @@ -61,7 +61,7 @@ func TestFxVerifyMintOperation(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -71,7 +71,7 @@ func TestFxVerifyMintOperation(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -99,14 +99,14 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 
17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { t.Fatal(err) } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -134,7 +134,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -144,7 +144,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -166,7 +166,7 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -199,7 +199,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -209,7 +209,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -231,7 +231,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -241,7 +241,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { UnsignedBytes: txBytes, } cred := 
&Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -270,7 +270,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -280,7 +280,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -309,7 +309,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -319,7 +319,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -361,7 +361,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -371,7 +371,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -403,7 +403,7 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -413,7 +413,7 @@ func 
TestFxVerifyTransferOperationFailedVerify(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -452,7 +452,7 @@ func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -462,7 +462,7 @@ func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -504,7 +504,7 @@ func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -514,7 +514,7 @@ func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -556,7 +556,7 @@ func TestFxVerifyTransferOperationTooSoon(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -566,7 +566,7 @@ func TestFxVerifyTransferOperationTooSoon(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -574,7 +574,7 @@ func TestFxVerifyTransferOperationTooSoon(t *testing.T) { GroupID: 1, Payload: []byte{2}, OutputOwners: secp256k1fx.OutputOwners{ - Locktime: vm.CLK.Unix() + 1, + Locktime: 
vm.Clk.Unix() + 1, Threshold: 1, Addrs: []ids.ShortID{ addr, @@ -609,7 +609,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -619,7 +619,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -646,7 +646,7 @@ func TestFxVerifyTransfer(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { diff --git a/avalanchego/vms/nftfx/mint_operation.go b/avalanchego/vms/nftfx/mint_operation.go index 0616c475..2d1c5bbb 100644 --- a/avalanchego/vms/nftfx/mint_operation.go +++ b/avalanchego/vms/nftfx/mint_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/avalanchego/vms/nftfx/mint_operation_test.go b/avalanchego/vms/nftfx/mint_operation_test.go index da9576dd..d462885e 100644 --- a/avalanchego/vms/nftfx/mint_operation_test.go +++ b/avalanchego/vms/nftfx/mint_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/avalanchego/vms/nftfx/mint_output.go b/avalanchego/vms/nftfx/mint_output.go index 93c849ac..fe91a1b4 100644 --- a/avalanchego/vms/nftfx/mint_output.go +++ b/avalanchego/vms/nftfx/mint_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx @@ -16,7 +16,7 @@ type MintOutput struct { // MarshalJSON marshals Amt and the embedded OutputOwners struct // into a JSON readable format -// If OutputOwners cannot be serialised then this will return error +// If OutputOwners cannot be serialized then this will return error func (out *MintOutput) MarshalJSON() ([]byte, error) { result, err := out.OutputOwners.Fields() if err != nil { diff --git a/avalanchego/vms/nftfx/mint_output_test.go b/avalanchego/vms/nftfx/mint_output_test.go index aae77829..c33ede0e 100644 --- a/avalanchego/vms/nftfx/mint_output_test.go +++ b/avalanchego/vms/nftfx/mint_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/avalanchego/vms/nftfx/transfer_operation.go b/avalanchego/vms/nftfx/transfer_operation.go index f4b8e41b..010d4389 100644 --- a/avalanchego/vms/nftfx/transfer_operation.go +++ b/avalanchego/vms/nftfx/transfer_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx diff --git a/avalanchego/vms/nftfx/transfer_operation_test.go b/avalanchego/vms/nftfx/transfer_operation_test.go index d6abc186..15d39501 100644 --- a/avalanchego/vms/nftfx/transfer_operation_test.go +++ b/avalanchego/vms/nftfx/transfer_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx diff --git a/avalanchego/vms/nftfx/transfer_output.go b/avalanchego/vms/nftfx/transfer_output.go index a4adb5ed..2d4182ac 100644 --- a/avalanchego/vms/nftfx/transfer_output.go +++ b/avalanchego/vms/nftfx/transfer_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package nftfx @@ -22,7 +22,7 @@ const ( var ( errNilTransferOutput = errors.New("nil transfer output") errPayloadTooLarge = errors.New("payload too large") - _ verify.State = &TransferOutput{} + _ verify.State = (*TransferOutput)(nil) ) type TransferOutput struct { @@ -33,7 +33,7 @@ type TransferOutput struct { // MarshalJSON marshals Amt and the embedded OutputOwners struct // into a JSON readable format -// If OutputOwners cannot be serialised then this will return error +// If OutputOwners cannot be serialized then this will return error func (out *TransferOutput) MarshalJSON() ([]byte, error) { result, err := out.OutputOwners.Fields() if err != nil { @@ -56,4 +56,6 @@ func (out *TransferOutput) Verify() error { } } -func (out *TransferOutput) VerifyState() error { return out.Verify() } +func (out *TransferOutput) VerifyState() error { + return out.Verify() +} diff --git a/avalanchego/vms/nftfx/transfer_output_test.go b/avalanchego/vms/nftfx/transfer_output_test.go index 954ba195..a95a7467 100644 --- a/avalanchego/vms/nftfx/transfer_output_test.go +++ b/avalanchego/vms/nftfx/transfer_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package nftfx diff --git a/avalanchego/vms/platformvm/api/static_client.go b/avalanchego/vms/platformvm/api/static_client.go index 9148d9a5..9dea3666 100644 --- a/avalanchego/vms/platformvm/api/static_client.go +++ b/avalanchego/vms/platformvm/api/static_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package api @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/utils/rpc" ) -var _ StaticClient = &staticClient{} +var _ StaticClient = (*staticClient)(nil) // StaticClient for interacting with the platformvm static api type StaticClient interface { @@ -29,8 +29,7 @@ type staticClient struct { // NewClient returns a platformvm client for interacting with the platformvm static api func NewStaticClient(uri string) StaticClient { return &staticClient{requester: rpc.NewEndpointRequester( - uri+"/ext/vm/platform", - "platform", + uri + "/ext/vm/platform", )} } @@ -40,6 +39,6 @@ func (c *staticClient) BuildGenesis( options ...rpc.Option, ) (resp *BuildGenesisReply, err error) { resp = &BuildGenesisReply{} - err = c.requester.SendRequest(ctx, "buildGenesis", args, resp, options...) + err = c.requester.SendRequest(ctx, "platform.buildGenesis", args, resp, options...) return resp, err } diff --git a/avalanchego/vms/platformvm/api/static_service.go b/avalanchego/vms/platformvm/api/static_service.go index 1f9ba9ac..9067d6b8 100644 --- a/avalanchego/vms/platformvm/api/static_service.go +++ b/avalanchego/vms/platformvm/api/static_service.go @@ -1,26 +1,25 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package api import ( - "bytes" "errors" "fmt" "net/http" - "sort" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/genesis" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/txheap" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -34,6 +33,8 @@ var ( errUTXOHasNoValue = errors.New("genesis UTXO has no value") errValidatorAddsNoValue = errors.New("validator would have already unstaked") errStakeOverflow = errors.New("validator stake exceeds limit") + + _ utils.Sortable[UTXO] = UTXO{} ) // StaticService defines the static API methods exposed by the platform VM @@ -47,6 +48,33 @@ type UTXO struct { Message string `json:"message"` } +// TODO can we define this on *UTXO? +func (utxo UTXO) Less(other UTXO) bool { + if utxo.Locktime < other.Locktime { + return true + } else if utxo.Locktime > other.Locktime { + return false + } + + if utxo.Amount < other.Amount { + return true + } else if utxo.Amount > other.Amount { + return false + } + + utxoAddr, err := bech32ToID(utxo.Address) + if err != nil { + return false + } + + otherAddr, err := bech32ToID(other.Address) + if err != nil { + return false + } + + return utxoAddr.Less(otherAddr) +} + // TODO: Refactor APIStaker, APIValidators and merge them together for // PermissionedValidators + PermissionlessValidators. @@ -56,13 +84,17 @@ type UTXO struct { // [StartTime] is the Unix time when they start staking // [Endtime] is the Unix time repr. 
of when they are done staking // [NodeID] is the node ID of the staker +// [Uptime] is the observed uptime of this staker type Staker struct { - TxID ids.ID `json:"txID"` - StartTime json.Uint64 `json:"startTime"` - EndTime json.Uint64 `json:"endTime"` - Weight *json.Uint64 `json:"weight,omitempty"` + TxID ids.ID `json:"txID"` + StartTime json.Uint64 `json:"startTime"` + EndTime json.Uint64 `json:"endTime"` + Weight json.Uint64 `json:"weight"` + NodeID ids.NodeID `json:"nodeID"` + + // Deprecated: Use Weight instead + // TODO: remove [StakeAmount] after enough time for dependencies to update StakeAmount *json.Uint64 `json:"stakeAmount,omitempty"` - NodeID ids.NodeID `json:"nodeID"` } // Owner is the repr. of a reward owner sent over APIs. @@ -83,22 +115,28 @@ type PermissionlessValidator struct { ValidationRewardOwner *Owner `json:"validationRewardOwner,omitempty"` // The owner of the rewards from delegations during the validation period, // if applicable. - DelegationRewardOwner *Owner `json:"delegationRewardOwner,omitempty"` - PotentialReward *json.Uint64 `json:"potentialReward,omitempty"` - DelegationFee json.Float32 `json:"delegationFee"` - ExactDelegationFee *json.Uint32 `json:"exactDelegationFee,omitempty"` - Uptime *json.Float32 `json:"uptime,omitempty"` - Connected bool `json:"connected"` - Staked []UTXO `json:"staked,omitempty"` + DelegationRewardOwner *Owner `json:"delegationRewardOwner,omitempty"` + PotentialReward *json.Uint64 `json:"potentialReward,omitempty"` + AccruedDelegateeReward *json.Uint64 `json:"accruedDelegateeReward,omitempty"` + DelegationFee json.Float32 `json:"delegationFee"` + ExactDelegationFee *json.Uint32 `json:"exactDelegationFee,omitempty"` + Uptime *json.Float32 `json:"uptime,omitempty"` + Connected bool `json:"connected"` + Staked []UTXO `json:"staked,omitempty"` + Signer *signer.ProofOfPossession `json:"signer,omitempty"` + // The delegators delegating to this validator - Delegators []PrimaryDelegator `json:"delegators"` + 
DelegatorCount *json.Uint64 `json:"delegatorCount,omitempty"` + DelegatorWeight *json.Uint64 `json:"delegatorWeight,omitempty"` + Delegators *[]PrimaryDelegator `json:"delegators,omitempty"` } // PermissionedValidator is the repr. of a permissioned validator sent over APIs. type PermissionedValidator struct { Staker // The owner the staking reward, if applicable, will go to - Connected bool `json:"connected"` + Connected bool `json:"connected"` + Uptime *json.Float32 `json:"uptime,omitempty"` } // PrimaryDelegator is the repr. of a primary network delegator sent over APIs. @@ -108,17 +146,6 @@ type PrimaryDelegator struct { PotentialReward *json.Uint64 `json:"potentialReward,omitempty"` } -func (v *Staker) GetWeight() uint64 { - switch { - case v.Weight != nil: - return uint64(*v.Weight) - case v.StakeAmount != nil: - return uint64(*v.StakeAmount) - default: - return 0 - } -} - // Chain defines a chain that exists // at the network's genesis. // [GenesisData] is the initial state of the chain. @@ -169,7 +196,7 @@ func bech32ToID(addrStr string) (ids.ShortID, error) { } // BuildGenesis build the genesis state of the Platform Chain (and thereby the Avalanche network.) -func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { +func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { // Specify the UTXOs on the Platform chain that exist at genesis. 
utxos := make([]*genesis.UTXO, 0, len(args.UTXOs)) for i, apiUTXO := range args.UTXOs { @@ -217,7 +244,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r for _, vdr := range args.Validators { weight := uint64(0) stake := make([]*avax.TransferableOutput, len(vdr.Staked)) - sortUTXOs(vdr.Staked) + utils.Sort(vdr.Staked) for i, apiUTXO := range vdr.Staked { addrID, err := bech32ToID(apiUTXO.Address) if err != nil { @@ -268,7 +295,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r } owner.Addrs = append(owner.Addrs, addrID) } - ids.SortShortIDs(owner.Addrs) + utils.Sort(owner.Addrs) delegationFee := uint32(0) if vdr.ExactDelegationFee != nil { @@ -280,7 +307,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r NetworkID: uint32(args.NetworkID), BlockchainID: ids.Empty, }}, - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: vdr.NodeID, Start: uint64(args.Time), End: uint64(vdr.EndTime), @@ -290,7 +317,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r RewardsOwner: owner, DelegationShares: delegationFee, }} - if err := tx.Sign(txs.GenesisCodec, nil); err != nil { + if err := tx.Initialize(txs.GenesisCodec); err != nil { return err } @@ -316,7 +343,7 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r GenesisData: genesisBytes, SubnetAuth: &secp256k1fx.Input{}, }} - if err := tx.Sign(txs.GenesisCodec, nil); err != nil { + if err := tx.Initialize(txs.GenesisCodec); err != nil { return err } @@ -347,36 +374,3 @@ func (ss *StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, r reply.Encoding = args.Encoding return nil } - -type innerSortUTXO []UTXO - -func (s innerSortUTXO) Less(i, j int) bool { - if s[i].Locktime < s[j].Locktime { - return true - } else if s[i].Locktime > s[j].Locktime { - return false - } - - if s[i].Amount < s[j].Amount { - return true - } else if 
s[i].Amount > s[j].Amount { - return false - } - - iAddrID, err := bech32ToID(s[i].Address) - if err != nil { - return false - } - - jAddrID, err := bech32ToID(s[j].Address) - if err != nil { - return false - } - - return bytes.Compare(iAddrID.Bytes(), jAddrID.Bytes()) == -1 -} - -func (s innerSortUTXO) Len() int { return len(s) } -func (s innerSortUTXO) Swap(i, j int) { s[j], s[i] = s[i], s[j] } - -func sortUTXOs(utxos []UTXO) { sort.Sort(innerSortUTXO(utxos)) } diff --git a/avalanchego/vms/platformvm/api/static_service_test.go b/avalanchego/vms/platformvm/api/static_service_test.go index d90735ce..e11c1a29 100644 --- a/avalanchego/vms/platformvm/api/static_service_test.go +++ b/avalanchego/vms/platformvm/api/static_service_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package api @@ -6,6 +6,8 @@ package api import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting" @@ -17,12 +19,12 @@ import ( const testNetworkID = 10 // To be used in tests func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { + require := require.New(t) nodeID := ids.NodeID{1, 2, 3} hrp := constants.NetworkIDToHRP[testNetworkID] addr, err := address.FormatBech32(hrp, nodeID.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + utxo := UTXO{ Address: addr, Amount: 0, @@ -31,7 +33,7 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { validator := PermissionlessValidator{ Staker: Staker{ EndTime: 15, - Weight: &weight, + Weight: weight, NodeID: nodeID, }, RewardOwner: &Owner{ @@ -57,18 +59,16 @@ func TestBuildGenesisInvalidUTXOBalance(t *testing.T) { reply := BuildGenesisReply{} ss := StaticService{} - if err := ss.BuildGenesis(nil, &args, &reply); err == nil { - t.Fatalf("Should have 
errored due to an invalid balance") - } + require.Error(ss.BuildGenesis(nil, &args, &reply), "should have errored due to an invalid balance") } func TestBuildGenesisInvalidAmount(t *testing.T) { + require := require.New(t) nodeID := ids.NodeID{1, 2, 3} hrp := constants.NetworkIDToHRP[testNetworkID] addr, err := address.FormatBech32(hrp, nodeID.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + utxo := UTXO{ Address: addr, Amount: 123456789, @@ -103,18 +103,16 @@ func TestBuildGenesisInvalidAmount(t *testing.T) { reply := BuildGenesisReply{} ss := StaticService{} - if err := ss.BuildGenesis(nil, &args, &reply); err == nil { - t.Fatalf("Should have errored due to an invalid amount") - } + require.Error(ss.BuildGenesis(nil, &args, &reply), "should have errored due to an invalid amount") } func TestBuildGenesisInvalidEndtime(t *testing.T) { + require := require.New(t) nodeID := ids.NodeID{1, 2, 3} hrp := constants.NetworkIDToHRP[testNetworkID] addr, err := address.FormatBech32(hrp, nodeID.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + utxo := UTXO{ Address: addr, Amount: 123456789, @@ -150,18 +148,16 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { reply := BuildGenesisReply{} ss := StaticService{} - if err := ss.BuildGenesis(nil, &args, &reply); err == nil { - t.Fatalf("Should have errored due to an invalid end time") - } + require.Error(ss.BuildGenesis(nil, &args, &reply), "should have errored due to an invalid end time") } func TestBuildGenesisReturnsSortedValidators(t *testing.T) { + require := require.New(t) nodeID := ids.NodeID{1} hrp := constants.NetworkIDToHRP[testNetworkID] addr, err := address.FormatBech32(hrp, nodeID.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + utxo := UTXO{ Address: addr, Amount: 123456789, @@ -232,21 +228,101 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { reply := BuildGenesisReply{} ss := StaticService{} - if err := ss.BuildGenesis(nil, &args, 
&reply); err != nil { - t.Fatalf("BuildGenesis should not have errored but got error: %s", err) - } + require.NoError(ss.BuildGenesis(nil, &args, &reply)) genesisBytes, err := formatting.Decode(reply.Encoding, reply.Bytes) - if err != nil { - t.Fatalf("Problem decoding BuildGenesis response: %s", err) - } + require.NoError(err) genesis, err := genesis.Parse(genesisBytes) + require.NoError(err) + + validators := genesis.Validators + require.Len(validators, 3) +} + +func TestUTXOLess(t *testing.T) { + var ( + smallerAddr = ids.ShortID{} + largerAddr = ids.ShortID{1} + ) + smallerAddrStr, err := address.FormatBech32("avax", smallerAddr[:]) if err != nil { - t.Fatal(err) + panic(err) } - validators := genesis.Validators - if len(validators) != 3 { - t.Fatal("Validators should contain 3 validators") + largerAddrStr, err := address.FormatBech32("avax", largerAddr[:]) + if err != nil { + panic(err) + } + type test struct { + name string + utxo1 UTXO + utxo2 UTXO + expected bool + } + tests := []test{ + { + name: "both empty", + utxo1: UTXO{}, + utxo2: UTXO{}, + expected: false, + }, + { + name: "first locktime smaller", + utxo1: UTXO{}, + utxo2: UTXO{ + Locktime: 1, + }, + expected: true, + }, + { + name: "first locktime larger", + utxo1: UTXO{ + Locktime: 1, + }, + utxo2: UTXO{}, + expected: false, + }, + { + name: "first amount smaller", + utxo1: UTXO{}, + utxo2: UTXO{ + Amount: 1, + }, + expected: true, + }, + { + name: "first amount larger", + utxo1: UTXO{ + Amount: 1, + }, + utxo2: UTXO{}, + expected: false, + }, + { + name: "first address smaller", + utxo1: UTXO{ + Address: smallerAddrStr, + }, + utxo2: UTXO{ + Address: largerAddrStr, + }, + expected: true, + }, + { + name: "first address larger", + utxo1: UTXO{ + Address: largerAddrStr, + }, + utxo2: UTXO{ + Address: smallerAddrStr, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.expected, tt.utxo1.Less(tt.utxo2)) + }) } } diff --git 
a/avalanchego/vms/platformvm/blocks/abort_block.go b/avalanchego/vms/platformvm/blocks/abort_block.go index 9ab07071..2f5928f9 100644 --- a/avalanchego/vms/platformvm/blocks/abort_block.go +++ b/avalanchego/vms/platformvm/blocks/abort_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks @@ -12,8 +12,8 @@ import ( ) var ( - _ BanffBlock = &BanffAbortBlock{} - _ Block = &ApricotAbortBlock{} + _ BanffBlock = (*BanffAbortBlock)(nil) + _ Block = (*ApricotAbortBlock)(nil) ) type BanffAbortBlock struct { @@ -21,8 +21,13 @@ type BanffAbortBlock struct { ApricotAbortBlock `serialize:"true"` } -func (b *BanffAbortBlock) Timestamp() time.Time { return time.Unix(int64(b.Time), 0) } -func (b *BanffAbortBlock) Visit(v Visitor) error { return v.BanffAbortBlock(b) } +func (b *BanffAbortBlock) Timestamp() time.Time { + return time.Unix(int64(b.Time), 0) +} + +func (b *BanffAbortBlock) Visit(v Visitor) error { + return v.BanffAbortBlock(b) +} func NewBanffAbortBlock( timestamp time.Time, @@ -50,11 +55,19 @@ func (b *ApricotAbortBlock) initialize(bytes []byte) error { return nil } -func (*ApricotAbortBlock) InitCtx(ctx *snow.Context) {} +func (*ApricotAbortBlock) InitCtx(*snow.Context) {} -func (*ApricotAbortBlock) Txs() []*txs.Tx { return nil } -func (b *ApricotAbortBlock) Visit(v Visitor) error { return v.ApricotAbortBlock(b) } +func (*ApricotAbortBlock) Txs() []*txs.Tx { + return nil +} + +func (b *ApricotAbortBlock) Visit(v Visitor) error { + return v.ApricotAbortBlock(b) +} +// NewApricotAbortBlock is kept for testing purposes only. 
+// Following Banff activation and subsequent code cleanup, Apricot Abort blocks +// should be only verified (upon bootstrap), never created anymore func NewApricotAbortBlock( parentID ids.ID, height uint64, diff --git a/avalanchego/vms/platformvm/blocks/abort_block_test.go b/avalanchego/vms/platformvm/blocks/abort_block_test.go index 6ccec281..d85cf12b 100644 --- a/avalanchego/vms/platformvm/blocks/abort_block_test.go +++ b/avalanchego/vms/platformvm/blocks/abort_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks @@ -26,7 +26,7 @@ func TestNewBanffAbortBlock(t *testing.T) { require.NoError(err) // Make sure the block is initialized - require.NotNil(blk.Bytes()) + require.NotEmpty(blk.Bytes()) require.Equal(timestamp, blk.Timestamp()) require.Equal(parentID, blk.Parent()) @@ -45,7 +45,7 @@ func TestNewApricotAbortBlock(t *testing.T) { require.NoError(err) // Make sure the block is initialized - require.NotNil(blk.Bytes()) + require.NotEmpty(blk.Bytes()) require.Equal(parentID, blk.Parent()) require.Equal(height, blk.Height()) diff --git a/avalanchego/vms/platformvm/blocks/atomic_block.go b/avalanchego/vms/platformvm/blocks/atomic_block.go index 5ac8389b..94424439 100644 --- a/avalanchego/vms/platformvm/blocks/atomic_block.go +++ b/avalanchego/vms/platformvm/blocks/atomic_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ Block = &ApricotAtomicBlock{} +var _ Block = (*ApricotAtomicBlock)(nil) // ApricotAtomicBlock being accepted results in the atomic transaction contained // in the block to be accepted and committed to the chain. 
@@ -22,7 +22,7 @@ type ApricotAtomicBlock struct { func (b *ApricotAtomicBlock) initialize(bytes []byte) error { b.CommonBlock.initialize(bytes) - if err := b.Tx.Sign(txs.Codec, nil); err != nil { + if err := b.Tx.Initialize(txs.Codec); err != nil { return fmt.Errorf("failed to initialize tx: %w", err) } return nil @@ -32,8 +32,13 @@ func (b *ApricotAtomicBlock) InitCtx(ctx *snow.Context) { b.Tx.Unsigned.InitCtx(ctx) } -func (b *ApricotAtomicBlock) Txs() []*txs.Tx { return []*txs.Tx{b.Tx} } -func (b *ApricotAtomicBlock) Visit(v Visitor) error { return v.ApricotAtomicBlock(b) } +func (b *ApricotAtomicBlock) Txs() []*txs.Tx { + return []*txs.Tx{b.Tx} +} + +func (b *ApricotAtomicBlock) Visit(v Visitor) error { + return v.ApricotAtomicBlock(b) +} func NewApricotAtomicBlock( parentID ids.ID, diff --git a/avalanchego/vms/platformvm/blocks/atomic_block_test.go b/avalanchego/vms/platformvm/blocks/atomic_block_test.go index b2b510be..7e1b9f09 100644 --- a/avalanchego/vms/platformvm/blocks/atomic_block_test.go +++ b/avalanchego/vms/platformvm/blocks/atomic_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package blocks @@ -31,7 +31,7 @@ func TestNewApricotAtomicBlock(t *testing.T) { }, Creds: []verify.Verifiable{}, } - require.NoError(tx.Sign(txs.Codec, nil)) + require.NoError(tx.Initialize(txs.Codec)) blk, err := NewApricotAtomicBlock( parentID, @@ -41,8 +41,8 @@ func TestNewApricotAtomicBlock(t *testing.T) { require.NoError(err) // Make sure the block and tx are initialized - require.NotNil(blk.Bytes()) - require.NotNil(blk.Tx.Bytes()) + require.NotEmpty(blk.Bytes()) + require.NotEmpty(blk.Tx.Bytes()) require.NotEqual(ids.Empty, blk.Tx.ID()) require.Equal(tx.Bytes(), blk.Tx.Bytes()) require.Equal(parentID, blk.Parent()) diff --git a/avalanchego/vms/platformvm/blocks/block.go b/avalanchego/vms/platformvm/blocks/block.go index 8c8b5087..273f379a 100644 --- a/avalanchego/vms/platformvm/blocks/block.go +++ b/avalanchego/vms/platformvm/blocks/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks diff --git a/avalanchego/vms/platformvm/blocks/builder/apricot_builder.go b/avalanchego/vms/platformvm/blocks/builder/apricot_builder.go deleted file mode 100644 index 8564fdf7..00000000 --- a/avalanchego/vms/platformvm/blocks/builder/apricot_builder.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package builder - -import ( - "fmt" - "time" - - "go.uber.org/zap" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" -) - -func buildApricotBlock( - builder *builder, - parentID ids.ID, - height uint64, - timestamp time.Time, - shouldAdvanceTime bool, - parentState state.Chain, -) (blocks.Block, error) { - // try including as many standard txs as possible. No need to advance chain time - if builder.Mempool.HasApricotDecisionTxs() { - return blocks.NewApricotStandardBlock( - parentID, - height, - builder.Mempool.PeekApricotDecisionTxs(targetBlockSize), - ) - } - - // try rewarding stakers whose staking period ends at current chain time. - stakerTxID, shouldReward, err := builder.getNextStakerToReward(parentState.GetTimestamp(), parentState) - if err != nil { - return nil, fmt.Errorf("could not find next staker to reward: %w", err) - } - if shouldReward { - rewardValidatorTx, err := builder.txBuilder.NewRewardValidatorTx(stakerTxID) - if err != nil { - return nil, fmt.Errorf("could not build tx to reward staker: %w", err) - } - - return blocks.NewApricotProposalBlock( - parentID, - height, - rewardValidatorTx, - ) - } - - // try advancing chain time - if shouldAdvanceTime { - advanceTimeTx, err := builder.txBuilder.NewAdvanceTimeTx(timestamp) - if err != nil { - return nil, fmt.Errorf("could not build tx to reward staker: %w", err) - } - - return blocks.NewApricotProposalBlock( - parentID, - height, - advanceTimeTx, - ) - } - - // Clean out transactions with an invalid timestamp. 
- builder.dropExpiredStakerTxs(timestamp) - - // Check the mempool - if !builder.Mempool.HasStakerTx() { - builder.txExecutorBackend.Ctx.Log.Debug("no pending txs to issue into a block") - return nil, errNoPendingBlocks - } - - tx, err := nextApricotProposalTx(builder, timestamp, parentState) - if err != nil { - builder.txExecutorBackend.Ctx.Log.Error( - "failed to get the next proposal tx", - zap.Error(err), - ) - return nil, err - } - - return blocks.NewApricotProposalBlock( - parentID, - height, - tx, - ) -} - -// Try to get/make a proposal tx to put into a block. -// Any returned error is unexpected. -func nextApricotProposalTx(builder *builder, timestamp time.Time, parentState state.Chain) (*txs.Tx, error) { - tx := builder.Mempool.PeekStakerTx() - startTime := tx.Unsigned.(txs.Staker).StartTime() - - // Check whether this staker starts within at most [MaxFutureStartTime]. - // If it does, issue the staking tx. - // If it doesn't, issue an advance time tx. - maxChainStartTime := parentState.GetTimestamp().Add(executor.MaxFutureStartTime) - if !startTime.After(maxChainStartTime) { - return tx, nil - } - - // The chain timestamp is too far in the past. Advance it. - advanceTimeTx, err := builder.txBuilder.NewAdvanceTimeTx(timestamp) - if err != nil { - return nil, fmt.Errorf("could not build tx to advance time: %w", err) - } - return advanceTimeTx, nil -} diff --git a/avalanchego/vms/platformvm/blocks/builder/apricot_builder_test.go b/avalanchego/vms/platformvm/blocks/builder/apricot_builder_test.go deleted file mode 100644 index cb11ff9c..00000000 --- a/avalanchego/vms/platformvm/blocks/builder/apricot_builder_test.go +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package builder - -import ( - "testing" - "time" - - "github.com/golang/mock/gomock" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" - txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" -) - -func TestBuildApricotBlock(t *testing.T) { - var ( - parentID = ids.GenerateTestID() - height = uint64(1337) - output = &avax.TransferableOutput{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - Out: &secp256k1fx.TransferOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, - } - now = time.Now() - parentTimestamp = now.Add(-2 * time.Second) - blockTxs = []*txs.Tx{{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - In: &secp256k1fx.TransferInput{ - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - }, - }}, - Outs: []*avax.TransferableOutput{output}, - }}, - Validator: validator.Validator{ - // Shouldn't be dropped - Start: uint64(now.Add(2 * txexecutor.SyncBound).Unix()), - }, - StakeOuts: []*avax.TransferableOutput{output}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, 
- Creds: []verify.Verifiable{ - &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{{1, 3, 3, 7}}, - }, - }, - }} - stakerTxID = ids.GenerateTestID() - ) - - type test struct { - name string - builderF func(*gomock.Controller) *builder - timestamp time.Time - shouldAdvanceTime bool - parentStateF func(*gomock.Controller) state.Chain - expectedBlkF func(*require.Assertions) blocks.Block - expectedErr error - } - - tests := []test{ - { - name: "has decision txs", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().HasApricotDecisionTxs().Return(true) - mempool.EXPECT().PeekApricotDecisionTxs(targetBlockSize).Return(blockTxs) - return &builder{ - Mempool: mempool, - } - }, - timestamp: time.Time{}, - shouldAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - return state.NewMockChain(ctrl) - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewApricotStandardBlock( - parentID, - height, - blockTxs, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "should reward", - builderF: func(ctrl *gomock.Controller) *builder { - // There are no decision txs - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().HasApricotDecisionTxs().Return(false) - - // The tx builder should be asked to build a reward tx - txBuilder := txbuilder.NewMockBuilder(ctrl) - txBuilder.EXPECT().NewRewardValidatorTx(stakerTxID).Return(blockTxs[0], nil) - - return &builder{ - Mempool: mempool, - txBuilder: txBuilder, - } - }, - timestamp: time.Time{}, - shouldAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - s.EXPECT().GetTimestamp().Return(parentTimestamp) - - // add current validator that ends at [parentTimestamp] - // i.e. 
it should be rewarded - currentStakerIter := state.NewMockStakerIterator(ctrl) - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - TxID: stakerTxID, - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - EndTime: parentTimestamp, - }) - currentStakerIter.EXPECT().Release() - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewApricotProposalBlock( - parentID, - height, - blockTxs[0], - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "should advance time", - builderF: func(ctrl *gomock.Controller) *builder { - // There are no decision txs - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().HasApricotDecisionTxs().Return(false) - - // The tx builder should be asked to build an advance time tx - advanceTimeTx := &txs.Tx{Unsigned: &txs.AdvanceTimeTx{ - Time: uint64(now.Add(-1 * time.Second).Unix()), - }} - txBuilder := txbuilder.NewMockBuilder(ctrl) - txBuilder.EXPECT().NewAdvanceTimeTx(now.Add(-1*time.Second)).Return(advanceTimeTx, nil) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txBuilder: txBuilder, - txExecutorBackend: &txexecutor.Backend{ - Clk: clk, - }, - } - }, - timestamp: now.Add(-1 * time.Second), - shouldAdvanceTime: true, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - s.EXPECT().GetTimestamp().Return(parentTimestamp) - - // add current validator that ends at [now] - 1 second. - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime] - // when determining whether to issue a reward tx. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(-1 * time.Second), - Priority: txs.PrimaryNetworkValidatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewApricotProposalBlock( - parentID, - height, - &txs.Tx{Unsigned: &txs.AdvanceTimeTx{ // advances time - Time: uint64(now.Add(-1 * time.Second).Unix()), - }}, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "no proposal tx", - builderF: func(ctrl *gomock.Controller) *builder { - // There are no decision txs - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().HasApricotDecisionTxs().Return(false) - - // There is a staker tx. - mempool.EXPECT().HasStakerTx().Return(false).AnyTimes() - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Ctx: &snow.Context{ - Log: logging.NoLog{}, - }, - Clk: clk, - }, - } - }, - timestamp: time.Time{}, - shouldAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - s.EXPECT().GetTimestamp().Return(parentTimestamp) - - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkValidatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewApricotProposalBlock( - parentID, - height, - blockTxs[0], - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: errNoPendingBlocks, - }, - { - name: "has a proposal tx", - builderF: func(ctrl *gomock.Controller) *builder { - // There are no decision txs - mempool := mempool.NewMockMempool(ctrl) - mempool.EXPECT().HasApricotDecisionTxs().Return(false) - - // There is a proposal tx. - mempool.EXPECT().HasStakerTx().Return(true).AnyTimes() - mempool.EXPECT().PeekStakerTx().Return(blockTxs[0]).AnyTimes() - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &txexecutor.Backend{ - Clk: clk, - }, - } - }, - timestamp: time.Time{}, - shouldAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Once in [buildBanffBlock], once in [GetNextStakerChangeTime], - s.EXPECT().GetTimestamp().Return(parentTimestamp).Times(2) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkValidatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewApricotProposalBlock( - parentID, - height, - blockTxs[0], - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - gotBlk, err := buildApricotBlock( - tt.builderF(ctrl), - parentID, - height, - tt.timestamp, - tt.shouldAdvanceTime, - tt.parentStateF(ctrl), - ) - if tt.expectedErr != nil { - require.ErrorIs(err, tt.expectedErr) - return - } - require.NoError(err) - require.EqualValues(tt.expectedBlkF(require), gotBlk) - }) - } -} diff --git a/avalanchego/vms/platformvm/blocks/builder/banff_builder.go b/avalanchego/vms/platformvm/blocks/builder/banff_builder.go deleted file mode 100644 index 2ead5e32..00000000 --- a/avalanchego/vms/platformvm/blocks/builder/banff_builder.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package builder - -import ( - "fmt" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - "github.com/ava-labs/avalanchego/vms/platformvm/state" -) - -// [timestamp] is min(max(now, parent timestamp), next staker change time) -func buildBanffBlock( - builder *builder, - parentID ids.ID, - height uint64, - timestamp time.Time, - forceAdvanceTime bool, - parentState state.Chain, -) (blocks.Block, error) { - // Try rewarding stakers whose staking period ends at the new chain time. - // This is done first to prioritize advancing the timestamp as quickly as - // possible. - stakerTxID, shouldReward, err := builder.getNextStakerToReward(timestamp, parentState) - if err != nil { - return nil, fmt.Errorf("could not find next staker to reward: %w", err) - } - if shouldReward { - rewardValidatorTx, err := builder.txBuilder.NewRewardValidatorTx(stakerTxID) - if err != nil { - return nil, fmt.Errorf("could not build tx to reward staker: %w", err) - } - - return blocks.NewBanffProposalBlock( - timestamp, - parentID, - height, - rewardValidatorTx, - ) - } - - // Clean out the mempool's transactions with invalid timestamps. - builder.dropExpiredStakerTxs(timestamp) - - // If there is no reason to build a block, don't. - if !builder.Mempool.HasTxs() && !forceAdvanceTime { - builder.txExecutorBackend.Ctx.Log.Debug("no pending txs to issue into a block") - return nil, errNoPendingBlocks - } - - // Issue a block with as many transactions as possible. - return blocks.NewBanffStandardBlock( - timestamp, - parentID, - height, - builder.Mempool.PeekTxs(targetBlockSize), - ) -} diff --git a/avalanchego/vms/platformvm/blocks/builder/banff_builder_test.go b/avalanchego/vms/platformvm/blocks/builder/banff_builder_test.go deleted file mode 100644 index 2f290a1c..00000000 --- a/avalanchego/vms/platformvm/blocks/builder/banff_builder_test.go +++ /dev/null @@ -1,511 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. -// See the file LICENSE for licensing terms. - -package builder - -import ( - "testing" - "time" - - "github.com/golang/mock/gomock" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" - "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks" - "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" - "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" - - txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" -) - -func TestBanffFork(t *testing.T) { - require := require.New(t) - - env := newEnvironment(t) - env.ctx.Lock.Lock() - defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } - }() - - chainTime := env.state.GetTimestamp() - env.clk.Set(chainTime) - - apricotTimes := []time.Time{ - chainTime.Add(1 * executor.SyncBound), - chainTime.Add(2 * executor.SyncBound), - } - lastApricotTime := apricotTimes[len(apricotTimes)-1] - env.config.BanffTime = lastApricotTime.Add(time.Second) - - for i, nextValidatorStartTime := range apricotTimes { - // add a validator with the right start time - // so that we can then advance chain time to it - tx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - uint64(nextValidatorStartTime.Unix()), - 
uint64(defaultValidateEndTime.Unix()), - ids.GenerateTestNodeID(), - ids.GenerateTestShortID(), - reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[i]}, - ids.ShortEmpty, - ) - require.NoError(err) - require.NoError(env.mempool.Add(tx)) - - proposalBlk, err := env.Builder.BuildBlock() - require.NoError(err) - require.NoError(proposalBlk.Verify()) - require.NoError(proposalBlk.Accept()) - require.NoError(env.state.Commit()) - - options, err := proposalBlk.(snowman.OracleBlock).Options() - require.NoError(err) - commitBlk := options[0] - require.NoError(commitBlk.Verify()) - require.NoError(commitBlk.Accept()) - require.NoError(env.state.Commit()) - env.Builder.SetPreference(commitBlk.ID()) - - // advance chain time - env.clk.Set(nextValidatorStartTime) - advanceTimeBlk, err := env.Builder.BuildBlock() - require.NoError(err) - require.NoError(advanceTimeBlk.Verify()) - require.NoError(advanceTimeBlk.Accept()) - require.NoError(env.state.Commit()) - - options, err = advanceTimeBlk.(snowman.OracleBlock).Options() - require.NoError(err) - commitBlk = options[0] - require.NoError(commitBlk.Verify()) - require.NoError(commitBlk.Accept()) - require.NoError(env.state.Commit()) - env.Builder.SetPreference(commitBlk.ID()) - } - - // set local clock at banff time, so to try and build a banff block - localTime := env.config.BanffTime - env.clk.Set(localTime) - - createChainTx, err := env.txBuilder.NewCreateChainTx( - testSubnet1.ID(), - nil, - constants.AVMID, - nil, - "chain name", - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, - ids.ShortEmpty, - ) - require.NoError(err) - require.NoError(env.mempool.Add(createChainTx)) - - proposalBlk, err := env.Builder.BuildBlock() - require.NoError(err) - require.NoError(proposalBlk.Verify()) - require.NoError(proposalBlk.Accept()) - require.NoError(env.state.Commit()) - - // check Banff fork is activated - require.True(env.state.GetTimestamp().Equal(env.config.BanffTime)) -} - -func 
TestBuildBanffBlock(t *testing.T) { - var ( - parentID = ids.GenerateTestID() - height = uint64(1337) - output = &avax.TransferableOutput{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - Out: &secp256k1fx.TransferOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, - } - now = time.Now() - parentTimestamp = now.Add(-2 * time.Second) - transactions = []*txs.Tx{{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - In: &secp256k1fx.TransferInput{ - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - }, - }}, - Outs: []*avax.TransferableOutput{output}, - }}, - Validator: validator.Validator{ - // Shouldn't be dropped - Start: uint64(now.Add(2 * executor.SyncBound).Unix()), - }, - StakeOuts: []*avax.TransferableOutput{output}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, - Creds: []verify.Verifiable{ - &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{{1, 3, 3, 7}}, - }, - }, - }} - stakerTxID = ids.GenerateTestID() - ) - - type test struct { - name string - builderF func(*gomock.Controller) *builder - timestamp time.Time - forceAdvanceTime bool - parentStateF func(*gomock.Controller) state.Chain - expectedBlkF func(*require.Assertions) blocks.Block - expectedErr error - } - - tests := []test{ - { - name: "should reward", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // The tx builder should be asked to build a reward tx - txBuilder := txbuilder.NewMockBuilder(ctrl) - txBuilder.EXPECT().NewRewardValidatorTx(stakerTxID).Return(transactions[0], nil) - - return &builder{ - Mempool: mempool, - txBuilder: txBuilder, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // 
add current validator that ends at [parentTimestamp] - // i.e. it should be rewarded - currentStakerIter := state.NewMockStakerIterator(ctrl) - currentStakerIter.EXPECT().Next().Return(true) - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - TxID: stakerTxID, - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - EndTime: parentTimestamp, - }) - currentStakerIter.EXPECT().Release() - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffProposalBlock( - parentTimestamp, - parentID, - height, - transactions[0], - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "has decision txs", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are txs. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return(transactions) - return &builder{ - Mempool: mempool, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffStandardBlock( - parentTimestamp, - parentID, - height, - transactions, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "no stakers tx", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are no txs. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(false) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &executor.Backend{ - Ctx: &snow.Context{ - Log: logging.NoLog{}, - }, - Clk: clk, - }, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - return nil - }, - expectedErr: errNoPendingBlocks, - }, - { - name: "should advance time", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are no txs. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(false) - mempool.EXPECT().PeekTxs(targetBlockSize).Return(nil) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &executor.Backend{ - Clk: clk, - }, - } - }, - timestamp: now.Add(-1 * time.Second), - forceAdvanceTime: true, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // add current validator that ends at [now] - 1 second. - // That is, it ends in the past but after the current chain time. - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime] - // when determining whether to issue a reward tx. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(-1 * time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffStandardBlock( - now.Add(-1*time.Second), // note the advanced time - parentID, - height, - nil, // empty block to advance time - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "has a staker tx no force", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There is a tx. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &executor.Backend{ - Clk: clk, - }, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: false, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffStandardBlock( - parentTimestamp, - parentID, - height, - []*txs.Tx{transactions[0]}, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - { - name: "has a staker tx with force", - builderF: func(ctrl *gomock.Controller) *builder { - mempool := mempool.NewMockMempool(ctrl) - - // There are no decision txs - // There is a staker tx. - mempool.EXPECT().HasStakerTx().Return(false) - mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) - - clk := &mockable.Clock{} - clk.Set(now) - return &builder{ - Mempool: mempool, - txExecutorBackend: &executor.Backend{ - Clk: clk, - }, - } - }, - timestamp: parentTimestamp, - forceAdvanceTime: true, - parentStateF: func(ctrl *gomock.Controller) state.Chain { - s := state.NewMockChain(ctrl) - - // Handle calls in [getNextStakerToReward] - // and [GetNextStakerChangeTime]. - // Next validator change time is in the future. 
- currentStakerIter := state.NewMockStakerIterator(ctrl) - gomock.InOrder( - // expect calls from [getNextStakerToReward] - currentStakerIter.EXPECT().Next().Return(true), - currentStakerIter.EXPECT().Value().Return(&state.Staker{ - NextTime: now.Add(time.Second), - Priority: txs.PrimaryNetworkDelegatorCurrentPriority, - }), - currentStakerIter.EXPECT().Release(), - ) - - s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) - return s - }, - expectedBlkF: func(require *require.Assertions) blocks.Block { - expectedBlk, err := blocks.NewBanffStandardBlock( - parentTimestamp, - parentID, - height, - []*txs.Tx{transactions[0]}, - ) - require.NoError(err) - return expectedBlk - }, - expectedErr: nil, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - gotBlk, err := buildBanffBlock( - tt.builderF(ctrl), - parentID, - height, - tt.timestamp, - tt.forceAdvanceTime, - tt.parentStateF(ctrl), - ) - if tt.expectedErr != nil { - require.ErrorIs(err, tt.expectedErr) - return - } - require.NoError(err) - require.EqualValues(tt.expectedBlkF(require), gotBlk) - }) - } -} diff --git a/avalanchego/vms/platformvm/blocks/builder/builder.go b/avalanchego/vms/platformvm/blocks/builder/builder.go index d6657405..947fdf26 100644 --- a/avalanchego/vms/platformvm/blocks/builder/builder.go +++ b/avalanchego/vms/platformvm/blocks/builder/builder.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package builder import ( + "context" "errors" "fmt" "time" @@ -31,10 +32,11 @@ import ( const targetBlockSize = 128 * units.KiB var ( - _ Builder = &builder{} + _ Builder = (*builder)(nil) errEndOfTime = errors.New("program time is suspiciously far in the future") errNoPendingBlocks = errors.New("no pending blocks") + errChainNotSynced = errors.New("chain not synced") ) type Builder interface { @@ -53,7 +55,7 @@ type Builder interface { // BuildBlock is called on timer clock to attempt to create // next block - BuildBlock() (snowman.Block, error) + BuildBlock(context.Context) (snowman.Block, error) // Shutdown cleanly shuts Builder down Shutdown() @@ -123,6 +125,10 @@ func (b *builder) Preferred() (snowman.Block, error) { // AddUnverifiedTx verifies a transaction and attempts to add it to the mempool func (b *builder) AddUnverifiedTx(tx *txs.Tx) error { + if !b.txExecutorBackend.Bootstrapped.Get() { + return errChainNotSynced + } + txID := tx.ID() if b.Mempool.Has(txID) { // If the transaction is already in the mempool - then it looks the same @@ -137,7 +143,7 @@ func (b *builder) AddUnverifiedTx(tx *txs.Tx) error { Tx: tx, } if err := tx.Unsigned.Visit(&verifier); err != nil { - b.MarkDropped(txID, err.Error()) + b.MarkDropped(txID, err) return err } @@ -150,7 +156,7 @@ func (b *builder) AddUnverifiedTx(tx *txs.Tx) error { // BuildBlock builds a block to be added to consensus. // This method removes the transactions from the returned // blocks from the mempool. -func (b *builder) BuildBlock() (snowman.Block, error) { +func (b *builder) BuildBlock(context.Context) (snowman.Block, error) { b.Mempool.DisableAdding() defer func() { b.Mempool.EnableAdding() @@ -209,19 +215,7 @@ func (b *builder) buildBlock() (blocks.Block, error) { } // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) - // If the banff timestamp has come, build banff blocks. 
- if b.txExecutorBackend.Config.IsBanffActivated(timestamp) { - return buildBanffBlock( - b, - preferredID, - nextHeight, - timestamp, - timeWasCapped, - preferredState, - ) - } - - return buildApricotBlock( + return buildBlock( b, preferredID, nextHeight, @@ -246,41 +240,6 @@ func (b *builder) ResetBlockTimer() { b.timer.SetTimeoutIn(0) } -// getNextStakerToReward returns the next staker txID to remove from the staking -// set with a RewardValidatorTx rather than an AdvanceTimeTx. [chainTimestamp] -// is the timestamp of the chain at the time this validator would be getting -// removed and is used to calculate [shouldReward]. -// Returns: -// - [txID] of the next staker to reward -// - [shouldReward] if the txID exists and is ready to be rewarded -// - [err] if something bad happened -func (b *builder) getNextStakerToReward( - chainTimestamp time.Time, - preferredState state.Chain, -) (ids.ID, bool, error) { - if !chainTimestamp.Before(mockable.MaxTime) { - return ids.Empty, false, errEndOfTime - } - - currentStakerIterator, err := preferredState.GetCurrentStakerIterator() - if err != nil { - return ids.Empty, false, err - } - defer currentStakerIterator.Release() - - for currentStakerIterator.Next() { - currentStaker := currentStakerIterator.Value() - priority := currentStaker.Priority - // If the staker is a permissionless staker (not a permissioned subnet - // validator), it's the next staker we will want to remove with a - // RewardValidatorTx rather than an AdvanceTimeTx. - if priority != txs.SubnetPermissionedValidatorCurrentPriority { - return currentStaker.TxID, chainTimestamp.Equal(currentStaker.EndTime), nil - } - } - return ids.Empty, false, nil -} - // dropExpiredStakerTxs drops add validator/delegator transactions in the // mempool whose start time is not sufficiently far in the future // (i.e. within local time plus [MaxFutureStartFrom]). 
@@ -296,17 +255,17 @@ func (b *builder) dropExpiredStakerTxs(timestamp time.Time) { } txID := tx.ID() - errMsg := fmt.Sprintf( + err := fmt.Errorf( "synchrony bound (%s) is later than staker start time (%s)", minStartTime, startTime, ) b.Mempool.Remove([]*txs.Tx{tx}) - b.Mempool.MarkDropped(txID, errMsg) // cache tx as dropped + b.Mempool.MarkDropped(txID, err) // cache tx as dropped b.txExecutorBackend.Ctx.Log.Debug("dropping tx", - zap.String("reason", errMsg), zap.Stringer("txID", txID), + zap.Error(err), ) } } @@ -319,7 +278,7 @@ func (b *builder) setNextBuildBlockTime() { ctx.Lock.Lock() defer ctx.Lock.Unlock() - if !b.txExecutorBackend.Bootstrapped.GetValue() { + if !b.txExecutorBackend.Bootstrapped.Get() { ctx.Log.Verbo("skipping block timer reset", zap.String("reason", "not bootstrapped"), ) @@ -373,3 +332,86 @@ func (b *builder) notifyBlockReady() { b.txExecutorBackend.Ctx.Log.Debug("dropping message to consensus engine") } } + +// [timestamp] is min(max(now, parent timestamp), next staker change time) +func buildBlock( + builder *builder, + parentID ids.ID, + height uint64, + timestamp time.Time, + forceAdvanceTime bool, + parentState state.Chain, +) (blocks.Block, error) { + // Try rewarding stakers whose staking period ends at the new chain time. + // This is done first to prioritize advancing the timestamp as quickly as + // possible. + stakerTxID, shouldReward, err := getNextStakerToReward(timestamp, parentState) + if err != nil { + return nil, fmt.Errorf("could not find next staker to reward: %w", err) + } + if shouldReward { + rewardValidatorTx, err := builder.txBuilder.NewRewardValidatorTx(stakerTxID) + if err != nil { + return nil, fmt.Errorf("could not build tx to reward staker: %w", err) + } + + return blocks.NewBanffProposalBlock( + timestamp, + parentID, + height, + rewardValidatorTx, + ) + } + + // Clean out the mempool's transactions with invalid timestamps. 
+ builder.dropExpiredStakerTxs(timestamp) + + // If there is no reason to build a block, don't. + if !builder.Mempool.HasTxs() && !forceAdvanceTime { + builder.txExecutorBackend.Ctx.Log.Debug("no pending txs to issue into a block") + return nil, errNoPendingBlocks + } + + // Issue a block with as many transactions as possible. + return blocks.NewBanffStandardBlock( + timestamp, + parentID, + height, + builder.Mempool.PeekTxs(targetBlockSize), + ) +} + +// getNextStakerToReward returns the next staker txID to remove from the staking +// set with a RewardValidatorTx rather than an AdvanceTimeTx. [chainTimestamp] +// is the timestamp of the chain at the time this validator would be getting +// removed and is used to calculate [shouldReward]. +// Returns: +// - [txID] of the next staker to reward +// - [shouldReward] if the txID exists and is ready to be rewarded +// - [err] if something bad happened +func getNextStakerToReward( + chainTimestamp time.Time, + preferredState state.Chain, +) (ids.ID, bool, error) { + if !chainTimestamp.Before(mockable.MaxTime) { + return ids.Empty, false, errEndOfTime + } + + currentStakerIterator, err := preferredState.GetCurrentStakerIterator() + if err != nil { + return ids.Empty, false, err + } + defer currentStakerIterator.Release() + + for currentStakerIterator.Next() { + currentStaker := currentStakerIterator.Value() + priority := currentStaker.Priority + // If the staker is a permissionless staker (not a permissioned subnet + // validator), it's the next staker we will want to remove with a + // RewardValidatorTx rather than an AdvanceTimeTx. 
+ if priority != txs.SubnetPermissionedValidatorCurrentPriority { + return currentStaker.TxID, chainTimestamp.Equal(currentStaker.EndTime), nil + } + } + return ids.Empty, false, nil +} diff --git a/avalanchego/vms/platformvm/blocks/builder/builder_test.go b/avalanchego/vms/platformvm/blocks/builder/builder_test.go index f4be39d3..8f8954a9 100644 --- a/avalanchego/vms/platformvm/blocks/builder/builder_test.go +++ b/avalanchego/vms/platformvm/blocks/builder/builder_test.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package builder import ( + "context" + "errors" "testing" "time" @@ -12,13 +14,25 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/platformvm/blocks" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + + blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" + txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" + txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) +var errTestingDropped = errors.New("testing dropped") + // shows that a locally generated CreateChainTx can be added to mempool and then // removed by inclusion in a block func TestBlockBuilderAddLocalTx(t *testing.T) { @@ -27,33 +41,33 @@ func 
TestBlockBuilderAddLocalTx(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() // add a tx to it tx := getValidTx(env.txBuilder, t) txID := tx.ID() - env.sender.SendAppGossipF = func(b []byte) error { return nil } + env.sender.SendAppGossipF = func(context.Context, []byte) error { + return nil + } err := env.Builder.AddUnverifiedTx(tx) - require.NoError(err, "couldn't add tx to mempool") + require.NoError(err) has := env.mempool.Has(txID) - require.True(has, "valid tx not recorded into mempool") + require.True(has) // show that build block include that tx and removes it from mempool - blkIntf, err := env.Builder.BuildBlock() - require.NoError(err, "couldn't build block out of mempool") + blkIntf, err := env.Builder.BuildBlock(context.Background()) + require.NoError(err) - blk, ok := blkIntf.(*executor.Block) - require.True(ok, "expected standard block") - require.Len(blk.Txs(), 1, "standard block should include a single transaction") - require.Equal(txID, blk.Txs()[0].ID(), "standard block does not include expected transaction") + blk, ok := blkIntf.(*blockexecutor.Block) + require.True(ok) + require.Len(blk.Txs(), 1) + require.Equal(txID, blk.Txs()[0].ID()) has = env.mempool.Has(txID) - require.False(has, "tx included in block is still recorded into mempool") + require.False(has) } func TestPreviouslyDroppedTxsCanBeReAddedToMempool(t *testing.T) { @@ -62,9 +76,7 @@ func TestPreviouslyDroppedTxsCanBeReAddedToMempool(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() // create candidate tx @@ -74,14 +86,14 @@ func TestPreviouslyDroppedTxsCanBeReAddedToMempool(t *testing.T) { // A tx simply added to mempool is obviously not marked as dropped require.NoError(env.mempool.Add(tx)) 
require.True(env.mempool.Has(txID)) - _, isDropped := env.mempool.GetDropReason(txID) - require.False(isDropped) + reason := env.mempool.GetDropReason(txID) + require.NoError(reason) // When a tx is marked as dropped, it is still available to allow re-issuance - env.mempool.MarkDropped(txID, "dropped for testing") + env.mempool.MarkDropped(txID, errTestingDropped) require.True(env.mempool.Has(txID)) // still available - _, isDropped = env.mempool.GetDropReason(txID) - require.True(isDropped) + reason = env.mempool.GetDropReason(txID) + require.ErrorIs(reason, errTestingDropped) // A previously dropped tx, popped then re-added to mempool, // is not dropped anymore @@ -89,19 +101,17 @@ func TestPreviouslyDroppedTxsCanBeReAddedToMempool(t *testing.T) { require.NoError(env.mempool.Add(tx)) require.True(env.mempool.Has(txID)) - _, isDropped = env.mempool.GetDropReason(txID) - require.False(isDropped) + reason = env.mempool.GetDropReason(txID) + require.NoError(reason) } func TestNoErrorOnUnexpectedSetPreferenceDuringBootstrapping(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - env.isBootstrapped.SetValue(false) + env.isBootstrapped.Set(false) env.ctx.Log = logging.NoWarn{} defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(t, shutdownEnvironment(env)) }() env.Builder.SetPreference(ids.GenerateTestID()) // should not panic @@ -278,8 +288,7 @@ func TestGetNextStakerToReward(t *testing.T) { defer ctrl.Finish() state := tt.stateF(ctrl) - b := builder{} - txID, shouldReward, err := b.getNextStakerToReward(tt.timestamp, state) + txID, shouldReward, err := getNextStakerToReward(tt.timestamp, state) if tt.expectedErr != nil { require.Equal(tt.expectedErr, err) return @@ -290,3 +299,387 @@ func TestGetNextStakerToReward(t *testing.T) { }) } } + +func TestBuildBlock(t *testing.T) { + var ( + parentID = ids.GenerateTestID() + height = uint64(1337) + output = &avax.TransferableOutput{ + Asset: avax.Asset{ID: 
ids.GenerateTestID()}, + Out: &secp256k1fx.TransferOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + } + now = time.Now() + parentTimestamp = now.Add(-2 * time.Second) + transactions = []*txs.Tx{{ + Unsigned: &txs.AddValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + Ins: []*avax.TransferableInput{{ + Asset: avax.Asset{ID: ids.GenerateTestID()}, + In: &secp256k1fx.TransferInput{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + }, + }}, + Outs: []*avax.TransferableOutput{output}, + }}, + Validator: txs.Validator{ + // Shouldn't be dropped + Start: uint64(now.Add(2 * txexecutor.SyncBound).Unix()), + }, + StakeOuts: []*avax.TransferableOutput{output}, + RewardsOwner: &secp256k1fx.OutputOwners{ + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + Creds: []verify.Verifiable{ + &secp256k1fx.Credential{ + Sigs: [][secp256k1.SignatureLen]byte{{1, 3, 3, 7}}, + }, + }, + }} + stakerTxID = ids.GenerateTestID() + ) + + type test struct { + name string + builderF func(*gomock.Controller) *builder + timestamp time.Time + forceAdvanceTime bool + parentStateF func(*gomock.Controller) state.Chain + expectedBlkF func(*require.Assertions) blocks.Block + expectedErr error + } + + tests := []test{ + { + name: "should reward", + builderF: func(ctrl *gomock.Controller) *builder { + mempool := mempool.NewMockMempool(ctrl) + + // The tx builder should be asked to build a reward tx + txBuilder := txbuilder.NewMockBuilder(ctrl) + txBuilder.EXPECT().NewRewardValidatorTx(stakerTxID).Return(transactions[0], nil) + + return &builder{ + Mempool: mempool, + txBuilder: txBuilder, + } + }, + timestamp: parentTimestamp, + forceAdvanceTime: false, + parentStateF: func(ctrl *gomock.Controller) state.Chain { + s := state.NewMockChain(ctrl) + + // add current validator that ends at [parentTimestamp] + // i.e. 
it should be rewarded + currentStakerIter := state.NewMockStakerIterator(ctrl) + currentStakerIter.EXPECT().Next().Return(true) + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + TxID: stakerTxID, + Priority: txs.PrimaryNetworkDelegatorCurrentPriority, + EndTime: parentTimestamp, + }) + currentStakerIter.EXPECT().Release() + + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil) + return s + }, + expectedBlkF: func(require *require.Assertions) blocks.Block { + expectedBlk, err := blocks.NewBanffProposalBlock( + parentTimestamp, + parentID, + height, + transactions[0], + ) + require.NoError(err) + return expectedBlk + }, + expectedErr: nil, + }, + { + name: "has decision txs", + builderF: func(ctrl *gomock.Controller) *builder { + mempool := mempool.NewMockMempool(ctrl) + + // There are txs. + mempool.EXPECT().HasStakerTx().Return(false) + mempool.EXPECT().HasTxs().Return(true) + mempool.EXPECT().PeekTxs(targetBlockSize).Return(transactions) + return &builder{ + Mempool: mempool, + } + }, + timestamp: parentTimestamp, + forceAdvanceTime: false, + parentStateF: func(ctrl *gomock.Controller) state.Chain { + s := state.NewMockChain(ctrl) + + // Handle calls in [getNextStakerToReward] + // and [GetNextStakerChangeTime]. + // Next validator change time is in the future. 
+ currentStakerIter := state.NewMockStakerIterator(ctrl) + gomock.InOrder( + // expect calls from [getNextStakerToReward] + currentStakerIter.EXPECT().Next().Return(true), + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + NextTime: now.Add(time.Second), + Priority: txs.PrimaryNetworkDelegatorCurrentPriority, + }), + currentStakerIter.EXPECT().Release(), + ) + + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) + return s + }, + expectedBlkF: func(require *require.Assertions) blocks.Block { + expectedBlk, err := blocks.NewBanffStandardBlock( + parentTimestamp, + parentID, + height, + transactions, + ) + require.NoError(err) + return expectedBlk + }, + expectedErr: nil, + }, + { + name: "no stakers tx", + builderF: func(ctrl *gomock.Controller) *builder { + mempool := mempool.NewMockMempool(ctrl) + + // There are no txs. + mempool.EXPECT().HasStakerTx().Return(false) + mempool.EXPECT().HasTxs().Return(false) + + clk := &mockable.Clock{} + clk.Set(now) + return &builder{ + Mempool: mempool, + txExecutorBackend: &txexecutor.Backend{ + Ctx: &snow.Context{ + Log: logging.NoLog{}, + }, + Clk: clk, + }, + } + }, + timestamp: parentTimestamp, + forceAdvanceTime: false, + parentStateF: func(ctrl *gomock.Controller) state.Chain { + s := state.NewMockChain(ctrl) + + // Handle calls in [getNextStakerToReward] + // and [GetNextStakerChangeTime]. + // Next validator change time is in the future. 
+ currentStakerIter := state.NewMockStakerIterator(ctrl) + gomock.InOrder( + // expect calls from [getNextStakerToReward] + currentStakerIter.EXPECT().Next().Return(true), + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + NextTime: now.Add(time.Second), + Priority: txs.PrimaryNetworkDelegatorCurrentPriority, + }), + currentStakerIter.EXPECT().Release(), + ) + + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) + return s + }, + expectedBlkF: func(*require.Assertions) blocks.Block { + return nil + }, + expectedErr: errNoPendingBlocks, + }, + { + name: "should advance time", + builderF: func(ctrl *gomock.Controller) *builder { + mempool := mempool.NewMockMempool(ctrl) + + // There are no txs. + mempool.EXPECT().HasStakerTx().Return(false) + mempool.EXPECT().HasTxs().Return(false) + mempool.EXPECT().PeekTxs(targetBlockSize).Return(nil) + + clk := &mockable.Clock{} + clk.Set(now) + return &builder{ + Mempool: mempool, + txExecutorBackend: &txexecutor.Backend{ + Clk: clk, + }, + } + }, + timestamp: now.Add(-1 * time.Second), + forceAdvanceTime: true, + parentStateF: func(ctrl *gomock.Controller) state.Chain { + s := state.NewMockChain(ctrl) + + // add current validator that ends at [now] - 1 second. + // That is, it ends in the past but after the current chain time. + // Handle calls in [getNextStakerToReward] + // and [GetNextStakerChangeTime] + // when determining whether to issue a reward tx. 
+ currentStakerIter := state.NewMockStakerIterator(ctrl) + gomock.InOrder( + // expect calls from [getNextStakerToReward] + currentStakerIter.EXPECT().Next().Return(true), + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + NextTime: now.Add(-1 * time.Second), + Priority: txs.PrimaryNetworkDelegatorCurrentPriority, + }), + currentStakerIter.EXPECT().Release(), + ) + + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) + return s + }, + expectedBlkF: func(require *require.Assertions) blocks.Block { + expectedBlk, err := blocks.NewBanffStandardBlock( + now.Add(-1*time.Second), // note the advanced time + parentID, + height, + nil, // empty block to advance time + ) + require.NoError(err) + return expectedBlk + }, + expectedErr: nil, + }, + { + name: "has a staker tx no force", + builderF: func(ctrl *gomock.Controller) *builder { + mempool := mempool.NewMockMempool(ctrl) + + // There is a tx. + mempool.EXPECT().HasStakerTx().Return(false) + mempool.EXPECT().HasTxs().Return(true) + mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) + + clk := &mockable.Clock{} + clk.Set(now) + return &builder{ + Mempool: mempool, + txExecutorBackend: &txexecutor.Backend{ + Clk: clk, + }, + } + }, + timestamp: parentTimestamp, + forceAdvanceTime: false, + parentStateF: func(ctrl *gomock.Controller) state.Chain { + s := state.NewMockChain(ctrl) + + // Handle calls in [getNextStakerToReward] + // and [GetNextStakerChangeTime]. + // Next validator change time is in the future. 
+ currentStakerIter := state.NewMockStakerIterator(ctrl) + gomock.InOrder( + // expect calls from [getNextStakerToReward] + currentStakerIter.EXPECT().Next().Return(true), + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + NextTime: now.Add(time.Second), + Priority: txs.PrimaryNetworkDelegatorCurrentPriority, + }), + currentStakerIter.EXPECT().Release(), + ) + + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) + return s + }, + expectedBlkF: func(require *require.Assertions) blocks.Block { + expectedBlk, err := blocks.NewBanffStandardBlock( + parentTimestamp, + parentID, + height, + []*txs.Tx{transactions[0]}, + ) + require.NoError(err) + return expectedBlk + }, + expectedErr: nil, + }, + { + name: "has a staker tx with force", + builderF: func(ctrl *gomock.Controller) *builder { + mempool := mempool.NewMockMempool(ctrl) + + // There are no decision txs + // There is a staker tx. + mempool.EXPECT().HasStakerTx().Return(false) + mempool.EXPECT().HasTxs().Return(true) + mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) + + clk := &mockable.Clock{} + clk.Set(now) + return &builder{ + Mempool: mempool, + txExecutorBackend: &txexecutor.Backend{ + Clk: clk, + }, + } + }, + timestamp: parentTimestamp, + forceAdvanceTime: true, + parentStateF: func(ctrl *gomock.Controller) state.Chain { + s := state.NewMockChain(ctrl) + + // Handle calls in [getNextStakerToReward] + // and [GetNextStakerChangeTime]. + // Next validator change time is in the future. 
+ currentStakerIter := state.NewMockStakerIterator(ctrl) + gomock.InOrder( + // expect calls from [getNextStakerToReward] + currentStakerIter.EXPECT().Next().Return(true), + currentStakerIter.EXPECT().Value().Return(&state.Staker{ + NextTime: now.Add(time.Second), + Priority: txs.PrimaryNetworkDelegatorCurrentPriority, + }), + currentStakerIter.EXPECT().Release(), + ) + + s.EXPECT().GetCurrentStakerIterator().Return(currentStakerIter, nil).Times(1) + return s + }, + expectedBlkF: func(require *require.Assertions) blocks.Block { + expectedBlk, err := blocks.NewBanffStandardBlock( + parentTimestamp, + parentID, + height, + []*txs.Tx{transactions[0]}, + ) + require.NoError(err) + return expectedBlk + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + gotBlk, err := buildBlock( + tt.builderF(ctrl), + parentID, + height, + tt.timestamp, + tt.forceAdvanceTime, + tt.parentStateF(ctrl), + ) + if tt.expectedErr != nil { + require.ErrorIs(err, tt.expectedErr) + return + } + require.NoError(err) + require.EqualValues(tt.expectedBlkF(require), gotBlk) + }) + } +} diff --git a/avalanchego/vms/platformvm/blocks/builder/helpers_test.go b/avalanchego/vms/platformvm/blocks/builder/helpers_test.go index f201ec86..73ac66ed 100644 --- a/avalanchego/vms/platformvm/blocks/builder/helpers_test.go +++ b/avalanchego/vms/platformvm/blocks/builder/helpers_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package builder import ( + "context" "errors" "fmt" "testing" @@ -26,7 +27,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" @@ -69,7 +70,7 @@ var ( defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) defaultMinValidatorStake = 5 * units.MilliAvax defaultBalance = 100 * defaultMinValidatorStake - preFundedKeys = crypto.BuildTestKeys() + preFundedKeys = secp256k1.TestKeys() avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultTxFee = uint64(100) xChainID = ids.Empty.Prefix(0) @@ -77,6 +78,9 @@ var ( testSubnet1 *txs.Tx testSubnet1ControlKeys = preFundedKeys[0:3] + + errMissingPrimaryValidators = errors.New("missing primary validator set") + errMissing = errors.New("missing") ) type mutableSharedMemory struct { @@ -89,7 +93,7 @@ type environment struct { mempool mempool.Mempool sender *common.SenderTest - isBootstrapped *utils.AtomicBool + isBootstrapped *utils.Atomic[bool] config *config.Config clk *mockable.Clock baseDB *versiondb.Database @@ -104,26 +108,13 @@ type environment struct { backend txexecutor.Backend } -// TODO snLookup currently duplicated in vm_test.go. 
Consider removing duplication -type snLookup struct { - chainsToSubnet map[ids.ID]ids.ID -} - -func (sn *snLookup) SubnetID(chainID ids.ID) (ids.ID, error) { - subnetID, ok := sn.chainsToSubnet[chainID] - if !ok { - return ids.ID{}, errors.New("") - } - return subnetID, nil -} - func newEnvironment(t *testing.T) *environment { res := &environment{ - isBootstrapped: &utils.AtomicBool{}, + isBootstrapped: &utils.Atomic[bool]{}, config: defaultConfig(), clk: defaultClock(), } - res.isBootstrapped.SetValue(true) + res.isBootstrapped.Set(true) baseDBManager := manager.NewMemDB(version.Semantic1_0_0) res.baseDB = versiondb.New(baseDBManager.Current().Database) @@ -132,14 +123,14 @@ func newEnvironment(t *testing.T) *environment { res.ctx.Lock.Lock() defer res.ctx.Lock.Unlock() - res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.GetValue()) + res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.Get()) rewardsCalc := reward.NewCalculator(res.config.RewardConfig) res.state = defaultState(res.config, res.ctx, res.baseDB, rewardsCalc) res.atomicUTXOs = avax.NewAtomicUTXOManager(res.ctx.SharedMemory, txs.Codec) res.uptimes = uptime.NewManager(res.state) - res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.state, res.fx) + res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.fx) res.txBuilder = txbuilder.New( res.ctx, @@ -173,7 +164,7 @@ func newEnvironment(t *testing.T) *environment { ) res.sender = &common.SenderTest{T: t} - metrics, err := metrics.New("", registerer, res.config.WhitelistedSubnets) + metrics, err := metrics.New("", registerer, res.config.TrackedSubnets) if err != nil { panic(fmt.Errorf("failed to create metrics: %w", err)) } @@ -215,7 +206,7 @@ func addSubnet(env *environment) { preFundedKeys[1].PublicKey().Address(), preFundedKeys[2].PublicKey().Address(), }, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, preFundedKeys[0].PublicKey().Address(), ) if err != nil { @@ -240,7 +231,9 @@ 
func addSubnet(env *environment) { } stateDiff.AddTx(testSubnet1, status.Committed) - stateDiff.Apply(env.state) + if err := stateDiff.Apply(env.state); err != nil { + panic(err) + } } func defaultState( @@ -258,6 +251,7 @@ func defaultState( ctx, metrics.Noop, rewards, + &utils.Atomic[bool]{}, ) if err != nil { panic(err) @@ -280,6 +274,7 @@ func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { ctx := snow.DefaultContextTest() ctx.NetworkID = 10 ctx.XChainID = xChainID + ctx.CChainID = cChainID ctx.AVAXAssetID = avaxAssetID atomicDB := prefixdb.New([]byte{1}, db) @@ -290,11 +285,17 @@ func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { } ctx.SharedMemory = msm - ctx.SNLookup = &snLookup{ - chainsToSubnet: map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, + ctx.ValidatorState = &validators.TestState{ + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + xChainID: constants.PrimaryNetworkID, + cChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errMissing + } + return subnetID, nil }, } @@ -302,10 +303,13 @@ func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { } func defaultConfig() *config.Config { + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) return &config.Config{ - Chains: chains.MockManager{}, + Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: validators.NewManager(), + Validators: vdrs, TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, CreateBlockchainTxFee: 100 * defaultTxFee, @@ -322,11 +326,12 @@ func defaultConfig() *config.Config { }, ApricotPhase3Time: defaultValidateEndTime, ApricotPhase5Time: 
defaultValidateEndTime, - BanffTime: mockable.MaxTime, + BanffTime: time.Time{}, // neglecting fork ordering this for package tests } } func defaultClock() *mockable.Clock { + // set time after Banff fork (and before default nextStakerTime) clk := mockable.Clock{} clk.Set(defaultGenesisTime) return &clk @@ -338,10 +343,21 @@ type fxVMInt struct { log logging.Logger } -func (fvi *fxVMInt) CodecRegistry() codec.Registry { return fvi.registry } -func (fvi *fxVMInt) Clock() *mockable.Clock { return fvi.clk } -func (fvi *fxVMInt) Logger() logging.Logger { return fvi.log } -func (fvi *fxVMInt) EthVerificationEnabled() bool { return false } +func (fvi *fxVMInt) CodecRegistry() codec.Registry { + return fvi.registry +} + +func (fvi *fxVMInt) Clock() *mockable.Clock { + return fvi.clk +} + +func (fvi *fxVMInt) Logger() logging.Logger { + return fvi.log +} + +func (fvi *fxVMInt) EthVerificationEnabled() bool { + return false +} func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { fxVMInt := &fxVMInt{ @@ -415,7 +431,7 @@ func buildGenesisTest(ctx *snow.Context) []byte { buildGenesisResponse := api.BuildGenesisReply{} platformvmSS := api.StaticService{} if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { - panic(fmt.Errorf("problem while building platform chain's genesis state: %v", err)) + panic(fmt.Errorf("problem while building platform chain's genesis state: %w", err)) } genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) @@ -427,19 +443,19 @@ func buildGenesisTest(ctx *snow.Context) []byte { } func shutdownEnvironment(env *environment) error { - if env.isBootstrapped.GetValue() { - primaryValidatorSet, exist := env.config.Validators.GetValidators(constants.PrimaryNetworkID) + if env.isBootstrapped.Get() { + primaryValidatorSet, exist := env.config.Validators.Get(constants.PrimaryNetworkID) if !exist { - return errors.New("no default subnet 
validators") + return errMissingPrimaryValidators } primaryValidators := primaryValidatorSet.List() validatorIDs := make([]ids.NodeID, len(primaryValidators)) for i, vdr := range primaryValidators { - validatorIDs[i] = vdr.ID() + validatorIDs[i] = vdr.NodeID } - if err := env.uptimes.Shutdown(validatorIDs); err != nil { + if err := env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { return err } if err := env.state.Commit(); err != nil { diff --git a/avalanchego/vms/platformvm/blocks/builder/network.go b/avalanchego/vms/platformvm/blocks/builder/network.go index cb023e81..e1768a13 100644 --- a/avalanchego/vms/platformvm/blocks/builder/network.go +++ b/avalanchego/vms/platformvm/blocks/builder/network.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // TODO: consider moving the network implementation to a separate package @@ -6,6 +6,7 @@ package builder import ( + "context" "fmt" "time" @@ -15,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/vms/platformvm/message" + "github.com/ava-labs/avalanchego/vms/components/message" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -25,7 +26,7 @@ const ( recentCacheSize = 512 ) -var _ Network = &network{} +var _ Network = (*network)(nil) type Network interface { common.AppHandler @@ -40,7 +41,7 @@ type network struct { // gossip related attributes appSender common.AppSender - recentTxs *cache.LRU + recentTxs *cache.LRU[ids.ID, struct{}] } func NewNetwork( @@ -52,29 +53,47 @@ func NewNetwork( ctx: ctx, blkBuilder: blkBuilder, appSender: appSender, - recentTxs: &cache.LRU{Size: recentCacheSize}, + recentTxs: &cache.LRU[ids.ID, struct{}]{Size: recentCacheSize}, } } -func (n *network) AppRequestFailed(nodeID 
ids.NodeID, requestID uint32) error { +func (*network) CrossChainAppRequestFailed(context.Context, ids.ID, uint32) error { // This VM currently only supports gossiping of txs, so there are no // requests. return nil } -func (n *network) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, msgBytes []byte) error { +func (*network) CrossChainAppRequest(context.Context, ids.ID, uint32, time.Time, []byte) error { // This VM currently only supports gossiping of txs, so there are no // requests. return nil } -func (n *network) AppResponse(nodeID ids.NodeID, requestID uint32, msgBytes []byte) error { +func (*network) CrossChainAppResponse(context.Context, ids.ID, uint32, []byte) error { // This VM currently only supports gossiping of txs, so there are no // requests. return nil } -func (n *network) AppGossip(nodeID ids.NodeID, msgBytes []byte) error { +func (*network) AppRequestFailed(context.Context, ids.NodeID, uint32) error { + // This VM currently only supports gossiping of txs, so there are no + // requests. + return nil +} + +func (*network) AppRequest(context.Context, ids.NodeID, uint32, time.Time, []byte) error { + // This VM currently only supports gossiping of txs, so there are no + // requests. + return nil +} + +func (*network) AppResponse(context.Context, ids.NodeID, uint32, []byte) error { + // This VM currently only supports gossiping of txs, so there are no + // requests. 
+ return nil +} + +func (n *network) AppGossip(_ context.Context, nodeID ids.NodeID, msgBytes []byte) error { n.ctx.Log.Debug("called AppGossip message handler", zap.Stringer("nodeID", nodeID), zap.Int("messageLen", len(msgBytes)), @@ -113,13 +132,13 @@ func (n *network) AppGossip(nodeID ids.NodeID, msgBytes []byte) error { n.ctx.Lock.Lock() defer n.ctx.Lock.Unlock() - if _, dropped := n.blkBuilder.GetDropReason(txID); dropped { + if reason := n.blkBuilder.GetDropReason(txID); reason != nil { // If the tx is being dropped - just ignore it return nil } // add to mempool - if err = n.blkBuilder.AddUnverifiedTx(tx); err != nil { + if err := n.blkBuilder.AddUnverifiedTx(tx); err != nil { n.ctx.Log.Debug("tx failed verification", zap.Stringer("nodeID", nodeID), zap.Error(err), @@ -134,7 +153,7 @@ func (n *network) GossipTx(tx *txs.Tx) error { if _, has := n.recentTxs.Get(txID); has { return nil } - n.recentTxs.Put(txID, nil) + n.recentTxs.Put(txID, struct{}{}) n.ctx.Log.Debug("gossiping tx", zap.Stringer("txID", txID), @@ -145,5 +164,5 @@ func (n *network) GossipTx(tx *txs.Tx) error { if err != nil { return fmt.Errorf("GossipTx: failed to build Tx message: %w", err) } - return n.appSender.SendAppGossip(msgBytes) + return n.appSender.SendAppGossip(context.TODO(), msgBytes) } diff --git a/avalanchego/vms/platformvm/blocks/builder/network_test.go b/avalanchego/vms/platformvm/blocks/builder/network_test.go index 00c6d6df..f29f8061 100644 --- a/avalanchego/vms/platformvm/blocks/builder/network_test.go +++ b/avalanchego/vms/platformvm/blocks/builder/network_test.go @@ -1,17 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package builder import ( + "context" "testing" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/avalanchego/vms/platformvm/message" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/components/message" "github.com/ava-labs/avalanchego/vms/platformvm/txs" txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" @@ -24,12 +25,10 @@ func getValidTx(txBuilder txbuilder.Builder, t *testing.T) *txs.Tx { constants.AVMID, nil, "chain name", - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) return tx } @@ -44,7 +43,7 @@ func TestMempoolValidGossipedTxIsAddedToMempool(t *testing.T) { }() var gossipedBytes []byte - env.sender.SendAppGossipF = func(b []byte) error { + env.sender.SendAppGossipF = func(_ context.Context, b []byte) error { gossipedBytes = b return nil } @@ -61,8 +60,8 @@ func TestMempoolValidGossipedTxIsAddedToMempool(t *testing.T) { // Free lock because [AppGossip] waits for the context lock env.ctx.Lock.Unlock() // show that unknown tx is added to mempool - err = env.AppGossip(nodeID, msgBytes) - require.NoError(err, "error in reception of gossiped tx") + err = env.AppGossip(context.Background(), nodeID, msgBytes) + require.NoError(err) require.True(env.Builder.Has(txID)) // Grab lock back env.ctx.Lock.Lock() @@ -72,13 +71,11 @@ func TestMempoolValidGossipedTxIsAddedToMempool(t *testing.T) { // show gossiped bytes can be decoded to the original tx replyIntf, err := message.Parse(gossipedBytes) - require.NoError(err, "failed to parse gossip") - - reply, ok := replyIntf.(*message.Tx) - require.True(ok, "unknown message type") + require.NoError(err) + reply := 
replyIntf.(*message.Tx) retrivedTx, err := txs.Parse(txs.Codec, reply.Tx) - require.NoError(err, "failed parsing tx") + require.NoError(err) require.Equal(txID, retrivedTx.ID()) } @@ -96,7 +93,7 @@ func TestMempoolInvalidGossipedTxIsNotAddedToMempool(t *testing.T) { // create a tx and mark as invalid tx := getValidTx(env.txBuilder, t) txID := tx.ID() - env.Builder.MarkDropped(txID, "dropped for testing") + env.Builder.MarkDropped(txID, errTestingDropped) // show that the invalid tx is not requested nodeID := ids.GenerateTestNodeID() @@ -104,9 +101,9 @@ func TestMempoolInvalidGossipedTxIsNotAddedToMempool(t *testing.T) { msgBytes, err := message.Build(&msg) require.NoError(err) env.ctx.Lock.Unlock() - err = env.AppGossip(nodeID, msgBytes) + err = env.AppGossip(context.Background(), nodeID, msgBytes) env.ctx.Lock.Lock() - require.NoError(err, "error in reception of gossiped tx") + require.NoError(err) require.False(env.Builder.Has(txID)) } @@ -121,7 +118,7 @@ func TestMempoolNewLocaTxIsGossiped(t *testing.T) { }() var gossipedBytes []byte - env.sender.SendAppGossipF = func(b []byte) error { + env.sender.SendAppGossipF = func(_ context.Context, b []byte) error { gossipedBytes = b return nil } @@ -131,18 +128,16 @@ func TestMempoolNewLocaTxIsGossiped(t *testing.T) { txID := tx.ID() err := env.Builder.AddUnverifiedTx(tx) - require.NoError(err, "couldn't add tx to mempool") + require.NoError(err) require.True(gossipedBytes != nil) // show gossiped bytes can be decoded to the original tx replyIntf, err := message.Parse(gossipedBytes) - require.NoError(err, "failed to parse gossip") - - reply, ok := replyIntf.(*message.Tx) - require.True(ok, "unknown message type") + require.NoError(err) + reply := replyIntf.(*message.Tx) retrivedTx, err := txs.Parse(txs.Codec, reply.Tx) - require.NoError(err, "failed parsing tx") + require.NoError(err) require.Equal(txID, retrivedTx.ID()) @@ -150,7 +145,7 @@ func TestMempoolNewLocaTxIsGossiped(t *testing.T) { gossipedBytes = nil 
env.Builder.Remove([]*txs.Tx{tx}) err = env.Builder.Add(tx) - require.NoError(err, "could not reintroduce tx to mempool") + require.NoError(err) require.True(gossipedBytes == nil) } diff --git a/avalanchego/vms/platformvm/blocks/builder/standard_block_test.go b/avalanchego/vms/platformvm/blocks/builder/standard_block_test.go index 140d2192..8fa9c716 100644 --- a/avalanchego/vms/platformvm/blocks/builder/standard_block_test.go +++ b/avalanchego/vms/platformvm/blocks/builder/standard_block_test.go @@ -1,18 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package builder import ( + "context" "testing" - "time" "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -25,9 +25,7 @@ func TestAtomicTxImports(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() utxoID := avax.UTXOID{ @@ -70,19 +68,17 @@ func TestAtomicTxImports(t *testing.T) { tx, err := env.txBuilder.NewImportTx( env.ctx.XChainID, recipientKey.PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{recipientKey}, + []*secp256k1.PrivateKey{recipientKey}, ids.ShortEmpty, // change addr ) require.NoError(err) - env.state.SetTimestamp(env.config.ApricotPhase5Time.Add(100 * time.Second)) - require.NoError(env.Builder.Add(tx)) - b, err := env.Builder.BuildBlock() + b, err := env.Builder.BuildBlock(context.Background()) require.NoError(err) // Test 
multiple verify calls work - require.NoError(b.Verify()) - require.NoError(b.Accept()) + require.NoError(b.Verify(context.Background())) + require.NoError(b.Accept(context.Background())) _, txStatus, err := env.state.GetTx(tx.ID()) require.NoError(err) // Ensure transaction is in the committed state diff --git a/avalanchego/vms/platformvm/blocks/codec.go b/avalanchego/vms/platformvm/blocks/codec.go index cd019c72..ac0f42da 100644 --- a/avalanchego/vms/platformvm/blocks/codec.go +++ b/avalanchego/vms/platformvm/blocks/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks diff --git a/avalanchego/vms/platformvm/blocks/commit_block.go b/avalanchego/vms/platformvm/blocks/commit_block.go index 543af466..5247c45d 100644 --- a/avalanchego/vms/platformvm/blocks/commit_block.go +++ b/avalanchego/vms/platformvm/blocks/commit_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package blocks @@ -12,8 +12,8 @@ import ( ) var ( - _ BanffBlock = &BanffCommitBlock{} - _ Block = &ApricotCommitBlock{} + _ BanffBlock = (*BanffCommitBlock)(nil) + _ Block = (*ApricotCommitBlock)(nil) ) type BanffCommitBlock struct { @@ -21,8 +21,13 @@ type BanffCommitBlock struct { ApricotCommitBlock `serialize:"true"` } -func (b *BanffCommitBlock) Timestamp() time.Time { return time.Unix(int64(b.Time), 0) } -func (b *BanffCommitBlock) Visit(v Visitor) error { return v.BanffCommitBlock(b) } +func (b *BanffCommitBlock) Timestamp() time.Time { + return time.Unix(int64(b.Time), 0) +} + +func (b *BanffCommitBlock) Visit(v Visitor) error { + return v.BanffCommitBlock(b) +} func NewBanffCommitBlock( timestamp time.Time, @@ -50,10 +55,15 @@ func (b *ApricotCommitBlock) initialize(bytes []byte) error { return nil } -func (*ApricotCommitBlock) InitCtx(ctx *snow.Context) {} +func (*ApricotCommitBlock) InitCtx(*snow.Context) {} -func (*ApricotCommitBlock) Txs() []*txs.Tx { return nil } -func (b *ApricotCommitBlock) Visit(v Visitor) error { return v.ApricotCommitBlock(b) } +func (*ApricotCommitBlock) Txs() []*txs.Tx { + return nil +} + +func (b *ApricotCommitBlock) Visit(v Visitor) error { + return v.ApricotCommitBlock(b) +} func NewApricotCommitBlock( parentID ids.ID, diff --git a/avalanchego/vms/platformvm/blocks/commit_block_test.go b/avalanchego/vms/platformvm/blocks/commit_block_test.go index 6a3e428b..24023c40 100644 --- a/avalanchego/vms/platformvm/blocks/commit_block_test.go +++ b/avalanchego/vms/platformvm/blocks/commit_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package blocks @@ -26,7 +26,7 @@ func TestNewBanffCommitBlock(t *testing.T) { require.NoError(err) // Make sure the block is initialized - require.NotNil(blk.Bytes()) + require.NotEmpty(blk.Bytes()) require.Equal(timestamp, blk.Timestamp()) require.Equal(parentID, blk.Parent()) @@ -45,7 +45,7 @@ func TestNewApricotCommitBlock(t *testing.T) { require.NoError(err) // Make sure the block is initialized - require.NotNil(blk.Bytes()) + require.NotEmpty(blk.Bytes()) require.Equal(parentID, blk.Parent()) require.Equal(height, blk.Height()) diff --git a/avalanchego/vms/platformvm/blocks/common_block.go b/avalanchego/vms/platformvm/blocks/common_block.go index 53e52d26..1a48e6c7 100644 --- a/avalanchego/vms/platformvm/blocks/common_block.go +++ b/avalanchego/vms/platformvm/blocks/common_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks @@ -25,7 +25,18 @@ func (b *CommonBlock) initialize(bytes []byte) { b.bytes = bytes } -func (b *CommonBlock) ID() ids.ID { return b.id } -func (b *CommonBlock) Parent() ids.ID { return b.PrntID } -func (b *CommonBlock) Bytes() []byte { return b.bytes } -func (b *CommonBlock) Height() uint64 { return b.Hght } +func (b *CommonBlock) ID() ids.ID { + return b.id +} + +func (b *CommonBlock) Parent() ids.ID { + return b.PrntID +} + +func (b *CommonBlock) Bytes() []byte { + return b.bytes +} + +func (b *CommonBlock) Height() uint64 { + return b.Hght +} diff --git a/avalanchego/vms/platformvm/blocks/executor/acceptor.go b/avalanchego/vms/platformvm/blocks/executor/acceptor.go index 65a82954..5aaa1d82 100644 --- a/avalanchego/vms/platformvm/blocks/executor/acceptor.go +++ b/avalanchego/vms/platformvm/blocks/executor/acceptor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package executor @@ -17,7 +17,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" ) -var _ blocks.Visitor = &acceptor{} +var _ blocks.Visitor = (*acceptor)(nil) // acceptor handles the logic for accepting a block. // All errors returned by this struct are fatal and should result in the chain @@ -26,7 +26,7 @@ type acceptor struct { *backend metrics metrics.Metrics recentlyAccepted window.Window[ids.ID] - bootstrapped *utils.AtomicBool + bootstrapped *utils.Atomic[bool] } func (a *acceptor) BanffAbortBlock(b *blocks.BanffAbortBlock) error { @@ -149,7 +149,9 @@ func (a *acceptor) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { } // Update the state to reflect the changes made in [onAcceptState]. - blkState.onAcceptState.Apply(a.state) + if err := blkState.onAcceptState.Apply(a.state); err != nil { + return err + } defer a.state.Abort() batch, err := a.state.CommitBatch() @@ -180,7 +182,7 @@ func (a *acceptor) abortBlock(b blocks.Block) error { return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) } - if a.bootstrapped.GetValue() { + if a.bootstrapped.Get() { if parentState.initiallyPreferCommit { a.metrics.MarkOptionVoteLost() } else { @@ -198,7 +200,7 @@ func (a *acceptor) commitBlock(b blocks.Block) error { return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) } - if a.bootstrapped.GetValue() { + if a.bootstrapped.Get() { if parentState.initiallyPreferCommit { a.metrics.MarkOptionVoteWon() } else { @@ -233,7 +235,9 @@ func (a *acceptor) optionBlock(b, parent blocks.Block) error { if !ok { return fmt.Errorf("couldn't find state of block %s", blkID) } - blkState.onAcceptState.Apply(a.state) + if err := blkState.onAcceptState.Apply(a.state); err != nil { + return err + } return a.state.Commit() } @@ -271,7 +275,9 @@ func (a *acceptor) standardBlock(b blocks.Block) error { } // Update the state to reflect the changes made in [onAcceptState]. 
- blkState.onAcceptState.Apply(a.state) + if err := blkState.onAcceptState.Apply(a.state); err != nil { + return err + } defer a.state.Abort() batch, err := a.state.CommitBatch() diff --git a/avalanchego/vms/platformvm/blocks/executor/acceptor_test.go b/avalanchego/vms/platformvm/blocks/executor/acceptor_test.go index fecfefd2..583e3ee6 100644 --- a/avalanchego/vms/platformvm/blocks/executor/acceptor_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/acceptor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -172,6 +172,7 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { sharedMemory := atomic.NewMockSharedMemory(ctrl) parentID := ids.GenerateTestID() + clk := &mockable.Clock{} acceptor := &acceptor{ backend: &backend{ lastAccepted: parentID, @@ -184,13 +185,14 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { }, metrics: metrics.Noop, recentlyAccepted: window.New[ids.ID](window.Config{ - Clock: &mockable.Clock{}, + Clock: clk, MaxSize: 1, TTL: time.Hour, }), } - blk, err := blocks.NewApricotStandardBlock( + blk, err := blocks.NewBanffStandardBlock( + clk.Time(), parentID, 1, []*txs.Tx{ @@ -211,7 +213,7 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { s.EXPECT().SetHeight(blk.Height()).Times(1) s.EXPECT().AddStatelessBlock(blk, choices.Accepted).Times(1) - err = acceptor.ApricotStandardBlock(blk) + err = acceptor.BanffStandardBlock(blk) require.Error(err, "should fail because the block isn't in the state map") // Set [blk]'s state in the map as though it had been verified. 
@@ -223,7 +225,9 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { onAcceptState: onAcceptState, atomicRequests: atomicRequests, standardBlockState: standardBlockState{ - onAcceptFunc: func() { calledOnAcceptFunc = true }, + onAcceptFunc: func() { + calledOnAcceptFunc = true + }, }, } // Give [blk] a child. @@ -249,7 +253,7 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { onAcceptState.EXPECT().Apply(s).Times(1) sharedMemory.EXPECT().Apply(atomicRequests, batch).Return(nil).Times(1) - err = acceptor.ApricotStandardBlock(blk) + err = acceptor.BanffStandardBlock(blk) require.NoError(err) require.True(calledOnAcceptFunc) require.Equal(blk.ID(), acceptor.backend.lastAccepted) @@ -280,7 +284,7 @@ func TestAcceptorVisitCommitBlock(t *testing.T) { MaxSize: 1, TTL: time.Hour, }), - bootstrapped: &utils.AtomicBool{}, + bootstrapped: &utils.Atomic[bool]{}, } blk, err := blocks.NewApricotCommitBlock(parentID, 1 /*height*/) @@ -370,7 +374,7 @@ func TestAcceptorVisitAbortBlock(t *testing.T) { MaxSize: 1, TTL: time.Hour, }), - bootstrapped: &utils.AtomicBool{}, + bootstrapped: &utils.Atomic[bool]{}, } blk, err := blocks.NewApricotAbortBlock(parentID, 1 /*height*/) diff --git a/avalanchego/vms/platformvm/blocks/executor/backend.go b/avalanchego/vms/platformvm/blocks/executor/backend.go index 5402da18..56619f3e 100644 --- a/avalanchego/vms/platformvm/blocks/executor/backend.go +++ b/avalanchego/vms/platformvm/blocks/executor/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor diff --git a/avalanchego/vms/platformvm/blocks/executor/backend_test.go b/avalanchego/vms/platformvm/blocks/executor/backend_test.go index 0d7ce90d..63d30873 100644 --- a/avalanchego/vms/platformvm/blocks/executor/backend_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/backend_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -91,7 +91,7 @@ func TestBackendGetBlock(t *testing.T) { { // Case: block is in the map. gotBlk, err := b.GetBlock(blkID1) - require.Nil(err) + require.NoError(err) require.Equal(statelessBlk, gotBlk) } @@ -150,13 +150,12 @@ func TestGetTimestamp(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() backend := tt.backendF(ctrl) gotTimestamp := backend.getTimestamp(blkID) - require.Equal(tt.expectedTimestamp, gotTimestamp) + require.Equal(t, tt.expectedTimestamp, gotTimestamp) }) } } diff --git a/avalanchego/vms/platformvm/blocks/executor/block.go b/avalanchego/vms/platformvm/blocks/executor/block.go index b87131ca..ce6d1f06 100644 --- a/avalanchego/vms/platformvm/blocks/executor/block.go +++ b/avalanchego/vms/platformvm/blocks/executor/block.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( + "context" "fmt" "time" @@ -16,8 +17,8 @@ import ( ) var ( - _ snowman.Block = &Block{} - _ snowman.OracleBlock = &Block{} + _ snowman.Block = (*Block)(nil) + _ snowman.OracleBlock = (*Block)(nil) ) // Exported for testing in platformvm package. 
@@ -26,7 +27,7 @@ type Block struct { manager *manager } -func (b *Block) Verify() error { +func (b *Block) Verify(context.Context) error { blkID := b.ID() if _, ok := b.manager.blkIDToState[blkID]; ok { // This block has already been verified. @@ -36,11 +37,11 @@ func (b *Block) Verify() error { return b.Visit(b.manager.verifier) } -func (b *Block) Accept() error { +func (b *Block) Accept(context.Context) error { return b.Visit(b.manager.acceptor) } -func (b *Block) Reject() error { +func (b *Block) Reject(context.Context) error { return b.Visit(b.manager.rejector) } @@ -81,7 +82,7 @@ func (b *Block) Timestamp() time.Time { return b.manager.getTimestamp(b.ID()) } -func (b *Block) Options() ([2]snowman.Block, error) { +func (b *Block) Options(context.Context) ([2]snowman.Block, error) { options := options{} if err := b.Block.Visit(&options); err != nil { return [2]snowman.Block{}, err diff --git a/avalanchego/vms/platformvm/blocks/executor/block_state.go b/avalanchego/vms/platformvm/blocks/executor/block_state.go index 074a1088..ced2560b 100644 --- a/avalanchego/vms/platformvm/blocks/executor/block_state.go +++ b/avalanchego/vms/platformvm/blocks/executor/block_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -8,13 +8,14 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" "github.com/ava-labs/avalanchego/vms/platformvm/state" ) type standardBlockState struct { onAcceptFunc func() - inputs ids.Set + inputs set.Set[ids.ID] } type proposalBlockState struct { diff --git a/avalanchego/vms/platformvm/blocks/executor/block_test.go b/avalanchego/vms/platformvm/blocks/executor/block_test.go index 25a32179..d27e7e3d 100644 --- a/avalanchego/vms/platformvm/blocks/executor/block_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/block_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( + "context" "testing" "github.com/golang/mock/gomock" @@ -115,12 +116,11 @@ func TestStatus(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() blk := tt.blockF(ctrl) - require.Equal(tt.expectedStatus, blk.Status()) + require.Equal(t, tt.expectedStatus, blk.Status()) }) } } @@ -245,7 +245,7 @@ func TestBlockOptions(t *testing.T) { defer ctrl.Finish() blk := tt.blkF() - options, err := blk.Options() + options, err := blk.Options(context.Background()) if tt.expectedErr != nil { require.ErrorIs(err, tt.expectedErr) return diff --git a/avalanchego/vms/platformvm/blocks/executor/helpers_test.go b/avalanchego/vms/platformvm/blocks/executor/helpers_test.go index e4c5b38e..b6497760 100644 --- a/avalanchego/vms/platformvm/blocks/executor/helpers_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/helpers_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package executor import ( + "context" "errors" "fmt" "testing" @@ -27,7 +28,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" @@ -66,7 +67,7 @@ const ( ) var ( - _ mempool.BlockTimer = &environment{} + _ mempool.BlockTimer = (*environment)(nil) defaultMinStakingDuration = 24 * time.Hour defaultMaxStakingDuration = 365 * 24 * time.Hour @@ -75,7 +76,7 @@ var ( defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) defaultMinValidatorStake = 5 * units.MilliAvax defaultBalance = 100 * defaultMinValidatorStake - preFundedKeys = crypto.BuildTestKeys() + preFundedKeys = secp256k1.TestKeys() avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultTxFee = uint64(100) xChainID = ids.Empty.Prefix(0) @@ -83,6 +84,9 @@ var ( genesisBlkID ids.ID testSubnet1 *txs.Tx + + errMissingPrimaryValidators = errors.New("missing primary validator set") + errMissing = errors.New("missing") ) type stakerStatus uint @@ -107,7 +111,7 @@ type environment struct { mempool mempool.Mempool sender *common.SenderTest - isBootstrapped *utils.AtomicBool + isBootstrapped *utils.Atomic[bool] config *config.Config clk *mockable.Clock baseDB *versiondb.Database @@ -122,35 +126,22 @@ type environment struct { backend *executor.Backend } -func (t *environment) ResetBlockTimer() { +func (*environment) ResetBlockTimer() { // dummy call, do nothing for now } -// TODO snLookup currently duplicated in vm_test.go. 
Consider removing duplication -type snLookup struct { - chainsToSubnet map[ids.ID]ids.ID -} - -func (sn *snLookup) SubnetID(chainID ids.ID) (ids.ID, error) { - subnetID, ok := sn.chainsToSubnet[chainID] - if !ok { - return ids.ID{}, errors.New("") - } - return subnetID, nil -} - func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { res := &environment{ - isBootstrapped: &utils.AtomicBool{}, + isBootstrapped: &utils.Atomic[bool]{}, config: defaultConfig(), clk: defaultClock(), } - res.isBootstrapped.SetValue(true) + res.isBootstrapped.Set(true) baseDBManager := db_manager.NewMemDB(version.Semantic1_0_0) res.baseDB = versiondb.New(baseDBManager.Current().Database) res.ctx = defaultCtx(res.baseDB) - res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.GetValue()) + res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.Get()) rewardsCalc := reward.NewCalculator(res.config.RewardConfig) res.atomicUTXOs = avax.NewAtomicUTXOManager(res.ctx.SharedMemory, txs.Codec) @@ -158,7 +149,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { if ctrl == nil { res.state = defaultState(res.config, res.ctx, res.baseDB, rewardsCalc) res.uptimes = uptime.NewManager(res.state) - res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.state, res.fx) + res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.fx) res.txBuilder = p_tx_builder.New( res.ctx, res.config, @@ -172,7 +163,7 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { genesisBlkID = ids.GenerateTestID() res.mockedState = state.NewMockState(ctrl) res.uptimes = uptime.NewManager(res.mockedState) - res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.mockedState, res.fx) + res.utxosHandler = utxo.NewHandler(res.ctx, res.clk, res.fx) res.txBuilder = p_tx_builder.New( res.ctx, res.config, @@ -250,7 +241,7 @@ func addSubnet(env *environment) { preFundedKeys[1].PublicKey().Address(), preFundedKeys[2].PublicKey().Address(), }, - 
[]*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, preFundedKeys[0].PublicKey().Address(), ) if err != nil { @@ -275,7 +266,9 @@ func addSubnet(env *environment) { } stateDiff.AddTx(testSubnet1, status.Committed) - stateDiff.Apply(env.state) + if err := stateDiff.Apply(env.state); err != nil { + panic(err) + } } func defaultState( @@ -293,6 +286,7 @@ func defaultState( ctx, metrics.Noop, rewards, + &utils.Atomic[bool]{}, ) if err != nil { panic(err) @@ -315,6 +309,7 @@ func defaultCtx(db database.Database) *snow.Context { ctx := snow.DefaultContextTest() ctx.NetworkID = 10 ctx.XChainID = xChainID + ctx.CChainID = cChainID ctx.AVAXAssetID = avaxAssetID atomicDB := prefixdb.New([]byte{1}, db) @@ -322,11 +317,17 @@ func defaultCtx(db database.Database) *snow.Context { ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) - ctx.SNLookup = &snLookup{ - chainsToSubnet: map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, + ctx.ValidatorState = &validators.TestState{ + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + xChainID: constants.PrimaryNetworkID, + cChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errMissing + } + return subnetID, nil }, } @@ -334,10 +335,13 @@ func defaultCtx(db database.Database) *snow.Context { } func defaultConfig() *config.Config { + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) return &config.Config{ - Chains: chains.MockManager{}, + Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: validators.NewManager(), + Validators: vdrs, TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, CreateBlockchainTxFee: 100 * defaultTxFee, @@ 
-370,10 +374,21 @@ type fxVMInt struct { log logging.Logger } -func (fvi *fxVMInt) CodecRegistry() codec.Registry { return fvi.registry } -func (fvi *fxVMInt) Clock() *mockable.Clock { return fvi.clk } -func (fvi *fxVMInt) Logger() logging.Logger { return fvi.log } -func (fvi *fxVMInt) EthVerificationEnabled() bool { return false } +func (fvi *fxVMInt) CodecRegistry() codec.Registry { + return fvi.registry +} + +func (fvi *fxVMInt) Clock() *mockable.Clock { + return fvi.clk +} + +func (fvi *fxVMInt) Logger() logging.Logger { + return fvi.log +} + +func (fvi *fxVMInt) EthVerificationEnabled() bool { + return false +} func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { fxVMInt := &fxVMInt{ @@ -447,7 +462,7 @@ func buildGenesisTest(ctx *snow.Context) []byte { buildGenesisResponse := api.BuildGenesisReply{} platformvmSS := api.StaticService{} if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { - panic(fmt.Errorf("problem while building platform chain's genesis state: %v", err)) + panic(fmt.Errorf("problem while building platform chain's genesis state: %w", err)) } genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) @@ -464,19 +479,19 @@ func shutdownEnvironment(t *environment) error { return nil } - if t.isBootstrapped.GetValue() { - primaryValidatorSet, exist := t.config.Validators.GetValidators(constants.PrimaryNetworkID) + if t.isBootstrapped.Get() { + primaryValidatorSet, exist := t.config.Validators.Get(constants.PrimaryNetworkID) if !exist { - return errors.New("no default subnet validators") + return errMissingPrimaryValidators } primaryValidators := primaryValidatorSet.List() validatorIDs := make([]ids.NodeID, len(primaryValidators)) for i, vdr := range primaryValidators { - validatorIDs[i] = vdr.ID() + validatorIDs[i] = vdr.NodeID } - if err := t.uptimes.Shutdown(validatorIDs); err != nil { + if err := t.uptimes.StopTracking(validatorIDs, 
constants.PrimaryNetworkID); err != nil { return err } if err := t.state.Commit(); err != nil { @@ -498,7 +513,7 @@ func addPendingValidator( endTime time.Time, nodeID ids.NodeID, rewardAddress ids.ShortID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, ) (*txs.Tx, error) { addPendingValidatorTx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, @@ -514,10 +529,13 @@ func addPendingValidator( return nil, err } - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( addPendingValidatorTx.ID(), addPendingValidatorTx.Unsigned.(*txs.AddValidatorTx), ) + if err != nil { + return nil, err + } env.state.PutPendingValidator(staker) env.state.AddTx(addPendingValidatorTx, status.Committed) diff --git a/avalanchego/vms/platformvm/blocks/executor/manager.go b/avalanchego/vms/platformvm/blocks/executor/manager.go index c1802763..b552bbc1 100644 --- a/avalanchego/vms/platformvm/blocks/executor/manager.go +++ b/avalanchego/vms/platformvm/blocks/executor/manager.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) -var _ Manager = &manager{} +var _ Manager = (*manager)(nil) type Manager interface { state.Versions diff --git a/avalanchego/vms/platformvm/blocks/executor/manager_test.go b/avalanchego/vms/platformvm/blocks/executor/manager_test.go index e2345bc3..fb15dd3c 100644 --- a/avalanchego/vms/platformvm/blocks/executor/manager_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/manager_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -65,8 +65,6 @@ func TestGetBlock(t *testing.T) { } func TestManagerLastAccepted(t *testing.T) { - require := require.New(t) - lastAcceptedID := ids.GenerateTestID() manager := &manager{ backend: &backend{ @@ -74,5 +72,5 @@ func TestManagerLastAccepted(t *testing.T) { }, } - require.Equal(lastAcceptedID, manager.LastAccepted()) + require.Equal(t, lastAcceptedID, manager.LastAccepted()) } diff --git a/avalanchego/vms/platformvm/blocks/executor/mock_manager.go b/avalanchego/vms/platformvm/blocks/executor/mock_manager.go new file mode 100644 index 00000000..64be8c70 --- /dev/null +++ b/avalanchego/vms/platformvm/blocks/executor/mock_manager.go @@ -0,0 +1,114 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor (interfaces: Manager) + +// Package executor is a generated GoMock package. +package executor + +import ( + reflect "reflect" + + ids "github.com/ava-labs/avalanchego/ids" + snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" + blocks "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + state "github.com/ava-labs/avalanchego/vms/platformvm/state" + gomock "github.com/golang/mock/gomock" +) + +// MockManager is a mock of Manager interface. +type MockManager struct { + ctrl *gomock.Controller + recorder *MockManagerMockRecorder +} + +// MockManagerMockRecorder is the mock recorder for MockManager. +type MockManagerMockRecorder struct { + mock *MockManager +} + +// NewMockManager creates a new mock instance. +func NewMockManager(ctrl *gomock.Controller) *MockManager { + mock := &MockManager{ctrl: ctrl} + mock.recorder = &MockManagerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. 
+func (m *MockManager) EXPECT() *MockManagerMockRecorder { + return m.recorder +} + +// GetBlock mocks base method. +func (m *MockManager) GetBlock(arg0 ids.ID) (snowman.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBlock", arg0) + ret0, _ := ret[0].(snowman.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetBlock indicates an expected call of GetBlock. +func (mr *MockManagerMockRecorder) GetBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBlock", reflect.TypeOf((*MockManager)(nil).GetBlock), arg0) +} + +// GetState mocks base method. +func (m *MockManager) GetState(arg0 ids.ID) (state.Chain, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetState", arg0) + ret0, _ := ret[0].(state.Chain) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetState indicates an expected call of GetState. +func (mr *MockManagerMockRecorder) GetState(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetState", reflect.TypeOf((*MockManager)(nil).GetState), arg0) +} + +// GetStatelessBlock mocks base method. +func (m *MockManager) GetStatelessBlock(arg0 ids.ID) (blocks.Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetStatelessBlock", arg0) + ret0, _ := ret[0].(blocks.Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetStatelessBlock indicates an expected call of GetStatelessBlock. +func (mr *MockManagerMockRecorder) GetStatelessBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStatelessBlock", reflect.TypeOf((*MockManager)(nil).GetStatelessBlock), arg0) +} + +// LastAccepted mocks base method. 
+func (m *MockManager) LastAccepted() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LastAccepted") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// LastAccepted indicates an expected call of LastAccepted. +func (mr *MockManagerMockRecorder) LastAccepted() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LastAccepted", reflect.TypeOf((*MockManager)(nil).LastAccepted)) +} + +// NewBlock mocks base method. +func (m *MockManager) NewBlock(arg0 blocks.Block) snowman.Block { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NewBlock", arg0) + ret0, _ := ret[0].(snowman.Block) + return ret0 +} + +// NewBlock indicates an expected call of NewBlock. +func (mr *MockManagerMockRecorder) NewBlock(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NewBlock", reflect.TypeOf((*MockManager)(nil).NewBlock), arg0) +} diff --git a/avalanchego/vms/platformvm/blocks/executor/options.go b/avalanchego/vms/platformvm/blocks/executor/options.go index e455a7b7..8c0bf3af 100644 --- a/avalanchego/vms/platformvm/blocks/executor/options.go +++ b/avalanchego/vms/platformvm/blocks/executor/options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/blocks" ) -var _ blocks.Visitor = &verifier{} +var _ blocks.Visitor = (*verifier)(nil) // options supports build new option blocks type options struct { diff --git a/avalanchego/vms/platformvm/blocks/executor/options_test.go b/avalanchego/vms/platformvm/blocks/executor/options_test.go index 0313e89b..66a7b382 100644 --- a/avalanchego/vms/platformvm/blocks/executor/options_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/options_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor diff --git a/avalanchego/vms/platformvm/blocks/executor/proposal_block_test.go b/avalanchego/vms/platformvm/blocks/executor/proposal_block_test.go index 9e80e8f2..3d1569ad 100644 --- a/avalanchego/vms/platformvm/blocks/executor/proposal_block_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/proposal_block_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( + "context" "fmt" "testing" "time" @@ -16,8 +17,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" "github.com/ava-labs/avalanchego/vms/platformvm/reward" @@ -25,7 +27,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -36,9 +37,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { env := newEnvironment(t, ctrl) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() // create apricotParentBlk. 
It's a standard one for simplicity @@ -66,7 +65,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { // create a proposal transaction to be included into proposal block utx := &txs.AddValidatorTx{ BaseTx: txs.BaseTx{}, - Validator: validator.Validator{End: uint64(chainTime.Unix())}, + Validator: txs.Validator{End: uint64(chainTime.Unix())}, StakeOuts: []*avax.TransferableOutput{ { Asset: avax.Asset{ @@ -81,7 +80,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { DelegationShares: uint32(defaultTxFee), } addValTx := &txs.Tx{Unsigned: utx} - require.NoError(addValTx.Sign(txs.Codec, nil)) + require.NoError(addValTx.Initialize(txs.Codec)) blkTx := &txs.Tx{ Unsigned: &txs.RewardValidatorTx{ TxID: addValTx.ID(), @@ -111,8 +110,9 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { }, nil) onParentAccept.EXPECT().GetTx(addValTx.ID()).Return(addValTx, status.Committed, nil) onParentAccept.EXPECT().GetCurrentSupply(constants.PrimaryNetworkID).Return(uint64(1000), nil).AnyTimes() + onParentAccept.EXPECT().GetDelegateeReward(constants.PrimaryNetworkID, utx.NodeID()).Return(uint64(0), nil).AnyTimes() - env.mockedState.EXPECT().GetUptime(gomock.Any()).Return( + env.mockedState.EXPECT().GetUptime(gomock.Any(), constants.PrimaryNetworkID).Return( time.Duration(1000), /*upDuration*/ time.Time{}, /*lastUpdated*/ nil, /*err*/ @@ -127,7 +127,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) // valid statelessProposalBlock, err = blocks.NewApricotProposalBlock( @@ -138,7 +138,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block = env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(block.Verify()) + require.NoError(block.Verify(context.Background())) } func TestBanffProposalBlockTimeVerification(t *testing.T) { @@ 
-148,9 +148,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { env := newEnvironment(t, ctrl) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.clk.Set(defaultGenesisTime) env.config.BanffTime = time.Time{} // activate Banff @@ -194,7 +192,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { nextStakerTime := chainTime.Add(executor.SyncBound).Add(-1 * time.Second) unsignedNextStakerTx := &txs.AddValidatorTx{ BaseTx: txs.BaseTx{}, - Validator: validator.Validator{End: uint64(nextStakerTime.Unix())}, + Validator: txs.Validator{End: uint64(nextStakerTime.Unix())}, StakeOuts: []*avax.TransferableOutput{ { Asset: avax.Asset{ @@ -209,7 +207,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { DelegationShares: uint32(defaultTxFee), } nextStakerTx := &txs.Tx{Unsigned: unsignedNextStakerTx} - require.NoError(nextStakerTx.Sign(txs.Codec, nil)) + require.NoError(nextStakerTx.Initialize(txs.Codec)) nextStakerTxID := nextStakerTx.ID() onParentAccept.EXPECT().GetCurrentValidator(unsignedNextStakerTx.SubnetID(), unsignedNextStakerTx.NodeID()).Return(&state.Staker{ @@ -232,12 +230,14 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { currentStakersIt.EXPECT().Release().AnyTimes() onParentAccept.EXPECT().GetCurrentStakerIterator().Return(currentStakersIt, nil).AnyTimes() + onParentAccept.EXPECT().GetDelegateeReward(constants.PrimaryNetworkID, unsignedNextStakerTx.NodeID()).Return(uint64(0), nil).AnyTimes() + pendingStakersIt := state.NewMockStakerIterator(ctrl) pendingStakersIt.EXPECT().Next().Return(false).AnyTimes() // no pending stakers pendingStakersIt.EXPECT().Release().AnyTimes() onParentAccept.EXPECT().GetPendingStakerIterator().Return(pendingStakersIt, nil).AnyTimes() - env.mockedState.EXPECT().GetUptime(gomock.Any()).Return( + env.mockedState.EXPECT().GetUptime(gomock.Any(), gomock.Any()).Return( time.Duration(1000), /*upDuration*/ 
time.Time{}, /*lastUpdated*/ nil, /*err*/ @@ -249,7 +249,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { TxID: nextStakerTxID, }, } - require.NoError(blkTx.Sign(txs.Codec, nil)) + require.NoError(blkTx.Initialize(txs.Codec)) { // wrong height @@ -262,7 +262,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -275,7 +275,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -289,7 +289,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -304,7 +304,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -319,7 +319,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -329,7 +329,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { Time: uint64(nextStakerTime.Unix()), }, } - require.NoError(invalidTx.Sign(txs.Codec, nil)) + require.NoError(invalidTx.Initialize(txs.Codec)) statelessProposalBlock, err := blocks.NewBanffProposalBlock( parentTime.Add(time.Second), parentID, @@ -339,7 +339,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - 
require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -354,7 +354,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { statelessProposalBlock.Transactions = []*txs.Tx{blkTx} block := env.blkManager.NewBlock(statelessProposalBlock) - require.ErrorIs(block.Verify(), errBanffProposalBlockWithMultipleTransactions) + require.ErrorIs(block.Verify(context.Background()), errBanffProposalBlockWithMultipleTransactions) } { @@ -368,7 +368,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(block.Verify()) + require.NoError(block.Verify(context.Background())) } } @@ -549,17 +549,18 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { } for _, test := range tests { - t.Run(test.description, func(ts *testing.T) { - require := require.New(ts) + t.Run(test.description, func(t *testing.T) { + require := require.New(t) env := newEnvironment(t, nil) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.config.BanffTime = time.Time{} // activate Banff - env.config.WhitelistedSubnets.Add(testSubnet1.ID()) + + subnetID := testSubnet1.ID() + env.config.TrackedSubnets.Add(subnetID) + env.config.Validators.Add(subnetID, validators.NewSet()) for _, staker := range test.stakers { tx, err := env.txBuilder.NewAddValidatorTx( @@ -569,15 +570,16 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { staker.nodeID, staker.rewardAddress, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) require.NoError(err) - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) @@ -590,16 +592,17 @@ func 
TestBanffProposalBlockUpdateStakers(t *testing.T) { uint64(subStaker.startTime.Unix()), uint64(subStaker.endTime.Unix()), subStaker.nodeID, // validator ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - subnetStaker := state.NewPendingStaker( + subnetStaker, err := state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(subnetStaker) env.state.AddTx(tx, status.Committed) @@ -619,17 +622,19 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { staker0.nodeID, staker0.rewardAddress, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) // store Staker0 to state - staker0 := state.NewCurrentStaker( + staker0, err := state.NewCurrentStaker( addStaker0.ID(), addStaker0.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) + env.state.PutCurrentValidator(staker0) env.state.AddTx(addStaker0, status.Committed) require.NoError(env.state.Commit()) @@ -639,7 +644,7 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { TxID: staker0.TxID, }, } - require.NoError(s0RewardTx.Sign(txs.Codec, nil)) + require.NoError(s0RewardTx.Initialize(txs.Codec)) // build proposal block moving ahead chain time // as well as rewarding staker0 @@ -656,14 +661,14 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { // verify and accept the block block := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(block.Verify()) - options, err := block.(snowman.OracleBlock).Options() + require.NoError(block.Verify(context.Background())) + options, err := block.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) - require.NoError(options[0].Verify()) + 
require.NoError(options[0].Verify(context.Background())) - require.NoError(block.Accept()) - require.NoError(options[0].Accept()) + require.NoError(block.Accept(context.Background())) + require.NoError(options[0].Accept(context.Background())) } require.NoError(env.state.Commit()) @@ -672,20 +677,20 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { case pending: _, err := env.state.GetPendingValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.False(env.config.Validators.Contains(constants.PrimaryNetworkID, stakerNodeID)) + require.False(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) case current: _, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.True(env.config.Validators.Contains(constants.PrimaryNetworkID, stakerNodeID)) + require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) } } for stakerNodeID, status := range test.expectedSubnetStakers { switch status { case pending: - require.False(env.config.Validators.Contains(testSubnet1.ID(), stakerNodeID)) + require.False(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) case current: - require.True(env.config.Validators.Contains(testSubnet1.ID(), stakerNodeID)) + require.True(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) } } }) @@ -696,12 +701,13 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.config.BanffTime = time.Time{} // activate Banff - env.config.WhitelistedSubnets.Add(testSubnet1.ID()) + + subnetID := testSubnet1.ID() + env.config.TrackedSubnets.Add(subnetID) + env.config.Validators.Add(subnetID, validators.NewSet()) // Add a subnet validator to the staker set 
subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) @@ -713,17 +719,18 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { uint64(subnetVdr1StartTime.Unix()), // Start time uint64(subnetVdr1EndTime.Unix()), // end time subnetValidatorNodeID, // Node ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) @@ -738,16 +745,17 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time uint64(subnetVdr1EndTime.Add(time.Second).Add(defaultMinStakingDuration).Unix()), // end time subnetVdr2NodeID, // Node ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - staker = state.NewPendingStaker( + staker, err = state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) @@ -769,17 +777,19 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { ids.GenerateTestNodeID(), ids.GenerateTestShortID(), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) // store Staker0 to state - staker = state.NewCurrentStaker( + staker, err = state.NewCurrentStaker( addStaker0.ID(), addStaker0.Unsigned.(*txs.AddValidatorTx), 0, ) + 
require.NoError(err) + env.state.PutCurrentValidator(staker) env.state.AddTx(addStaker0, status.Committed) require.NoError(env.state.Commit()) @@ -790,7 +800,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { TxID: addStaker0.ID(), }, } - require.NoError(s0RewardTx.Sign(txs.Codec, nil)) + require.NoError(s0RewardTx.Initialize(txs.Codec)) // build proposal block moving ahead chain time preferredID := env.state.GetLastAccepted() @@ -804,39 +814,39 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify()) // verify and update staker set + require.NoError(propBlk.Verify(context.Background())) // verify and update staker set - options, err := propBlk.(snowman.OracleBlock).Options() + options, err := propBlk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) commitBlk := options[0] - require.NoError(commitBlk.Verify()) + require.NoError(commitBlk.Verify(context.Background())) blkStateMap := env.blkManager.(*manager).blkIDToState updatedState := blkStateMap[commitBlk.ID()].onAcceptState - _, err = updatedState.GetCurrentValidator(testSubnet1.ID(), subnetValidatorNodeID) + _, err = updatedState.GetCurrentValidator(subnetID, subnetValidatorNodeID) require.ErrorIs(err, database.ErrNotFound) // Check VM Validators are removed successfully - require.NoError(propBlk.Accept()) - require.NoError(commitBlk.Accept()) - require.False(env.config.Validators.Contains(testSubnet1.ID(), subnetVdr2NodeID)) - require.False(env.config.Validators.Contains(testSubnet1.ID(), subnetValidatorNodeID)) + require.NoError(propBlk.Accept(context.Background())) + require.NoError(commitBlk.Accept(context.Background())) + require.False(validators.Contains(env.config.Validators, subnetID, subnetVdr2NodeID)) + require.False(validators.Contains(env.config.Validators, subnetID, subnetValidatorNodeID)) } -func 
TestBanffProposalBlockWhitelistedSubnet(t *testing.T) { - require := require.New(t) - - for _, whitelist := range []bool{true, false} { - t.Run(fmt.Sprintf("whitelisted %t", whitelist), func(ts *testing.T) { +func TestBanffProposalBlockTrackedSubnet(t *testing.T) { + for _, tracked := range []bool{true, false} { + t.Run(fmt.Sprintf("tracked %t", tracked), func(ts *testing.T) { + require := require.New(t) env := newEnvironment(t, nil) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.config.BanffTime = time.Time{} // activate Banff - if whitelist { - env.config.WhitelistedSubnets.Add(testSubnet1.ID()) + + subnetID := testSubnet1.ID() + if tracked { + env.config.TrackedSubnets.Add(subnetID) + env.config.Validators.Add(subnetID, validators.NewSet()) } // Add a subnet validator to the staker set @@ -849,16 +859,17 @@ func TestBanffProposalBlockWhitelistedSubnet(t *testing.T) { uint64(subnetVdr1StartTime.Unix()), // Start time uint64(subnetVdr1EndTime.Unix()), // end time subnetValidatorNodeID, // Node ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) @@ -878,17 +889,19 @@ func TestBanffProposalBlockWhitelistedSubnet(t *testing.T) { ids.GenerateTestNodeID(), ids.GenerateTestShortID(), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) // store Staker0 to state - staker = state.NewCurrentStaker( + staker, err = state.NewCurrentStaker( 
addStaker0.ID(), addStaker0.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) + env.state.PutCurrentValidator(staker) env.state.AddTx(addStaker0, status.Committed) require.NoError(env.state.Commit()) @@ -899,7 +912,7 @@ func TestBanffProposalBlockWhitelistedSubnet(t *testing.T) { TxID: addStaker0.ID(), }, } - require.NoError(s0RewardTx.Sign(txs.Codec, nil)) + require.NoError(s0RewardTx.Initialize(txs.Codec)) // build proposal block moving ahead chain time preferredID := env.state.GetLastAccepted() @@ -913,15 +926,15 @@ func TestBanffProposalBlockWhitelistedSubnet(t *testing.T) { ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify()) // verify update staker set - options, err := propBlk.(snowman.OracleBlock).Options() + require.NoError(propBlk.Verify(context.Background())) // verify update staker set + options, err := propBlk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) commitBlk := options[0] - require.NoError(commitBlk.Verify()) + require.NoError(commitBlk.Verify(context.Background())) - require.NoError(propBlk.Accept()) - require.NoError(commitBlk.Accept()) - require.Equal(whitelist, env.config.Validators.Contains(testSubnet1.ID(), subnetValidatorNodeID)) + require.NoError(propBlk.Accept(context.Background())) + require.NoError(commitBlk.Accept(context.Background())) + require.Equal(tracked, validators.Contains(env.config.Validators, subnetID, subnetValidatorNodeID)) }) } } @@ -930,9 +943,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.config.BanffTime = time.Time{} // activate Banff @@ -948,7 +959,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { pendingValidatorEndTime, nodeID, rewardAddress, - 
[]*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) @@ -963,17 +974,19 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { ids.GenerateTestNodeID(), ids.GenerateTestShortID(), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) // store Staker0 to state - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( addStaker0.ID(), addStaker0.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) + env.state.PutCurrentValidator(staker) env.state.AddTx(addStaker0, status.Committed) require.NoError(env.state.Commit()) @@ -984,7 +997,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { TxID: addStaker0.ID(), }, } - require.NoError(s0RewardTx.Sign(txs.Codec, nil)) + require.NoError(s0RewardTx.Initialize(txs.Codec)) // build proposal block moving ahead chain time preferredID := env.state.GetLastAccepted() @@ -998,20 +1011,20 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify()) + require.NoError(propBlk.Verify(context.Background())) - options, err := propBlk.(snowman.OracleBlock).Options() + options, err := propBlk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) commitBlk := options[0] - require.NoError(commitBlk.Verify()) + require.NoError(commitBlk.Verify(context.Background())) - require.NoError(propBlk.Accept()) - require.NoError(commitBlk.Accept()) + require.NoError(propBlk.Accept(context.Background())) + require.NoError(commitBlk.Accept(context.Background())) // Test validator weight before delegation - primarySet, ok := env.config.Validators.GetValidators(constants.PrimaryNetworkID) + primarySet, ok := 
env.config.Validators.Get(constants.PrimaryNetworkID) require.True(ok) - vdrWeight, _ := primarySet.GetWeight(nodeID) + vdrWeight := primarySet.GetWeight(nodeID) require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator @@ -1024,7 +1037,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { uint64(pendingDelegatorEndTime.Unix()), nodeID, preFundedKeys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{ + []*secp256k1.PrivateKey{ preFundedKeys[0], preFundedKeys[1], preFundedKeys[4], @@ -1033,10 +1046,11 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) - staker = state.NewPendingStaker( + staker, err = state.NewPendingStaker( addDelegatorTx.ID(), addDelegatorTx.Unsigned.(*txs.AddDelegatorTx), ) + require.NoError(err) env.state.PutPendingDelegator(staker) env.state.AddTx(addDelegatorTx, status.Committed) @@ -1053,17 +1067,19 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { ids.GenerateTestNodeID(), ids.GenerateTestShortID(), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) // store Staker0 to state - staker = state.NewCurrentStaker( + staker, err = state.NewCurrentStaker( addStaker0.ID(), addStaker0.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) + env.state.PutCurrentValidator(staker) env.state.AddTx(addStaker0, status.Committed) require.NoError(env.state.Commit()) @@ -1074,7 +1090,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { TxID: addStaker0.ID(), }, } - require.NoError(s0RewardTx.Sign(txs.Codec, nil)) + require.NoError(s0RewardTx.Initialize(txs.Codec)) // Advance Time preferredID = env.state.GetLastAccepted() @@ -1089,18 +1105,18 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require.NoError(err) propBlk = env.blkManager.NewBlock(statelessProposalBlock) - 
require.NoError(propBlk.Verify()) + require.NoError(propBlk.Verify(context.Background())) - options, err = propBlk.(snowman.OracleBlock).Options() + options, err = propBlk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) commitBlk = options[0] - require.NoError(commitBlk.Verify()) + require.NoError(commitBlk.Verify(context.Background())) - require.NoError(propBlk.Accept()) - require.NoError(commitBlk.Accept()) + require.NoError(propBlk.Accept(context.Background())) + require.NoError(commitBlk.Accept(context.Background())) // Test validator weight after delegation - vdrWeight, _ = primarySet.GetWeight(nodeID) + vdrWeight = primarySet.GetWeight(nodeID) require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } @@ -1108,9 +1124,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.config.BanffTime = time.Time{} // activate Banff @@ -1118,7 +1132,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { // Add a pending validator pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) - factory := crypto.FactorySECP256K1R{} + factory := secp256k1.Factory{} nodeIDKey, _ := factory.NewPrivateKey() rewardAddress := nodeIDKey.PublicKey().Address() nodeID := ids.NodeID(rewardAddress) @@ -1129,7 +1143,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { pendingValidatorEndTime, nodeID, rewardAddress, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) @@ -1144,17 +1158,19 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { ids.GenerateTestNodeID(), ids.GenerateTestShortID(), reward.PercentDenominator, - 
[]*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) // store Staker0 to state - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( addStaker0.ID(), addStaker0.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) + env.state.PutCurrentValidator(staker) env.state.AddTx(addStaker0, status.Committed) require.NoError(env.state.Commit()) @@ -1165,7 +1181,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { TxID: addStaker0.ID(), }, } - require.NoError(s0RewardTx.Sign(txs.Codec, nil)) + require.NoError(s0RewardTx.Initialize(txs.Codec)) // build proposal block moving ahead chain time preferredID := env.state.GetLastAccepted() @@ -1179,20 +1195,20 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify()) + require.NoError(propBlk.Verify(context.Background())) - options, err := propBlk.(snowman.OracleBlock).Options() + options, err := propBlk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) commitBlk := options[0] - require.NoError(commitBlk.Verify()) + require.NoError(commitBlk.Verify(context.Background())) - require.NoError(propBlk.Accept()) - require.NoError(commitBlk.Accept()) + require.NoError(propBlk.Accept(context.Background())) + require.NoError(commitBlk.Accept(context.Background())) // Test validator weight before delegation - primarySet, ok := env.config.Validators.GetValidators(constants.PrimaryNetworkID) + primarySet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) require.True(ok) - vdrWeight, _ := primarySet.GetWeight(nodeID) + vdrWeight := primarySet.GetWeight(nodeID) require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator @@ -1204,7 +1220,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { 
uint64(pendingDelegatorEndTime.Unix()), nodeID, preFundedKeys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{ + []*secp256k1.PrivateKey{ preFundedKeys[0], preFundedKeys[1], preFundedKeys[4], @@ -1213,10 +1229,11 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { ) require.NoError(err) - staker = state.NewPendingStaker( + staker, err = state.NewPendingStaker( addDelegatorTx.ID(), addDelegatorTx.Unsigned.(*txs.AddDelegatorTx), ) + require.NoError(err) env.state.PutPendingDelegator(staker) env.state.AddTx(addDelegatorTx, status.Committed) @@ -1233,17 +1250,19 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { ids.GenerateTestNodeID(), ids.GenerateTestShortID(), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) // store Staker0 to state - staker = state.NewCurrentStaker( + staker, err = state.NewCurrentStaker( addStaker0.ID(), addStaker0.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) + env.state.PutCurrentValidator(staker) env.state.AddTx(addStaker0, status.Committed) require.NoError(env.state.Commit()) @@ -1254,7 +1273,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { TxID: addStaker0.ID(), }, } - require.NoError(s0RewardTx.Sign(txs.Codec, nil)) + require.NoError(s0RewardTx.Initialize(txs.Codec)) // Advance Time preferredID = env.state.GetLastAccepted() @@ -1268,17 +1287,17 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { ) require.NoError(err) propBlk = env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify()) + require.NoError(propBlk.Verify(context.Background())) - options, err = propBlk.(snowman.OracleBlock).Options() + options, err = propBlk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) commitBlk = options[0] - require.NoError(commitBlk.Verify()) + 
require.NoError(commitBlk.Verify(context.Background())) - require.NoError(propBlk.Accept()) - require.NoError(commitBlk.Accept()) + require.NoError(propBlk.Accept(context.Background())) + require.NoError(commitBlk.Accept(context.Background())) // Test validator weight after delegation - vdrWeight, _ = primarySet.GetWeight(nodeID) + vdrWeight = primarySet.GetWeight(nodeID) require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } diff --git a/avalanchego/vms/platformvm/blocks/executor/rejector.go b/avalanchego/vms/platformvm/blocks/executor/rejector.go index 5fbdac2c..6c039b29 100644 --- a/avalanchego/vms/platformvm/blocks/executor/rejector.go +++ b/avalanchego/vms/platformvm/blocks/executor/rejector.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/blocks" ) -var _ blocks.Visitor = &rejector{} +var _ blocks.Visitor = (*rejector)(nil) // rejector handles the logic for rejecting a block. // All errors returned by this struct are fatal and should result in the chain diff --git a/avalanchego/vms/platformvm/blocks/executor/rejector_test.go b/avalanchego/vms/platformvm/blocks/executor/rejector_test.go index 48b51ed4..3c909e6b 100644 --- a/avalanchego/vms/platformvm/blocks/executor/rejector_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/rejector_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( "testing" + "time" "github.com/golang/mock/gomock" @@ -33,7 +34,8 @@ func TestRejectBlock(t *testing.T) { { name: "proposal block", newBlockFunc: func() (blocks.Block, error) { - return blocks.NewApricotProposalBlock( + return blocks.NewBanffProposalBlock( + time.Now(), ids.GenerateTestID(), 1, &txs.Tx{ @@ -46,7 +48,7 @@ func TestRejectBlock(t *testing.T) { ) }, rejectFunc: func(r *rejector, b blocks.Block) error { - return r.ApricotProposalBlock(b.(*blocks.ApricotProposalBlock)) + return r.BanffProposalBlock(b.(*blocks.BanffProposalBlock)) }, }, { @@ -71,7 +73,8 @@ func TestRejectBlock(t *testing.T) { { name: "standard block", newBlockFunc: func() (blocks.Block, error) { - return blocks.NewApricotStandardBlock( + return blocks.NewBanffStandardBlock( + time.Now(), ids.GenerateTestID(), 1, []*txs.Tx{ @@ -86,25 +89,25 @@ func TestRejectBlock(t *testing.T) { ) }, rejectFunc: func(r *rejector, b blocks.Block) error { - return r.ApricotStandardBlock(b.(*blocks.ApricotStandardBlock)) + return r.BanffStandardBlock(b.(*blocks.BanffStandardBlock)) }, }, { name: "commit", newBlockFunc: func() (blocks.Block, error) { - return blocks.NewApricotCommitBlock(ids.GenerateTestID() /*parent*/, 1 /*height*/) + return blocks.NewBanffCommitBlock(time.Now(), ids.GenerateTestID() /*parent*/, 1 /*height*/) }, rejectFunc: func(r *rejector, blk blocks.Block) error { - return r.ApricotCommitBlock(blk.(*blocks.ApricotCommitBlock)) + return r.BanffCommitBlock(blk.(*blocks.BanffCommitBlock)) }, }, { name: "abort", newBlockFunc: func() (blocks.Block, error) { - return blocks.NewApricotAbortBlock(ids.GenerateTestID() /*parent*/, 1 /*height*/) + return blocks.NewBanffAbortBlock(time.Now(), ids.GenerateTestID() /*parent*/, 1 /*height*/) }, rejectFunc: func(r *rejector, blk blocks.Block) error { - return r.ApricotAbortBlock(blk.(*blocks.ApricotAbortBlock)) + return r.BanffAbortBlock(blk.(*blocks.BanffAbortBlock)) }, }, } diff --git 
a/avalanchego/vms/platformvm/blocks/executor/standard_block_test.go b/avalanchego/vms/platformvm/blocks/executor/standard_block_test.go index 31143a9c..433983c9 100644 --- a/avalanchego/vms/platformvm/blocks/executor/standard_block_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/standard_block_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( + "context" "fmt" "testing" "time" @@ -14,8 +15,9 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" "github.com/ava-labs/avalanchego/vms/platformvm/state" @@ -33,9 +35,7 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { env := newEnvironment(t, ctrl) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() // setup and store parent block @@ -71,7 +71,7 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(apricotChildBlk) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) // valid height apricotChildBlk, err = blocks.NewApricotStandardBlock( @@ -81,7 +81,7 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block = env.blkManager.NewBlock(apricotChildBlk) - require.NoError(block.Verify()) + require.NoError(block.Verify(context.Background())) } func TestBanffStandardBlockTimeVerification(t *testing.T) { @@ -91,9 +91,7 @@ func TestBanffStandardBlockTimeVerification(t 
*testing.T) { env := newEnvironment(t, ctrl) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() now := env.clk.Time() env.clk.Set(now) @@ -178,7 +176,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { Owner: &secp256k1fx.OutputOwners{}, } tx := &txs.Tx{Unsigned: utx} - require.NoError(tx.Sign(txs.Codec, [][]*crypto.PrivateKeySECP256K1R{{}})) + require.NoError(tx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{{}})) { // wrong version @@ -189,7 +187,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -203,7 +201,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -217,7 +215,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -231,7 +229,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -245,7 +243,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.Error(block.Verify()) + require.Error(block.Verify(context.Background())) } { @@ -259,7 +257,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.ErrorIs(block.Verify(), errBanffStandardBlockWithoutChanges) + require.ErrorIs(block.Verify(context.Background()), 
errBanffStandardBlockWithoutChanges) } { @@ -273,7 +271,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.NoError(block.Verify()) + require.NoError(block.Verify(context.Background())) } { @@ -287,7 +285,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.NoError(block.Verify()) + require.NoError(block.Verify(context.Background())) } } @@ -312,7 +310,7 @@ func TestBanffStandardBlockUpdatePrimaryNetworkStakers(t *testing.T) { pendingValidatorEndTime, nodeID, rewardAddress, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) @@ -330,22 +328,22 @@ func TestBanffStandardBlockUpdatePrimaryNetworkStakers(t *testing.T) { block := env.blkManager.NewBlock(statelessStandardBlock) // update staker set - require.NoError(block.Verify()) + require.NoError(block.Verify(context.Background())) // tests blkStateMap := env.blkManager.(*manager).blkIDToState updatedState := blkStateMap[block.ID()].onAcceptState currentValidator, err := updatedState.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.NoError(err) - require.True(currentValidator.TxID == addPendingValidatorTx.ID(), "Added the wrong tx to the validator set") + require.Equal(addPendingValidatorTx.ID(), currentValidator.TxID) require.EqualValues(0, currentValidator.PotentialReward) // Rewards are 0 on Flare _, err = updatedState.GetPendingValidator(constants.PrimaryNetworkID, nodeID) require.ErrorIs(err, database.ErrNotFound) // Test VM validators - require.NoError(block.Accept()) - require.True(env.config.Validators.Contains(constants.PrimaryNetworkID, nodeID)) + require.NoError(block.Accept(context.Background())) + require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, nodeID)) } // Ensure semantic verification updates the 
current and pending staker sets correctly. @@ -484,16 +482,17 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { } for _, test := range tests { - t.Run(test.description, func(ts *testing.T) { - require := require.New(ts) + t.Run(test.description, func(t *testing.T) { + require := require.New(t) env := newEnvironment(t, nil) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.config.BanffTime = time.Time{} // activate Banff - env.config.WhitelistedSubnets.Add(testSubnet1.ID()) + + subnetID := testSubnet1.ID() + env.config.TrackedSubnets.Add(subnetID) + env.config.Validators.Add(subnetID, validators.NewSet()) for _, staker := range test.stakers { _, err := addPendingValidator( @@ -502,7 +501,7 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { staker.endTime, staker.nodeID, staker.rewardAddress, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) } @@ -512,17 +511,18 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { 10, // Weight uint64(staker.startTime.Unix()), uint64(staker.endTime.Unix()), - staker.nodeID, // validator ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + staker.nodeID, // validator ID + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) @@ -548,8 +548,8 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { require.NoError(err) // update staker set - require.NoError(block.Verify()) - require.NoError(block.Accept()) + require.NoError(block.Verify(context.Background())) + 
require.NoError(block.Accept(context.Background())) } for stakerNodeID, status := range test.expectedStakers { @@ -557,20 +557,20 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { case pending: _, err := env.state.GetPendingValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.False(env.config.Validators.Contains(constants.PrimaryNetworkID, stakerNodeID)) + require.False(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) case current: _, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.True(env.config.Validators.Contains(constants.PrimaryNetworkID, stakerNodeID)) + require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) } } for stakerNodeID, status := range test.expectedSubnetStakers { switch status { case pending: - require.False(env.config.Validators.Contains(testSubnet1.ID(), stakerNodeID)) + require.False(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) case current: - require.True(env.config.Validators.Contains(testSubnet1.ID(), stakerNodeID)) + require.True(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) } } }) @@ -585,12 +585,13 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.config.BanffTime = time.Time{} // activate Banff - env.config.WhitelistedSubnets.Add(testSubnet1.ID()) + + subnetID := testSubnet1.ID() + env.config.TrackedSubnets.Add(subnetID) + env.config.Validators.Add(subnetID, validators.NewSet()) // Add a subnet validator to the staker set subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) @@ -602,17 +603,18 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { 
uint64(subnetVdr1StartTime.Unix()), // Start time uint64(subnetVdr1EndTime.Unix()), // end time subnetValidatorNodeID, // Node ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) @@ -627,16 +629,17 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time uint64(subnetVdr1EndTime.Add(time.Second).Add(defaultMinStakingDuration).Unix()), // end time subnetVdr2NodeID, // Node ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - staker = state.NewPendingStaker( + staker, err = state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) @@ -660,33 +663,33 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { block := env.blkManager.NewBlock(statelessStandardBlock) // update staker set - require.NoError(block.Verify()) + require.NoError(block.Verify(context.Background())) blkStateMap := env.blkManager.(*manager).blkIDToState updatedState := blkStateMap[block.ID()].onAcceptState - _, err = updatedState.GetCurrentValidator(testSubnet1.ID(), subnetValidatorNodeID) + _, err = updatedState.GetCurrentValidator(subnetID, subnetValidatorNodeID) require.ErrorIs(err, database.ErrNotFound) // Check VM Validators are removed successfully - require.NoError(block.Accept()) - 
require.False(env.config.Validators.Contains(testSubnet1.ID(), subnetVdr2NodeID)) - require.False(env.config.Validators.Contains(testSubnet1.ID(), subnetValidatorNodeID)) + require.NoError(block.Accept(context.Background())) + require.False(validators.Contains(env.config.Validators, subnetID, subnetVdr2NodeID)) + require.False(validators.Contains(env.config.Validators, subnetID, subnetValidatorNodeID)) } -func TestBanffStandardBlockWhitelistedSubnet(t *testing.T) { - require := require.New(t) - - for _, whitelist := range []bool{true, false} { - t.Run(fmt.Sprintf("whitelisted %t", whitelist), func(ts *testing.T) { +func TestBanffStandardBlockTrackedSubnet(t *testing.T) { + for _, tracked := range []bool{true, false} { + t.Run(fmt.Sprintf("tracked %t", tracked), func(t *testing.T) { + require := require.New(t) env := newEnvironment(t, nil) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.config.BanffTime = time.Time{} // activate Banff - if whitelist { - env.config.WhitelistedSubnets.Add(testSubnet1.ID()) + + subnetID := testSubnet1.ID() + if tracked { + env.config.TrackedSubnets.Add(subnetID) + env.config.Validators.Add(subnetID, validators.NewSet()) } // Add a subnet validator to the staker set @@ -699,16 +702,17 @@ func TestBanffStandardBlockWhitelistedSubnet(t *testing.T) { uint64(subnetVdr1StartTime.Unix()), // Start time uint64(subnetVdr1EndTime.Unix()), // end time subnetValidatorNodeID, // Node ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) @@ -731,9 +735,9 @@ func 
TestBanffStandardBlockWhitelistedSubnet(t *testing.T) { block := env.blkManager.NewBlock(statelessStandardBlock) // update staker set - require.NoError(block.Verify()) - require.NoError(block.Accept()) - require.Equal(whitelist, env.config.Validators.Contains(testSubnet1.ID(), subnetValidatorNodeID)) + require.NoError(block.Verify(context.Background())) + require.NoError(block.Accept(context.Background())) + require.Equal(tracked, validators.Contains(env.config.Validators, subnetID, subnetValidatorNodeID)) }) } } @@ -742,9 +746,7 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.config.BanffTime = time.Time{} // activate Banff @@ -760,7 +762,7 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { pendingValidatorEndTime, nodeID, rewardAddress, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) @@ -776,14 +778,14 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(statelessStandardBlock) - require.NoError(block.Verify()) - require.NoError(block.Accept()) + require.NoError(block.Verify(context.Background())) + require.NoError(block.Accept(context.Background())) require.NoError(env.state.Commit()) // Test validator weight before delegation - primarySet, ok := env.config.Validators.GetValidators(constants.PrimaryNetworkID) + primarySet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) require.True(ok) - vdrWeight, _ := primarySet.GetWeight(nodeID) + vdrWeight := primarySet.GetWeight(nodeID) require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator @@ -796,7 +798,7 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { uint64(pendingDelegatorEndTime.Unix()), nodeID, 
preFundedKeys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{ + []*secp256k1.PrivateKey{ preFundedKeys[0], preFundedKeys[1], preFundedKeys[4], @@ -805,10 +807,11 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( addDelegatorTx.ID(), addDelegatorTx.Unsigned.(*txs.AddDelegatorTx), ) + require.NoError(err) env.state.PutPendingDelegator(staker) env.state.AddTx(addDelegatorTx, status.Committed) @@ -827,11 +830,11 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) block = env.blkManager.NewBlock(statelessStandardBlock) - require.NoError(block.Verify()) - require.NoError(block.Accept()) + require.NoError(block.Verify(context.Background())) + require.NoError(block.Accept(context.Background())) require.NoError(env.state.Commit()) // Test validator weight after delegation - vdrWeight, _ = primarySet.GetWeight(nodeID) + vdrWeight = primarySet.GetWeight(nodeID) require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } diff --git a/avalanchego/vms/platformvm/blocks/executor/verifier.go b/avalanchego/vms/platformvm/blocks/executor/verifier.go index bee7ac66..9f72b509 100644 --- a/avalanchego/vms/platformvm/blocks/executor/verifier.go +++ b/avalanchego/vms/platformvm/blocks/executor/verifier.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -17,9 +18,8 @@ import ( ) var ( - _ blocks.Visitor = &verifier{} + _ blocks.Visitor = (*verifier)(nil) - errBanffBlockIssuedBeforeFork = errors.New("banff block issued before fork") errApricotBlockIssuedAfterFork = errors.New("apricot block issued after fork") errBanffProposalBlockWithMultipleTransactions = errors.New("BanffProposalBlock contains multiple transactions") errBanffStandardBlockWithoutChanges = errors.New("BanffStandardBlock performs no state changes") @@ -196,7 +196,7 @@ func (v *verifier) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { if err := b.Tx.Unsigned.Visit(&atomicExecutor); err != nil { txID := b.Tx.ID() - v.MarkDropped(txID, err.Error()) // cache tx as dropped + v.MarkDropped(txID, err) // cache tx as dropped return fmt.Errorf("tx %s failed semantic verification: %w", txID, err) } @@ -222,7 +222,7 @@ func (v *verifier) ApricotAtomicBlock(b *blocks.ApricotAtomicBlock) error { } func (v *verifier) banffOptionBlock(b blocks.BanffBlock) error { - if err := v.banffCommonBlock(b); err != nil { + if err := v.commonBlock(b); err != nil { return err } @@ -245,7 +245,7 @@ func (v *verifier) banffOptionBlock(b blocks.BanffBlock) error { } func (v *verifier) banffNonOptionBlock(b blocks.BanffBlock) error { - if err := v.banffCommonBlock(b); err != nil { + if err := v.commonBlock(b); err != nil { return err } @@ -279,14 +279,6 @@ func (v *verifier) banffNonOptionBlock(b blocks.BanffBlock) error { ) } -func (v *verifier) banffCommonBlock(b blocks.BanffBlock) error { - timestamp := b.Timestamp() - if !v.txExecutorBackend.Config.IsBanffActivated(timestamp) { - return fmt.Errorf("%w: timestamp = %s", 
errBanffBlockIssuedBeforeFork, timestamp) - } - return v.commonBlock(b) -} - func (v *verifier) apricotCommonBlock(b blocks.Block) error { // We can use the parent timestamp here, because we are guaranteed that the // parent was verified. Apricot blocks only update the timestamp with @@ -372,7 +364,7 @@ func (v *verifier) proposalBlock( if err := b.Tx.Unsigned.Visit(&txExecutor); err != nil { txID := b.Tx.ID() - v.MarkDropped(txID, err.Error()) // cache tx as dropped + v.MarkDropped(txID, err) // cache tx as dropped return err } @@ -419,7 +411,7 @@ func (v *verifier) standardBlock( } if err := tx.Unsigned.Visit(&txExecutor); err != nil { txID := tx.ID() - v.MarkDropped(txID, err.Error()) // cache tx as dropped + v.MarkDropped(txID, err) // cache tx as dropped return err } // ensure it doesn't overlap with current input batch @@ -470,7 +462,7 @@ func (v *verifier) standardBlock( // verifyUniqueInputs verifies that the inputs of the given block are not // duplicated in any of the parent blocks pinned in memory. -func (v *verifier) verifyUniqueInputs(block blocks.Block, inputs ids.Set) error { +func (v *verifier) verifyUniqueInputs(block blocks.Block, inputs set.Set[ids.ID]) error { if inputs.Len() == 0 { return nil } diff --git a/avalanchego/vms/platformvm/blocks/executor/verifier_test.go b/avalanchego/vms/platformvm/blocks/executor/verifier_test.go index b39292e4..ee224f0c 100644 --- a/avalanchego/vms/platformvm/blocks/executor/verifier_test.go +++ b/avalanchego/vms/platformvm/blocks/executor/verifier_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( + "context" "testing" "time" @@ -17,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" @@ -96,7 +98,7 @@ func TestVerifierVisitProposalBlock(t *testing.T) { // Visit the block blk := manager.NewBlock(apricotBlk) - err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) gotBlkState := verifier.backend.blkIDToState[apricotBlk.ID()] @@ -113,7 +115,7 @@ func TestVerifierVisitProposalBlock(t *testing.T) { require.Equal(status.Aborted, gotStatus) // Visiting again should return nil without using dependencies. - err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) } @@ -160,7 +162,7 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { onAccept := state.NewMockDiff(ctrl) blkTx := txs.NewMockUnsignedTx(ctrl) - inputs := ids.Set{ids.GenerateTestID(): struct{}{}} + inputs := set.Set[ids.ID]{ids.GenerateTestID(): struct{}{}} blkTx.EXPECT().Visit(gomock.AssignableToTypeOf(&executor.AtomicTxExecutor{})).DoAndReturn( func(e *executor.AtomicTxExecutor) error { e.OnAccept = onAccept @@ -193,7 +195,7 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { onAccept.EXPECT().GetTimestamp().Return(timestamp).Times(1) blk := manager.NewBlock(apricotBlk) - err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) @@ -204,7 +206,7 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. 
- err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) } @@ -264,7 +266,7 @@ func TestVerifierVisitStandardBlock(t *testing.T) { blkTx.EXPECT().Visit(gomock.AssignableToTypeOf(&executor.StandardTxExecutor{})).DoAndReturn( func(e *executor.StandardTxExecutor) error { e.OnAccept = func() {} - e.Inputs = ids.Set{} + e.Inputs = set.Set[ids.ID]{} e.AtomicRequests = atomicRequests return nil }, @@ -295,18 +297,18 @@ func TestVerifierVisitStandardBlock(t *testing.T) { mempool.EXPECT().Remove(apricotBlk.Txs()).Times(1) blk := manager.NewBlock(apricotBlk) - err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) // Assert expected state. require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) gotBlkState := verifier.backend.blkIDToState[apricotBlk.ID()] require.Equal(apricotBlk, gotBlkState.statelessBlock) - require.Equal(ids.Set{}, gotBlkState.inputs) + require.Equal(set.Set[ids.ID]{}, gotBlkState.inputs) require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) } @@ -369,7 +371,7 @@ func TestVerifierVisitCommitBlock(t *testing.T) { // Verify the block. blk := manager.NewBlock(apricotBlk) - err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) // Assert expected state. @@ -379,7 +381,7 @@ func TestVerifierVisitCommitBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) } @@ -442,7 +444,7 @@ func TestVerifierVisitAbortBlock(t *testing.T) { // Verify the block. blk := manager.NewBlock(apricotBlk) - err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) // Assert expected state. 
@@ -452,7 +454,7 @@ func TestVerifierVisitAbortBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify() + err = blk.Verify(context.Background()) require.NoError(err) } @@ -699,7 +701,7 @@ func TestVerifierVisitStandardBlockWithDuplicateInputs(t *testing.T) { parentID := ids.GenerateTestID() parentStatelessBlk := blocks.NewMockBlock(ctrl) parentState := state.NewMockDiff(ctrl) - atomicInputs := ids.Set{ + atomicInputs := set.Set[ids.ID]{ ids.GenerateTestID(): struct{}{}, } diff --git a/avalanchego/vms/platformvm/blocks/mock_block.go b/avalanchego/vms/platformvm/blocks/mock_block.go index 8f88c68e..8bc912e6 100644 --- a/avalanchego/vms/platformvm/blocks/mock_block.go +++ b/avalanchego/vms/platformvm/blocks/mock_block.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/blocks (interfaces: Block) diff --git a/avalanchego/vms/platformvm/blocks/parse.go b/avalanchego/vms/platformvm/blocks/parse.go index a80ec3d4..27d83773 100644 --- a/avalanchego/vms/platformvm/blocks/parse.go +++ b/avalanchego/vms/platformvm/blocks/parse.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks diff --git a/avalanchego/vms/platformvm/blocks/parse_test.go b/avalanchego/vms/platformvm/blocks/parse_test.go index 5454254d..9ab17af7 100644 --- a/avalanchego/vms/platformvm/blocks/parse_test.go +++ b/avalanchego/vms/platformvm/blocks/parse_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package blocks @@ -11,13 +11,13 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var preFundedKeys = crypto.BuildTestKeys() +var preFundedKeys = secp256k1.TestKeys() func TestStandardBlocks(t *testing.T) { // check Apricot standard block can be built and parsed @@ -294,7 +294,7 @@ func testAtomicTx() (*txs.Tx, error) { }, }}, } - signers := [][]*crypto.PrivateKeySECP256K1R{{preFundedKeys[0]}} + signers := [][]*secp256k1.PrivateKey{{preFundedKeys[0]}} return txs.NewSigned(utx, txs.Codec, signers) } @@ -338,7 +338,7 @@ func testDecisionTxs() ([]*txs.Tx, error) { SubnetAuth: &secp256k1fx.Input{SigIndices: []uint32{1}}, } - signers := [][]*crypto.PrivateKeySECP256K1R{{preFundedKeys[0]}} + signers := [][]*secp256k1.PrivateKey{{preFundedKeys[0]}} tx, err := txs.NewSigned(utx, txs.Codec, signers) if err != nil { return nil, err @@ -353,6 +353,6 @@ func testProposalTx() (*txs.Tx, error) { TxID: ids.ID{'r', 'e', 'w', 'a', 'r', 'd', 'I', 'D'}, } - signers := [][]*crypto.PrivateKeySECP256K1R{{preFundedKeys[0]}} + signers := [][]*secp256k1.PrivateKey{{preFundedKeys[0]}} return txs.NewSigned(utx, txs.Codec, signers) } diff --git a/avalanchego/vms/platformvm/blocks/proposal_block.go b/avalanchego/vms/platformvm/blocks/proposal_block.go index 354d1c0b..11e9c22a 100644 --- a/avalanchego/vms/platformvm/blocks/proposal_block.go +++ b/avalanchego/vms/platformvm/blocks/proposal_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package blocks @@ -13,8 +13,8 @@ import ( ) var ( - _ BanffBlock = &BanffProposalBlock{} - _ Block = &ApricotProposalBlock{} + _ BanffBlock = (*BanffProposalBlock)(nil) + _ Block = (*ApricotProposalBlock)(nil) ) type BanffProposalBlock struct { @@ -35,8 +35,13 @@ func (b *BanffProposalBlock) InitCtx(ctx *snow.Context) { b.ApricotProposalBlock.InitCtx(ctx) } -func (b *BanffProposalBlock) Timestamp() time.Time { return time.Unix(int64(b.Time), 0) } -func (b *BanffProposalBlock) Visit(v Visitor) error { return v.BanffProposalBlock(b) } +func (b *BanffProposalBlock) Timestamp() time.Time { + return time.Unix(int64(b.Time), 0) +} + +func (b *BanffProposalBlock) Visit(v Visitor) error { + return v.BanffProposalBlock(b) +} func NewBanffProposalBlock( timestamp time.Time, @@ -64,7 +69,7 @@ type ApricotProposalBlock struct { func (b *ApricotProposalBlock) initialize(bytes []byte) error { b.CommonBlock.initialize(bytes) - if err := b.Tx.Sign(txs.Codec, nil); err != nil { + if err := b.Tx.Initialize(txs.Codec); err != nil { return fmt.Errorf("failed to initialize tx: %w", err) } return nil @@ -74,9 +79,17 @@ func (b *ApricotProposalBlock) InitCtx(ctx *snow.Context) { b.Tx.Unsigned.InitCtx(ctx) } -func (b *ApricotProposalBlock) Txs() []*txs.Tx { return []*txs.Tx{b.Tx} } -func (b *ApricotProposalBlock) Visit(v Visitor) error { return v.ApricotProposalBlock(b) } +func (b *ApricotProposalBlock) Txs() []*txs.Tx { + return []*txs.Tx{b.Tx} +} + +func (b *ApricotProposalBlock) Visit(v Visitor) error { + return v.ApricotProposalBlock(b) +} +// NewApricotProposalBlock is kept for testing purposes only. 
+// Following Banff activation and subsequent code cleanup, Apricot Proposal blocks +// should be only verified (upon bootstrap), never created anymore func NewApricotProposalBlock( parentID ids.ID, height uint64, diff --git a/avalanchego/vms/platformvm/blocks/proposal_block_test.go b/avalanchego/vms/platformvm/blocks/proposal_block_test.go index 7e5a5219..50affc5f 100644 --- a/avalanchego/vms/platformvm/blocks/proposal_block_test.go +++ b/avalanchego/vms/platformvm/blocks/proposal_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -33,14 +32,14 @@ func TestNewBanffProposalBlock(t *testing.T) { }, }, StakeOuts: []*avax.TransferableOutput{}, - Validator: validator.Validator{}, + Validator: txs.Validator{}, RewardsOwner: &secp256k1fx.OutputOwners{ Addrs: []ids.ShortID{}, }, }, Creds: []verify.Verifiable{}, } - require.NoError(tx.Sign(txs.Codec, nil)) + require.NoError(tx.Initialize(txs.Codec)) blk, err := NewBanffProposalBlock( timestamp, @@ -51,8 +50,8 @@ func TestNewBanffProposalBlock(t *testing.T) { require.NoError(err) // Make sure the block and tx are initialized - require.NotNil(blk.Bytes()) - require.NotNil(blk.Tx.Bytes()) + require.NotEmpty(blk.Bytes()) + require.NotEmpty(blk.Tx.Bytes()) require.NotEqual(ids.Empty, blk.Tx.ID()) require.Equal(tx.Bytes(), blk.Tx.Bytes()) require.Equal(timestamp, blk.Timestamp()) @@ -75,14 +74,14 @@ func TestNewApricotProposalBlock(t *testing.T) { }, }, StakeOuts: []*avax.TransferableOutput{}, - Validator: validator.Validator{}, + Validator: txs.Validator{}, 
RewardsOwner: &secp256k1fx.OutputOwners{ Addrs: []ids.ShortID{}, }, }, Creds: []verify.Verifiable{}, } - require.NoError(tx.Sign(txs.Codec, nil)) + require.NoError(tx.Initialize(txs.Codec)) blk, err := NewApricotProposalBlock( parentID, @@ -92,8 +91,8 @@ func TestNewApricotProposalBlock(t *testing.T) { require.NoError(err) // Make sure the block and tx are initialized - require.NotNil(blk.Bytes()) - require.NotNil(blk.Tx.Bytes()) + require.NotEmpty(blk.Bytes()) + require.NotEmpty(blk.Tx.Bytes()) require.NotEqual(ids.Empty, blk.Tx.ID()) require.Equal(tx.Bytes(), blk.Tx.Bytes()) require.Equal(parentID, blk.Parent()) diff --git a/avalanchego/vms/platformvm/blocks/standard_block.go b/avalanchego/vms/platformvm/blocks/standard_block.go index dba0a857..72684c82 100644 --- a/avalanchego/vms/platformvm/blocks/standard_block.go +++ b/avalanchego/vms/platformvm/blocks/standard_block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package blocks @@ -13,8 +13,8 @@ import ( ) var ( - _ BanffBlock = &BanffStandardBlock{} - _ Block = &ApricotStandardBlock{} + _ BanffBlock = (*BanffStandardBlock)(nil) + _ Block = (*ApricotStandardBlock)(nil) ) type BanffStandardBlock struct { @@ -22,8 +22,13 @@ type BanffStandardBlock struct { ApricotStandardBlock `serialize:"true"` } -func (b *BanffStandardBlock) Timestamp() time.Time { return time.Unix(int64(b.Time), 0) } -func (b *BanffStandardBlock) Visit(v Visitor) error { return v.BanffStandardBlock(b) } +func (b *BanffStandardBlock) Timestamp() time.Time { + return time.Unix(int64(b.Time), 0) +} + +func (b *BanffStandardBlock) Visit(v Visitor) error { + return v.BanffStandardBlock(b) +} func NewBanffStandardBlock( timestamp time.Time, @@ -52,7 +57,7 @@ type ApricotStandardBlock struct { func (b *ApricotStandardBlock) initialize(bytes []byte) error { b.CommonBlock.initialize(bytes) for _, tx := range b.Transactions { - if err := tx.Sign(txs.Codec, nil); err != nil { + if err := tx.Initialize(txs.Codec); err != nil { return fmt.Errorf("failed to sign block: %w", err) } } @@ -65,9 +70,17 @@ func (b *ApricotStandardBlock) InitCtx(ctx *snow.Context) { } } -func (b *ApricotStandardBlock) Txs() []*txs.Tx { return b.Transactions } -func (b *ApricotStandardBlock) Visit(v Visitor) error { return v.ApricotStandardBlock(b) } +func (b *ApricotStandardBlock) Txs() []*txs.Tx { + return b.Transactions +} + +func (b *ApricotStandardBlock) Visit(v Visitor) error { + return v.ApricotStandardBlock(b) +} +// NewApricotStandardBlock is kept for testing purposes only. 
+// Following Banff activation and subsequent code cleanup, Apricot Standard blocks +// should be only verified (upon bootstrap), never created anymore func NewApricotStandardBlock( parentID ids.ID, height uint64, diff --git a/avalanchego/vms/platformvm/blocks/standard_block_test.go b/avalanchego/vms/platformvm/blocks/standard_block_test.go index ac9578e2..b5c5c065 100644 --- a/avalanchego/vms/platformvm/blocks/standard_block_test.go +++ b/avalanchego/vms/platformvm/blocks/standard_block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -33,14 +32,14 @@ func TestNewBanffStandardBlock(t *testing.T) { }, }, StakeOuts: []*avax.TransferableOutput{}, - Validator: validator.Validator{}, + Validator: txs.Validator{}, RewardsOwner: &secp256k1fx.OutputOwners{ Addrs: []ids.ShortID{}, }, }, Creds: []verify.Verifiable{}, } - require.NoError(tx.Sign(txs.Codec, nil)) + require.NoError(tx.Initialize(txs.Codec)) blk, err := NewBanffStandardBlock( timestamp, @@ -51,8 +50,8 @@ func TestNewBanffStandardBlock(t *testing.T) { require.NoError(err) // Make sure the block and tx are initialized - require.NotNil(blk.Bytes()) - require.NotNil(blk.Transactions[0].Bytes()) + require.NotEmpty(blk.Bytes()) + require.NotEmpty(blk.Transactions[0].Bytes()) require.NotEqual(ids.Empty, blk.Transactions[0].ID()) require.Equal(tx.Bytes(), blk.Transactions[0].Bytes()) require.Equal(timestamp, blk.Timestamp()) @@ -75,14 +74,14 @@ func TestNewApricotStandardBlock(t *testing.T) { }, }, StakeOuts: []*avax.TransferableOutput{}, - Validator: 
validator.Validator{}, + Validator: txs.Validator{}, RewardsOwner: &secp256k1fx.OutputOwners{ Addrs: []ids.ShortID{}, }, }, Creds: []verify.Verifiable{}, } - require.NoError(tx.Sign(txs.Codec, nil)) + require.NoError(tx.Initialize(txs.Codec)) blk, err := NewApricotStandardBlock( parentID, @@ -92,8 +91,8 @@ func TestNewApricotStandardBlock(t *testing.T) { require.NoError(err) // Make sure the block and tx are initialized - require.NotNil(blk.Bytes()) - require.NotNil(blk.Transactions[0].Bytes()) + require.NotEmpty(blk.Bytes()) + require.NotEmpty(blk.Transactions[0].Bytes()) require.NotEqual(ids.Empty, blk.Transactions[0].ID()) require.Equal(tx.Bytes(), blk.Transactions[0].Bytes()) require.Equal(parentID, blk.Parent()) diff --git a/avalanchego/vms/platformvm/blocks/visitor.go b/avalanchego/vms/platformvm/blocks/visitor.go index 9cee93b8..929c615b 100644 --- a/avalanchego/vms/platformvm/blocks/visitor.go +++ b/avalanchego/vms/platformvm/blocks/visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package blocks diff --git a/avalanchego/vms/platformvm/client.go b/avalanchego/vms/platformvm/client.go index 752893cf..8a0cf008 100644 --- a/avalanchego/vms/platformvm/client.go +++ b/avalanchego/vms/platformvm/client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" @@ -20,21 +20,31 @@ import ( platformapi "github.com/ava-labs/avalanchego/vms/platformvm/api" ) -var _ Client = &client{} +var _ Client = (*client)(nil) // Client interface for interacting with the P Chain endpoint type Client interface { // GetHeight returns the current block height of the P Chain GetHeight(ctx context.Context, options ...rpc.Option) (uint64, error) // ExportKey returns the private key corresponding to [address] from [user]'s account - ExportKey(ctx context.Context, user api.UserPass, address ids.ShortID, options ...rpc.Option) (*crypto.PrivateKeySECP256K1R, error) + // + // Deprecated: Keys should no longer be stored on the node. + ExportKey(ctx context.Context, user api.UserPass, address ids.ShortID, options ...rpc.Option) (*secp256k1.PrivateKey, error) // ImportKey imports the specified [privateKey] to [user]'s keystore - ImportKey(ctx context.Context, user api.UserPass, privateKey *crypto.PrivateKeySECP256K1R, options ...rpc.Option) (ids.ShortID, error) + // + // Deprecated: Keys should no longer be stored on the node. + ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (ids.ShortID, error) // GetBalance returns the balance of [addrs] on the P Chain + // + // Deprecated: GetUTXOs should be used instead. GetBalance(ctx context.Context, addrs []ids.ShortID, options ...rpc.Option) (*GetBalanceResponse, error) // CreateAddress creates a new address for [user] + // + // Deprecated: Keys should no longer be stored on the node. 
CreateAddress(ctx context.Context, user api.UserPass, options ...rpc.Option) (ids.ShortID, error) // ListAddresses returns an array of platform addresses controlled by [user] + // + // Deprecated: Keys should no longer be stored on the node. ListAddresses(ctx context.Context, user api.UserPass, options ...rpc.Option) ([]ids.ShortID, error) // GetUTXOs returns the byte representation of the UTXOs controlled by [addrs] GetUTXOs( @@ -57,6 +67,8 @@ type Client interface { options ...rpc.Option, ) ([][]byte, ids.ShortID, ids.ID, error) // GetSubnets returns information about the specified subnets + // + // Deprecated: Subnets should be fetched from a dedicated indexer. GetSubnets(ctx context.Context, subnetIDs []ids.ID, options ...rpc.Option) ([]ClientSubnet, error) // GetStakingAssetID returns the assetID of the asset used for staking on // subnet corresponding to [subnetID] @@ -71,6 +83,9 @@ type Client interface { SampleValidators(ctx context.Context, subnetID ids.ID, sampleSize uint16, options ...rpc.Option) ([]ids.NodeID, error) // AddValidator issues a transaction to add a validator to the primary network // and returns the txID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/p.Wallet` utility. AddValidator( ctx context.Context, user api.UserPass, @@ -86,6 +101,9 @@ type Client interface { ) (ids.ID, error) // AddDelegator issues a transaction to add a delegator to the primary network // and returns the txID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/p.Wallet` utility. AddDelegator( ctx context.Context, user api.UserPass, @@ -100,6 +118,9 @@ type Client interface { ) (ids.ID, error) // AddSubnetValidator issues a transaction to add validator [nodeID] to subnet // with ID [subnetID] and returns the txID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/p.Wallet` utility. 
AddSubnetValidator( ctx context.Context, user api.UserPass, @@ -113,6 +134,9 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // CreateSubnet issues a transaction to create [subnet] and returns the txID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/p.Wallet` utility. CreateSubnet( ctx context.Context, user api.UserPass, @@ -123,6 +147,9 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // ExportAVAX issues an ExportTx transaction and returns the txID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/p.Wallet` utility. ExportAVAX( ctx context.Context, user api.UserPass, @@ -134,6 +161,9 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // ImportAVAX issues an ImportTx transaction and returns the txID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/p.Wallet` utility. ImportAVAX( ctx context.Context, user api.UserPass, @@ -144,6 +174,9 @@ type Client interface { options ...rpc.Option, ) (ids.ID, error) // CreateBlockchain issues a CreateBlockchain transaction and returns the txID + // + // Deprecated: Transactions should be issued using the + // `avalanchego/wallet/chain/p.Wallet` utility. CreateBlockchain( ctx context.Context, user api.UserPass, @@ -163,6 +196,8 @@ type Client interface { // Validates returns the list of blockchains that are validated by the subnet with ID [subnetID] Validates(ctx context.Context, subnetID ids.ID, options ...rpc.Option) ([]ids.ID, error) // GetBlockchains returns the list of blockchains on the platform + // + // Deprecated: Blockchains should be fetched from a dedicated indexer. 
GetBlockchains(ctx context.Context, options ...rpc.Option) ([]APIBlockchain, error) // IssueTx issues the transaction and returns its txID IssueTx(ctx context.Context, tx []byte, options ...rpc.Option) (ids.ID, error) @@ -172,6 +207,8 @@ type Client interface { GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Option) (*GetTxStatusResponse, error) // AwaitTxDecided polls [GetTxStatus] until a status is returned that // implies the tx may be decided. + // TODO: Move this function off of the Client interface into a utility + // function. AwaitTxDecided( ctx context.Context, txID ids.ID, @@ -180,6 +217,9 @@ type Client interface { ) (*GetTxStatusResponse, error) // GetStake returns the amount of nAVAX that [addrs] have cumulatively // staked on the Primary Network. + // + // Deprecated: Stake should be calculated using GetTx, GetCurrentValidators, + // and GetPendingValidators. GetStake(ctx context.Context, addrs []ids.ShortID, options ...rpc.Option) (map[ids.ID]uint64, [][]byte, error) // GetMinStake returns the minimum staking amount in nAVAX for validators // and delegators respectively @@ -188,6 +228,9 @@ type Client interface { GetTotalStake(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, error) // GetMaxStakeAmount returns the maximum amount of nAVAX staking to the named // node during the time period. + // + // Deprecated: The MaxStakeAmount should be calculated using + // GetCurrentValidators, and GetPendingValidators. GetMaxStakeAmount( ctx context.Context, subnetID ids.ID, @@ -197,6 +240,8 @@ type Client interface { options ...rpc.Option, ) (uint64, error) // GetRewardUTXOs returns the reward UTXOs for a transaction + // + // Deprecated: GetRewardUTXOs should be fetched from a dedicated indexer. 
GetRewardUTXOs(context.Context, *api.GetTxArgs, ...rpc.Option) ([][]byte, error) // GetTimestamp returns the current chain timestamp GetTimestamp(ctx context.Context, options ...rpc.Option) (time.Time, error) @@ -215,29 +260,28 @@ type client struct { // NewClient returns a Client for interacting with the P Chain endpoint func NewClient(uri string) Client { return &client{requester: rpc.NewEndpointRequester( - uri+"/ext/P", - "platform", + uri + "/ext/P", )} } func (c *client) GetHeight(ctx context.Context, options ...rpc.Option) (uint64, error) { - res := &GetHeightResponse{} - err := c.requester.SendRequest(ctx, "getHeight", struct{}{}, res, options...) + res := &api.GetHeightResponse{} + err := c.requester.SendRequest(ctx, "platform.getHeight", struct{}{}, res, options...) return uint64(res.Height), err } -func (c *client) ExportKey(ctx context.Context, user api.UserPass, address ids.ShortID, options ...rpc.Option) (*crypto.PrivateKeySECP256K1R, error) { +func (c *client) ExportKey(ctx context.Context, user api.UserPass, address ids.ShortID, options ...rpc.Option) (*secp256k1.PrivateKey, error) { res := &ExportKeyReply{} - err := c.requester.SendRequest(ctx, "exportKey", &ExportKeyArgs{ + err := c.requester.SendRequest(ctx, "platform.exportKey", &ExportKeyArgs{ UserPass: user, Address: address.String(), }, res, options...) return res.PrivateKey, err } -func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *crypto.PrivateKeySECP256K1R, options ...rpc.Option) (ids.ShortID, error) { +func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey, options ...rpc.Option) (ids.ShortID, error) { res := &api.JSONAddress{} - err := c.requester.SendRequest(ctx, "importKey", &ImportKeyArgs{ + err := c.requester.SendRequest(ctx, "platform.importKey", &ImportKeyArgs{ UserPass: user, PrivateKey: privateKey, }, res, options...) 
@@ -249,7 +293,7 @@ func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *c func (c *client) GetBalance(ctx context.Context, addrs []ids.ShortID, options ...rpc.Option) (*GetBalanceResponse, error) { res := &GetBalanceResponse{} - err := c.requester.SendRequest(ctx, "getBalance", &GetBalanceRequest{ + err := c.requester.SendRequest(ctx, "platform.getBalance", &GetBalanceRequest{ Addresses: ids.ShortIDsToStrings(addrs), }, res, options...) return res, err @@ -257,7 +301,7 @@ func (c *client) GetBalance(ctx context.Context, addrs []ids.ShortID, options .. func (c *client) CreateAddress(ctx context.Context, user api.UserPass, options ...rpc.Option) (ids.ShortID, error) { res := &api.JSONAddress{} - err := c.requester.SendRequest(ctx, "createAddress", &user, res, options...) + err := c.requester.SendRequest(ctx, "platform.createAddress", &user, res, options...) if err != nil { return ids.ShortID{}, err } @@ -266,7 +310,7 @@ func (c *client) CreateAddress(ctx context.Context, user api.UserPass, options . func (c *client) ListAddresses(ctx context.Context, user api.UserPass, options ...rpc.Option) ([]ids.ShortID, error) { res := &api.JSONAddresses{} - err := c.requester.SendRequest(ctx, "listAddresses", &user, res, options...) + err := c.requester.SendRequest(ctx, "platform.listAddresses", &user, res, options...) 
if err != nil { return nil, err } @@ -294,7 +338,7 @@ func (c *client) GetAtomicUTXOs( options ...rpc.Option, ) ([][]byte, ids.ShortID, ids.ID, error) { res := &api.GetUTXOsReply{} - err := c.requester.SendRequest(ctx, "getUTXOs", &api.GetUTXOsArgs{ + err := c.requester.SendRequest(ctx, "platform.getUTXOs", &api.GetUTXOsArgs{ Addresses: ids.ShortIDsToStrings(addrs), SourceChain: sourceChain, Limit: json.Uint32(limit), @@ -337,7 +381,7 @@ type ClientSubnet struct { func (c *client) GetSubnets(ctx context.Context, ids []ids.ID, options ...rpc.Option) ([]ClientSubnet, error) { res := &GetSubnetsResponse{} - err := c.requester.SendRequest(ctx, "getSubnets", &GetSubnetsArgs{ + err := c.requester.SendRequest(ctx, "platform.getSubnets", &GetSubnetsArgs{ IDs: ids, }, res, options...) if err != nil { @@ -361,7 +405,7 @@ func (c *client) GetSubnets(ctx context.Context, ids []ids.ID, options ...rpc.Op func (c *client) GetStakingAssetID(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (ids.ID, error) { res := &GetStakingAssetIDResponse{} - err := c.requester.SendRequest(ctx, "getStakingAssetID", &GetStakingAssetIDArgs{ + err := c.requester.SendRequest(ctx, "platform.getStakingAssetID", &GetStakingAssetIDArgs{ SubnetID: subnetID, }, res, options...) return res.AssetID, err @@ -374,7 +418,7 @@ func (c *client) GetCurrentValidators( options ...rpc.Option, ) ([]ClientPermissionlessValidator, error) { res := &GetCurrentValidatorsReply{} - err := c.requester.SendRequest(ctx, "getCurrentValidators", &GetCurrentValidatorsArgs{ + err := c.requester.SendRequest(ctx, "platform.getCurrentValidators", &GetCurrentValidatorsArgs{ SubnetID: subnetID, NodeIDs: nodeIDs, }, res, options...) 
@@ -391,7 +435,7 @@ func (c *client) GetPendingValidators( options ...rpc.Option, ) ([]interface{}, []interface{}, error) { res := &GetPendingValidatorsReply{} - err := c.requester.SendRequest(ctx, "getPendingValidators", &GetPendingValidatorsArgs{ + err := c.requester.SendRequest(ctx, "platform.getPendingValidators", &GetPendingValidatorsArgs{ SubnetID: subnetID, NodeIDs: nodeIDs, }, res, options...) @@ -400,7 +444,7 @@ func (c *client) GetPendingValidators( func (c *client) GetCurrentSupply(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, error) { res := &GetCurrentSupplyReply{} - err := c.requester.SendRequest(ctx, "getCurrentSupply", &GetCurrentSupplyArgs{ + err := c.requester.SendRequest(ctx, "platform.getCurrentSupply", &GetCurrentSupplyArgs{ SubnetID: subnetID, }, res, options...) return uint64(res.Supply), err @@ -408,7 +452,7 @@ func (c *client) GetCurrentSupply(ctx context.Context, subnetID ids.ID, options func (c *client) SampleValidators(ctx context.Context, subnetID ids.ID, sampleSize uint16, options ...rpc.Option) ([]ids.NodeID, error) { res := &SampleValidatorsReply{} - err := c.requester.SendRequest(ctx, "sampleValidators", &SampleValidatorsArgs{ + err := c.requester.SendRequest(ctx, "platform.sampleValidators", &SampleValidatorsArgs{ SubnetID: subnetID, Size: json.Uint16(sampleSize), }, res, options...) 
@@ -430,7 +474,7 @@ func (c *client) AddValidator( ) (ids.ID, error) { res := &api.JSONTxID{} jsonStakeAmount := json.Uint64(stakeAmount) - err := c.requester.SendRequest(ctx, "addValidator", &AddValidatorArgs{ + err := c.requester.SendRequest(ctx, "platform.addValidator", &AddValidatorArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -438,6 +482,7 @@ func (c *client) AddValidator( }, Staker: platformapi.Staker{ NodeID: nodeID, + Weight: jsonStakeAmount, StakeAmount: &jsonStakeAmount, StartTime: json.Uint64(startTime), EndTime: json.Uint64(endTime), @@ -462,7 +507,7 @@ func (c *client) AddDelegator( ) (ids.ID, error) { res := &api.JSONTxID{} jsonStakeAmount := json.Uint64(stakeAmount) - err := c.requester.SendRequest(ctx, "addDelegator", &AddDelegatorArgs{ + err := c.requester.SendRequest(ctx, "platform.addDelegator", &AddDelegatorArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -470,6 +515,7 @@ func (c *client) AddDelegator( }, Staker: platformapi.Staker{ NodeID: nodeID, + Weight: jsonStakeAmount, StakeAmount: &jsonStakeAmount, StartTime: json.Uint64(startTime), EndTime: json.Uint64(endTime), @@ -493,7 +539,7 @@ func (c *client) AddSubnetValidator( ) (ids.ID, error) { res := &api.JSONTxID{} jsonStakeAmount := json.Uint64(stakeAmount) - err := c.requester.SendRequest(ctx, "addSubnetValidator", &AddSubnetValidatorArgs{ + err := c.requester.SendRequest(ctx, "platform.addSubnetValidator", &AddSubnetValidatorArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -501,6 +547,7 @@ func (c *client) AddSubnetValidator( }, Staker: platformapi.Staker{ NodeID: nodeID, + Weight: jsonStakeAmount, StakeAmount: &jsonStakeAmount, StartTime: json.Uint64(startTime), EndTime: json.Uint64(endTime), @@ -520,7 +567,7 @@ func (c *client) 
CreateSubnet( options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "createSubnet", &CreateSubnetArgs{ + err := c.requester.SendRequest(ctx, "platform.createSubnet", &CreateSubnetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -545,7 +592,7 @@ func (c *client) ExportAVAX( options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "exportAVAX", &ExportAVAXArgs{ + err := c.requester.SendRequest(ctx, "platform.exportAVAX", &ExportAVAXArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -568,7 +615,7 @@ func (c *client) ImportAVAX( options ...rpc.Option, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "importAVAX", &ImportAVAXArgs{ + err := c.requester.SendRequest(ctx, "platform.importAVAX", &ImportAVAXArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -598,7 +645,7 @@ func (c *client) CreateBlockchain( } res := &api.JSONTxID{} - err = c.requester.SendRequest(ctx, "createBlockchain", &CreateBlockchainArgs{ + err = c.requester.SendRequest(ctx, "platform.createBlockchain", &CreateBlockchainArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: user, JSONFromAddrs: api.JSONFromAddrs{From: ids.ShortIDsToStrings(from)}, @@ -616,7 +663,7 @@ func (c *client) CreateBlockchain( func (c *client) GetBlockchainStatus(ctx context.Context, blockchainID string, options ...rpc.Option) (status.BlockchainStatus, error) { res := &GetBlockchainStatusReply{} - err := c.requester.SendRequest(ctx, "getBlockchainStatus", &GetBlockchainStatusArgs{ + err := c.requester.SendRequest(ctx, "platform.getBlockchainStatus", &GetBlockchainStatusArgs{ BlockchainID: blockchainID, }, res, options...) 
return res.Status, err @@ -624,7 +671,7 @@ func (c *client) GetBlockchainStatus(ctx context.Context, blockchainID string, o func (c *client) ValidatedBy(ctx context.Context, blockchainID ids.ID, options ...rpc.Option) (ids.ID, error) { res := &ValidatedByResponse{} - err := c.requester.SendRequest(ctx, "validatedBy", &ValidatedByArgs{ + err := c.requester.SendRequest(ctx, "platform.validatedBy", &ValidatedByArgs{ BlockchainID: blockchainID, }, res, options...) return res.SubnetID, err @@ -632,7 +679,7 @@ func (c *client) ValidatedBy(ctx context.Context, blockchainID ids.ID, options . func (c *client) Validates(ctx context.Context, subnetID ids.ID, options ...rpc.Option) ([]ids.ID, error) { res := &ValidatesResponse{} - err := c.requester.SendRequest(ctx, "validates", &ValidatesArgs{ + err := c.requester.SendRequest(ctx, "platform.validates", &ValidatesArgs{ SubnetID: subnetID, }, res, options...) return res.BlockchainIDs, err @@ -640,7 +687,7 @@ func (c *client) Validates(ctx context.Context, subnetID ids.ID, options ...rpc. func (c *client) GetBlockchains(ctx context.Context, options ...rpc.Option) ([]APIBlockchain, error) { res := &GetBlockchainsResponse{} - err := c.requester.SendRequest(ctx, "getBlockchains", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "platform.getBlockchains", struct{}{}, res, options...) return res.Blockchains, err } @@ -651,7 +698,7 @@ func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Opt } res := &api.JSONTxID{} - err = c.requester.SendRequest(ctx, "issueTx", &api.FormattedTx{ + err = c.requester.SendRequest(ctx, "platform.issueTx", &api.FormattedTx{ Tx: txStr, Encoding: formatting.Hex, }, res, options...) 
@@ -660,7 +707,7 @@ func (c *client) IssueTx(ctx context.Context, txBytes []byte, options ...rpc.Opt func (c *client) GetTx(ctx context.Context, txID ids.ID, options ...rpc.Option) ([]byte, error) { res := &api.FormattedTx{} - err := c.requester.SendRequest(ctx, "getTx", &api.GetTxArgs{ + err := c.requester.SendRequest(ctx, "platform.getTx", &api.GetTxArgs{ TxID: txID, Encoding: formatting.Hex, }, res, options...) @@ -674,7 +721,7 @@ func (c *client) GetTxStatus(ctx context.Context, txID ids.ID, options ...rpc.Op res := new(GetTxStatusResponse) err := c.requester.SendRequest( ctx, - "getTxStatus", + "platform.getTxStatus", &GetTxStatusArgs{ TxID: txID, }, @@ -707,7 +754,7 @@ func (c *client) AwaitTxDecided(ctx context.Context, txID ids.ID, freq time.Dura func (c *client) GetStake(ctx context.Context, addrs []ids.ShortID, options ...rpc.Option) (map[ids.ID]uint64, [][]byte, error) { res := new(GetStakeReply) - err := c.requester.SendRequest(ctx, "getStake", &GetStakeArgs{ + err := c.requester.SendRequest(ctx, "platform.getStake", &GetStakeArgs{ JSONAddresses: api.JSONAddresses{ Addresses: ids.ShortIDsToStrings(addrs), }, @@ -735,7 +782,7 @@ func (c *client) GetStake(ctx context.Context, addrs []ids.ShortID, options ...r func (c *client) GetMinStake(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, uint64, error) { res := new(GetMinStakeReply) - err := c.requester.SendRequest(ctx, "getMinStake", &GetMinStakeArgs{ + err := c.requester.SendRequest(ctx, "platform.getMinStake", &GetMinStakeArgs{ SubnetID: subnetID, }, res, options...) 
return uint64(res.MinValidatorStake), uint64(res.MinDelegatorStake), err @@ -743,7 +790,7 @@ func (c *client) GetMinStake(ctx context.Context, subnetID ids.ID, options ...rp func (c *client) GetTotalStake(ctx context.Context, subnetID ids.ID, options ...rpc.Option) (uint64, error) { res := new(GetTotalStakeReply) - err := c.requester.SendRequest(ctx, "getTotalStake", &GetTotalStakeArgs{ + err := c.requester.SendRequest(ctx, "platform.getTotalStake", &GetTotalStakeArgs{ SubnetID: subnetID, }, res, options...) var amount json.Uint64 @@ -757,7 +804,7 @@ func (c *client) GetTotalStake(ctx context.Context, subnetID ids.ID, options ... func (c *client) GetMaxStakeAmount(ctx context.Context, subnetID ids.ID, nodeID ids.NodeID, startTime, endTime uint64, options ...rpc.Option) (uint64, error) { res := new(GetMaxStakeAmountReply) - err := c.requester.SendRequest(ctx, "getMaxStakeAmount", &GetMaxStakeAmountArgs{ + err := c.requester.SendRequest(ctx, "platform.getMaxStakeAmount", &GetMaxStakeAmountArgs{ SubnetID: subnetID, NodeID: nodeID, StartTime: json.Uint64(startTime), @@ -768,7 +815,7 @@ func (c *client) GetMaxStakeAmount(ctx context.Context, subnetID ids.ID, nodeID func (c *client) GetRewardUTXOs(ctx context.Context, args *api.GetTxArgs, options ...rpc.Option) ([][]byte, error) { res := &GetRewardUTXOsReply{} - err := c.requester.SendRequest(ctx, "getRewardUTXOs", args, res, options...) + err := c.requester.SendRequest(ctx, "platform.getRewardUTXOs", args, res, options...) if err != nil { return nil, err } @@ -785,13 +832,13 @@ func (c *client) GetRewardUTXOs(ctx context.Context, args *api.GetTxArgs, option func (c *client) GetTimestamp(ctx context.Context, options ...rpc.Option) (time.Time, error) { res := &GetTimestampReply{} - err := c.requester.SendRequest(ctx, "getTimestamp", struct{}{}, res, options...) + err := c.requester.SendRequest(ctx, "platform.getTimestamp", struct{}{}, res, options...) 
return res.Timestamp, err } func (c *client) GetValidatorsAt(ctx context.Context, subnetID ids.ID, height uint64, options ...rpc.Option) (map[ids.NodeID]uint64, error) { res := &GetValidatorsAtReply{} - err := c.requester.SendRequest(ctx, "getValidatorsAt", &GetValidatorsAtArgs{ + err := c.requester.SendRequest(ctx, "platform.getValidatorsAt", &GetValidatorsAtArgs{ SubnetID: subnetID, Height: json.Uint64(height), }, res, options...) @@ -800,7 +847,7 @@ func (c *client) GetValidatorsAt(ctx context.Context, subnetID ids.ID, height ui func (c *client) GetBlock(ctx context.Context, blockID ids.ID, options ...rpc.Option) ([]byte, error) { response := &api.FormattedBlock{} - if err := c.requester.SendRequest(ctx, "getBlock", &api.GetBlockArgs{ + if err := c.requester.SendRequest(ctx, "platform.getBlock", &api.GetBlockArgs{ BlockID: blockID, Encoding: formatting.Hex, }, response, options...); err != nil { diff --git a/avalanchego/vms/platformvm/client_permissionless_validator.go b/avalanchego/vms/platformvm/client_permissionless_validator.go index 3baa426e..c9baac85 100644 --- a/avalanchego/vms/platformvm/client_permissionless_validator.go +++ b/avalanchego/vms/platformvm/client_permissionless_validator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/vms/platformvm/api" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" ) // ClientStaker is the representation of a staker sent via client. @@ -20,7 +21,7 @@ type ClientStaker struct { // the Unix time when they are done staking EndTime uint64 // the validator weight when sampling validators - Weight *uint64 + Weight uint64 // the amount of tokens being staked. 
StakeAmount *uint64 // the node ID of the staker @@ -38,14 +39,18 @@ type ClientOwner struct { // over client type ClientPermissionlessValidator struct { ClientStaker - ValidationRewardOwner *ClientOwner - DelegationRewardOwner *ClientOwner - PotentialReward *uint64 - DelegationFee float32 - Uptime *float32 - Connected *bool + ValidationRewardOwner *ClientOwner + DelegationRewardOwner *ClientOwner + PotentialReward *uint64 + AccruedDelegateeReward *uint64 + DelegationFee float32 + Uptime *float32 + Connected *bool + Signer *signer.ProofOfPossession // The delegators delegating to this validator - Delegators []ClientDelegator + DelegatorCount *uint64 + DelegatorWeight *uint64 + Delegators []ClientDelegator } // ClientDelegator is the repr. of a delegator sent over client @@ -60,7 +65,7 @@ func apiStakerToClientStaker(validator api.Staker) ClientStaker { TxID: validator.TxID, StartTime: uint64(validator.StartTime), EndTime: uint64(validator.EndTime), - Weight: (*uint64)(validator.Weight), + Weight: uint64(validator.Weight), StakeAmount: (*uint64)(validator.StakeAmount), NodeID: validator.NodeID, } @@ -103,29 +108,36 @@ func getClientPermissionlessValidators(validatorsSliceIntf []interface{}) ([]Cli return nil, err } - clientDelegators := make([]ClientDelegator, len(apiValidator.Delegators)) - for j, apiDelegator := range apiValidator.Delegators { - rewardOwner, err := apiOwnerToClientOwner(apiDelegator.RewardOwner) - if err != nil { - return nil, err - } - - clientDelegators[j] = ClientDelegator{ - ClientStaker: apiStakerToClientStaker(apiDelegator.Staker), - RewardOwner: rewardOwner, - PotentialReward: (*uint64)(apiDelegator.PotentialReward), + var clientDelegators []ClientDelegator + if apiValidator.Delegators != nil { + clientDelegators = make([]ClientDelegator, len(*apiValidator.Delegators)) + for j, apiDelegator := range *apiValidator.Delegators { + rewardOwner, err := apiOwnerToClientOwner(apiDelegator.RewardOwner) + if err != nil { + return nil, err + } + + 
clientDelegators[j] = ClientDelegator{ + ClientStaker: apiStakerToClientStaker(apiDelegator.Staker), + RewardOwner: rewardOwner, + PotentialReward: (*uint64)(apiDelegator.PotentialReward), + } } } clientValidators[i] = ClientPermissionlessValidator{ - ClientStaker: apiStakerToClientStaker(apiValidator.Staker), - ValidationRewardOwner: validationRewardOwner, - DelegationRewardOwner: delegationRewardOwner, - PotentialReward: (*uint64)(apiValidator.PotentialReward), - DelegationFee: float32(apiValidator.DelegationFee), - Uptime: (*float32)(apiValidator.Uptime), - Connected: &apiValidator.Connected, - Delegators: clientDelegators, + ClientStaker: apiStakerToClientStaker(apiValidator.Staker), + ValidationRewardOwner: validationRewardOwner, + DelegationRewardOwner: delegationRewardOwner, + PotentialReward: (*uint64)(apiValidator.PotentialReward), + AccruedDelegateeReward: (*uint64)(apiValidator.AccruedDelegateeReward), + DelegationFee: float32(apiValidator.DelegationFee), + Uptime: (*float32)(apiValidator.Uptime), + Connected: &apiValidator.Connected, + Signer: apiValidator.Signer, + DelegatorCount: (*uint64)(apiValidator.DelegatorCount), + DelegatorWeight: (*uint64)(apiValidator.DelegatorWeight), + Delegators: clientDelegators, } } return clientValidators, nil diff --git a/avalanchego/vms/platformvm/config/config.go b/avalanchego/vms/platformvm/config/config.go index f86cbf50..02b3e6f3 100644 --- a/avalanchego/vms/platformvm/config/config.go +++ b/avalanchego/vms/platformvm/config/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package config @@ -8,10 +8,10 @@ import ( "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -22,11 +22,13 @@ type Config struct { Chains chains.Manager // Node's validator set maps subnetID -> validators of the subnet + // + // Invariant: The primary network's validator set should have been added to + // the manager before calling VM.Initialize. + // Invariant: The primary network's validator set should be empty before + // calling VM.Initialize. Validators validators.Manager - // Provides access to subnet tracking - SubnetTracker common.SubnetTracker - // Provides access to the uptime manager as a thread safe data structure UptimeLockedCalculator uptime.LockedCalculator @@ -34,7 +36,7 @@ type Config struct { StakingEnabled bool // Set of subnets that this node is validating - WhitelistedSubnets ids.Set + TrackedSubnets set.Set[ids.ID] // Fee that is burned by every non-state creating transaction TxFee uint64 @@ -95,6 +97,27 @@ type Config struct { // Time of the Banff network upgrade BanffTime time.Time + + // Time of the Cortina network upgrade + CortinaTime time.Time + + // Subnet ID --> Minimum portion of the subnet's stake this node must be + // connected to in order to report healthy. + // [constants.PrimaryNetworkID] is always a key in this map. + // If a subnet is in this map, but it isn't tracked, its corresponding value + // isn't used. + // If a subnet is tracked but not in this map, we use the value for the + // Primary Network. 
+ MinPercentConnectedStakeHealthy map[ids.ID]float64 + + // UseCurrentHeight forces [GetMinimumHeight] to return the current height + // of the P-Chain instead of the oldest block in the [recentlyAccepted] + // window. + // + // This config is particularly useful for triggering proposervm activation + // on recently created subnets (without this, users need to wait for + // [recentlyAcceptedWindowTTL] to pass for activation to occur). + UseCurrentHeight bool } func (c *Config) IsApricotPhase3Activated(timestamp time.Time) bool { @@ -128,15 +151,17 @@ func (c *Config) GetCreateSubnetTxFee(timestamp time.Time) uint64 { func (c *Config) CreateChain(chainID ids.ID, tx *txs.CreateChainTx) { if c.StakingEnabled && // Staking is enabled, so nodes might not validate all chains constants.PrimaryNetworkID != tx.SubnetID && // All nodes must validate the primary network - !c.WhitelistedSubnets.Contains(tx.SubnetID) { // This node doesn't validate this blockchain + !c.TrackedSubnets.Contains(tx.SubnetID) { // This node doesn't validate this blockchain return } - c.Chains.CreateChain(chains.ChainParameters{ + chainParams := chains.ChainParameters{ ID: chainID, SubnetID: tx.SubnetID, GenesisData: tx.GenesisData, VMID: tx.VMID, FxIDs: tx.FxIDs, - }) + } + + c.Chains.QueueChainCreation(chainParams) } diff --git a/avalanchego/vms/platformvm/docs/chain_time_update.md b/avalanchego/vms/platformvm/docs/chain_time_update.md new file mode 100644 index 00000000..622b1fe7 --- /dev/null +++ b/avalanchego/vms/platformvm/docs/chain_time_update.md @@ -0,0 +1,35 @@ +# Chain time update mechanism + +The activation of the Banff fork changes the way P-chain tracks its `ChainTime`. In this brief document we detail these changes. + +## About `ChainTime` + +One of the P-chain's main responsibilities is to record staking periods of any staker (i.e. any validator or delegator) on any subnet to duly reward their activity. 
+ +The P-chain tracks a network agreed timestamp called `ChainTime` that allows nodes to reach agreement about when a staker starts and stops staking. These start/stop times are basic inputs to determine whether the staker should be rewarded based on what percentage of `ChainTime` it was perceived as active from other validators. + +Note that this `ChainTime` has nothing to do with the `Snowman++` timestamp. `Snowman++` timestamps are local times used to reduce network congestion and have no role in rewarding of any staker. + +## Pre Banff fork context + +Before the Banff fork activation, `ChainTime` was incremented by an `AdvanceTimeTx` transaction, being included into an `ApricotProposalBlock` block type. Validators voted on `ChainTime` advance by accepting either the `ApricotCommitBlock` or the `ApricotAbortBlock` following the `ApricotProposalBlock`. `ChainTime` was moved ahead only if the `CommitBlock` was accepted. + +`AdvanceTimeTx` transactions are subject to three main validations: + +1. *Strict Monotonicity*: proposed time must be *strictly* greater than current `ChainTime`. +2. *Synchronicity*: proposed time must not be greater than node’s current time plus a synchronicity bound (currently set to 10 seconds). +3. *No Skipping*: proposed time must be less than or equal to the next staking event, that is start/end of any staker. + +Note that *Synchronicity* makes sure that `ChainTime` approximates “real” time flow. If we dropped synchronicity requirement, a staker could declare any staking time and immediately push `ChainTime` to the end, so as to pocket a reward without having actually carried out any activity in the “real” time. + +## Post Banff fork context + +Following the Banff fork activation, `AdvanceTimeTx`s cannot be included anymore in any block. Instead, each P-chain block type explicitly serializes a timestamp so that `ChainTime` is set to the block timestamp once the block is accepted. 
+ +Validation rules for block timestamps vary slightly depending on block types: + +* `BanffCommitBlock`s and `BanffAbortBlock`s timestamp must be equal to the timestamp of the `BanffProposalBlock` they depend upon. +* `BanffStandardBlock`s and `BanffProposalBlock`s share `AdvanceTimeTx`s validation rules with the exception of the *strict monotonicity*: + 1. *Monotonicity*: block timestamp must be *greater than or equal to* the current `ChainTime` (which is also its parent's timestamp if the parent was accepted). + 2. *Synchronicity*: block timestamp must not be greater than node’s current time plus a synchronicity bound (currently set to 10 seconds). + 3. *No Skipping*: proposed time must be less than or equal to the next staking event (a staker starting or stopping). diff --git a/avalanchego/vms/platformvm/factory.go b/avalanchego/vms/platformvm/factory.go index 045c93a4..5673bebe 100644 --- a/avalanchego/vms/platformvm/factory.go +++ b/avalanchego/vms/platformvm/factory.go @@ -1,15 +1,15 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms.
package platformvm import ( - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/platformvm/config" ) -var _ vms.Factory = &Factory{} +var _ vms.Factory = (*Factory)(nil) // Factory can create new instances of the Platform Chain type Factory struct { @@ -17,6 +17,6 @@ type Factory struct { } // New returns a new instance of the Platform Chain -func (f *Factory) New(*snow.Context) (interface{}, error) { - return &VM{Factory: *f}, nil +func (f *Factory) New(logging.Logger) (interface{}, error) { + return &VM{Config: f.Config}, nil } diff --git a/avalanchego/vms/platformvm/fx/fx.go b/avalanchego/vms/platformvm/fx/fx.go index d2f15ad0..8bb95a2e 100644 --- a/avalanchego/vms/platformvm/fx/fx.go +++ b/avalanchego/vms/platformvm/fx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package fx @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ Fx = &secp256k1fx.Fx{} +var _ Fx = (*secp256k1fx.Fx)(nil) // Fx is the interface a feature extension must implement to support the // Platform Chain. diff --git a/avalanchego/vms/platformvm/fx/mock_fx.go b/avalanchego/vms/platformvm/fx/mock_fx.go index e1a71eec..78a2d4e6 100644 --- a/avalanchego/vms/platformvm/fx/mock_fx.go +++ b/avalanchego/vms/platformvm/fx/mock_fx.go @@ -1,5 +1,8 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. -// Source: vms/platformvm/fx/fx.go +// Source: github.com/ava-labs/avalanchego/vms/platformvm/fx (interfaces: Fx,Owner) // Package fx is a generated GoMock package. package fx @@ -63,60 +66,60 @@ func (mr *MockFxMockRecorder) Bootstrapping() *gomock.Call { } // CreateOutput mocks base method. 
-func (m *MockFx) CreateOutput(amount uint64, controlGroup interface{}) (interface{}, error) { +func (m *MockFx) CreateOutput(arg0 uint64, arg1 interface{}) (interface{}, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CreateOutput", amount, controlGroup) + ret := m.ctrl.Call(m, "CreateOutput", arg0, arg1) ret0, _ := ret[0].(interface{}) ret1, _ := ret[1].(error) return ret0, ret1 } // CreateOutput indicates an expected call of CreateOutput. -func (mr *MockFxMockRecorder) CreateOutput(amount, controlGroup interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) CreateOutput(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOutput", reflect.TypeOf((*MockFx)(nil).CreateOutput), amount, controlGroup) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOutput", reflect.TypeOf((*MockFx)(nil).CreateOutput), arg0, arg1) } // Initialize mocks base method. -func (m *MockFx) Initialize(vm interface{}) error { +func (m *MockFx) Initialize(arg0 interface{}) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Initialize", vm) + ret := m.ctrl.Call(m, "Initialize", arg0) ret0, _ := ret[0].(error) return ret0 } // Initialize indicates an expected call of Initialize. -func (mr *MockFxMockRecorder) Initialize(vm interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) Initialize(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockFx)(nil).Initialize), vm) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockFx)(nil).Initialize), arg0) } // VerifyPermission mocks base method. 
-func (m *MockFx) VerifyPermission(tx, in, cred, controlGroup interface{}) error { +func (m *MockFx) VerifyPermission(arg0, arg1, arg2, arg3 interface{}) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyPermission", tx, in, cred, controlGroup) + ret := m.ctrl.Call(m, "VerifyPermission", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // VerifyPermission indicates an expected call of VerifyPermission. -func (mr *MockFxMockRecorder) VerifyPermission(tx, in, cred, controlGroup interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) VerifyPermission(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPermission", reflect.TypeOf((*MockFx)(nil).VerifyPermission), tx, in, cred, controlGroup) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyPermission", reflect.TypeOf((*MockFx)(nil).VerifyPermission), arg0, arg1, arg2, arg3) } // VerifyTransfer mocks base method. -func (m *MockFx) VerifyTransfer(tx, in, cred, utxo interface{}) error { +func (m *MockFx) VerifyTransfer(arg0, arg1, arg2, arg3 interface{}) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "VerifyTransfer", tx, in, cred, utxo) + ret := m.ctrl.Call(m, "VerifyTransfer", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // VerifyTransfer indicates an expected call of VerifyTransfer. -func (mr *MockFxMockRecorder) VerifyTransfer(tx, in, cred, utxo interface{}) *gomock.Call { +func (mr *MockFxMockRecorder) VerifyTransfer(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTransfer", reflect.TypeOf((*MockFx)(nil).VerifyTransfer), tx, in, cred, utxo) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "VerifyTransfer", reflect.TypeOf((*MockFx)(nil).VerifyTransfer), arg0, arg1, arg2, arg3) } // MockOwner is a mock of Owner interface. 
@@ -143,15 +146,15 @@ func (m *MockOwner) EXPECT() *MockOwnerMockRecorder { } // InitCtx mocks base method. -func (m *MockOwner) InitCtx(ctx *snow.Context) { +func (m *MockOwner) InitCtx(arg0 *snow.Context) { m.ctrl.T.Helper() - m.ctrl.Call(m, "InitCtx", ctx) + m.ctrl.Call(m, "InitCtx", arg0) } // InitCtx indicates an expected call of InitCtx. -func (mr *MockOwnerMockRecorder) InitCtx(ctx interface{}) *gomock.Call { +func (mr *MockOwnerMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockOwner)(nil).InitCtx), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockOwner)(nil).InitCtx), arg0) } // Verify mocks base method. @@ -167,40 +170,3 @@ func (mr *MockOwnerMockRecorder) Verify() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockOwner)(nil).Verify)) } - -// MockOwned is a mock of Owned interface. -type MockOwned struct { - ctrl *gomock.Controller - recorder *MockOwnedMockRecorder -} - -// MockOwnedMockRecorder is the mock recorder for MockOwned. -type MockOwnedMockRecorder struct { - mock *MockOwned -} - -// NewMockOwned creates a new mock instance. -func NewMockOwned(ctrl *gomock.Controller) *MockOwned { - mock := &MockOwned{ctrl: ctrl} - mock.recorder = &MockOwnedMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockOwned) EXPECT() *MockOwnedMockRecorder { - return m.recorder -} - -// Owners mocks base method. -func (m *MockOwned) Owners() interface{} { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Owners") - ret0, _ := ret[0].(interface{}) - return ret0 -} - -// Owners indicates an expected call of Owners. 
-func (mr *MockOwnedMockRecorder) Owners() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Owners", reflect.TypeOf((*MockOwned)(nil).Owners)) -} diff --git a/avalanchego/vms/platformvm/genesis/codec.go b/avalanchego/vms/platformvm/genesis/codec.go index f112ea34..29f19bc8 100644 --- a/avalanchego/vms/platformvm/genesis/codec.go +++ b/avalanchego/vms/platformvm/genesis/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis diff --git a/avalanchego/vms/platformvm/genesis/genesis.go b/avalanchego/vms/platformvm/genesis/genesis.go index 65cb47dc..6a62f978 100644 --- a/avalanchego/vms/platformvm/genesis/genesis.go +++ b/avalanchego/vms/platformvm/genesis/genesis.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package genesis @@ -30,12 +30,12 @@ func Parse(genesisBytes []byte) (*Genesis, error) { return nil, err } for _, tx := range gen.Validators { - if err := tx.Sign(txs.GenesisCodec, nil); err != nil { + if err := tx.Initialize(txs.GenesisCodec); err != nil { return nil, err } } for _, tx := range gen.Chains { - if err := tx.Sign(txs.GenesisCodec, nil); err != nil { + if err := tx.Initialize(txs.GenesisCodec); err != nil { return nil, err } } diff --git a/avalanchego/vms/platformvm/health.go b/avalanchego/vms/platformvm/health.go index 249766a3..38073674 100644 --- a/avalanchego/vms/platformvm/health.go +++ b/avalanchego/vms/platformvm/health.go @@ -1,20 +1,26 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( + "context" + "errors" "fmt" "strings" + "time" + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/utils/constants" ) -// MinConnectedStake is the minimum percentage of the Primary Network's that -// this node must be connected to to be considered healthy -const MinConnectedStake = .80 +const fallbackMinPercentConnected = 0.8 + +var errNotEnoughStake = errors.New("not connected to enough stake") -func (vm *VM) HealthCheck() (interface{}, error) { +func (vm *VM) HealthCheck(context.Context) (interface{}, error) { // Returns nil if this node is connected to > alpha percent of the Primary Network's stake primaryPercentConnected, err := vm.getPercentConnected(constants.PrimaryNetworkID) if err != nil { @@ -25,40 +31,84 @@ func (vm *VM) HealthCheck() (interface{}, error) { "primary-percentConnected": primaryPercentConnected, } - // TODO: Use alpha from consensus instead of const + localPrimaryValidator, err := vm.state.GetCurrentValidator( + constants.PrimaryNetworkID, + vm.ctx.NodeID, + ) + switch err { + case nil: + vm.metrics.SetTimeUntilUnstake(time.Until(localPrimaryValidator.EndTime)) + case database.ErrNotFound: + vm.metrics.SetTimeUntilUnstake(0) + default: + return nil, fmt.Errorf("couldn't get current local validator: %w", err) + } + + primaryMinPercentConnected, ok := vm.MinPercentConnectedStakeHealthy[constants.PrimaryNetworkID] + if !ok { + // This should never happen according to the comment for + // [MinPercentConnectedStakeHealthy] but we include it here to avoid the + // situation where a regression causes the key to be missing so that we + // don't accidentally set [primaryMinPercentConnected] to 0. 
+ vm.ctx.Log.Warn("primary network min connected stake not given", + zap.Float64("fallback value", fallbackMinPercentConnected), + ) + primaryMinPercentConnected = fallbackMinPercentConnected + } + var errorReasons []string - if primaryPercentConnected < MinConnectedStake { + if primaryPercentConnected < primaryMinPercentConnected { errorReasons = append(errorReasons, fmt.Sprintf("connected to %f%% of primary network stake; should be connected to at least %f%%", primaryPercentConnected*100, - MinConnectedStake*100, + primaryMinPercentConnected*100, ), ) } - for subnetID := range vm.WhitelistedSubnets { + for subnetID := range vm.TrackedSubnets { percentConnected, err := vm.getPercentConnected(subnetID) if err != nil { return nil, fmt.Errorf("couldn't get percent connected for %q: %w", subnetID, err) } + minPercentConnected, ok := vm.MinPercentConnectedStakeHealthy[subnetID] + if !ok { + minPercentConnected = primaryMinPercentConnected + } vm.metrics.SetSubnetPercentConnected(subnetID, percentConnected) key := fmt.Sprintf("%s-percentConnected", subnetID) details[key] = percentConnected - if percentConnected < MinConnectedStake { + localSubnetValidator, err := vm.state.GetCurrentValidator( + subnetID, + vm.ctx.NodeID, + ) + switch err { + case nil: + vm.metrics.SetTimeUntilSubnetUnstake(subnetID, time.Until(localSubnetValidator.EndTime)) + case database.ErrNotFound: + vm.metrics.SetTimeUntilSubnetUnstake(subnetID, 0) + default: + return nil, fmt.Errorf("couldn't get current subnet validator of %q: %w", subnetID, err) + } + + if percentConnected < minPercentConnected { errorReasons = append(errorReasons, fmt.Sprintf("connected to %f%% of %q weight; should be connected to at least %f%%", percentConnected*100, subnetID, - MinConnectedStake*100, + minPercentConnected*100, ), ) } } - if len(errorReasons) > 0 { - err = fmt.Errorf("platform layer is unhealthy reason: %s", strings.Join(errorReasons, ", ")) + if len(errorReasons) == 0 || !vm.StakingEnabled { + return details, 
nil } - return details, err + return details, fmt.Errorf("platform layer is unhealthy err: %w, details: %s", + errNotEnoughStake, + strings.Join(errorReasons, ", "), + ) } diff --git a/avalanchego/vms/platformvm/health_test.go b/avalanchego/vms/platformvm/health_test.go new file mode 100644 index 00000000..7a7d67b4 --- /dev/null +++ b/avalanchego/vms/platformvm/health_test.go @@ -0,0 +1,110 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/version" +) + +const defaultMinConnectedStake = 0.8 + +func TestHealthCheckPrimaryNetwork(t *testing.T) { + require := require.New(t) + + vm, _, _ := defaultVM() + vm.ctx.Lock.Lock() + + defer func() { + require.NoError(vm.Shutdown(context.Background())) + vm.ctx.Lock.Unlock() + }() + genesisState, _ := defaultGenesis() + for index, validator := range genesisState.Validators { + err := vm.Connected(context.Background(), validator.NodeID, version.CurrentApp) + require.NoError(err) + details, err := vm.HealthCheck(context.Background()) + if float64((index+1)*20) >= defaultMinConnectedStake*100 { + require.NoError(err) + } else { + require.Contains(details, "primary-percentConnected") + require.ErrorIs(err, errNotEnoughStake) + } + } +} + +func TestHealthCheckSubnet(t *testing.T) { + tests := map[string]struct { + minStake float64 + useDefault bool + }{ + "default min stake": { + useDefault: true, + minStake: 0, + }, + "custom min stake": { + minStake: 0.40, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) + + vm, _, _ := defaultVM() + vm.ctx.Lock.Lock() + defer func() { + require.NoError(vm.Shutdown(context.Background())) + vm.ctx.Lock.Unlock() + }() + + subnetID := 
ids.GenerateTestID() + subnetVdrs := validators.NewSet() + vm.TrackedSubnets.Add(subnetID) + testVdrCount := 4 + for i := 0; i < testVdrCount; i++ { + subnetVal := ids.GenerateTestNodeID() + err := subnetVdrs.Add(subnetVal, nil, ids.Empty, 100) + require.NoError(err) + } + ok := vm.Validators.Add(subnetID, subnetVdrs) + require.True(ok) + + // connect to all primary network validators first + genesisState, _ := defaultGenesis() + for _, validator := range genesisState.Validators { + err := vm.Connected(context.Background(), validator.NodeID, version.CurrentApp) + require.NoError(err) + } + var expectedMinStake float64 + if test.useDefault { + expectedMinStake = defaultMinConnectedStake + } else { + expectedMinStake = test.minStake + vm.MinPercentConnectedStakeHealthy = map[ids.ID]float64{ + subnetID: expectedMinStake, + } + } + for index, vdr := range subnetVdrs.List() { + err := vm.ConnectedSubnet(context.Background(), vdr.NodeID, subnetID) + require.NoError(err) + details, err := vm.HealthCheck(context.Background()) + connectedPerc := float64((index + 1) * (100 / testVdrCount)) + if connectedPerc >= expectedMinStake*100 { + require.NoError(err) + } else { + require.Contains(details, fmt.Sprintf("%s-percentConnected", subnetID)) + require.ErrorIs(err, errNotEnoughStake) + } + } + }) + } +} diff --git a/avalanchego/vms/platformvm/metrics/block_metrics.go b/avalanchego/vms/platformvm/metrics/block_metrics.go index 1a10267b..97156672 100644 --- a/avalanchego/vms/platformvm/metrics/block_metrics.go +++ b/avalanchego/vms/platformvm/metrics/block_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package metrics @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/blocks" ) -var _ blocks.Visitor = &blockMetrics{} +var _ blocks.Visitor = (*blockMetrics)(nil) type blockMetrics struct { txMetrics *txMetrics diff --git a/avalanchego/vms/platformvm/metrics/metrics.go b/avalanchego/vms/platformvm/metrics/metrics.go index 18cc253d..b1ff4af8 100644 --- a/avalanchego/vms/platformvm/metrics/metrics.go +++ b/avalanchego/vms/platformvm/metrics/metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -10,11 +10,12 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/metric" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" ) -var _ Metrics = &metrics{} +var _ Metrics = (*metrics)(nil) type Metrics interface { metric.APIInterceptor @@ -38,6 +39,10 @@ type Metrics interface { SetLocalStake(uint64) // Mark that this much stake is staked in the network. SetTotalStake(uint64) + // Mark when this node will unstake from the Primary Network. + SetTimeUntilUnstake(time.Duration) + // Mark when this node will unstake from a subnet. + SetTimeUntilSubnetUnstake(subnetID ids.ID, timeUntilUnstake time.Duration) // Mark that this node is connected to this percent of a subnet's stake. 
SetSubnetPercentConnected(subnetID ids.ID, percent float64) // Mark that this node is connected to this percent of the Primary Network's @@ -48,7 +53,7 @@ type Metrics interface { func New( namespace string, registerer prometheus.Registerer, - whitelistedSubnets ids.Set, + trackedSubnets set.Set[ids.ID], ) (Metrics, error) { blockMetrics, err := newBlockMetrics(namespace, registerer) m := &metrics{ @@ -67,6 +72,19 @@ func New( }, []string{"subnetID"}, ), + timeUntilUnstake: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "time_until_unstake", + Help: "Time (in ns) until this node leaves the Primary Network's validator set", + }), + timeUntilSubnetUnstake: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "time_until_unstake_subnet", + Help: "Time (in ns) until this node leaves the subnet's validator set", + }, + []string{"subnetID"}, + ), localStake: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "local_staked", @@ -119,6 +137,8 @@ func New( registerer.Register(m.percentConnected), registerer.Register(m.subnetPercentConnected), + registerer.Register(m.timeUntilUnstake), + registerer.Register(m.timeUntilSubnetUnstake), registerer.Register(m.localStake), registerer.Register(m.totalStake), @@ -131,8 +151,8 @@ func New( registerer.Register(m.validatorSetsDuration), ) - // init subnet tracker metrics with whitelisted subnets - for subnetID := range whitelistedSubnets { + // init subnet tracker metrics with tracked subnets + for subnetID := range trackedSubnets { // initialize to 0 m.subnetPercentConnected.WithLabelValues(subnetID.String()).Set(0) } @@ -146,6 +166,8 @@ type metrics struct { percentConnected prometheus.Gauge subnetPercentConnected *prometheus.GaugeVec + timeUntilUnstake prometheus.Gauge + timeUntilSubnetUnstake *prometheus.GaugeVec localStake prometheus.Gauge totalStake prometheus.Gauge @@ -193,6 +215,14 @@ func (m *metrics) SetTotalStake(s uint64) { 
m.totalStake.Set(float64(s)) } +func (m *metrics) SetTimeUntilUnstake(timeUntilUnstake time.Duration) { + m.timeUntilUnstake.Set(float64(timeUntilUnstake)) +} + +func (m *metrics) SetTimeUntilSubnetUnstake(subnetID ids.ID, timeUntilUnstake time.Duration) { + m.timeUntilSubnetUnstake.WithLabelValues(subnetID.String()).Set(float64(timeUntilUnstake)) +} + func (m *metrics) SetSubnetPercentConnected(subnetID ids.ID, percent float64) { m.subnetPercentConnected.WithLabelValues(subnetID.String()).Set(percent) } diff --git a/avalanchego/vms/platformvm/metrics/no_op.go b/avalanchego/vms/platformvm/metrics/no_op.go index d3fb9923..d5948348 100644 --- a/avalanchego/vms/platformvm/metrics/no_op.go +++ b/avalanchego/vms/platformvm/metrics/no_op.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -21,7 +21,9 @@ func (noopMetrics) MarkOptionVoteWon() {} func (noopMetrics) MarkOptionVoteLost() {} -func (noopMetrics) MarkAccepted(blocks.Block) error { return nil } +func (noopMetrics) MarkAccepted(blocks.Block) error { + return nil +} func (noopMetrics) InterceptRequest(i *rpc.RequestInfo) *http.Request { return i.Request @@ -41,6 +43,10 @@ func (noopMetrics) SetLocalStake(uint64) {} func (noopMetrics) SetTotalStake(uint64) {} +func (noopMetrics) SetTimeUntilUnstake(time.Duration) {} + +func (noopMetrics) SetTimeUntilSubnetUnstake(ids.ID, time.Duration) {} + func (noopMetrics) SetSubnetPercentConnected(ids.ID, float64) {} func (noopMetrics) SetPercentConnected(float64) {} diff --git a/avalanchego/vms/platformvm/metrics/tx_metrics.go b/avalanchego/vms/platformvm/metrics/tx_metrics.go index a47757e8..118f1156 100644 --- a/avalanchego/vms/platformvm/metrics/tx_metrics.go +++ b/avalanchego/vms/platformvm/metrics/tx_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package metrics @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ txs.Visitor = &txMetrics{} +var _ txs.Visitor = (*txMetrics)(nil) type txMetrics struct { numAddDelegatorTxs, @@ -93,12 +93,12 @@ func (m *txMetrics) CreateSubnetTx(*txs.CreateSubnetTx) error { return nil } -func (m *txMetrics) ImportTx(tx *txs.ImportTx) error { +func (m *txMetrics) ImportTx(*txs.ImportTx) error { m.numImportTxs.Inc() return nil } -func (m *txMetrics) ExportTx(tx *txs.ExportTx) error { +func (m *txMetrics) ExportTx(*txs.ExportTx) error { m.numExportTxs.Inc() return nil } diff --git a/avalanchego/vms/platformvm/reward/calculator.go b/avalanchego/vms/platformvm/reward/calculator.go index 8669ab90..5903969a 100644 --- a/avalanchego/vms/platformvm/reward/calculator.go +++ b/avalanchego/vms/platformvm/reward/calculator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reward @@ -8,7 +8,7 @@ import ( "time" ) -var _ Calculator = &calculator{} +var _ Calculator = (*calculator)(nil) type Calculator interface { Calculate(stakedDuration time.Duration, stakedAmount, currentSupply uint64) uint64 diff --git a/avalanchego/vms/platformvm/reward/calculator_test.go b/avalanchego/vms/platformvm/reward/calculator_test.go index 4bdb716a..f3b4e363 100644 --- a/avalanchego/vms/platformvm/reward/calculator_test.go +++ b/avalanchego/vms/platformvm/reward/calculator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package reward @@ -9,6 +9,7 @@ import ( "time" "github.com/ava-labs/avalanchego/utils/units" + "github.com/stretchr/testify/require" ) const ( @@ -34,15 +35,13 @@ func TestLongerDurationBonus(t *testing.T) { r := c.Calculate(shortDuration, shortBalance, 359*units.MegaAvax+shortBalance) shortBalance += r } - r := c.Calculate(totalDuration%shortDuration, shortBalance, 359*units.MegaAvax+shortBalance) - shortBalance += r + reward := c.Calculate(totalDuration%shortDuration, shortBalance, 359*units.MegaAvax+shortBalance) + shortBalance += reward longBalance := units.KiloAvax longBalance += c.Calculate(totalDuration, longBalance, 359*units.MegaAvax+longBalance) - if shortBalance != longBalance { - t.Fatalf("non-zero rewards") - } + require.Equal(t, shortBalance, longBalance) } func TestRewards(t *testing.T) { @@ -123,56 +122,12 @@ func TestRewards(t *testing.T) { test.expectedReward, ) t.Run(name, func(t *testing.T) { - r := c.Calculate( + reward := c.Calculate( test.duration, test.stakeAmount, test.existingAmount, ) - if r != test.expectedReward { - t.Fatalf("expected %d; got %d", test.expectedReward, r) - } + require.Equal(t, test.expectedReward, reward) }) } } - -// func TestRewardsOverflow(t *testing.T) { -// require := require.New(t) - -// var ( -// maxSupply uint64 = math.MaxUint64 -// initialSupply uint64 = 1 -// ) -// c := NewCalculator(Config{ -// MaxConsumptionRate: PercentDenominator, -// MinConsumptionRate: PercentDenominator, -// MintingPeriod: defaultMinStakingDuration, -// SupplyCap: maxSupply, -// }) -// rewards := c.Calculate( -// defaultMinStakingDuration, -// maxSupply, // The staked amount is larger than the current supply -// initialSupply, -// ) -// require.Equal(maxSupply-initialSupply, rewards) -// } - -// func TestRewardsMint(t *testing.T) { -// require := require.New(t) - -// var ( -// maxSupply uint64 = 1000 -// initialSupply uint64 = 1 -// ) -// c := NewCalculator(Config{ -// MaxConsumptionRate: PercentDenominator, -// MinConsumptionRate: 
PercentDenominator, -// MintingPeriod: defaultMinStakingDuration, -// SupplyCap: maxSupply, -// }) -// rewards := c.Calculate( -// defaultMinStakingDuration, -// maxSupply, // The staked amount is larger than the current supply -// initialSupply, -// ) -// require.Equal(maxSupply-initialSupply, rewards) -// } diff --git a/avalanchego/vms/platformvm/reward/config.go b/avalanchego/vms/platformvm/reward/config.go index cf4fa8e3..17a0a0d0 100644 --- a/avalanchego/vms/platformvm/reward/config.go +++ b/avalanchego/vms/platformvm/reward/config.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package reward diff --git a/avalanchego/vms/platformvm/service.go b/avalanchego/vms/platformvm/service.go index 8d2b3f01..99021845 100644 --- a/avalanchego/vms/platformvm/service.go +++ b/avalanchego/vms/platformvm/service.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( + "context" "errors" "fmt" "net/http" @@ -13,20 +14,28 @@ import ( "go.uber.org/zap" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/api" + "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/keystore" + "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" + "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" @@ -46,6 +55,10 @@ const ( // Minimum amount of delay to allow a transaction to be issued through the // API minAddStakerDelay = 2 * executor.SyncBound + + // Note: Staker attributes cache should be large enough so that no evictions + // happen when the API loops through all stakers. 
+ stakerAttributesCacheSize = 100_000 ) var ( @@ -55,8 +68,6 @@ var ( errInvalidDelegationRate = errors.New("argument 'delegationFeeRate' must be between 0 and 100, inclusive") errNoAddresses = errors.New("no addresses provided") errNoKeys = errors.New("user has no keys or funds") - errNoPrimaryValidators = errors.New("no default subnet validators") - errNoValidators = errors.New("no subnet validators") errStartTimeTooSoon = fmt.Errorf("start time must be at least %s in the future", minAddStakerDelay) errStartTimeTooLate = errors.New("start time is too far in the future") errNamedSubnetCantBePrimary = errors.New("subnet validator attempts to validate primary network") @@ -71,21 +82,33 @@ var ( // Service defines the API calls that can be made to the platform chain type Service struct { - vm *VM - addrManager avax.AddressManager + vm *VM + addrManager avax.AddressManager + stakerAttributesCache *cache.LRU[ids.ID, *stakerAttributes] } -type GetHeightResponse struct { - Height json.Uint64 `json:"height"` +// All attributes are optional and may not be filled for each stakerTx. 
+type stakerAttributes struct { + shares uint32 + rewardsOwner fx.Owner + validationRewardsOwner fx.Owner + delegationRewardsOwner fx.Owner + proofOfPossession *signer.ProofOfPossession } // GetHeight returns the height of the last accepted block -func (service *Service) GetHeight(r *http.Request, args *struct{}, response *GetHeightResponse) error { - lastAcceptedID, err := service.vm.LastAccepted() +func (s *Service) GetHeight(r *http.Request, _ *struct{}, response *api.GetHeightResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getHeight"), + ) + + ctx := r.Context() + lastAcceptedID, err := s.vm.LastAccepted(ctx) if err != nil { return fmt.Errorf("couldn't get last accepted block ID: %w", err) } - lastAccepted, err := service.vm.GetBlock(lastAcceptedID) + lastAccepted, err := s.vm.GetBlock(ctx, lastAcceptedID) if err != nil { return fmt.Errorf("couldn't get last accepted block: %w", err) } @@ -102,19 +125,23 @@ type ExportKeyArgs struct { // ExportKeyReply is the response for ExportKey type ExportKeyReply struct { // The decrypted PrivateKey for the Address provided in the arguments - PrivateKey *crypto.PrivateKeySECP256K1R `json:"privateKey"` + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` } // ExportKey returns a private key from the provided user -func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { - service.vm.ctx.Log.Debug("Platform: ExportKey called") +func (s *Service) ExportKey(_ *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "exportKey"), + logging.UserString("username", args.Username), + ) - address, err := avax.ParseServiceAddress(service.addrManager, args.Address) + address, err := avax.ParseServiceAddress(s.addrManager, args.Address) if err != nil { return fmt.Errorf("couldn't parse %s to address: %w", 
args.Address, err) } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -132,12 +159,14 @@ func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *E // ImportKeyArgs are arguments for ImportKey type ImportKeyArgs struct { api.UserPass - PrivateKey *crypto.PrivateKeySECP256K1R `json:"privateKey"` + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` } // ImportKey adds a private key to the provided user -func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *api.JSONAddress) error { - service.vm.ctx.Log.Debug("Platform: ImportKey called", +func (s *Service) ImportKey(_ *http.Request, args *ImportKeyArgs, reply *api.JSONAddress) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "importKey"), logging.UserString("username", args.Username), ) @@ -146,12 +175,12 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *a } var err error - reply.Address, err = service.addrManager.FormatLocalAddress(args.PrivateKey.PublicKey().Address()) + reply.Address, err = s.addrManager.FormatLocalAddress(args.PrivateKey.PublicKey().Address()) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -170,8 +199,6 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *a */ type GetBalanceRequest struct { - // TODO: remove Address - Address *string `json:"address,omitempty"` Addresses []string `json:"addresses"` } @@ -191,27 +218,25 @@ type GetBalanceResponse struct { } // GetBalance gets the balance of an address -func 
(service *Service) GetBalance(_ *http.Request, args *GetBalanceRequest, response *GetBalanceResponse) error { - if args.Address != nil { - args.Addresses = append(args.Addresses, *args.Address) - } - - service.vm.ctx.Log.Debug("Platform: GetBalance called", +func (s *Service) GetBalance(_ *http.Request, args *GetBalanceRequest, response *GetBalanceResponse) error { + s.vm.ctx.Log.Debug("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "getBalance"), logging.UserStrings("addresses", args.Addresses), ) // Parse to address - addrs, err := avax.ParseServiceAddresses(service.addrManager, args.Addresses) + addrs, err := avax.ParseServiceAddresses(s.addrManager, args.Addresses) if err != nil { return err } - utxos, err := avax.GetAllUTXOs(service.vm.state, addrs) + utxos, err := avax.GetAllUTXOs(s.vm.state, addrs) if err != nil { return fmt.Errorf("couldn't get UTXO set of %v: %w", args.Addresses, err) } - currentTime := service.vm.clock.Unix() + currentTime := s.vm.clock.Unix() unlockeds := map[ids.ID]uint64{} lockedStakeables := map[ids.ID]uint64{} @@ -241,7 +266,7 @@ utxoFor: innerOut, ok := out.TransferableOut.(*secp256k1fx.TransferOutput) switch { case !ok: - service.vm.ctx.Log.Warn("unexpected output type in UTXO", + s.vm.ctx.Log.Warn("unexpected output type in UTXO", zap.String("type", fmt.Sprintf("%T", out.TransferableOut)), ) continue utxoFor @@ -274,10 +299,7 @@ utxoFor: response.UTXOIDs = append(response.UTXOIDs, &utxo.UTXOID) } - balances := map[ids.ID]uint64{} - for assetID, amount := range lockedStakeables { - balances[assetID] = amount - } + balances := maps.Clone(lockedStakeables) for assetID, amount := range lockedNotStakeables { newBalance, err := math.Add64(balances[assetID], amount) if err != nil { @@ -299,10 +321,10 @@ utxoFor: response.Unlockeds = newJSONBalanceMap(unlockeds) response.LockedStakeables = newJSONBalanceMap(lockedStakeables) response.LockedNotStakeables = newJSONBalanceMap(lockedNotStakeables) - 
response.Balance = response.Balances[service.vm.ctx.AVAXAssetID] - response.Unlocked = response.Unlockeds[service.vm.ctx.AVAXAssetID] - response.LockedStakeable = response.LockedStakeables[service.vm.ctx.AVAXAssetID] - response.LockedNotStakeable = response.LockedNotStakeables[service.vm.ctx.AVAXAssetID] + response.Balance = response.Balances[s.vm.ctx.AVAXAssetID] + response.Unlocked = response.Unlockeds[s.vm.ctx.AVAXAssetID] + response.LockedStakeable = response.LockedStakeables[s.vm.ctx.AVAXAssetID] + response.LockedNotStakeable = response.LockedNotStakeables[s.vm.ctx.AVAXAssetID] return nil } @@ -316,10 +338,14 @@ func newJSONBalanceMap(balanceMap map[ids.ID]uint64) map[ids.ID]json.Uint64 { // CreateAddress creates an address controlled by [args.Username] // Returns the newly created address -func (service *Service) CreateAddress(_ *http.Request, args *api.UserPass, response *api.JSONAddress) error { - service.vm.ctx.Log.Debug("Platform: CreateAddress called") +func (s *Service) CreateAddress(_ *http.Request, args *api.UserPass, response *api.JSONAddress) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "createAddress"), + logging.UserString("username", args.Username), + ) - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -330,7 +356,7 @@ func (service *Service) CreateAddress(_ *http.Request, args *api.UserPass, respo return err } - response.Address, err = service.addrManager.FormatLocalAddress(key.PublicKey().Address()) + response.Address, err = s.addrManager.FormatLocalAddress(key.PublicKey().Address()) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } @@ -338,10 +364,14 @@ func (service *Service) CreateAddress(_ *http.Request, args *api.UserPass, respo } // ListAddresses returns the addresses 
controlled by [args.Username] -func (service *Service) ListAddresses(_ *http.Request, args *api.UserPass, response *api.JSONAddresses) error { - service.vm.ctx.Log.Debug("Platform: ListAddresses called") +func (s *Service) ListAddresses(_ *http.Request, args *api.UserPass, response *api.JSONAddresses) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "listAddresses"), + logging.UserString("username", args.Username), + ) - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -353,7 +383,7 @@ func (service *Service) ListAddresses(_ *http.Request, args *api.UserPass, respo } response.Addresses = make([]string, len(addresses)) for i, addr := range addresses { - response.Addresses[i], err = service.addrManager.FormatLocalAddress(addr) + response.Addresses[i], err = s.addrManager.FormatLocalAddress(addr) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } @@ -369,8 +399,11 @@ type Index struct { } // GetUTXOs returns the UTXOs controlled by the given addresses -func (service *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, response *api.GetUTXOsReply) error { - service.vm.ctx.Log.Debug("Platform: GetUTXOs called") +func (s *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, response *api.GetUTXOsReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getUTXOs"), + ) if len(args.Addresses) == 0 { return errNoAddresses @@ -381,16 +414,16 @@ func (service *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, respon var sourceChain ids.ID if args.SourceChain == "" { - sourceChain = service.vm.ctx.ChainID + sourceChain = s.vm.ctx.ChainID } else { - chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) + chainID, err := 
s.vm.ctx.BCLookup.Lookup(args.SourceChain) if err != nil { return fmt.Errorf("problem parsing source chainID %q: %w", args.SourceChain, err) } sourceChain = chainID } - addrSet, err := avax.ParseServiceAddresses(service.addrManager, args.Addresses) + addrSet, err := avax.ParseServiceAddresses(s.addrManager, args.Addresses) if err != nil { return err } @@ -398,7 +431,7 @@ func (service *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, respon startAddr := ids.ShortEmpty startUTXO := ids.Empty if args.StartIndex.Address != "" || args.StartIndex.UTXO != "" { - startAddr, err = avax.ParseServiceAddress(service.addrManager, args.StartIndex.Address) + startAddr, err = avax.ParseServiceAddress(s.addrManager, args.StartIndex.Address) if err != nil { return fmt.Errorf("couldn't parse start index address %q: %w", args.StartIndex.Address, err) } @@ -417,16 +450,16 @@ func (service *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, respon if limit <= 0 || builder.MaxPageSize < limit { limit = builder.MaxPageSize } - if sourceChain == service.vm.ctx.ChainID { + if sourceChain == s.vm.ctx.ChainID { utxos, endAddr, endUTXOID, err = avax.GetPaginatedUTXOs( - service.vm.state, + s.vm.state, addrSet, startAddr, startUTXO, limit, ) } else { - utxos, endAddr, endUTXOID, err = service.vm.atomicUtxosManager.GetAtomicUTXOs( + utxos, endAddr, endUTXOID, err = s.vm.atomicUtxosManager.GetAtomicUTXOs( sourceChain, addrSet, startAddr, @@ -450,7 +483,7 @@ func (service *Service) GetUTXOs(_ *http.Request, args *api.GetUTXOsArgs, respon } } - endAddress, err := service.addrManager.FormatLocalAddress(endAddr) + endAddress, err := s.addrManager.FormatLocalAddress(endAddr) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } @@ -496,12 +529,15 @@ type GetSubnetsResponse struct { // GetSubnets returns the subnets whose ID are in [args.IDs] // The response will include the primary network -func (service *Service) GetSubnets(_ *http.Request, args 
*GetSubnetsArgs, response *GetSubnetsResponse) error { - service.vm.ctx.Log.Debug("Platform: GetSubnets called") +func (s *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *GetSubnetsResponse) error { + s.vm.ctx.Log.Debug("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "getSubnets"), + ) getAll := len(args.IDs) == 0 if getAll { - subnets, err := service.vm.state.GetSubnets() // all subnets + subnets, err := s.vm.state.GetSubnets() // all subnets if err != nil { return fmt.Errorf("error getting subnets from database: %w", err) } @@ -509,7 +545,7 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon response.Subnets = make([]APISubnet, len(subnets)+1) for i, subnet := range subnets { subnetID := subnet.ID() - if _, err := service.vm.state.GetSubnetTransformation(subnetID); err == nil { + if _, err := s.vm.state.GetSubnetTransformation(subnetID); err == nil { response.Subnets[i] = APISubnet{ ID: subnetID, ControlKeys: []string{}, @@ -522,7 +558,7 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon owner := unsignedTx.Owner.(*secp256k1fx.OutputOwners) controlAddrs := []string{} for _, controlKeyID := range owner.Addrs { - addr, err := service.addrManager.FormatLocalAddress(controlKeyID) + addr, err := s.addrManager.FormatLocalAddress(controlKeyID) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } @@ -543,7 +579,7 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon return nil } - subnetSet := ids.NewSet(len(args.IDs)) + subnetSet := set.NewSet[ids.ID](len(args.IDs)) for _, subnetID := range args.IDs { if subnetSet.Contains(subnetID) { continue @@ -561,7 +597,7 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon continue } - if _, err := service.vm.state.GetSubnetTransformation(subnetID); err == nil { + if _, err := s.vm.state.GetSubnetTransformation(subnetID); err == 
nil { response.Subnets = append(response.Subnets, APISubnet{ ID: subnetID, ControlKeys: []string{}, @@ -570,7 +606,7 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon continue } - subnetTx, _, err := service.vm.state.GetTx(subnetID) + subnetTx, _, err := s.vm.state.GetTx(subnetID) if err == database.ErrNotFound { continue } @@ -589,7 +625,7 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon controlAddrs := make([]string, len(owner.Addrs)) for i, controlKeyID := range owner.Addrs { - addr, err := service.addrManager.FormatLocalAddress(controlKeyID) + addr, err := s.addrManager.FormatLocalAddress(controlKeyID) if err != nil { return fmt.Errorf("problem formatting address: %w", err) } @@ -617,15 +653,18 @@ type GetStakingAssetIDResponse struct { // GetStakingAssetID returns the assetID of the token used to stake on the // provided subnet -func (service *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAssetIDArgs, response *GetStakingAssetIDResponse) error { - service.vm.ctx.Log.Debug("Platform: GetStakingAssetID called") +func (s *Service) GetStakingAssetID(_ *http.Request, args *GetStakingAssetIDArgs, response *GetStakingAssetIDResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getStakingAssetID"), + ) if args.SubnetID == constants.PrimaryNetworkID { - response.AssetID = service.vm.ctx.AVAXAssetID + response.AssetID = s.vm.ctx.AVAXAssetID return nil } - transformSubnetIntf, err := service.vm.state.GetSubnetTransformation(args.SubnetID) + transformSubnetIntf, err := s.vm.state.GetSubnetTransformation(args.SubnetID) if err != nil { return fmt.Errorf( "failed fetching subnet transformation for %s: %w", @@ -669,9 +708,56 @@ type GetCurrentValidatorsReply struct { Validators []interface{} `json:"validators"` } -// GetCurrentValidators returns current validators and delegators -func (service *Service) GetCurrentValidators(_ 
*http.Request, args *GetCurrentValidatorsArgs, reply *GetCurrentValidatorsReply) error { - service.vm.ctx.Log.Debug("Platform: GetCurrentValidators called") +func (s *Service) loadStakerTxAttributes(txID ids.ID) (*stakerAttributes, error) { + // Lookup tx from the cache first. + attr, found := s.stakerAttributesCache.Get(txID) + if found { + return attr, nil + } + + // Tx not available in cache; pull it from disk and populate the cache. + tx, _, err := s.vm.state.GetTx(txID) + if err != nil { + return nil, err + } + + switch stakerTx := tx.Unsigned.(type) { + case txs.ValidatorTx: + var pop *signer.ProofOfPossession + if staker, ok := stakerTx.(*txs.AddPermissionlessValidatorTx); ok { + if s, ok := staker.Signer.(*signer.ProofOfPossession); ok { + pop = s + } + } + + attr = &stakerAttributes{ + shares: stakerTx.Shares(), + validationRewardsOwner: stakerTx.ValidationRewardsOwner(), + delegationRewardsOwner: stakerTx.DelegationRewardsOwner(), + proofOfPossession: pop, + } + + case txs.DelegatorTx: + attr = &stakerAttributes{ + rewardsOwner: stakerTx.RewardsOwner(), + } + + default: + return nil, fmt.Errorf("unexpected staker tx type %T", tx.Unsigned) + } + + s.stakerAttributesCache.Put(txID, attr) + return attr, nil +} + +// GetCurrentValidators returns the current validators. If a single nodeID +// is provided, full delegators information is also returned. Otherwise only +// delegators' number and total weight is returned. 
+func (s *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsArgs, reply *GetCurrentValidatorsReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getCurrentValidators"), + ) reply.Validators = []interface{}{} @@ -679,160 +765,186 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa vdrToDelegators := map[ids.NodeID][]platformapi.PrimaryDelegator{} // Create set of nodeIDs - nodeIDs := ids.NodeIDSet{} + nodeIDs := set.Set[ids.NodeID]{} nodeIDs.Add(args.NodeIDs...) - includeAllNodes := nodeIDs.Len() == 0 - currentStakerIterator, err := service.vm.state.GetCurrentStakerIterator() - if err != nil { - return err - } - defer currentStakerIterator.Release() + numNodeIDs := nodeIDs.Len() + targetStakers := make([]*state.Staker, 0, numNodeIDs) + if numNodeIDs == 0 { // Include all nodes + currentStakerIterator, err := s.vm.state.GetCurrentStakerIterator() + if err != nil { + return err + } + // TODO: avoid iterating over delegators here. + for currentStakerIterator.Next() { + staker := currentStakerIterator.Value() + if args.SubnetID != staker.SubnetID { + continue + } + targetStakers = append(targetStakers, staker) + } + currentStakerIterator.Release() + } else { + for nodeID := range nodeIDs { + staker, err := s.vm.state.GetCurrentValidator(args.SubnetID, nodeID) + switch err { + case nil: + case database.ErrNotFound: + // nothing to do, continue + continue + default: + return err + } + targetStakers = append(targetStakers, staker) - // TODO: do not iterate over all stakers when nodeIDs given. Use currentValidators.ValidatorSet for iteration - for currentStakerIterator.Next() { // Iterates in order of increasing stop time - staker := currentStakerIterator.Value() - if args.SubnetID != staker.SubnetID { - continue + // TODO: avoid iterating over delegators when numNodeIDs > 1. 
+ delegatorsIt, err := s.vm.state.GetCurrentDelegatorIterator(args.SubnetID, nodeID) + if err != nil { + return err + } + for delegatorsIt.Next() { + staker := delegatorsIt.Value() + targetStakers = append(targetStakers, staker) + } + delegatorsIt.Release() } - if !includeAllNodes && !nodeIDs.Contains(staker.NodeID) { - continue + } + + for _, currentStaker := range targetStakers { + nodeID := currentStaker.NodeID + weight := json.Uint64(currentStaker.Weight) + apiStaker := platformapi.Staker{ + TxID: currentStaker.TxID, + StartTime: json.Uint64(currentStaker.StartTime.Unix()), + EndTime: json.Uint64(currentStaker.EndTime.Unix()), + Weight: weight, + StakeAmount: &weight, + NodeID: nodeID, } + potentialReward := json.Uint64(currentStaker.PotentialReward) - tx, _, err := service.vm.state.GetTx(staker.TxID) + delegateeReward, err := s.vm.state.GetDelegateeReward(currentStaker.SubnetID, currentStaker.NodeID) if err != nil { return err } + jsonDelegateeReward := json.Uint64(delegateeReward) - txID := staker.TxID - nodeID := staker.NodeID - weight := json.Uint64(staker.Weight) - startTime := json.Uint64(staker.StartTime.Unix()) - endTime := json.Uint64(staker.EndTime.Unix()) - potentialReward := json.Uint64(staker.PotentialReward) - - switch staker := tx.Unsigned.(type) { - case txs.ValidatorTx: - shares := staker.Shares() - delegationFee := json.Float32(100 * float32(shares) / float32(reward.PercentDenominator)) - - primaryNetworkStaker, err := service.vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) + switch currentStaker.Priority { + case txs.PrimaryNetworkValidatorCurrentPriority, txs.SubnetPermissionlessValidatorCurrentPriority: + attr, err := s.loadStakerTxAttributes(currentStaker.TxID) if err != nil { return err } - // TODO: calculate subnet uptimes - rawUptime, err := service.vm.uptimeManager.CalculateUptimePercentFrom(nodeID, primaryNetworkStaker.StartTime) + shares := attr.shares + delegationFee := json.Float32(100 * float32(shares) / 
float32(reward.PercentDenominator)) + + uptime, err := s.getAPIUptime(currentStaker) if err != nil { return err } - uptime := json.Float32(rawUptime) - - connected := service.vm.uptimeManager.IsConnected(nodeID) - tracksSubnet := args.SubnetID == constants.PrimaryNetworkID || service.vm.SubnetTracker.TracksSubnet(nodeID, args.SubnetID) + connected := s.vm.uptimeManager.IsConnected(nodeID, args.SubnetID) var ( validationRewardOwner *platformapi.Owner delegationRewardOwner *platformapi.Owner ) - validationOwner, ok := staker.ValidationRewardsOwner().(*secp256k1fx.OutputOwners) + validationOwner, ok := attr.validationRewardsOwner.(*secp256k1fx.OutputOwners) if ok { - validationRewardOwner = &platformapi.Owner{ - Locktime: json.Uint64(validationOwner.Locktime), - Threshold: json.Uint32(validationOwner.Threshold), - } - for _, addr := range validationOwner.Addrs { - addrStr, err := service.addrManager.FormatLocalAddress(addr) - if err != nil { - return err - } - validationRewardOwner.Addresses = append(validationRewardOwner.Addresses, addrStr) + validationRewardOwner, err = s.getAPIOwner(validationOwner) + if err != nil { + return err } } - delegationOwner, ok := staker.DelegationRewardsOwner().(*secp256k1fx.OutputOwners) + delegationOwner, ok := attr.delegationRewardsOwner.(*secp256k1fx.OutputOwners) if ok { - delegationRewardOwner = &platformapi.Owner{ - Locktime: json.Uint64(delegationOwner.Locktime), - Threshold: json.Uint32(delegationOwner.Threshold), - } - for _, addr := range delegationOwner.Addrs { - addrStr, err := service.addrManager.FormatLocalAddress(addr) - if err != nil { - return err - } - delegationRewardOwner.Addresses = append(delegationRewardOwner.Addresses, addrStr) + delegationRewardOwner, err = s.getAPIOwner(delegationOwner) + if err != nil { + return err } } - reply.Validators = append(reply.Validators, platformapi.PermissionlessValidator{ - Staker: platformapi.Staker{ - TxID: txID, - NodeID: nodeID, - StartTime: startTime, - EndTime: endTime, - 
StakeAmount: &weight, - }, - Uptime: &uptime, - Connected: connected && tracksSubnet, - PotentialReward: &potentialReward, - RewardOwner: validationRewardOwner, - ValidationRewardOwner: validationRewardOwner, - DelegationRewardOwner: delegationRewardOwner, - DelegationFee: delegationFee, - }) - case txs.DelegatorTx: + vdr := platformapi.PermissionlessValidator{ + Staker: apiStaker, + Uptime: uptime, + Connected: connected, + PotentialReward: &potentialReward, + AccruedDelegateeReward: &jsonDelegateeReward, + RewardOwner: validationRewardOwner, + ValidationRewardOwner: validationRewardOwner, + DelegationRewardOwner: delegationRewardOwner, + DelegationFee: delegationFee, + Signer: attr.proofOfPossession, + } + reply.Validators = append(reply.Validators, vdr) + + case txs.PrimaryNetworkDelegatorCurrentPriority, txs.SubnetPermissionlessDelegatorCurrentPriority: var rewardOwner *platformapi.Owner - owner, ok := staker.RewardsOwner().(*secp256k1fx.OutputOwners) - if ok { - rewardOwner = &platformapi.Owner{ - Locktime: json.Uint64(owner.Locktime), - Threshold: json.Uint32(owner.Threshold), + // If we are handling multiple nodeIDs, we don't return the + // delegator information. 
+ if numNodeIDs == 1 { + attr, err := s.loadStakerTxAttributes(currentStaker.TxID) + if err != nil { + return err } - for _, addr := range owner.Addrs { - addrStr, err := service.addrManager.FormatLocalAddress(addr) + owner, ok := attr.rewardsOwner.(*secp256k1fx.OutputOwners) + if ok { + rewardOwner, err = s.getAPIOwner(owner) if err != nil { return err } - rewardOwner.Addresses = append(rewardOwner.Addresses, addrStr) } } delegator := platformapi.PrimaryDelegator{ - Staker: platformapi.Staker{ - TxID: txID, - StartTime: startTime, - EndTime: endTime, - StakeAmount: &weight, - NodeID: nodeID, - }, + Staker: apiStaker, RewardOwner: rewardOwner, PotentialReward: &potentialReward, } vdrToDelegators[delegator.NodeID] = append(vdrToDelegators[delegator.NodeID], delegator) - case *txs.AddSubnetValidatorTx: - connected := service.vm.uptimeManager.IsConnected(nodeID) - tracksSubnet := service.vm.SubnetTracker.TracksSubnet(nodeID, args.SubnetID) + + case txs.SubnetPermissionedValidatorCurrentPriority: + uptime, err := s.getAPIUptime(currentStaker) + if err != nil { + return err + } + connected := s.vm.uptimeManager.IsConnected(nodeID, args.SubnetID) reply.Validators = append(reply.Validators, platformapi.PermissionedValidator{ - Staker: platformapi.Staker{ - NodeID: nodeID, - TxID: txID, - StartTime: startTime, - EndTime: endTime, - Weight: &weight, - }, - Connected: connected && tracksSubnet, + Staker: apiStaker, + Connected: connected, + Uptime: uptime, }) + default: - return fmt.Errorf("expected validator but got %T", tx.Unsigned) + return fmt.Errorf("unexpected staker priority %d", currentStaker.Priority) } } + // handle delegators' information for i, vdrIntf := range reply.Validators { vdr, ok := vdrIntf.(platformapi.PermissionlessValidator) if !ok { continue } - vdr.Delegators = vdrToDelegators[vdr.NodeID] + delegators, ok := vdrToDelegators[vdr.NodeID] + if !ok { + // If we are expected to populate the delegators field, we should + // always return a non-nil value. 
+ delegators = []platformapi.PrimaryDelegator{} + } + delegatorCount := json.Uint64(len(delegators)) + delegatorWeight := json.Uint64(0) + for _, d := range delegators { + delegatorWeight += d.Weight + } + + vdr.DelegatorCount = &delegatorCount + vdr.DelegatorWeight = &delegatorWeight + + if numNodeIDs == 1 { + // queried a specific validator, load all of its delegators + vdr.Delegators = &delegators + } reply.Validators[i] = vdr } @@ -852,93 +964,108 @@ type GetPendingValidatorsArgs struct { } // GetPendingValidatorsReply are the results from calling GetPendingValidators. -// Unlike GetCurrentValidatorsReply, each validator has a null delegator list. type GetPendingValidatorsReply struct { Validators []interface{} `json:"validators"` Delegators []interface{} `json:"delegators"` } -// GetPendingValidators returns the list of pending validators -func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { - service.vm.ctx.Log.Debug("Platform: GetPendingValidators called") +// GetPendingValidators returns the lists of pending validators and delegators. +func (s *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getPendingValidators"), + ) reply.Validators = []interface{}{} reply.Delegators = []interface{}{} // Create set of nodeIDs - nodeIDs := ids.NodeIDSet{} + nodeIDs := set.Set[ids.NodeID]{} nodeIDs.Add(args.NodeIDs...) 
- includeAllNodes := nodeIDs.Len() == 0 - pendingStakerIterator, err := service.vm.state.GetPendingStakerIterator() - if err != nil { - return err - } - defer pendingStakerIterator.Release() - - for pendingStakerIterator.Next() { // Iterates in order of increasing start time - staker := pendingStakerIterator.Value() - if args.SubnetID != staker.SubnetID { - continue + numNodeIDs := nodeIDs.Len() + targetStakers := make([]*state.Staker, 0, numNodeIDs) + if numNodeIDs == 0 { // Include all nodes + pendingStakerIterator, err := s.vm.state.GetPendingStakerIterator() + if err != nil { + return err } - if !includeAllNodes && !nodeIDs.Contains(staker.NodeID) { - continue + for pendingStakerIterator.Next() { // Iterates in order of increasing stop time + staker := pendingStakerIterator.Value() + if args.SubnetID != staker.SubnetID { + continue + } + targetStakers = append(targetStakers, staker) } + pendingStakerIterator.Release() + } else { + for nodeID := range nodeIDs { + staker, err := s.vm.state.GetPendingValidator(args.SubnetID, nodeID) + switch err { + case nil: + case database.ErrNotFound: + // nothing to do, continue + continue + default: + return err + } + targetStakers = append(targetStakers, staker) - tx, _, err := service.vm.state.GetTx(staker.TxID) - if err != nil { - return err + delegatorsIt, err := s.vm.state.GetPendingDelegatorIterator(args.SubnetID, nodeID) + if err != nil { + return err + } + for delegatorsIt.Next() { + staker := delegatorsIt.Value() + targetStakers = append(targetStakers, staker) + } + delegatorsIt.Release() + } + } + + for _, pendingStaker := range targetStakers { + nodeID := pendingStaker.NodeID + weight := json.Uint64(pendingStaker.Weight) + apiStaker := platformapi.Staker{ + TxID: pendingStaker.TxID, + NodeID: nodeID, + StartTime: json.Uint64(pendingStaker.StartTime.Unix()), + EndTime: json.Uint64(pendingStaker.EndTime.Unix()), + Weight: weight, + StakeAmount: &weight, } - txID := staker.TxID - nodeID := staker.NodeID - weight := 
json.Uint64(staker.Weight) - startTime := json.Uint64(staker.StartTime.Unix()) - endTime := json.Uint64(staker.EndTime.Unix()) + switch pendingStaker.Priority { + case txs.PrimaryNetworkValidatorPendingPriority, txs.SubnetPermissionlessValidatorPendingPriority: + attr, err := s.loadStakerTxAttributes(pendingStaker.TxID) + if err != nil { + return err + } - switch staker := tx.Unsigned.(type) { - case txs.ValidatorTx: - shares := staker.Shares() + shares := attr.shares delegationFee := json.Float32(100 * float32(shares) / float32(reward.PercentDenominator)) - connected := service.vm.uptimeManager.IsConnected(nodeID) - tracksSubnet := args.SubnetID == constants.PrimaryNetworkID || service.vm.SubnetTracker.TracksSubnet(nodeID, args.SubnetID) - reply.Validators = append(reply.Validators, platformapi.PermissionlessValidator{ - Staker: platformapi.Staker{ - TxID: txID, - NodeID: nodeID, - StartTime: startTime, - EndTime: endTime, - StakeAmount: &weight, - }, + connected := s.vm.uptimeManager.IsConnected(nodeID, args.SubnetID) + vdr := platformapi.PermissionlessValidator{ + Staker: apiStaker, DelegationFee: delegationFee, - Connected: connected && tracksSubnet, - }) + Connected: connected, + Signer: attr.proofOfPossession, + } + reply.Validators = append(reply.Validators, vdr) - case txs.DelegatorTx: - reply.Delegators = append(reply.Delegators, platformapi.Staker{ - TxID: txID, - NodeID: nodeID, - StartTime: startTime, - EndTime: endTime, - StakeAmount: &weight, - }) + case txs.PrimaryNetworkDelegatorApricotPendingPriority, txs.PrimaryNetworkDelegatorBanffPendingPriority, txs.SubnetPermissionlessDelegatorPendingPriority: + reply.Delegators = append(reply.Delegators, apiStaker) - case *txs.AddSubnetValidatorTx: - connected := service.vm.uptimeManager.IsConnected(nodeID) - tracksSubnet := service.vm.SubnetTracker.TracksSubnet(nodeID, args.SubnetID) + case txs.SubnetPermissionedValidatorPendingPriority: + connected := s.vm.uptimeManager.IsConnected(nodeID, args.SubnetID) 
reply.Validators = append(reply.Validators, platformapi.PermissionedValidator{ - Staker: platformapi.Staker{ - NodeID: nodeID, - TxID: txID, - StartTime: startTime, - EndTime: endTime, - Weight: &weight, - }, - Connected: connected && tracksSubnet, + Staker: apiStaker, + Connected: connected, }) + default: - return fmt.Errorf("expected validator but got %T", tx.Unsigned) + return fmt.Errorf("unexpected staker priority %d", pendingStaker.Priority) } } return nil @@ -955,10 +1082,13 @@ type GetCurrentSupplyReply struct { } // GetCurrentSupply returns an upper bound on the supply of AVAX in the system -func (service *Service) GetCurrentSupply(_ *http.Request, args *GetCurrentSupplyArgs, reply *GetCurrentSupplyReply) error { - service.vm.ctx.Log.Debug("Platform: GetCurrentSupply called") +func (s *Service) GetCurrentSupply(_ *http.Request, args *GetCurrentSupplyArgs, reply *GetCurrentSupplyReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getCurrentSupply"), + ) - supply, err := service.vm.state.GetCurrentSupply(args.SubnetID) + supply, err := s.vm.state.GetCurrentSupply(args.SubnetID) reply.Supply = json.Uint64(supply) return err } @@ -979,12 +1109,14 @@ type SampleValidatorsReply struct { } // SampleValidators returns a sampling of the list of current validators -func (service *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, reply *SampleValidatorsReply) error { - service.vm.ctx.Log.Debug("Platform: SampleValidators called", +func (s *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, reply *SampleValidatorsReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "sampleValidators"), zap.Uint16("size", uint16(args.Size)), ) - validators, ok := service.vm.Validators.GetValidators(args.SubnetID) + validators, ok := s.vm.Validators.Get(args.SubnetID) if !ok { return fmt.Errorf( "couldn't get validators of subnet %q. 
Is it being validated?", @@ -997,11 +1129,12 @@ func (service *Service) SampleValidators(_ *http.Request, args *SampleValidators return fmt.Errorf("sampling errored with %w", err) } - reply.Validators = make([]ids.NodeID, int(args.Size)) - for i, vdr := range sample { - reply.Validators[i] = vdr.ID() + if sample == nil { + reply.Validators = []ids.NodeID{} + } else { + utils.Sort(sample) + reply.Validators = sample } - ids.SortNodeIDs(reply.Validators) return nil } @@ -1023,10 +1156,13 @@ type AddValidatorArgs struct { // AddValidator creates and signs and issues a transaction to add a validator to // the primary network -func (service *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("Platform: AddValidator called") +func (s *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, reply *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "addValidator"), + ) - now := service.vm.clock.Time() + now := s.vm.clock.Time() minAddStakerTime := now.Add(minAddStakerDelay) minAddStakerUnix := json.Uint64(minAddStakerTime.Unix()) maxAddStakerTime := now.Add(executor.MaxFutureStartTime) @@ -1050,24 +1186,24 @@ func (service *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, re // Parse the node ID var nodeID ids.NodeID if args.NodeID == ids.EmptyNodeID { // If ID unspecified, use this node's ID - nodeID = service.vm.ctx.NodeID + nodeID = s.vm.ctx.NodeID } else { nodeID = args.NodeID } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.addrManager, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { return err } // Parse the reward address - rewardAddress, err := avax.ParseServiceAddress(service.addrManager, args.RewardAddress) + rewardAddress, err := avax.ParseServiceAddress(s.addrManager, args.RewardAddress) if 
err != nil { return fmt.Errorf("problem while parsing reward address: %w", err) } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -1085,15 +1221,20 @@ func (service *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, re } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(service.addrManager, args.ChangeAddr) + changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { return fmt.Errorf("couldn't parse changeAddr: %w", err) } } + // TODO: Remove after StakeAmount is removed from [args]. + if args.StakeAmount != nil { + args.Weight = *args.StakeAmount + } + // Create the transaction - tx, err := service.vm.txBuilder.NewAddValidatorTx( - args.GetWeight(), // Stake amount + tx, err := s.vm.txBuilder.NewAddValidatorTx( + uint64(args.Weight), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time nodeID, // Node ID @@ -1107,12 +1248,12 @@ func (service *Service) AddValidator(_ *http.Request, args *AddValidatorArgs, re } reply.TxID = tx.ID() - reply.ChangeAddr, err = service.addrManager.FormatLocalAddress(changeAddr) + reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) errs := wrappers.Errs{} errs.Add( err, - service.vm.Builder.AddUnverifiedTx(tx), + s.vm.Builder.AddUnverifiedTx(tx), user.Close(), ) return errs.Err @@ -1128,10 +1269,13 @@ type AddDelegatorArgs struct { // AddDelegator creates and signs and issues a transaction to add a delegator to // the primary network -func (service *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, reply *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("Platform: AddDelegator called") +func (s *Service) AddDelegator(_ 
*http.Request, args *AddDelegatorArgs, reply *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "addDelegator"), + ) - now := service.vm.clock.Time() + now := s.vm.clock.Time() minAddStakerTime := now.Add(minAddStakerDelay) minAddStakerUnix := json.Uint64(minAddStakerTime.Unix()) maxAddStakerTime := now.Add(executor.MaxFutureStartTime) @@ -1152,24 +1296,24 @@ func (service *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, re var nodeID ids.NodeID if args.NodeID == ids.EmptyNodeID { // If ID unspecified, use this node's ID - nodeID = service.vm.ctx.NodeID + nodeID = s.vm.ctx.NodeID } else { nodeID = args.NodeID } // Parse the reward address - rewardAddress, err := avax.ParseServiceAddress(service.addrManager, args.RewardAddress) + rewardAddress, err := avax.ParseServiceAddress(s.addrManager, args.RewardAddress) if err != nil { return fmt.Errorf("problem parsing 'rewardAddress': %w", err) } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.addrManager, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { return err } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -1187,15 +1331,20 @@ func (service *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, re } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(service.addrManager, args.ChangeAddr) + changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { return fmt.Errorf("couldn't parse changeAddr: %w", err) } } + // TODO: Remove after StakeAmount is removed from [args]. 
+ if args.StakeAmount != nil { + args.Weight = *args.StakeAmount + } + // Create the transaction - tx, err := service.vm.txBuilder.NewAddDelegatorTx( - args.GetWeight(), // Stake amount + tx, err := s.vm.txBuilder.NewAddDelegatorTx( + uint64(args.Weight), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time nodeID, // Node ID @@ -1208,12 +1357,12 @@ func (service *Service) AddDelegator(_ *http.Request, args *AddDelegatorArgs, re } reply.TxID = tx.ID() - reply.ChangeAddr, err = service.addrManager.FormatLocalAddress(changeAddr) + reply.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) errs := wrappers.Errs{} errs.Add( err, - service.vm.Builder.AddUnverifiedTx(tx), + s.vm.Builder.AddUnverifiedTx(tx), user.Close(), ) return errs.Err @@ -1230,10 +1379,13 @@ type AddSubnetValidatorArgs struct { // AddSubnetValidator creates and signs and issues a transaction to add a // validator to a subnet other than the primary network -func (service *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorArgs, response *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("Platform: AddSubnetValidator called") +func (s *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValidatorArgs, response *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "addSubnetValidator"), + ) - now := service.vm.clock.Time() + now := s.vm.clock.Time() minAddStakerTime := now.Add(minAddStakerDelay) minAddStakerUnix := json.Uint64(minAddStakerTime.Unix()) maxAddStakerTime := now.Add(executor.MaxFutureStartTime) @@ -1262,12 +1414,12 @@ func (service *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValid } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.addrManager, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { return err } - user, err := 
keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -1284,15 +1436,20 @@ func (service *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValid } changeAddr := keys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(service.addrManager, args.ChangeAddr) + changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { return fmt.Errorf("couldn't parse changeAddr: %w", err) } } + // TODO: Remove after StakeAmount is removed from [args]. + if args.StakeAmount != nil { + args.Weight = *args.StakeAmount + } + // Create the transaction - tx, err := service.vm.txBuilder.NewAddSubnetValidatorTx( - args.GetWeight(), // Stake amount + tx, err := s.vm.txBuilder.NewAddSubnetValidatorTx( + uint64(args.Weight), // Stake amount uint64(args.StartTime), // Start time uint64(args.EndTime), // End time args.NodeID, // Node ID @@ -1305,12 +1462,12 @@ func (service *Service) AddSubnetValidator(_ *http.Request, args *AddSubnetValid } response.TxID = tx.ID() - response.ChangeAddr, err = service.addrManager.FormatLocalAddress(changeAddr) + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) errs := wrappers.Errs{} errs.Add( err, - service.vm.Builder.AddUnverifiedTx(tx), + s.vm.Builder.AddUnverifiedTx(tx), user.Close(), ) return errs.Err @@ -1326,22 +1483,25 @@ type CreateSubnetArgs struct { // CreateSubnet creates and signs and issues a transaction to create a new // subnet -func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("Platform: CreateSubnet called") +func (s *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *api.JSONTxIDChangeAddr) error { + 
s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "createSubnet"), + ) // Parse the control keys - controlKeys, err := avax.ParseServiceAddresses(service.addrManager, args.ControlKeys) + controlKeys, err := avax.ParseServiceAddresses(s.addrManager, args.ControlKeys) if err != nil { return err } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.addrManager, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { return err } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -1359,14 +1519,14 @@ func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, re } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(service.addrManager, args.ChangeAddr) + changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { return fmt.Errorf("couldn't parse changeAddr: %w", err) } } // Create the transaction - tx, err := service.vm.txBuilder.NewCreateSubnetTx( + tx, err := s.vm.txBuilder.NewCreateSubnetTx( uint32(args.Threshold), // Threshold controlKeys.List(), // Control Addresses privKeys.Keys, // Private keys @@ -1377,12 +1537,12 @@ func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, re } response.TxID = tx.ID() - response.ChangeAddr, err = service.addrManager.FormatLocalAddress(changeAddr) + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) errs := wrappers.Errs{} errs.Add( err, - service.vm.Builder.AddUnverifiedTx(tx), + s.vm.Builder.AddUnverifiedTx(tx), user.Close(), ) return errs.Err @@ -1406,17 +1566,20 @@ type ExportAVAXArgs struct { // ExportAVAX exports AVAX 
from the P-Chain to the X-Chain // It must be imported on the X-Chain to complete the transfer -func (service *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("Platform: ExportAVAX called") +func (s *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, response *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "exportAVAX"), + ) if args.Amount == 0 { return errNoAmount } // Get the chainID and parse the to address - chainID, to, err := service.addrManager.ParseAddress(args.To) + chainID, to, err := s.addrManager.ParseAddress(args.To) if err != nil { - chainID, err = service.vm.ctx.BCLookup.Lookup(args.TargetChain) + chainID, err = s.vm.ctx.BCLookup.Lookup(args.TargetChain) if err != nil { return err } @@ -1427,12 +1590,12 @@ func (service *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, respon } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.addrManager, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { return err } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -1450,14 +1613,14 @@ func (service *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, respon } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(service.addrManager, args.ChangeAddr) + changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { return fmt.Errorf("couldn't parse changeAddr: %w", err) } } // Create the transaction - tx, err := service.vm.txBuilder.NewExportTx( + tx, err := 
s.vm.txBuilder.NewExportTx( uint64(args.Amount), // Amount chainID, // ID of the chain to send the funds to to, // Address @@ -1469,12 +1632,12 @@ func (service *Service) ExportAVAX(_ *http.Request, args *ExportAVAXArgs, respon } response.TxID = tx.ID() - response.ChangeAddr, err = service.addrManager.FormatLocalAddress(changeAddr) + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) errs := wrappers.Errs{} errs.Add( err, - service.vm.Builder.AddUnverifiedTx(tx), + s.vm.Builder.AddUnverifiedTx(tx), user.Close(), ) return errs.Err @@ -1494,28 +1657,31 @@ type ImportAVAXArgs struct { // ImportAVAX issues a transaction to import AVAX from the X-chain. The AVAX // must have already been exported from the X-Chain. -func (service *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, response *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("Platform: ImportAVAX called") +func (s *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, response *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "importAVAX"), + ) // Parse the sourceCHain - chainID, err := service.vm.ctx.BCLookup.Lookup(args.SourceChain) + chainID, err := s.vm.ctx.BCLookup.Lookup(args.SourceChain) if err != nil { return fmt.Errorf("problem parsing chainID %q: %w", args.SourceChain, err) } // Parse the to address - to, err := avax.ParseServiceAddress(service.addrManager, args.To) + to, err := avax.ParseServiceAddress(s.addrManager, args.To) if err != nil { // Parse address return fmt.Errorf("couldn't parse argument 'to' to an address: %w", err) } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.addrManager, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { return err } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := 
keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -1533,13 +1699,13 @@ func (service *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, respon } changeAddr := privKeys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(service.addrManager, args.ChangeAddr) + changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { return fmt.Errorf("couldn't parse changeAddr: %w", err) } } - tx, err := service.vm.txBuilder.NewImportTx( + tx, err := s.vm.txBuilder.NewImportTx( chainID, to, privKeys.Keys, @@ -1550,12 +1716,12 @@ func (service *Service) ImportAVAX(_ *http.Request, args *ImportAVAXArgs, respon } response.TxID = tx.ID() - response.ChangeAddr, err = service.addrManager.FormatLocalAddress(changeAddr) + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) errs := wrappers.Errs{} errs.Add( err, - service.vm.Builder.AddUnverifiedTx(tx), + s.vm.Builder.AddUnverifiedTx(tx), user.Close(), ) return errs.Err @@ -1586,8 +1752,11 @@ type CreateBlockchainArgs struct { } // CreateBlockchain issues a transaction to create a new blockchain -func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *api.JSONTxIDChangeAddr) error { - service.vm.ctx.Log.Debug("Platform: CreateBlockchain called") +func (s *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *api.JSONTxIDChangeAddr) error { + s.vm.ctx.Log.Warn("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "createBlockchain"), + ) switch { case args.Name == "": @@ -1601,14 +1770,14 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain return fmt.Errorf("problem parsing genesis data: %w", err) } - vmID, err := service.vm.Chains.LookupVM(args.VMID) + vmID, err := s.vm.Chains.LookupVM(args.VMID) 
if err != nil { return fmt.Errorf("no VM with ID '%s' found", args.VMID) } fxIDs := []ids.ID(nil) for _, fxIDStr := range args.FxIDs { - fxID, err := service.vm.Chains.LookupVM(fxIDStr) + fxID, err := s.vm.Chains.LookupVM(fxIDStr) if err != nil { return fmt.Errorf("no FX with ID '%s' found", fxIDStr) } @@ -1616,7 +1785,7 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain } // If creating AVM instance, use secp256k1fx // TODO: Document FXs and have user specify them in API call - fxIDsSet := ids.Set{} + fxIDsSet := set.Set[ids.ID]{} fxIDsSet.Add(fxIDs...) if vmID == constants.AVMID && !fxIDsSet.Contains(secp256k1fx.ID) { fxIDs = append(fxIDs, secp256k1fx.ID) @@ -1627,12 +1796,12 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain } // Parse the from addresses - fromAddrs, err := avax.ParseServiceAddresses(service.addrManager, args.From) + fromAddrs, err := avax.ParseServiceAddresses(s.addrManager, args.From) if err != nil { return err } - user, err := keystore.NewUserFromKeystore(service.vm.ctx.Keystore, args.Username, args.Password) + user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, args.Username, args.Password) if err != nil { return err } @@ -1650,14 +1819,14 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain } changeAddr := keys.Keys[0].PublicKey().Address() // By default, use a key controlled by the user if args.ChangeAddr != "" { - changeAddr, err = avax.ParseServiceAddress(service.addrManager, args.ChangeAddr) + changeAddr, err = avax.ParseServiceAddress(s.addrManager, args.ChangeAddr) if err != nil { return fmt.Errorf("couldn't parse changeAddr: %w", err) } } // Create the transaction - tx, err := service.vm.txBuilder.NewCreateChainTx( + tx, err := s.vm.txBuilder.NewCreateChainTx( args.SubnetID, genesisBytes, vmID, @@ -1671,12 +1840,12 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain } response.TxID = tx.ID() - 
response.ChangeAddr, err = service.addrManager.FormatLocalAddress(changeAddr) + response.ChangeAddr, err = s.addrManager.FormatLocalAddress(changeAddr) errs := wrappers.Errs{} errs.Add( err, - service.vm.Builder.AddUnverifiedTx(tx), + s.vm.Builder.AddUnverifiedTx(tx), user.Close(), ) return errs.Err @@ -1695,16 +1864,19 @@ type GetBlockchainStatusReply struct { } // GetBlockchainStatus gets the status of a blockchain with the ID [args.BlockchainID]. -func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { - service.vm.ctx.Log.Debug("Platform: GetBlockchainStatus called") +func (s *Service) GetBlockchainStatus(r *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getBlockchainStatus"), + ) if args.BlockchainID == "" { return errMissingBlockchainID } // if its aliased then vm created this chain. 
- if aliasedID, err := service.vm.Chains.Lookup(args.BlockchainID); err == nil { - if service.nodeValidates(aliasedID) { + if aliasedID, err := s.vm.Chains.Lookup(args.BlockchainID); err == nil { + if s.nodeValidates(aliasedID) { reply.Status = status.Validating return nil } @@ -1718,12 +1890,13 @@ func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchain return fmt.Errorf("problem parsing blockchainID %q: %w", args.BlockchainID, err) } - lastAcceptedID, err := service.vm.LastAccepted() + ctx := r.Context() + lastAcceptedID, err := s.vm.LastAccepted(ctx) if err != nil { return fmt.Errorf("problem loading last accepted ID: %w", err) } - exists, err := service.chainExists(lastAcceptedID, blockchainID) + exists, err := s.chainExists(ctx, lastAcceptedID, blockchainID) if err != nil { return fmt.Errorf("problem looking up blockchain: %w", err) } @@ -1732,11 +1905,11 @@ func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchain return nil } - preferredBlk, err := service.vm.Preferred() + preferredBlk, err := s.vm.Preferred() if err != nil { return fmt.Errorf("could not retrieve preferred block, err %w", err) } - preferred, err := service.chainExists(preferredBlk.ID(), blockchainID) + preferred, err := s.chainExists(ctx, preferredBlk.ID(), blockchainID) if err != nil { return fmt.Errorf("problem looking up blockchain: %w", err) } @@ -1748,8 +1921,8 @@ func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchain return nil } -func (service *Service) nodeValidates(blockchainID ids.ID) bool { - chainTx, _, err := service.vm.state.GetTx(blockchainID) +func (s *Service) nodeValidates(blockchainID ids.ID) bool { + chainTx, _, err := s.vm.state.GetTx(blockchainID) if err != nil { return false } @@ -1759,22 +1932,22 @@ func (service *Service) nodeValidates(blockchainID ids.ID) bool { return false } - validators, ok := service.vm.Validators.GetValidators(chain.SubnetID) + validators, ok := 
s.vm.Validators.Get(chain.SubnetID) if !ok { return false } - return validators.Contains(service.vm.ctx.NodeID) + return validators.Contains(s.vm.ctx.NodeID) } -func (service *Service) chainExists(blockID ids.ID, chainID ids.ID) (bool, error) { - state, ok := service.vm.manager.GetState(blockID) +func (s *Service) chainExists(ctx context.Context, blockID ids.ID, chainID ids.ID) (bool, error) { + state, ok := s.vm.manager.GetState(blockID) if !ok { - block, err := service.vm.GetBlock(blockID) + block, err := s.vm.GetBlock(ctx, blockID) if err != nil { return false, err } - state, ok = service.vm.manager.GetState(block.Parent()) + state, ok = s.vm.manager.GetState(block.Parent()) if !ok { return false, errMissingDecisionBlock } @@ -1804,23 +1977,16 @@ type ValidatedByResponse struct { } // ValidatedBy returns the ID of the Subnet that validates [args.BlockchainID] -func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { - service.vm.ctx.Log.Debug("Platform: ValidatedBy called") +func (s *Service) ValidatedBy(r *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "validatedBy"), + ) - chainTx, _, err := service.vm.state.GetTx(args.BlockchainID) - if err != nil { - return fmt.Errorf( - "problem retrieving blockchain %q: %w", - args.BlockchainID, - err, - ) - } - chain, ok := chainTx.Unsigned.(*txs.CreateChainTx) - if !ok { - return fmt.Errorf("%q is not a blockchain", args.BlockchainID) - } - response.SubnetID = chain.SubnetID - return nil + var err error + ctx := r.Context() + response.SubnetID, err = s.vm.GetSubnetID(ctx, args.BlockchainID) + return err } // ValidatesArgs are the arguments to Validates @@ -1834,11 +2000,14 @@ type ValidatesResponse struct { } // Validates returns the IDs of the blockchains validated by [args.SubnetID] -func (service *Service) Validates(_ *http.Request, args 
*ValidatesArgs, response *ValidatesResponse) error { - service.vm.ctx.Log.Debug("Platform: Validates called") +func (s *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "validates"), + ) if args.SubnetID != constants.PrimaryNetworkID { - subnetTx, _, err := service.vm.state.GetTx(args.SubnetID) + subnetTx, _, err := s.vm.state.GetTx(args.SubnetID) if err != nil { return fmt.Errorf( "problem retrieving subnet %q: %w", @@ -1853,7 +2022,7 @@ func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response } // Get the chains that exist - chains, err := service.vm.state.GetChains(args.SubnetID) + chains, err := s.vm.state.GetChains(args.SubnetID) if err != nil { return fmt.Errorf("problem retrieving chains for subnet %q: %w", args.SubnetID, err) } @@ -1887,10 +2056,13 @@ type GetBlockchainsResponse struct { } // GetBlockchains returns all of the blockchains that exist -func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response *GetBlockchainsResponse) error { - service.vm.ctx.Log.Debug("Platform: GetBlockchains called") +func (s *Service) GetBlockchains(_ *http.Request, _ *struct{}, response *GetBlockchainsResponse) error { + s.vm.ctx.Log.Debug("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "getBlockchains"), + ) - subnets, err := service.vm.state.GetSubnets() + subnets, err := s.vm.state.GetSubnets() if err != nil { return fmt.Errorf("couldn't retrieve subnets: %w", err) } @@ -1898,7 +2070,7 @@ func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response response.Blockchains = []APIBlockchain{} for _, subnet := range subnets { subnetID := subnet.ID() - chains, err := service.vm.state.GetChains(subnetID) + chains, err := s.vm.state.GetChains(subnetID) if err != nil { return fmt.Errorf( "couldn't retrieve chains for subnet %q: %w", @@ 
-1922,7 +2094,7 @@ func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response } } - chains, err := service.vm.state.GetChains(constants.PrimaryNetworkID) + chains, err := s.vm.state.GetChains(constants.PrimaryNetworkID) if err != nil { return fmt.Errorf("couldn't retrieve subnets: %w", err) } @@ -1944,8 +2116,11 @@ func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response } // IssueTx issues a tx -func (service *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { - service.vm.ctx.Log.Debug("Platform: IssueTx called") +func (s *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response *api.JSONTxID) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "issueTx"), + ) txBytes, err := formatting.Decode(args.Encoding, args.Tx) if err != nil { @@ -1955,7 +2130,7 @@ func (service *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response if err != nil { return fmt.Errorf("couldn't parse tx: %w", err) } - if err := service.vm.Builder.AddUnverifiedTx(tx); err != nil { + if err := s.vm.Builder.AddUnverifiedTx(tx); err != nil { return fmt.Errorf("couldn't issue tx: %w", err) } @@ -1964,10 +2139,13 @@ func (service *Service) IssueTx(_ *http.Request, args *api.FormattedTx, response } // GetTx gets a tx -func (service *Service) GetTx(_ *http.Request, args *api.GetTxArgs, response *api.GetTxReply) error { - service.vm.ctx.Log.Debug("Platform: GetTx called") +func (s *Service) GetTx(_ *http.Request, args *api.GetTxArgs, response *api.GetTxReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getTx"), + ) - tx, _, err := service.vm.state.GetTx(args.TxID) + tx, _, err := s.vm.state.GetTx(args.TxID) if err != nil { return fmt.Errorf("couldn't get tx: %w", err) } @@ -1975,7 +2153,7 @@ func (service *Service) GetTx(_ *http.Request, args *api.GetTxArgs, response *ap response.Encoding 
= args.Encoding if args.Encoding == formatting.JSON { - tx.Unsigned.InitCtx(service.vm.ctx) + tx.Unsigned.InitCtx(s.vm.ctx) response.Tx = tx return nil } @@ -1989,16 +2167,6 @@ func (service *Service) GetTx(_ *http.Request, args *api.GetTxArgs, response *ap type GetTxStatusArgs struct { TxID ids.ID `json:"txID"` - // Returns a response that looks like this: - // { - // "jsonrpc": "2.0", - // "result": { - // "status":"[Status]", - // "reason":"[Reason tx was dropped, if applicable]" - // }, - // "id": 1 - // } - // "reason" is only present if the status is dropped } type GetTxStatusResponse struct { @@ -2009,12 +2177,13 @@ type GetTxStatusResponse struct { } // GetTxStatus gets a tx's status -func (service *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, response *GetTxStatusResponse) error { - service.vm.ctx.Log.Debug("Platform: GetTxStatus called", - zap.Stringer("txID", args.TxID), +func (s *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, response *GetTxStatusResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getTxStatus"), ) - _, txStatus, err := service.vm.state.GetTx(args.TxID) + _, txStatus, err := s.vm.state.GetTx(args.TxID) if err == nil { // Found the status. Report it. response.Status = txStatus return nil @@ -2025,13 +2194,13 @@ func (service *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, resp // The status of this transaction is not in the database - check if the tx // is in the preferred block's db. If so, return that it's processing. 
- prefBlk, err := service.vm.Preferred() + prefBlk, err := s.vm.Preferred() if err != nil { return err } preferredID := prefBlk.ID() - onAccept, ok := service.vm.manager.GetState(preferredID) + onAccept, ok := s.vm.manager.GetState(preferredID) if !ok { return fmt.Errorf("could not retrieve state for block %s", preferredID) } @@ -2046,7 +2215,7 @@ func (service *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, resp return err } - if service.vm.Builder.Has(args.TxID) { + if s.vm.Builder.Has(args.TxID) { // Found the tx in the mempool. Report tx is processing. response.Status = status.Processing return nil @@ -2054,8 +2223,8 @@ func (service *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, resp // Note: we check if tx is dropped only after having looked for it // in the database and the mempool, because dropped txs may be re-issued. - reason, dropped := service.vm.Builder.GetDropReason(args.TxID) - if !dropped { + reason := s.vm.Builder.GetDropReason(args.TxID) + if reason == nil { // The tx isn't being tracked by the node. response.Status = status.Unknown return nil @@ -2063,7 +2232,7 @@ func (service *Service) GetTxStatus(_ *http.Request, args *GetTxStatusArgs, resp // The tx was recently dropped because it was invalid. 
response.Status = status.Dropped - response.Reason = reason + response.Reason = reason.Error() return nil } @@ -2083,58 +2252,6 @@ type GetStakeReply struct { Encoding formatting.Encoding `json:"encoding"` } -// Takes in a staker and a set of addresses -// Returns: -// 1) The total amount staked by addresses in [addrs] -// 2) The staked outputs -func (service *Service) getStakeHelper(tx *txs.Tx, addrs ids.ShortSet, totalAmountStaked map[ids.ID]uint64) []avax.TransferableOutput { - staker, ok := tx.Unsigned.(txs.PermissionlessStaker) - if !ok { - return nil - } - - stake := staker.Stake() - stakedOuts := make([]avax.TransferableOutput, 0, len(stake)) - // Go through all of the staked outputs - for _, output := range stake { - out := output.Out - if lockedOut, ok := out.(*stakeable.LockOut); ok { - // This output can only be used for staking until [stakeOnlyUntil] - out = lockedOut.TransferableOut - } - secpOut, ok := out.(*secp256k1fx.TransferOutput) - if !ok { - continue - } - - // Check whether this output is owned by one of the given addresses - contains := false - for _, addr := range secpOut.Addrs { - if addrs.Contains(addr) { - contains = true - break - } - } - if !contains { - // This output isn't owned by one of the given addresses. Ignore. - continue - } - - assetID := output.AssetID() - newAmount, err := math.Add64(totalAmountStaked[assetID], secpOut.Amt) - if err != nil { - newAmount = stdmath.MaxUint64 - } - totalAmountStaked[assetID] = newAmount - - stakedOuts = append( - stakedOuts, - *output, - ) - } - return stakedOuts -} - // GetStake returns the amount of nAVAX that [args.Addresses] have cumulatively // staked on the Primary Network. 
// @@ -2143,19 +2260,22 @@ func (service *Service) getStakeHelper(tx *txs.Tx, addrs ids.ShortSet, totalAmou // This method only concerns itself with the Primary Network, not subnets // TODO: Improve the performance of this method by maintaining this data // in a data structure rather than re-calculating it by iterating over stakers -func (service *Service) GetStake(_ *http.Request, args *GetStakeArgs, response *GetStakeReply) error { - service.vm.ctx.Log.Debug("Platform: GetStake called") +func (s *Service) GetStake(_ *http.Request, args *GetStakeArgs, response *GetStakeReply) error { + s.vm.ctx.Log.Debug("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "getStake"), + ) if len(args.Addresses) > maxGetStakeAddrs { return fmt.Errorf("%d addresses provided but this method can take at most %d", len(args.Addresses), maxGetStakeAddrs) } - addrs, err := avax.ParseServiceAddresses(service.addrManager, args.Addresses) + addrs, err := avax.ParseServiceAddresses(s.addrManager, args.Addresses) if err != nil { return err } - currentStakerIterator, err := service.vm.state.GetCurrentStakerIterator() + currentStakerIterator, err := s.vm.state.GetCurrentStakerIterator() if err != nil { return err } @@ -2168,15 +2288,15 @@ func (service *Service) GetStake(_ *http.Request, args *GetStakeArgs, response * for currentStakerIterator.Next() { // Iterates over current stakers staker := currentStakerIterator.Value() - tx, _, err := service.vm.state.GetTx(staker.TxID) + tx, _, err := s.vm.state.GetTx(staker.TxID) if err != nil { return err } - stakedOuts = append(stakedOuts, service.getStakeHelper(tx, addrs, totalAmountStaked)...) + stakedOuts = append(stakedOuts, getStakeHelper(tx, addrs, totalAmountStaked)...) 
} - pendingStakerIterator, err := service.vm.state.GetPendingStakerIterator() + pendingStakerIterator, err := s.vm.state.GetPendingStakerIterator() if err != nil { return err } @@ -2185,16 +2305,16 @@ func (service *Service) GetStake(_ *http.Request, args *GetStakeArgs, response * for pendingStakerIterator.Next() { // Iterates over pending stakers staker := pendingStakerIterator.Value() - tx, _, err := service.vm.state.GetTx(staker.TxID) + tx, _, err := s.vm.state.GetTx(staker.TxID) if err != nil { return err } - stakedOuts = append(stakedOuts, service.getStakeHelper(tx, addrs, totalAmountStaked)...) + stakedOuts = append(stakedOuts, getStakeHelper(tx, addrs, totalAmountStaked)...) } response.Stakeds = newJSONBalanceMap(totalAmountStaked) - response.Staked = response.Stakeds[service.vm.ctx.AVAXAssetID] + response.Staked = response.Stakeds[s.vm.ctx.AVAXAssetID] response.Outputs = make([]string, len(stakedOuts)) for i, output := range stakedOuts { bytes, err := txs.Codec.Marshal(txs.Version, output) @@ -2225,16 +2345,21 @@ type GetMinStakeReply struct { } // GetMinStake returns the minimum staking amount in nAVAX. 
-func (service *Service) GetMinStake(_ *http.Request, args *GetMinStakeArgs, reply *GetMinStakeReply) error { +func (s *Service) GetMinStake(_ *http.Request, args *GetMinStakeArgs, reply *GetMinStakeReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getMinStake"), + ) + if args.SubnetID == constants.PrimaryNetworkID { - timestamp := service.vm.state.GetTimestamp() - minValidatorStake, _, minDelegatorStake, _, _, _, _, _, _, _ := executor.GetCurrentInflationSettings(timestamp, service.vm.ctx.NetworkID, &service.vm.Config) + timestamp := s.vm.state.GetTimestamp() + minValidatorStake, _, minDelegatorStake, _, _, _, _, _, _, _ := executor.GetCurrentInflationSettings(timestamp, s.vm.ctx.NetworkID, &s.vm.Config) reply.MinValidatorStake = json.Uint64(minValidatorStake) reply.MinDelegatorStake = json.Uint64(minDelegatorStake) return nil } - transformSubnetIntf, err := service.vm.state.GetSubnetTransformation(args.SubnetID) + transformSubnetIntf, err := s.vm.state.GetSubnetTransformation(args.SubnetID) if err != nil { return fmt.Errorf( "failed fetching subnet transformation for %s: %w", @@ -2265,16 +2390,22 @@ type GetTotalStakeArgs struct { // GetTotalStakeReply is the response from calling GetTotalStake. type GetTotalStakeReply struct { - // TODO: deprecate one of these fields - Stake json.Uint64 `json:"stake"` + // Deprecated: Use Weight instead. 
+ Stake json.Uint64 `json:"stake"` + Weight json.Uint64 `json:"weight"` } // GetTotalStake returns the total amount staked on the Primary Network -func (service *Service) GetTotalStake(_ *http.Request, args *GetTotalStakeArgs, reply *GetTotalStakeReply) error { - vdrs, ok := service.vm.Validators.GetValidators(args.SubnetID) +func (s *Service) GetTotalStake(_ *http.Request, args *GetTotalStakeArgs, reply *GetTotalStakeReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getTotalStake"), + ) + + vdrs, ok := s.vm.Validators.Get(args.SubnetID) if !ok { - return errNoValidators + return errMissingValidatorSet } weight := json.Uint64(vdrs.Weight()) reply.Weight = weight @@ -2297,19 +2428,24 @@ type GetMaxStakeAmountReply struct { // GetMaxStakeAmount returns the maximum amount of nAVAX staking to the named // node during the time period. -func (service *Service) GetMaxStakeAmount(_ *http.Request, args *GetMaxStakeAmountArgs, reply *GetMaxStakeAmountReply) error { +func (s *Service) GetMaxStakeAmount(_ *http.Request, args *GetMaxStakeAmountArgs, reply *GetMaxStakeAmountReply) error { + s.vm.ctx.Log.Debug("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "getMaxStakeAmount"), + ) + startTime := time.Unix(int64(args.StartTime), 0) endTime := time.Unix(int64(args.EndTime), 0) if startTime.After(endTime) { return errStartAfterEndTime } - now := service.vm.state.GetTimestamp() + now := s.vm.state.GetTimestamp() if startTime.Before(now) { return errStartTimeInThePast } - staker, err := executor.GetValidator(service.vm.state, args.SubnetID, args.NodeID) + staker, err := executor.GetValidator(s.vm.state, args.SubnetID, args.NodeID) if err == database.ErrNotFound { return nil } @@ -2324,7 +2460,7 @@ func (service *Service) GetMaxStakeAmount(_ *http.Request, args *GetMaxStakeAmou return nil } - maxStakeAmount, err := executor.GetMaxWeight(service.vm.state, staker, startTime, endTime) + 
maxStakeAmount, err := executor.GetMaxWeight(s.vm.state, staker, startTime, endTime) reply.Amount = json.Uint64(maxStakeAmount) return err } @@ -2341,10 +2477,13 @@ type GetRewardUTXOsReply struct { // GetRewardUTXOs returns the UTXOs that were rewarded after the provided // transaction's staking period ended. -func (service *Service) GetRewardUTXOs(_ *http.Request, args *api.GetTxArgs, reply *GetRewardUTXOsReply) error { - service.vm.ctx.Log.Debug("Platform: GetRewardUTXOs called") +func (s *Service) GetRewardUTXOs(_ *http.Request, args *api.GetTxArgs, reply *GetRewardUTXOsReply) error { + s.vm.ctx.Log.Debug("deprecated API called", + zap.String("service", "platform"), + zap.String("method", "getRewardUTXOs"), + ) - utxos, err := service.vm.state.GetRewardUTXOs(args.TxID) + utxos, err := s.vm.state.GetRewardUTXOs(args.TxID) if err != nil { return fmt.Errorf("couldn't get reward UTXOs: %w", err) } @@ -2374,10 +2513,13 @@ type GetTimestampReply struct { } // GetTimestamp returns the current timestamp on chain. -func (service *Service) GetTimestamp(_ *http.Request, args *struct{}, reply *GetTimestampReply) error { - service.vm.ctx.Log.Debug("Platform: GetTimestamp called") +func (s *Service) GetTimestamp(_ *http.Request, _ *struct{}, reply *GetTimestampReply) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getTimestamp"), + ) - reply.Timestamp = service.vm.state.GetTimestamp() + reply.Timestamp = s.vm.state.GetTimestamp() return nil } @@ -2389,40 +2531,51 @@ type GetValidatorsAtArgs struct { // GetValidatorsAtReply is the response from GetValidatorsAt type GetValidatorsAtReply struct { + // TODO should we change this to map[ids.NodeID]*validators.Validator? + // We'd have to add a MarshalJSON method to validators.Validator. Validators map[ids.NodeID]uint64 `json:"validators"` } // GetValidatorsAt returns the weights of the validator set of a provided subnet // at the specified height. 
-func (service *Service) GetValidatorsAt(_ *http.Request, args *GetValidatorsAtArgs, reply *GetValidatorsAtReply) error { +func (s *Service) GetValidatorsAt(r *http.Request, args *GetValidatorsAtArgs, reply *GetValidatorsAtReply) error { height := uint64(args.Height) - service.vm.ctx.Log.Debug("Platform: GetValidatorsAt called", + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getValidatorsAt"), zap.Uint64("height", height), zap.Stringer("subnetID", args.SubnetID), ) + ctx := r.Context() var err error - reply.Validators, err = service.vm.GetValidatorSet(height, args.SubnetID) + vdrs, err := s.vm.GetValidatorSet(ctx, height, args.SubnetID) if err != nil { - return fmt.Errorf("couldn't get validator set: %w", err) + return fmt.Errorf("failed to get validator set: %w", err) + } + reply.Validators = make(map[ids.NodeID]uint64, len(vdrs)) + for _, vdr := range vdrs { + reply.Validators[vdr.NodeID] = vdr.Weight } return nil } -func (service *Service) GetBlock(_ *http.Request, args *api.GetBlockArgs, response *api.GetBlockResponse) error { - service.vm.ctx.Log.Debug("Platform: GetBlock called", +func (s *Service) GetBlock(_ *http.Request, args *api.GetBlockArgs, response *api.GetBlockResponse) error { + s.vm.ctx.Log.Debug("API called", + zap.String("service", "platform"), + zap.String("method", "getBlock"), zap.Stringer("blkID", args.BlockID), zap.Stringer("encoding", args.Encoding), ) - block, err := service.vm.manager.GetStatelessBlock(args.BlockID) + block, err := s.vm.manager.GetStatelessBlock(args.BlockID) if err != nil { return fmt.Errorf("couldn't get block with id %s: %w", args.BlockID, err) } response.Encoding = args.Encoding if args.Encoding == formatting.JSON { - block.InitCtx(service.vm.ctx) + block.InitCtx(s.vm.ctx) response.Block = block return nil } @@ -2434,3 +2587,86 @@ func (service *Service) GetBlock(_ *http.Request, args *api.GetBlockArgs, respon return nil } + +func (s *Service) getAPIUptime(staker 
*state.Staker) (*json.Float32, error) { + // Only report uptimes that we have been actively tracking. + if constants.PrimaryNetworkID != staker.SubnetID && !s.vm.TrackedSubnets.Contains(staker.SubnetID) { + return nil, nil + } + + rawUptime, err := s.vm.uptimeManager.CalculateUptimePercentFrom(staker.NodeID, staker.SubnetID, staker.StartTime) + if err != nil { + return nil, err + } + // Transform this to a percentage (0-100) to make it consistent + // with observedUptime in info.peers API + uptime := json.Float32(rawUptime * 100) + return &uptime, nil +} + +func (s *Service) getAPIOwner(owner *secp256k1fx.OutputOwners) (*platformapi.Owner, error) { + apiOwner := &platformapi.Owner{ + Locktime: json.Uint64(owner.Locktime), + Threshold: json.Uint32(owner.Threshold), + } + for _, addr := range owner.Addrs { + addrStr, err := s.addrManager.FormatLocalAddress(addr) + if err != nil { + return nil, err + } + apiOwner.Addresses = append(apiOwner.Addresses, addrStr) + } + return apiOwner, nil +} + +// Takes in a staker and a set of addresses +// Returns: +// 1) The total amount staked by addresses in [addrs] +// 2) The staked outputs +func getStakeHelper(tx *txs.Tx, addrs set.Set[ids.ShortID], totalAmountStaked map[ids.ID]uint64) []avax.TransferableOutput { + staker, ok := tx.Unsigned.(txs.PermissionlessStaker) + if !ok { + return nil + } + + stake := staker.Stake() + stakedOuts := make([]avax.TransferableOutput, 0, len(stake)) + // Go through all of the staked outputs + for _, output := range stake { + out := output.Out + if lockedOut, ok := out.(*stakeable.LockOut); ok { + // This output can only be used for staking until [stakeOnlyUntil] + out = lockedOut.TransferableOut + } + secpOut, ok := out.(*secp256k1fx.TransferOutput) + if !ok { + continue + } + + // Check whether this output is owned by one of the given addresses + contains := false + for _, addr := range secpOut.Addrs { + if addrs.Contains(addr) { + contains = true + break + } + } + if !contains { + // This 
output isn't owned by one of the given addresses. Ignore. + continue + } + + assetID := output.AssetID() + newAmount, err := math.Add64(totalAmountStaked[assetID], secpOut.Amt) + if err != nil { + newAmount = stdmath.MaxUint64 + } + totalAmountStaked[assetID] = newAmount + + stakedOuts = append( + stakedOuts, + *output, + ) + } + return stakedOuts +} diff --git a/avalanchego/vms/platformvm/service_test.go b/avalanchego/vms/platformvm/service_test.go index bd0f8d51..12cd7546 100644 --- a/avalanchego/vms/platformvm/service_test.go +++ b/avalanchego/vms/platformvm/service_test.go @@ -1,10 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package platformvm import ( - "bytes" + "context" "errors" "fmt" "math/rand" @@ -17,13 +17,14 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/api/keystore" + "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" @@ -70,13 +71,16 @@ func defaultService(t *testing.T) (*Service, *mutableSharedMemory) { vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() ks := keystore.New(logging.NoLog{}, manager.NewMemDB(version.Semantic1_0_0)) - if err := ks.CreateUser(testUsername, testPassword); err != nil { - t.Fatal(err) - } + err := ks.CreateUser(testUsername, testPassword) + require.NoError(t, err) + vm.ctx.Keystore = ks.NewBlockchainKeyStore(vm.ctx.ChainID) 
return &Service{ vm: vm, addrManager: avax.NewAddressManager(vm.ctx), + stakerAttributesCache: &cache.LRU[ids.ID, *stakerAttributes]{ + Size: stakerAttributesCacheSize, + }, }, mutableSharedMemory } @@ -85,116 +89,92 @@ func defaultAddress(t *testing.T, service *Service) { service.vm.ctx.Lock.Lock() defer service.vm.ctx.Lock.Unlock() user, err := vmkeystore.NewUserFromKeystore(service.vm.ctx.Keystore, testUsername, testPassword) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + pk, err := testKeyFactory.ToPrivateKey(testPrivateKey) - if err != nil { - t.Fatal(err) - } - privKey := pk.(*crypto.PrivateKeySECP256K1R) - if err := user.PutKeys(privKey, keys[0]); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + + err = user.PutKeys(pk, keys[0]) + require.NoError(t, err) } func TestAddValidator(t *testing.T) { - expectedJSONString := `{"username":"","password":"","from":null,"changeAddr":"","txID":"11111111111111111111111111111111LpoYY","startTime":"0","endTime":"0","nodeID":"NodeID-111111111111111111116DBWJs","rewardAddress":"","delegationFeeRate":"0.0000"}` + expectedJSONString := `{"username":"","password":"","from":null,"changeAddr":"","txID":"11111111111111111111111111111111LpoYY","startTime":"0","endTime":"0","weight":"0","nodeID":"NodeID-111111111111111111116DBWJs","rewardAddress":"","delegationFeeRate":"0.0000"}` args := AddValidatorArgs{} bytes, err := stdjson.Marshal(&args) - if err != nil { - t.Fatal(err) - } - jsonString := string(bytes) - if jsonString != expectedJSONString { - t.Fatalf("Expected: %s\nResult: %s", expectedJSONString, jsonString) - } + require.NoError(t, err) + require.Equal(t, expectedJSONString, string(bytes)) } func TestCreateBlockchainArgsParsing(t *testing.T) { jsonString := `{"vmID":"lol","fxIDs":["secp256k1"], "name":"awesome", "username":"bob loblaw", "password":"yeet", "genesisData":"SkB92YpWm4Q2iPnLGCuDPZPgUQMxajqQQuz91oi3xD984f8r"}` args := CreateBlockchainArgs{} err := stdjson.Unmarshal([]byte(jsonString), 
&args) - if err != nil { - t.Fatal(err) - } - if _, err = stdjson.Marshal(args.GenesisData); err != nil { - t.Fatal(err) - } + require.NoError(t, err) + + _, err = stdjson.Marshal(args.GenesisData) + require.NoError(t, err) } func TestExportKey(t *testing.T) { + require := require.New(t) jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1Zoinks!","address":"` + testAddress + `"}` args := ExportKeyArgs{} err := stdjson.Unmarshal([]byte(jsonString), &args) - if err != nil { - t.Fatal(err) - } + require.NoError(err) service, _ := defaultService(t) defaultAddress(t, service) service.vm.ctx.Lock.Lock() defer func() { - if err := service.vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := service.vm.Shutdown(context.Background()) + require.NoError(err) service.vm.ctx.Lock.Unlock() }() reply := ExportKeyReply{} - if err := service.ExportKey(nil, &args, &reply); err != nil { - t.Fatal(err) - } + err = service.ExportKey(nil, &args, &reply) + require.NoError(err) - if !bytes.Equal(testPrivateKey, reply.PrivateKey.Bytes()) { - t.Fatalf("Expected %v, got %v", testPrivateKey, reply.PrivateKey.Bytes()) - } + require.Equal(testPrivateKey, reply.PrivateKey.Bytes()) } func TestImportKey(t *testing.T) { + require := require.New(t) jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1Zoinks!","privateKey":"PrivateKey-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN"}` args := ImportKeyArgs{} err := stdjson.Unmarshal([]byte(jsonString), &args) - if err != nil { - t.Fatal(err) - } + require.NoError(err) service, _ := defaultService(t) service.vm.ctx.Lock.Lock() defer func() { - if err := service.vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := service.vm.Shutdown(context.Background()) + require.NoError(err) service.vm.ctx.Lock.Unlock() }() reply := api.JSONAddress{} - if err := service.ImportKey(nil, &args, &reply); err != nil { - t.Fatal(err) - } - if testAddress != reply.Address { - t.Fatalf("Expected %q, got %q", testAddress, reply.Address) 
- } + err = service.ImportKey(nil, &args, &reply) + require.NoError(err) + require.Equal(testAddress, reply.Address) } // Test issuing a tx and accepted func TestGetTxStatus(t *testing.T) { + require := require.New(t) service, mutableSharedMemory := defaultService(t) defaultAddress(t, service) service.vm.ctx.Lock.Lock() defer func() { - if err := service.vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := service.vm.Shutdown(context.Background()) + require.NoError(err) service.vm.ctx.Lock.Unlock() }() - factory := crypto.FactorySECP256K1R{} - recipientKeyIntf, err := factory.NewPrivateKey() - if err != nil { - t.Fatal(err) - } - recipientKey := recipientKeyIntf.(*crypto.PrivateKeySECP256K1R) + factory := secp256k1.Factory{} + recipientKey, err := factory.NewPrivateKey() + require.NoError(err) m := atomic.NewMemory(prefixdb.New([]byte{}, service.vm.dbManager.Current().Database)) @@ -218,27 +198,30 @@ func TestGetTxStatus(t *testing.T) { }, } utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + inputID := utxo.InputID() - if err := peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{service.vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ - Key: inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - recipientKey.PublicKey().Address().Bytes(), + err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + service.vm.ctx.ChainID: { + PutRequests: []*atomic.Element{ + { + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + recipientKey.PublicKey().Address().Bytes(), + }, + }, + }, }, - }}}}); err != nil { - t.Fatal(err) - } + }) + require.NoError(err) oldSharedMemory := mutableSharedMemory.SharedMemory mutableSharedMemory.SharedMemory = sm - tx, err := service.vm.txBuilder.NewImportTx(xChainID, ids.ShortEmpty, []*crypto.PrivateKeySECP256K1R{recipientKey}, ids.ShortEmpty) - if err != nil { - t.Fatal(err) - } + tx, err := service.vm.txBuilder.NewImportTx(xChainID, ids.ShortEmpty, 
[]*secp256k1.PrivateKey{recipientKey}, ids.ShortEmpty) + require.NoError(err) + mutableSharedMemory.SharedMemory = oldSharedMemory var ( @@ -246,44 +229,34 @@ func TestGetTxStatus(t *testing.T) { resp GetTxStatusResponse ) err = service.GetTxStatus(nil, arg, &resp) - switch { - case err != nil: - t.Fatal(err) - case resp.Status != status.Unknown: - t.Fatalf("status should be unknown but is %s", resp.Status) - case resp.Reason != "": - t.Fatalf("reason should be empty but is %s", resp.Reason) - } + require.NoError(err) + require.Equal(status.Unknown, resp.Status) + require.Zero(resp.Reason) // put the chain in existing chain list - if err := service.vm.Builder.AddUnverifiedTx(tx); err == nil { - t.Fatal("should have erred because of missing funds") - } + err = service.vm.Builder.AddUnverifiedTx(tx) + require.Error(err) mutableSharedMemory.SharedMemory = sm - if err := service.vm.Builder.AddUnverifiedTx(tx); err != nil { - t.Fatal(err) - } else if block, err := service.vm.BuildBlock(); err != nil { - t.Fatal(err) - } else if blk, ok := block.(*blockexecutor.Block); !ok { - t.Fatalf("should be *blockexecutor.Block but is %T", block) - } else if err := blk.Verify(); err != nil { - t.Fatal(err) - } else if err := blk.Accept(); err != nil { - t.Fatal(err) - } + err = service.vm.Builder.AddUnverifiedTx(tx) + require.NoError(err) + + block, err := service.vm.BuildBlock(context.Background()) + require.NoError(err) + + blk := block.(*blockexecutor.Block) + err = blk.Verify(context.Background()) + require.NoError(err) + + err = blk.Accept(context.Background()) + require.NoError(err) resp = GetTxStatusResponse{} // reset err = service.GetTxStatus(nil, arg, &resp) - switch { - case err != nil: - t.Fatal(err) - case resp.Status != status.Committed: - t.Fatalf("status should be Committed but is %s", resp.Status) - case resp.Reason != "": - t.Fatalf("reason should be empty but is %s", resp.Reason) - } + require.NoError(err) + require.Equal(status.Committed, resp.Status) + 
require.Zero(resp.Reason) } // Test issuing and then retrieving a transaction @@ -303,7 +276,7 @@ func TestGetTx(t *testing.T) { constants.AVMID, nil, "chain name", - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, keys[0].PublicKey().Address(), // change addr ) }, @@ -318,7 +291,7 @@ func TestGetTx(t *testing.T) { ids.GenerateTestNodeID(), ids.GenerateTestShortID(), 0, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), // change addr ) }, @@ -330,7 +303,7 @@ func TestGetTx(t *testing.T) { 100, service.vm.ctx.XChainID, ids.GenerateTestShortID(), - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), // change addr ) }, @@ -339,91 +312,87 @@ func TestGetTx(t *testing.T) { for _, test := range tests { for _, encoding := range encodings { - service, _ := defaultService(t) - defaultAddress(t, service) - service.vm.ctx.Lock.Lock() + testName := fmt.Sprintf("test '%s - %s'", + test.description, + encoding.String(), + ) + t.Run(testName, func(t *testing.T) { + require := require.New(t) + service, _ := defaultService(t) + defaultAddress(t, service) + service.vm.ctx.Lock.Lock() + + tx, err := test.createTx(service) + require.NoError(err) + + arg := &api.GetTxArgs{ + TxID: tx.ID(), + Encoding: encoding, + } + var response api.GetTxReply + err = service.GetTx(nil, arg, &response) + require.Error(err) - tx, err := test.createTx(service) - if err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) - } - arg := &api.GetTxArgs{ - TxID: tx.ID(), - Encoding: encoding, - } - var response api.GetTxReply - if err := service.GetTx(nil, arg, &response); err == nil { - t.Fatalf("failed test '%s - %s': haven't issued tx yet so shouldn't be able to get it", test.description, encoding.String()) - } - if err := 
service.vm.Builder.AddUnverifiedTx(tx); err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) - } + err = service.vm.Builder.AddUnverifiedTx(tx) + require.NoError(err) - block, err := service.vm.BuildBlock() - if err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) - } - if err := block.Verify(); err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) - } - if err := block.Accept(); err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) - } - if blk, ok := block.(snowman.OracleBlock); ok { // For proposal blocks, commit them - options, err := blk.Options() - if !errors.Is(err, snowman.ErrNotOracle) { - if err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) - } - commit := options[0].(*blockexecutor.Block) - if _, ok := commit.Block.(*blocks.ApricotCommitBlock); !ok { - t.Fatalf("failed test '%s - %s': should prefer to commit", test.description, encoding.String()) - } - if err := commit.Verify(); err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) - } - if err := commit.Accept(); err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) + block, err := service.vm.BuildBlock(context.Background()) + require.NoError(err) + + err = block.Verify(context.Background()) + require.NoError(err) + + err = block.Accept(context.Background()) + require.NoError(err) + + if blk, ok := block.(snowman.OracleBlock); ok { // For proposal blocks, commit them + options, err := blk.Options(context.Background()) + if !errors.Is(err, snowman.ErrNotOracle) { + require.NoError(err) + + commit := options[0].(*blockexecutor.Block) + _, ok := commit.Block.(*blocks.BanffCommitBlock) + require.True(ok) + + err := commit.Verify(context.Background()) + require.NoError(err) + + err = commit.Accept(context.Background()) + 
require.NoError(err) } } - } - if err := service.GetTx(nil, arg, &response); err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) - } - switch encoding { - case formatting.Hex: - // we're always guaranteed a string for hex encodings. - responseTxBytes, err := formatting.Decode(response.Encoding, response.Tx.(string)) - if err != nil { - t.Fatalf("failed test '%s - %s': %s", test.description, encoding.String(), err) - } - if !bytes.Equal(responseTxBytes, tx.Bytes()) { - t.Fatalf("failed test '%s - %s': byte representation of tx in response is incorrect", test.description, encoding.String()) - } - case formatting.JSON: - if response.Tx != tx { - t.Fatalf("failed test '%s - %s': byte representation of tx in response is incorrect", test.description, encoding.String()) + err = service.GetTx(nil, arg, &response) + require.NoError(err) + + switch encoding { + case formatting.Hex: + // we're always guaranteed a string for hex encodings. + responseTxBytes, err := formatting.Decode(response.Encoding, response.Tx.(string)) + require.NoError(err) + require.Equal(tx.Bytes(), responseTxBytes) + + case formatting.JSON: + require.Equal(tx, response.Tx) } - } - if err := service.vm.Shutdown(); err != nil { - t.Fatal(err) - } - service.vm.ctx.Lock.Unlock() + err = service.vm.Shutdown(context.Background()) + require.NoError(err) + service.vm.ctx.Lock.Unlock() + }) } } } // Test method GetBalance func TestGetBalance(t *testing.T) { + require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) service.vm.ctx.Lock.Lock() defer func() { - if err := service.vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := service.vm.Shutdown(context.Background()) + require.NoError(err) service.vm.ctx.Lock.Unlock() }() @@ -436,21 +405,13 @@ func TestGetBalance(t *testing.T) { }, } reply := GetBalanceResponse{} - if err := service.GetBalance(nil, &request, &reply); err != nil { - t.Fatal(err) - } - if reply.Balance != 
json.Uint64(defaultBalance) { - t.Fatalf("Wrong balance. Expected %d ; Returned %d", defaultBalance, reply.Balance) - } - if reply.Unlocked != json.Uint64(defaultBalance) { - t.Fatalf("Wrong unlocked balance. Expected %d ; Returned %d", defaultBalance, reply.Unlocked) - } - if reply.LockedStakeable != 0 { - t.Fatalf("Wrong locked stakeable balance. Expected %d ; Returned %d", reply.LockedStakeable, 0) - } - if reply.LockedNotStakeable != 0 { - t.Fatalf("Wrong locked not stakeable balance. Expected %d ; Returned %d", reply.LockedNotStakeable, 0) - } + + require.NoError(service.GetBalance(nil, &request, &reply)) + + require.Equal(json.Uint64(defaultBalance), reply.Balance) + require.Equal(json.Uint64(defaultBalance), reply.Unlocked) + require.Equal(json.Uint64(0), reply.LockedStakeable) + require.Equal(json.Uint64(0), reply.LockedNotStakeable) } } @@ -460,7 +421,7 @@ func TestGetStake(t *testing.T) { defaultAddress(t, service) service.vm.ctx.Lock.Lock() defer func() { - require.NoError(service.vm.Shutdown()) + require.NoError(service.vm.Shutdown(context.Background())) service.vm.ctx.Lock.Unlock() }() @@ -491,11 +452,11 @@ func TestGetStake(t *testing.T) { require.NoError(err) out := output.Out.(*secp256k1fx.TransferOutput) - require.EqualValues(out.Amount(), defaultWeight) - require.EqualValues(out.Threshold, 1) + require.EqualValues(defaultWeight, out.Amount()) + require.EqualValues(1, out.Threshold) require.Len(out.Addrs, 1) require.Equal(keys[i].PublicKey().Address(), out.Addrs[0]) - require.EqualValues(out.Locktime, 0) + require.Zero(out.Locktime) } // Make sure this works for multiple addresses @@ -520,8 +481,8 @@ func TestGetStake(t *testing.T) { out := output.Out.(*secp256k1fx.TransferOutput) require.EqualValues(defaultWeight, out.Amount()) - require.EqualValues(out.Threshold, 1) - require.EqualValues(out.Locktime, 0) + require.EqualValues(1, out.Threshold) + require.Zero(out.Locktime) require.Len(out.Addrs, 1) } @@ -537,16 +498,17 @@ func TestGetStake(t 
*testing.T) { delegatorEndTime, delegatorNodeID, ids.GenerateTestShortID(), - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), // change addr ) require.NoError(err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddDelegatorTx), 0, ) + require.NoError(err) service.vm.state.PutCurrentDelegator(staker) service.vm.state.AddTx(tx, status.Committed) @@ -585,15 +547,16 @@ func TestGetStake(t *testing.T) { pendingStakerNodeID, ids.GenerateTestShortID(), 0, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), // change addr ) require.NoError(err) - staker = state.NewPendingStaker( + staker, err = state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddValidatorTx), ) + require.NoError(err) service.vm.state.PutPendingValidator(staker) service.vm.state.AddTx(tx, status.Committed) @@ -619,13 +582,13 @@ func TestGetStake(t *testing.T) { // Test method GetCurrentValidators func TestGetCurrentValidators(t *testing.T) { + require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) service.vm.ctx.Lock.Lock() defer func() { - if err := service.vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := service.vm.Shutdown(context.Background()) + require.NoError(err) service.vm.ctx.Lock.Unlock() }() @@ -636,46 +599,22 @@ func TestGetCurrentValidators(t *testing.T) { response := GetCurrentValidatorsReply{} err := service.GetCurrentValidators(nil, &args, &response) - switch { - case err != nil: - t.Fatal(err) - case len(response.Validators) != len(genesis.Validators): - t.Fatalf("should be %d validators but are %d", len(genesis.Validators), len(response.Validators)) - } + require.NoError(err) + require.Equal(len(genesis.Validators), len(response.Validators)) for _, vdr := range genesis.Validators { found := false for i := 0; i < len(response.Validators) && !found; i++ { - gotVdr, ok := 
response.Validators[i].(pchainapi.PermissionlessValidator) - switch { - case !ok: - t.Fatal("expected pchainapi.PermissionlessValidator") - case gotVdr.NodeID != vdr.NodeID: - case gotVdr.EndTime != vdr.EndTime: - t.Fatalf("expected end time of %s to be %v but got %v", - vdr.NodeID, - vdr.EndTime, - gotVdr.EndTime, - ) - case gotVdr.StartTime != vdr.StartTime: - t.Fatalf("expected start time of %s to be %v but got %v", - vdr.NodeID, - vdr.StartTime, - gotVdr.StartTime, - ) - case gotVdr.Weight != vdr.Weight: - t.Fatalf("expected weight of %s to be %v but got %v", - vdr.NodeID, - vdr.Weight, - gotVdr.Weight, - ) - default: - found = true + gotVdr := response.Validators[i].(pchainapi.PermissionlessValidator) + if gotVdr.NodeID != vdr.NodeID { + continue } + + require.Equal(vdr.EndTime, gotVdr.EndTime) + require.Equal(vdr.StartTime, gotVdr.StartTime) + found = true } - if !found { - t.Fatalf("expected validators to contain %s but didn't", vdr.NodeID) - } + require.True(found, "expected validators to contain %s but didn't", vdr.NodeID) } // Add a delegator @@ -684,41 +623,34 @@ func TestGetCurrentValidators(t *testing.T) { delegatorStartTime := uint64(defaultValidateStartTime.Unix()) delegatorEndTime := uint64(defaultValidateStartTime.Add(defaultMinStakingDuration).Unix()) - tx, err := service.vm.txBuilder.NewAddDelegatorTx( + delTx, err := service.vm.txBuilder.NewAddDelegatorTx( stakeAmount, delegatorStartTime, delegatorEndTime, validatorNodeID, ids.GenerateTestShortID(), - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, keys[0].PublicKey().Address(), // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - staker := state.NewCurrentStaker( - tx.ID(), - tx.Unsigned.(*txs.AddDelegatorTx), + staker, err := state.NewCurrentStaker( + delTx.ID(), + delTx.Unsigned.(*txs.AddDelegatorTx), 0, ) + require.NoError(err) service.vm.state.PutCurrentDelegator(staker) - service.vm.state.AddTx(tx, status.Committed) + 
service.vm.state.AddTx(delTx, status.Committed) err = service.vm.state.Commit() - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Call getCurrentValidators args = GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} err = service.GetCurrentValidators(nil, &args, &response) - switch { - case err != nil: - t.Fatal(err) - case len(response.Validators) != len(genesis.Validators): - t.Fatalf("should be %d validators but are %d", len(genesis.Validators), len(response.Validators)) - } + require.NoError(err) + require.Equal(len(genesis.Validators), len(response.Validators)) // Make sure the delegator is there found := false @@ -728,23 +660,50 @@ func TestGetCurrentValidators(t *testing.T) { continue } found = true - if len(vdr.Delegators) != 1 { - t.Fatalf("%s should have 1 delegator", vdr.NodeID) - } - delegator := vdr.Delegators[0] - switch { - case delegator.NodeID != vdr.NodeID: - t.Fatal("wrong node ID") - case uint64(delegator.StartTime) != delegatorStartTime: - t.Fatal("wrong start time") - case uint64(delegator.EndTime) != delegatorEndTime: - t.Fatal("wrong end time") - case delegator.GetWeight() != stakeAmount: - t.Fatalf("wrong weight") + + require.Nil(vdr.Delegators) + + innerArgs := GetCurrentValidatorsArgs{ + SubnetID: constants.PrimaryNetworkID, + NodeIDs: []ids.NodeID{vdr.NodeID}, } + innerResponse := GetCurrentValidatorsReply{} + err = service.GetCurrentValidators(nil, &innerArgs, &innerResponse) + require.NoError(err) + require.Len(innerResponse.Validators, 1) + + innerVdr := innerResponse.Validators[0].(pchainapi.PermissionlessValidator) + require.Equal(vdr.NodeID, innerVdr.NodeID) + + require.NotNil(innerVdr.Delegators) + require.Equal(1, len(*innerVdr.Delegators)) + delegator := (*innerVdr.Delegators)[0] + require.Equal(delegator.NodeID, innerVdr.NodeID) + require.Equal(uint64(delegator.StartTime), delegatorStartTime) + require.Equal(uint64(delegator.EndTime), delegatorEndTime) + require.Equal(uint64(delegator.Weight), 
stakeAmount) } - if !found { - t.Fatalf("didn't find delegator") + require.True(found) + + // Reward the delegator + tx, err := service.vm.txBuilder.NewRewardValidatorTx(delTx.ID()) + require.NoError(err) + service.vm.state.AddTx(tx, status.Committed) + service.vm.state.DeleteCurrentDelegator(staker) + require.NoError(service.vm.state.SetDelegateeReward(staker.SubnetID, staker.NodeID, 100000)) + require.NoError(service.vm.state.Commit()) + + // Call getValidators + response = GetCurrentValidatorsReply{} + require.NoError(service.GetCurrentValidators(nil, &args, &response)) + require.Equal(len(genesis.Validators), len(response.Validators)) + + for i := 0; i < len(response.Validators); i++ { + vdr := response.Validators[i].(pchainapi.PermissionlessValidator) + if vdr.NodeID != validatorNodeID { + continue + } + require.Equal(uint64(100000), uint64(*vdr.AccruedDelegateeReward)) } } @@ -753,7 +712,7 @@ func TestGetTimestamp(t *testing.T) { service, _ := defaultService(t) service.vm.ctx.Lock.Lock() defer func() { - require.NoError(service.vm.Shutdown()) + require.NoError(service.vm.Shutdown(context.Background())) service.vm.ctx.Lock.Unlock() }() @@ -785,6 +744,7 @@ func TestGetBlock(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + require := require.New(t) service, _ := defaultService(t) service.vm.ctx.Lock.Lock() defer service.vm.ctx.Lock.Unlock() @@ -798,30 +758,26 @@ func TestGetBlock(t *testing.T) { constants.AVMID, nil, "chain name", - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, keys[0].PublicKey().Address(), // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + preferred, err := service.vm.Builder.Preferred() - if err != nil { - t.Fatal(err) - } - statelessBlock, err := blocks.NewApricotStandardBlock( + require.NoError(err) + + statelessBlock, err := blocks.NewBanffStandardBlock( + 
preferred.Timestamp(), preferred.ID(), preferred.Height()+1, []*txs.Tx{tx}, ) - if err != nil { - t.Fatal("couldn't create block: %w", err) - } + require.NoError(err) + block := service.vm.manager.NewBlock(statelessBlock) - if err := block.Verify(); err != nil { - t.Fatal("couldn't verify block: %w", err) - } else if err := block.Accept(); err != nil { - t.Fatal("couldn't accept block: %w", err) - } + + require.NoError(block.Verify(context.Background())) + require.NoError(block.Accept(context.Background())) args := api.GetBlockArgs{ BlockID: block.ID(), @@ -829,22 +785,20 @@ func TestGetBlock(t *testing.T) { } response := api.GetBlockResponse{} err = service.GetBlock(nil, &args, &response) - if err != nil { - t.Fatal(err) - } + require.NoError(err) switch { case test.encoding == formatting.JSON: - require.Equal(t, statelessBlock, response.Block) + require.Equal(statelessBlock, response.Block) _, err = stdjson.Marshal(response) - require.NoError(t, err) + require.NoError(err) default: decoded, _ := formatting.Decode(response.Encoding, response.Block.(string)) - require.Equal(t, block.Bytes(), decoded) + require.Equal(block.Bytes(), decoded) } - require.Equal(t, test.encoding, response.Encoding) + require.Equal(test.encoding, response.Encoding) }) } } diff --git a/avalanchego/vms/platformvm/signer/empty.go b/avalanchego/vms/platformvm/signer/empty.go index 4e2eba8a..21bfbcab 100644 --- a/avalanchego/vms/platformvm/signer/empty.go +++ b/avalanchego/vms/platformvm/signer/empty.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package signer @@ -7,9 +7,14 @@ import ( "github.com/ava-labs/avalanchego/utils/crypto/bls" ) -var _ Signer = &Empty{} +var _ Signer = (*Empty)(nil) type Empty struct{} -func (*Empty) Verify() error { return nil } -func (*Empty) Key() *bls.PublicKey { return nil } +func (*Empty) Verify() error { + return nil +} + +func (*Empty) Key() *bls.PublicKey { + return nil +} diff --git a/avalanchego/vms/platformvm/signer/empty_test.go b/avalanchego/vms/platformvm/signer/empty_test.go index 3a014a00..e6a6307b 100644 --- a/avalanchego/vms/platformvm/signer/empty_test.go +++ b/avalanchego/vms/platformvm/signer/empty_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer @@ -11,7 +11,6 @@ import ( func TestEmpty(t *testing.T) { require := require.New(t) - noSigner := &Empty{} require.NoError(noSigner.Verify()) require.Nil(noSigner.Key()) diff --git a/avalanchego/vms/platformvm/signer/proof_of_possession.go b/avalanchego/vms/platformvm/signer/proof_of_possession.go index 0297c753..35ddcb32 100644 --- a/avalanchego/vms/platformvm/signer/proof_of_possession.go +++ b/avalanchego/vms/platformvm/signer/proof_of_possession.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package signer @@ -12,7 +12,7 @@ import ( ) var ( - _ Signer = &ProofOfPossession{} + _ Signer = (*ProofOfPossession)(nil) errInvalidProofOfPossession = errors.New("invalid proof of possession") ) @@ -59,7 +59,9 @@ func (p *ProofOfPossession) Verify() error { return nil } -func (p *ProofOfPossession) Key() *bls.PublicKey { return p.publicKey } +func (p *ProofOfPossession) Key() *bls.PublicKey { + return p.publicKey +} type jsonProofOfPossession struct { PublicKey string `json:"publicKey"` diff --git a/avalanchego/vms/platformvm/signer/proof_of_possession_test.go b/avalanchego/vms/platformvm/signer/proof_of_possession_test.go index 2c9ea6bc..c29ac1ad 100644 --- a/avalanchego/vms/platformvm/signer/proof_of_possession_test.go +++ b/avalanchego/vms/platformvm/signer/proof_of_possession_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer diff --git a/avalanchego/vms/platformvm/signer/signer.go b/avalanchego/vms/platformvm/signer/signer.go index 71259587..7269ad19 100644 --- a/avalanchego/vms/platformvm/signer/signer.go +++ b/avalanchego/vms/platformvm/signer/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package signer diff --git a/avalanchego/vms/platformvm/stakeable/stakeable_lock.go b/avalanchego/vms/platformvm/stakeable/stakeable_lock.go index b3d6c72c..5c09cbfd 100644 --- a/avalanchego/vms/platformvm/stakeable/stakeable_lock.go +++ b/avalanchego/vms/platformvm/stakeable/stakeable_lock.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package stakeable diff --git a/avalanchego/vms/platformvm/stakeable/stakeable_lock_test.go b/avalanchego/vms/platformvm/stakeable/stakeable_lock_test.go new file mode 100644 index 00000000..5a6cfce5 --- /dev/null +++ b/avalanchego/vms/platformvm/stakeable/stakeable_lock_test.go @@ -0,0 +1,135 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package stakeable + +import ( + "errors" + "testing" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/vms/components/avax" +) + +var errTest = errors.New("hi mom") + +func TestLockOutVerify(t *testing.T) { + tests := []struct { + name string + locktime uint64 + transferableOutF func(*gomock.Controller) avax.TransferableOut + expectedErr error + }{ + { + name: "happy path", + locktime: 1, + transferableOutF: func(ctrl *gomock.Controller) avax.TransferableOut { + o := avax.NewMockTransferableOut(ctrl) + o.EXPECT().Verify().Return(nil) + return o + }, + expectedErr: nil, + }, + { + name: "invalid locktime", + locktime: 0, + transferableOutF: func(ctrl *gomock.Controller) avax.TransferableOut { + return nil + }, + expectedErr: errInvalidLocktime, + }, + { + name: "nested", + locktime: 1, + transferableOutF: func(ctrl *gomock.Controller) avax.TransferableOut { + return &LockOut{} + }, + expectedErr: errNestedStakeableLocks, + }, + { + name: "inner output fails verification", + locktime: 1, + transferableOutF: func(ctrl *gomock.Controller) avax.TransferableOut { + o := avax.NewMockTransferableOut(ctrl) + o.EXPECT().Verify().Return(errTest) + return o + }, + expectedErr: errTest, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + lockOut := &LockOut{ + Locktime: tt.locktime, + TransferableOut: tt.transferableOutF(ctrl), + } + require.Equal(t, tt.expectedErr, lockOut.Verify()) + }) + } +} + +func TestLockInVerify(t 
*testing.T) { + tests := []struct { + name string + locktime uint64 + transferableInF func(*gomock.Controller) avax.TransferableIn + expectedErr error + }{ + { + name: "happy path", + locktime: 1, + transferableInF: func(ctrl *gomock.Controller) avax.TransferableIn { + o := avax.NewMockTransferableIn(ctrl) + o.EXPECT().Verify().Return(nil) + return o + }, + expectedErr: nil, + }, + { + name: "invalid locktime", + locktime: 0, + transferableInF: func(ctrl *gomock.Controller) avax.TransferableIn { + return nil + }, + expectedErr: errInvalidLocktime, + }, + { + name: "nested", + locktime: 1, + transferableInF: func(ctrl *gomock.Controller) avax.TransferableIn { + return &LockIn{} + }, + expectedErr: errNestedStakeableLocks, + }, + { + name: "inner input fails verification", + locktime: 1, + transferableInF: func(ctrl *gomock.Controller) avax.TransferableIn { + o := avax.NewMockTransferableIn(ctrl) + o.EXPECT().Verify().Return(errTest) + return o + }, + expectedErr: errTest, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + lockOut := &LockIn{ + Locktime: tt.locktime, + TransferableIn: tt.transferableInF(ctrl), + } + require.Equal(t, tt.expectedErr, lockOut.Verify()) + }) + } +} diff --git a/avalanchego/vms/platformvm/state/diff.go b/avalanchego/vms/platformvm/state/diff.go index 32d4a0d9..6efdd9e2 100644 --- a/avalanchego/vms/platformvm/state/diff.go +++ b/avalanchego/vms/platformvm/state/diff.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -16,7 +16,7 @@ import ( ) var ( - _ Diff = &diff{} + _ Diff = (*diff)(nil) ErrMissingParentState = errors.New("missing parent state") ) @@ -24,7 +24,7 @@ var ( type Diff interface { Chain - Apply(State) + Apply(State) error } type diff struct { @@ -39,7 +39,9 @@ type diff struct { currentSupply map[ids.ID]uint64 currentStakerDiffs diffStakers - pendingStakerDiffs diffStakers + // map of subnetID -> nodeID -> total accrued delegatee rewards + modifiedDelegateeRewards map[ids.ID]map[ids.NodeID]uint64 + pendingStakerDiffs diffStakers addedSubnets []*txs.Tx // Subnet ID --> Tx that transforms the subnet @@ -49,19 +51,12 @@ type diff struct { addedChains map[ids.ID][]*txs.Tx cachedChains map[ids.ID][]*txs.Tx - // map of txID -> []*UTXO addedRewardUTXOs map[ids.ID][]*avax.UTXO - // map of txID -> {*txs.Tx, Status} addedTxs map[ids.ID]*txAndStatus // map of modified UTXOID -> *UTXO if the UTXO is nil, it has been removed - modifiedUTXOs map[ids.ID]*utxoModification -} - -type utxoModification struct { - utxoID ids.ID - utxo *avax.UTXO + modifiedUTXOs map[ids.ID]*avax.UTXO } func NewDiff( @@ -119,20 +114,45 @@ func (d *diff) SetCurrentSupply(subnetID ids.ID, currentSupply uint64) { func (d *diff) GetCurrentValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { // If the validator was modified in this diff, return the modified // validator. - newValidator, ok := d.currentStakerDiffs.GetValidator(subnetID, nodeID) - if ok { - if newValidator == nil { - return nil, database.ErrNotFound - } + newValidator, status := d.currentStakerDiffs.GetValidator(subnetID, nodeID) + switch status { + case added: return newValidator, nil + case deleted: + return nil, database.ErrNotFound + default: + // If the validator wasn't modified in this diff, ask the parent state. 
+ parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + return parentState.GetCurrentValidator(subnetID, nodeID) } +} - // If the validator wasn't modified in this diff, ask the parent state. +func (d *diff) SetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID, amount uint64) error { + if d.modifiedDelegateeRewards == nil { + d.modifiedDelegateeRewards = make(map[ids.ID]map[ids.NodeID]uint64) + } + nodes, ok := d.modifiedDelegateeRewards[subnetID] + if !ok { + nodes = make(map[ids.NodeID]uint64) + d.modifiedDelegateeRewards[subnetID] = nodes + } + nodes[nodeID] = amount + return nil +} + +func (d *diff) GetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID) (uint64, error) { + amount, modified := d.modifiedDelegateeRewards[subnetID][nodeID] + if modified { + return amount, nil + } parentState, ok := d.stateVersions.GetState(d.parentID) if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + return 0, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) } - return parentState.GetCurrentValidator(subnetID, nodeID) + return parentState.GetDelegateeReward(subnetID, nodeID) } func (d *diff) PutCurrentValidator(staker *Staker) { @@ -182,20 +202,20 @@ func (d *diff) GetCurrentStakerIterator() (StakerIterator, error) { func (d *diff) GetPendingValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, error) { // If the validator was modified in this diff, return the modified // validator. - newValidator, ok := d.pendingStakerDiffs.GetValidator(subnetID, nodeID) - if ok { - if newValidator == nil { - return nil, database.ErrNotFound - } + newValidator, status := d.pendingStakerDiffs.GetValidator(subnetID, nodeID) + switch status { + case added: return newValidator, nil + case deleted: + return nil, database.ErrNotFound + default: + // If the validator wasn't modified in this diff, ask the parent state. 
+ parentState, ok := d.stateVersions.GetState(d.parentID) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) + } + return parentState.GetPendingValidator(subnetID, nodeID) } - - // If the validator wasn't modified in this diff, ask the parent state. - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.GetPendingValidator(subnetID, nodeID) } func (d *diff) PutPendingValidator(staker *Staker) { @@ -418,52 +438,44 @@ func (d *diff) GetUTXO(utxoID ids.ID) (*avax.UTXO, error) { } return parentState.GetUTXO(utxoID) } - if utxo.utxo == nil { + if utxo == nil { return nil, database.ErrNotFound } - return utxo.utxo, nil + return utxo, nil } func (d *diff) AddUTXO(utxo *avax.UTXO) { - newUTXO := &utxoModification{ - utxoID: utxo.InputID(), - utxo: utxo, - } if d.modifiedUTXOs == nil { - d.modifiedUTXOs = map[ids.ID]*utxoModification{ - utxo.InputID(): newUTXO, + d.modifiedUTXOs = map[ids.ID]*avax.UTXO{ + utxo.InputID(): utxo, } } else { - d.modifiedUTXOs[utxo.InputID()] = newUTXO + d.modifiedUTXOs[utxo.InputID()] = utxo } } func (d *diff) DeleteUTXO(utxoID ids.ID) { - newUTXO := &utxoModification{ - utxoID: utxoID, - } if d.modifiedUTXOs == nil { - d.modifiedUTXOs = map[ids.ID]*utxoModification{ - utxoID: newUTXO, + d.modifiedUTXOs = map[ids.ID]*avax.UTXO{ + utxoID: nil, } } else { - d.modifiedUTXOs[utxoID] = newUTXO + d.modifiedUTXOs[utxoID] = nil } } -func (d *diff) Apply(baseState State) { +func (d *diff) Apply(baseState State) error { baseState.SetTimestamp(d.timestamp) for subnetID, supply := range d.currentSupply { baseState.SetCurrentSupply(subnetID, supply) } for _, subnetValidatorDiffs := range d.currentStakerDiffs.validatorDiffs { for _, validatorDiff := range subnetValidatorDiffs { - if validatorDiff.validatorModified { - if validatorDiff.validatorDeleted { - baseState.DeleteCurrentValidator(validatorDiff.validator) - } else 
{ - baseState.PutCurrentValidator(validatorDiff.validator) - } + switch validatorDiff.validatorStatus { + case added: + baseState.PutCurrentValidator(validatorDiff.validator) + case deleted: + baseState.DeleteCurrentValidator(validatorDiff.validator) } addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) @@ -477,14 +489,20 @@ func (d *diff) Apply(baseState State) { } } } + for subnetID, nodes := range d.modifiedDelegateeRewards { + for nodeID, amount := range nodes { + if err := baseState.SetDelegateeReward(subnetID, nodeID, amount); err != nil { + return err + } + } + } for _, subnetValidatorDiffs := range d.pendingStakerDiffs.validatorDiffs { for _, validatorDiff := range subnetValidatorDiffs { - if validatorDiff.validatorModified { - if validatorDiff.validatorDeleted { - baseState.DeletePendingValidator(validatorDiff.validator) - } else { - baseState.PutPendingValidator(validatorDiff.validator) - } + switch validatorDiff.validatorStatus { + case added: + baseState.PutPendingValidator(validatorDiff.validator) + case deleted: + baseState.DeletePendingValidator(validatorDiff.validator) } addedDelegatorIterator := NewTreeIterator(validatorDiff.addedDelegators) @@ -517,11 +535,12 @@ func (d *diff) Apply(baseState State) { baseState.AddRewardUTXO(txID, utxo) } } - for _, utxo := range d.modifiedUTXOs { - if utxo.utxo != nil { - baseState.AddUTXO(utxo.utxo) + for utxoID, utxo := range d.modifiedUTXOs { + if utxo != nil { + baseState.AddUTXO(utxo) } else { - baseState.DeleteUTXO(utxo.utxoID) + baseState.DeleteUTXO(utxoID) } } + return nil } diff --git a/avalanchego/vms/platformvm/state/diff_test.go b/avalanchego/vms/platformvm/state/diff_test.go index 9d0aa0d8..40bfee20 100644 --- a/avalanchego/vms/platformvm/state/diff_test.go +++ b/avalanchego/vms/platformvm/state/diff_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package state @@ -21,7 +21,6 @@ import ( ) func TestDiffMissingState(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -31,7 +30,7 @@ func TestDiffMissingState(t *testing.T) { versions.EXPECT().GetState(parentID).Times(1).Return(nil, false) _, err := NewDiff(parentID, versions) - require.ErrorIs(err, ErrMissingParentState) + require.ErrorIs(t, err, ErrMissingParentState) } func TestDiffCreation(t *testing.T) { @@ -110,6 +109,7 @@ func TestDiffCurrentValidator(t *testing.T) { d.DeleteCurrentValidator(currentValidator) // Make sure the deletion worked + state.EXPECT().GetCurrentValidator(currentValidator.SubnetID, currentValidator.NodeID).Return(nil, database.ErrNotFound).Times(1) _, err = d.GetCurrentValidator(currentValidator.SubnetID, currentValidator.NodeID) require.ErrorIs(err, database.ErrNotFound) } @@ -147,6 +147,7 @@ func TestDiffPendingValidator(t *testing.T) { d.DeletePendingValidator(pendingValidator) // Make sure the deletion worked + state.EXPECT().GetPendingValidator(pendingValidator.SubnetID, pendingValidator.NodeID).Return(nil, database.ErrNotFound).Times(1) _, err = d.GetPendingValidator(pendingValidator.SubnetID, pendingValidator.NodeID) require.ErrorIs(err, database.ErrNotFound) } @@ -318,8 +319,8 @@ func TestDiffChain(t *testing.T) { gotChains, err := d.GetChains(subnetID) require.NoError(err) require.Len(gotChains, 2) - require.Equal(gotChains[0], parentStateCreateChainTx) - require.Equal(gotChains[1], createChainTx) + require.Equal(parentStateCreateChainTx, gotChains[0]) + require.Equal(createChainTx, gotChains[1]) } func TestDiffTx(t *testing.T) { @@ -345,7 +346,7 @@ func TestDiffTx(t *testing.T) { SubnetID: subnetID, }, } - tx.Initialize(utils.RandomBytes(16), utils.RandomBytes(16)) + tx.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) d.AddTx(tx, status.Committed) { @@ -364,7 +365,7 @@ func TestDiffTx(t *testing.T) { SubnetID: subnetID, }, } - 
parentTx.Initialize(utils.RandomBytes(16), utils.RandomBytes(16)) + parentTx.SetBytes(utils.RandomBytes(16), utils.RandomBytes(16)) state.EXPECT().GetTx(parentTx.ID()).Return(parentTx, status.Committed, nil).Times(1) gotParentTx, gotStatus, err := d.GetTx(parentTx.ID()) require.NoError(err) diff --git a/avalanchego/vms/platformvm/state/empty_iterator.go b/avalanchego/vms/platformvm/state/empty_iterator.go index 6998f29c..69766c19 100644 --- a/avalanchego/vms/platformvm/state/empty_iterator.go +++ b/avalanchego/vms/platformvm/state/empty_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -8,6 +8,12 @@ var EmptyIterator StakerIterator = emptyIterator{} type emptyIterator struct{} -func (emptyIterator) Next() bool { return false } -func (emptyIterator) Value() *Staker { return nil } -func (emptyIterator) Release() {} +func (emptyIterator) Next() bool { + return false +} + +func (emptyIterator) Value() *Staker { + return nil +} + +func (emptyIterator) Release() {} diff --git a/avalanchego/vms/platformvm/state/empty_iterator_test.go b/avalanchego/vms/platformvm/state/empty_iterator_test.go index 1e43252e..b5bb43d1 100644 --- a/avalanchego/vms/platformvm/state/empty_iterator_test.go +++ b/avalanchego/vms/platformvm/state/empty_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/masked_iterator.go b/avalanchego/vms/platformvm/state/masked_iterator.go index c8bd4c19..5621205c 100644 --- a/avalanchego/vms/platformvm/state/masked_iterator.go +++ b/avalanchego/vms/platformvm/state/masked_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -7,7 +7,7 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ StakerIterator = &maskedIterator{} +var _ StakerIterator = (*maskedIterator)(nil) type maskedIterator struct { parentIterator StakerIterator diff --git a/avalanchego/vms/platformvm/state/masked_iterator_test.go b/avalanchego/vms/platformvm/state/masked_iterator_test.go index d986e036..8ba719d3 100644 --- a/avalanchego/vms/platformvm/state/masked_iterator_test.go +++ b/avalanchego/vms/platformvm/state/masked_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/merged_iterator.go b/avalanchego/vms/platformvm/state/merged_iterator.go index 4f128fa7..0b91b867 100644 --- a/avalanchego/vms/platformvm/state/merged_iterator.go +++ b/avalanchego/vms/platformvm/state/merged_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -8,8 +8,8 @@ import ( ) var ( - _ StakerIterator = &mergedIterator{} - _ heap.Interface = &mergedIterator{} + _ StakerIterator = (*mergedIterator)(nil) + _ heap.Interface = (*mergedIterator)(nil) ) type mergedIterator struct { diff --git a/avalanchego/vms/platformvm/state/merged_iterator_test.go b/avalanchego/vms/platformvm/state/merged_iterator_test.go index 7adaa7bd..c85b3594 100644 --- a/avalanchego/vms/platformvm/state/merged_iterator_test.go +++ b/avalanchego/vms/platformvm/state/merged_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/mock_chain.go b/avalanchego/vms/platformvm/state/mock_chain.go index 80fd409c..76cffe5b 100644 --- a/avalanchego/vms/platformvm/state/mock_chain.go +++ b/avalanchego/vms/platformvm/state/mock_chain.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Chain) @@ -245,6 +248,21 @@ func (mr *MockChainMockRecorder) GetCurrentValidator(arg0, arg1 interface{}) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockChain)(nil).GetCurrentValidator), arg0, arg1) } +// GetDelegateeReward mocks base method. +func (m *MockChain) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDelegateeReward indicates an expected call of GetDelegateeReward. +func (mr *MockChainMockRecorder) GetDelegateeReward(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockChain)(nil).GetDelegateeReward), arg0, arg1) +} + // GetPendingDelegatorIterator mocks base method. func (m *MockChain) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { m.ctrl.T.Helper() @@ -440,6 +458,20 @@ func (mr *MockChainMockRecorder) SetCurrentSupply(arg0, arg1 interface{}) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockChain)(nil).SetCurrentSupply), arg0, arg1) } +// SetDelegateeReward mocks base method. 
+func (m *MockChain) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetDelegateeReward indicates an expected call of SetDelegateeReward. +func (mr *MockChainMockRecorder) SetDelegateeReward(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockChain)(nil).SetDelegateeReward), arg0, arg1, arg2) +} + // SetTimestamp mocks base method. func (m *MockChain) SetTimestamp(arg0 time.Time) { m.ctrl.T.Helper() diff --git a/avalanchego/vms/platformvm/state/mock_diff.go b/avalanchego/vms/platformvm/state/mock_diff.go index 9e822441..c4929833 100644 --- a/avalanchego/vms/platformvm/state/mock_diff.go +++ b/avalanchego/vms/platformvm/state/mock_diff.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Diff) @@ -115,9 +118,11 @@ func (mr *MockDiffMockRecorder) AddUTXO(arg0 interface{}) *gomock.Call { } // Apply mocks base method. -func (m *MockDiff) Apply(arg0 State) { +func (m *MockDiff) Apply(arg0 State) error { m.ctrl.T.Helper() - m.ctrl.Call(m, "Apply", arg0) + ret := m.ctrl.Call(m, "Apply", arg0) + ret0, _ := ret[0].(error) + return ret0 } // Apply indicates an expected call of Apply. @@ -261,6 +266,21 @@ func (mr *MockDiffMockRecorder) GetCurrentValidator(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockDiff)(nil).GetCurrentValidator), arg0, arg1) } +// GetDelegateeReward mocks base method. 
+func (m *MockDiff) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDelegateeReward indicates an expected call of GetDelegateeReward. +func (mr *MockDiffMockRecorder) GetDelegateeReward(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).GetDelegateeReward), arg0, arg1) +} + // GetPendingDelegatorIterator mocks base method. func (m *MockDiff) GetPendingDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { m.ctrl.T.Helper() @@ -456,6 +476,20 @@ func (mr *MockDiffMockRecorder) SetCurrentSupply(arg0, arg1 interface{}) *gomock return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockDiff)(nil).SetCurrentSupply), arg0, arg1) } +// SetDelegateeReward mocks base method. +func (m *MockDiff) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetDelegateeReward indicates an expected call of SetDelegateeReward. +func (mr *MockDiffMockRecorder) SetDelegateeReward(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockDiff)(nil).SetDelegateeReward), arg0, arg1, arg2) +} + // SetTimestamp mocks base method. 
func (m *MockDiff) SetTimestamp(arg0 time.Time) { m.ctrl.T.Helper() diff --git a/avalanchego/vms/platformvm/state/mock_staker_iterator.go b/avalanchego/vms/platformvm/state/mock_staker_iterator.go index e6da6f11..1c4812b5 100644 --- a/avalanchego/vms/platformvm/state/mock_staker_iterator.go +++ b/avalanchego/vms/platformvm/state/mock_staker_iterator.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: StakerIterator) diff --git a/avalanchego/vms/platformvm/state/mock_state.go b/avalanchego/vms/platformvm/state/mock_state.go index 23f833b1..9a610397 100644 --- a/avalanchego/vms/platformvm/state/mock_state.go +++ b/avalanchego/vms/platformvm/state/mock_state.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: State) @@ -12,6 +15,7 @@ import ( ids "github.com/ava-labs/avalanchego/ids" choices "github.com/ava-labs/avalanchego/snow/choices" validators "github.com/ava-labs/avalanchego/snow/validators" + bls "github.com/ava-labs/avalanchego/utils/crypto/bls" avax "github.com/ava-labs/avalanchego/vms/components/avax" blocks "github.com/ava-labs/avalanchego/vms/platformvm/blocks" status "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -320,6 +324,21 @@ func (mr *MockStateMockRecorder) GetCurrentValidator(arg0, arg1 interface{}) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCurrentValidator", reflect.TypeOf((*MockState)(nil).GetCurrentValidator), arg0, arg1) } +// GetDelegateeReward mocks base method. 
+func (m *MockState) GetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetDelegateeReward", arg0, arg1) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetDelegateeReward indicates an expected call of GetDelegateeReward. +func (mr *MockStateMockRecorder) GetDelegateeReward(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetDelegateeReward", reflect.TypeOf((*MockState)(nil).GetDelegateeReward), arg0, arg1) +} + // GetLastAccepted mocks base method. func (m *MockState) GetLastAccepted() ids.ID { m.ctrl.T.Helper() @@ -409,18 +428,18 @@ func (mr *MockStateMockRecorder) GetRewardUTXOs(arg0 interface{}) *gomock.Call { } // GetStartTime mocks base method. -func (m *MockState) GetStartTime(arg0 ids.NodeID) (time.Time, error) { +func (m *MockState) GetStartTime(arg0 ids.NodeID, arg1 ids.ID) (time.Time, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetStartTime", arg0) + ret := m.ctrl.Call(m, "GetStartTime", arg0, arg1) ret0, _ := ret[0].(time.Time) ret1, _ := ret[1].(error) return ret0, ret1 } // GetStartTime indicates an expected call of GetStartTime. -func (mr *MockStateMockRecorder) GetStartTime(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetStartTime(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStartTime", reflect.TypeOf((*MockState)(nil).GetStartTime), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetStartTime", reflect.TypeOf((*MockState)(nil).GetStartTime), arg0, arg1) } // GetStatelessBlock mocks base method. @@ -515,9 +534,9 @@ func (mr *MockStateMockRecorder) GetUTXO(arg0 interface{}) *gomock.Call { } // GetUptime mocks base method. 
-func (m *MockState) GetUptime(arg0 ids.NodeID) (time.Duration, time.Time, error) { +func (m *MockState) GetUptime(arg0 ids.NodeID, arg1 ids.ID) (time.Duration, time.Time, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUptime", arg0) + ret := m.ctrl.Call(m, "GetUptime", arg0, arg1) ret0, _ := ret[0].(time.Duration) ret1, _ := ret[1].(time.Time) ret2, _ := ret[2].(error) @@ -525,9 +544,24 @@ func (m *MockState) GetUptime(arg0 ids.NodeID) (time.Duration, time.Time, error) } // GetUptime indicates an expected call of GetUptime. -func (mr *MockStateMockRecorder) GetUptime(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) GetUptime(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0, arg1) +} + +// GetValidatorPublicKeyDiffs mocks base method. +func (m *MockState) GetValidatorPublicKeyDiffs(arg0 uint64) (map[ids.NodeID]*bls.PublicKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetValidatorPublicKeyDiffs", arg0) + ret0, _ := ret[0].(map[ids.NodeID]*bls.PublicKey) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetValidatorPublicKeyDiffs indicates an expected call of GetValidatorPublicKeyDiffs. +func (mr *MockStateMockRecorder) GetValidatorPublicKeyDiffs(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUptime", reflect.TypeOf((*MockState)(nil).GetUptime), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetValidatorPublicKeyDiffs", reflect.TypeOf((*MockState)(nil).GetValidatorPublicKeyDiffs), arg0) } // GetValidatorWeightDiffs mocks base method. 
@@ -605,6 +639,20 @@ func (mr *MockStateMockRecorder) SetCurrentSupply(arg0, arg1 interface{}) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetCurrentSupply", reflect.TypeOf((*MockState)(nil).SetCurrentSupply), arg0, arg1) } +// SetDelegateeReward mocks base method. +func (m *MockState) SetDelegateeReward(arg0 ids.ID, arg1 ids.NodeID, arg2 uint64) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetDelegateeReward", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetDelegateeReward indicates an expected call of SetDelegateeReward. +func (mr *MockStateMockRecorder) SetDelegateeReward(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetDelegateeReward", reflect.TypeOf((*MockState)(nil).SetDelegateeReward), arg0, arg1, arg2) +} + // SetHeight mocks base method. func (m *MockState) SetHeight(arg0 uint64) { m.ctrl.T.Helper() @@ -642,17 +690,17 @@ func (mr *MockStateMockRecorder) SetTimestamp(arg0 interface{}) *gomock.Call { } // SetUptime mocks base method. -func (m *MockState) SetUptime(arg0 ids.NodeID, arg1 time.Duration, arg2 time.Time) error { +func (m *MockState) SetUptime(arg0 ids.NodeID, arg1 ids.ID, arg2 time.Duration, arg3 time.Time) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetUptime", arg0, arg1, arg2) + ret := m.ctrl.Call(m, "SetUptime", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // SetUptime indicates an expected call of SetUptime. 
-func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) SetUptime(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUptime", reflect.TypeOf((*MockState)(nil).SetUptime), arg0, arg1, arg2, arg3) } // UTXOIDs mocks base method. @@ -671,16 +719,15 @@ func (mr *MockStateMockRecorder) UTXOIDs(arg0, arg1, arg2 interface{}) *gomock.C } // ValidatorSet mocks base method. -func (m *MockState) ValidatorSet(arg0 ids.ID) (validators.Set, error) { +func (m *MockState) ValidatorSet(arg0 ids.ID, arg1 validators.Set) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ValidatorSet", arg0) - ret0, _ := ret[0].(validators.Set) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "ValidatorSet", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 } // ValidatorSet indicates an expected call of ValidatorSet. -func (mr *MockStateMockRecorder) ValidatorSet(arg0 interface{}) *gomock.Call { +func (mr *MockStateMockRecorder) ValidatorSet(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatorSet", reflect.TypeOf((*MockState)(nil).ValidatorSet), arg0) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidatorSet", reflect.TypeOf((*MockState)(nil).ValidatorSet), arg0, arg1) } diff --git a/avalanchego/vms/platformvm/state/mock_versions.go b/avalanchego/vms/platformvm/state/mock_versions.go index e9e35c57..e94aa499 100644 --- a/avalanchego/vms/platformvm/state/mock_versions.go +++ b/avalanchego/vms/platformvm/state/mock_versions.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/vms/platformvm/state (interfaces: Versions) diff --git a/avalanchego/vms/platformvm/state/slice_iterator_test.go b/avalanchego/vms/platformvm/state/slice_iterator_test.go index 8dbf4ed3..96a686cd 100644 --- a/avalanchego/vms/platformvm/state/slice_iterator_test.go +++ b/avalanchego/vms/platformvm/state/slice_iterator_test.go @@ -1,9 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state -var _ StakerIterator = &sliceIterator{} +var _ StakerIterator = (*sliceIterator)(nil) type sliceIterator struct { index int @@ -28,4 +28,4 @@ func (i *sliceIterator) Value() *Staker { return i.stakers[i.index] } -func (i *sliceIterator) Release() {} +func (*sliceIterator) Release() {} diff --git a/avalanchego/vms/platformvm/state/staker.go b/avalanchego/vms/platformvm/state/staker.go index afc19bb0..37bc512e 100644 --- a/avalanchego/vms/platformvm/state/staker.go +++ b/avalanchego/vms/platformvm/state/staker.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -10,10 +10,11 @@ import ( "github.com/google/btree" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ btree.Item = &Staker{} +var _ btree.LessFunc[*Staker] = (*Staker).Less // StakerIterator defines an interface for iterating over a set of stakers. type StakerIterator interface { @@ -32,9 +33,11 @@ type StakerIterator interface { // Staker contains all information required to represent a validator or // delegator in the current and pending validator sets. +// Invariant: Staker's size is bounded to prevent OOM DoS attacks. 
type Staker struct { TxID ids.ID NodeID ids.NodeID + PublicKey *bls.PublicKey SubnetID ids.ID Weight uint64 StartTime time.Time @@ -56,16 +59,13 @@ type Staker struct { } // A *Staker is considered to be less than another *Staker when: +// // 1. If its NextTime is before the other's. // 2. If the NextTimes are the same, the *Staker with the lesser priority is the // lesser one. // 3. If the priorities are also the same, the one with the lesser txID is // lesser. -// -// Invariant: [thanIntf] is a *Staker. -func (s *Staker) Less(thanIntf btree.Item) bool { - than := thanIntf.(*Staker) - +func (s *Staker) Less(than *Staker) bool { if s.NextTime.Before(than.NextTime) { return true } @@ -83,11 +83,16 @@ func (s *Staker) Less(thanIntf btree.Item) bool { return bytes.Compare(s.TxID[:], than.TxID[:]) == -1 } -func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) *Staker { +func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) (*Staker, error) { + publicKey, _, err := staker.PublicKey() + if err != nil { + return nil, err + } endTime := staker.EndTime() return &Staker{ TxID: txID, NodeID: staker.NodeID(), + PublicKey: publicKey, SubnetID: staker.SubnetID(), Weight: staker.Weight(), StartTime: staker.StartTime(), @@ -95,19 +100,24 @@ func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) *S PotentialReward: potentialReward, NextTime: endTime, Priority: staker.CurrentPriority(), - } + }, nil } -func NewPendingStaker(txID ids.ID, staker txs.Staker) *Staker { +func NewPendingStaker(txID ids.ID, staker txs.Staker) (*Staker, error) { + publicKey, _, err := staker.PublicKey() + if err != nil { + return nil, err + } startTime := staker.StartTime() return &Staker{ TxID: txID, NodeID: staker.NodeID(), + PublicKey: publicKey, SubnetID: staker.SubnetID(), Weight: staker.Weight(), StartTime: startTime, EndTime: staker.EndTime(), NextTime: startTime, Priority: staker.PendingPriority(), - } + }, nil } diff --git 
a/avalanchego/vms/platformvm/state/staker_diff_iterator.go b/avalanchego/vms/platformvm/state/staker_diff_iterator.go index d3691ef9..e92f6307 100644 --- a/avalanchego/vms/platformvm/state/staker_diff_iterator.go +++ b/avalanchego/vms/platformvm/state/staker_diff_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -10,9 +10,9 @@ import ( ) var ( - _ StakerDiffIterator = &stakerDiffIterator{} - _ StakerIterator = &mutableStakerIterator{} - _ heap.Interface = &mutableStakerIterator{} + _ StakerDiffIterator = (*stakerDiffIterator)(nil) + _ StakerIterator = (*mutableStakerIterator)(nil) + _ heap.Interface = (*mutableStakerIterator)(nil) ) // StakerDiffIterator is an iterator that iterates over the events that will be diff --git a/avalanchego/vms/platformvm/state/staker_diff_iterator_test.go b/avalanchego/vms/platformvm/state/staker_diff_iterator_test.go index 4f23acfc..c008b06f 100644 --- a/avalanchego/vms/platformvm/state/staker_diff_iterator_test.go +++ b/avalanchego/vms/platformvm/state/staker_diff_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/platformvm/state/staker_status.go b/avalanchego/vms/platformvm/state/staker_status.go new file mode 100644 index 00000000..b74064c4 --- /dev/null +++ b/avalanchego/vms/platformvm/state/staker_status.go @@ -0,0 +1,12 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +const ( + unmodified diffValidatorStatus = iota + added + deleted +) + +type diffValidatorStatus uint8 diff --git a/avalanchego/vms/platformvm/state/staker_test.go b/avalanchego/vms/platformvm/state/staker_test.go index b4678525..bb196e84 100644 --- a/avalanchego/vms/platformvm/state/staker_test.go +++ b/avalanchego/vms/platformvm/state/staker_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state import ( + "errors" "testing" "time" @@ -12,9 +13,12 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) +var errCustom = errors.New("custom") + func TestStakerLess(t *testing.T) { tests := []struct { name string @@ -135,6 +139,9 @@ func TestNewCurrentStaker(t *testing.T) { txID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() + sk, err := bls.NewSecretKey() + require.NoError(err) + publicKey := bls.PublicFromSecretKey(sk) subnetID := ids.GenerateTestID() weight := uint64(12345) startTime := time.Now() @@ -144,16 +151,19 @@ func TestNewCurrentStaker(t *testing.T) { stakerTx := txs.NewMockStaker(ctrl) stakerTx.EXPECT().NodeID().Return(nodeID) + stakerTx.EXPECT().PublicKey().Return(publicKey, true, nil) stakerTx.EXPECT().SubnetID().Return(subnetID) stakerTx.EXPECT().Weight().Return(weight) stakerTx.EXPECT().StartTime().Return(startTime) stakerTx.EXPECT().EndTime().Return(endTime) stakerTx.EXPECT().CurrentPriority().Return(currentPriority) - staker := NewCurrentStaker(txID, stakerTx, potentialReward) + staker, err := NewCurrentStaker(txID, stakerTx, potentialReward) require.NotNil(staker) + require.NoError(err) require.Equal(txID, staker.TxID) require.Equal(nodeID, staker.NodeID) + require.Equal(publicKey, staker.PublicKey) require.Equal(subnetID, 
staker.SubnetID) require.Equal(weight, staker.Weight) require.Equal(startTime, staker.StartTime) @@ -161,6 +171,11 @@ func TestNewCurrentStaker(t *testing.T) { require.Equal(potentialReward, staker.PotentialReward) require.Equal(endTime, staker.NextTime) require.Equal(currentPriority, staker.Priority) + + stakerTx.EXPECT().PublicKey().Return(nil, false, errCustom) + + _, err = NewCurrentStaker(txID, stakerTx, potentialReward) + require.ErrorIs(err, errCustom) } func TestNewPendingStaker(t *testing.T) { @@ -170,6 +185,9 @@ func TestNewPendingStaker(t *testing.T) { txID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() + sk, err := bls.NewSecretKey() + require.NoError(err) + publicKey := bls.PublicFromSecretKey(sk) subnetID := ids.GenerateTestID() weight := uint64(12345) startTime := time.Now() @@ -178,16 +196,19 @@ func TestNewPendingStaker(t *testing.T) { stakerTx := txs.NewMockStaker(ctrl) stakerTx.EXPECT().NodeID().Return(nodeID) + stakerTx.EXPECT().PublicKey().Return(publicKey, true, nil) stakerTx.EXPECT().SubnetID().Return(subnetID) stakerTx.EXPECT().Weight().Return(weight) stakerTx.EXPECT().StartTime().Return(startTime) stakerTx.EXPECT().EndTime().Return(endTime) stakerTx.EXPECT().PendingPriority().Return(pendingPriority) - staker := NewPendingStaker(txID, stakerTx) + staker, err := NewPendingStaker(txID, stakerTx) require.NotNil(staker) + require.NoError(err) require.Equal(txID, staker.TxID) require.Equal(nodeID, staker.NodeID) + require.Equal(publicKey, staker.PublicKey) require.Equal(subnetID, staker.SubnetID) require.Equal(weight, staker.Weight) require.Equal(startTime, staker.StartTime) @@ -195,4 +216,9 @@ func TestNewPendingStaker(t *testing.T) { require.Zero(staker.PotentialReward) require.Equal(startTime, staker.NextTime) require.Equal(pendingPriority, staker.Priority) + + stakerTx.EXPECT().PublicKey().Return(nil, false, errCustom) + + _, err = NewPendingStaker(txID, stakerTx) + require.ErrorIs(err, errCustom) } diff --git 
a/avalanchego/vms/platformvm/state/stakers.go b/avalanchego/vms/platformvm/state/stakers.go index b3deae01..5276ff4f 100644 --- a/avalanchego/vms/platformvm/state/stakers.go +++ b/avalanchego/vms/platformvm/state/stakers.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -33,6 +33,14 @@ type CurrentStakers interface { // Invariant: [staker] is currently a CurrentValidator DeleteCurrentValidator(staker *Staker) + // SetDelegateeReward sets the accrued delegation rewards for [nodeID] on + // [subnetID] to [amount]. + SetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID, amount uint64) error + + // GetDelegateeReward returns the accrued delegation rewards for [nodeID] on + // [subnetID]. + GetDelegateeReward(subnetID ids.ID, nodeID ids.NodeID) (uint64, error) + // GetCurrentDelegatorIterator returns the delegators associated with the // validator on [subnetID] with [nodeID]. Delegators are sorted by their // removal from current staker set. 
@@ -90,20 +98,20 @@ type PendingStakers interface { type baseStakers struct { // subnetID --> nodeID --> current state for the validator of the subnet validators map[ids.ID]map[ids.NodeID]*baseStaker - stakers *btree.BTree + stakers *btree.BTreeG[*Staker] // subnetID --> nodeID --> diff for that validator since the last db write validatorDiffs map[ids.ID]map[ids.NodeID]*diffValidator } type baseStaker struct { validator *Staker - delegators *btree.BTree + delegators *btree.BTreeG[*Staker] } func newBaseStakers() *baseStakers { return &baseStakers{ validators: make(map[ids.ID]map[ids.NodeID]*baseStaker), - stakers: btree.New(defaultTreeDegree), + stakers: btree.NewG(defaultTreeDegree, (*Staker).Less), validatorDiffs: make(map[ids.ID]map[ids.NodeID]*diffValidator), } } @@ -128,8 +136,7 @@ func (v *baseStakers) PutValidator(staker *Staker) { validator.validator = staker validatorDiff := v.getOrCreateValidatorDiff(staker.SubnetID, staker.NodeID) - validatorDiff.validatorModified = true - validatorDiff.validatorDeleted = false + validatorDiff.validatorStatus = added validatorDiff.validator = staker v.stakers.ReplaceOrInsert(staker) @@ -141,8 +148,7 @@ func (v *baseStakers) DeleteValidator(staker *Staker) { v.pruneValidator(staker.SubnetID, staker.NodeID) validatorDiff := v.getOrCreateValidatorDiff(staker.SubnetID, staker.NodeID) - validatorDiff.validatorModified = true - validatorDiff.validatorDeleted = true + validatorDiff.validatorStatus = deleted validatorDiff.validator = staker v.stakers.Delete(staker) @@ -163,13 +169,13 @@ func (v *baseStakers) GetDelegatorIterator(subnetID ids.ID, nodeID ids.NodeID) S func (v *baseStakers) PutDelegator(staker *Staker) { validator := v.getOrCreateValidator(staker.SubnetID, staker.NodeID) if validator.delegators == nil { - validator.delegators = btree.New(defaultTreeDegree) + validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } validator.delegators.ReplaceOrInsert(staker) validatorDiff := 
v.getOrCreateValidatorDiff(staker.SubnetID, staker.NodeID) if validatorDiff.addedDelegators == nil { - validatorDiff.addedDelegators = btree.New(defaultTreeDegree) + validatorDiff.addedDelegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } validatorDiff.addedDelegators.ReplaceOrInsert(staker) @@ -235,7 +241,9 @@ func (v *baseStakers) getOrCreateValidatorDiff(subnetID ids.ID, nodeID ids.NodeI } validatorDiff, ok := subnetValidatorDiffs[nodeID] if !ok { - validatorDiff = &diffValidator{} + validatorDiff = &diffValidator{ + validatorStatus: unmodified, + } subnetValidatorDiffs[nodeID] = validatorDiff } return validatorDiff @@ -244,71 +252,69 @@ func (v *baseStakers) getOrCreateValidatorDiff(subnetID ids.ID, nodeID ids.NodeI type diffStakers struct { // subnetID --> nodeID --> diff for that validator validatorDiffs map[ids.ID]map[ids.NodeID]*diffValidator - addedStakers *btree.BTree + addedStakers *btree.BTreeG[*Staker] deletedStakers map[ids.ID]*Staker } type diffValidator struct { - validatorModified bool - // [validatorDeleted] implies [validatorModified] - validatorDeleted bool - validator *Staker + // validatorStatus describes whether a validator has been added or removed. + // + // validatorStatus is not affected by delegators ops so unmodified does not + // mean that diffValidator hasn't change, since delegators may have changed. + validatorStatus diffValidatorStatus + validator *Staker - addedDelegators *btree.BTree + addedDelegators *btree.BTreeG[*Staker] deletedDelegators map[ids.ID]*Staker } // GetValidator attempts to fetch the validator with the given subnetID and // nodeID. -// -// Returns: -// 1. If the validator was added in this diff, [staker, true] will be returned. -// 2. If the validator was removed in this diff, [nil, true] will be returned. -// 3. If the validator was not modified by this diff, [nil, false] will be -// returned. 
-func (s *diffStakers) GetValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, bool) { +// Invariant: Assumes that the validator will never be removed and then added. +func (s *diffStakers) GetValidator(subnetID ids.ID, nodeID ids.NodeID) (*Staker, diffValidatorStatus) { subnetValidatorDiffs, ok := s.validatorDiffs[subnetID] if !ok { - return nil, false + return nil, unmodified } validatorDiff, ok := subnetValidatorDiffs[nodeID] if !ok { - return nil, false + return nil, unmodified } - if !validatorDiff.validatorModified { - return nil, false + if validatorDiff.validatorStatus == added { + return validatorDiff.validator, added } - - if validatorDiff.validatorDeleted { - return nil, true - } - return validatorDiff.validator, true + return nil, validatorDiff.validatorStatus } func (s *diffStakers) PutValidator(staker *Staker) { validatorDiff := s.getOrCreateDiff(staker.SubnetID, staker.NodeID) - validatorDiff.validatorModified = true - validatorDiff.validatorDeleted = false + validatorDiff.validatorStatus = added validatorDiff.validator = staker if s.addedStakers == nil { - s.addedStakers = btree.New(defaultTreeDegree) + s.addedStakers = btree.NewG(defaultTreeDegree, (*Staker).Less) } s.addedStakers.ReplaceOrInsert(staker) } func (s *diffStakers) DeleteValidator(staker *Staker) { validatorDiff := s.getOrCreateDiff(staker.SubnetID, staker.NodeID) - validatorDiff.validatorModified = true - validatorDiff.validatorDeleted = true - validatorDiff.validator = staker - - if s.deletedStakers == nil { - s.deletedStakers = make(map[ids.ID]*Staker) + if validatorDiff.validatorStatus == added { + // This validator was added and immediately removed in this diff. We + // treat it as if it was never added. 
+ validatorDiff.validatorStatus = unmodified + s.addedStakers.Delete(validatorDiff.validator) + validatorDiff.validator = nil + } else { + validatorDiff.validatorStatus = deleted + validatorDiff.validator = staker + if s.deletedStakers == nil { + s.deletedStakers = make(map[ids.ID]*Staker) + } + s.deletedStakers[staker.TxID] = staker } - s.deletedStakers[staker.TxID] = staker } func (s *diffStakers) GetDelegatorIterator( @@ -339,12 +345,12 @@ func (s *diffStakers) GetDelegatorIterator( func (s *diffStakers) PutDelegator(staker *Staker) { validatorDiff := s.getOrCreateDiff(staker.SubnetID, staker.NodeID) if validatorDiff.addedDelegators == nil { - validatorDiff.addedDelegators = btree.New(defaultTreeDegree) + validatorDiff.addedDelegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } validatorDiff.addedDelegators.ReplaceOrInsert(staker) if s.addedStakers == nil { - s.addedStakers = btree.New(defaultTreeDegree) + s.addedStakers = btree.NewG(defaultTreeDegree, (*Staker).Less) } s.addedStakers.ReplaceOrInsert(staker) } @@ -383,7 +389,9 @@ func (s *diffStakers) getOrCreateDiff(subnetID ids.ID, nodeID ids.NodeID) *diffV } validatorDiff, ok := subnetValidatorDiffs[nodeID] if !ok { - validatorDiff = &diffValidator{} + validatorDiff = &diffValidator{ + validatorStatus: unmodified, + } subnetValidatorDiffs[nodeID] = validatorDiff } return validatorDiff diff --git a/avalanchego/vms/platformvm/state/stakers_test.go b/avalanchego/vms/platformvm/state/stakers_test.go index d022d9cd..6b8c85ea 100644 --- a/avalanchego/vms/platformvm/state/stakers_test.go +++ b/avalanchego/vms/platformvm/state/stakers_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -146,34 +146,55 @@ func TestDiffStakersValidator(t *testing.T) { v.PutDelegator(delegator) - _, ok := v.GetValidator(ids.GenerateTestID(), delegator.NodeID) - require.False(ok) + // validators not available in the diff are marked as unmodified + _, status := v.GetValidator(ids.GenerateTestID(), delegator.NodeID) + require.Equal(unmodified, status) - _, ok = v.GetValidator(delegator.SubnetID, ids.GenerateTestNodeID()) - require.False(ok) + _, status = v.GetValidator(delegator.SubnetID, ids.GenerateTestNodeID()) + require.Equal(unmodified, status) - _, ok = v.GetValidator(delegator.SubnetID, delegator.NodeID) - require.False(ok) + // delegator addition shouldn't change validatorStatus + _, status = v.GetValidator(delegator.SubnetID, delegator.NodeID) + require.Equal(unmodified, status) stakerIterator := v.GetStakerIterator(EmptyIterator) assertIteratorsEqual(t, NewSliceIterator(delegator), stakerIterator) v.PutValidator(staker) - returnedStaker, ok := v.GetValidator(staker.SubnetID, staker.NodeID) - require.True(ok) + returnedStaker, status := v.GetValidator(staker.SubnetID, staker.NodeID) + require.Equal(added, status) require.Equal(staker, returnedStaker) v.DeleteValidator(staker) - returnedStaker, ok = v.GetValidator(staker.SubnetID, staker.NodeID) - require.True(ok) - require.Nil(returnedStaker) + // Validators created and deleted in the same diff are marked as unmodified. + // This means they won't be pushed to baseState if diff.Apply(baseState) is + // called. 
+ _, status = v.GetValidator(staker.SubnetID, staker.NodeID) + require.Equal(unmodified, status) stakerIterator = v.GetStakerIterator(EmptyIterator) assertIteratorsEqual(t, NewSliceIterator(delegator), stakerIterator) } +func TestDiffStakersDeleteValidator(t *testing.T) { + require := require.New(t) + staker := newTestStaker() + delegator := newTestStaker() + + v := diffStakers{} + + _, status := v.GetValidator(ids.GenerateTestID(), delegator.NodeID) + require.Equal(unmodified, status) + + v.DeleteValidator(staker) + + returnedStaker, status := v.GetValidator(staker.SubnetID, staker.NodeID) + require.Equal(deleted, status) + require.Nil(returnedStaker) +} + func TestDiffStakersDelegator(t *testing.T) { staker := newTestStaker() delegator := newTestStaker() diff --git a/avalanchego/vms/platformvm/state/state.go b/avalanchego/vms/platformvm/state/state.go index efca1c5a..22425aac 100644 --- a/avalanchego/vms/platformvm/state/state.go +++ b/avalanchego/vms/platformvm/state/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -23,7 +23,9 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -47,27 +49,31 @@ const ( ) var ( - _ State = &state{} - - ErrDelegatorSubset = errors.New("delegator's time range must be a subset of the validator's time range") - - blockPrefix = []byte("block") - validatorsPrefix = []byte("validators") - currentPrefix = []byte("current") - pendingPrefix = []byte("pending") - validatorPrefix = []byte("validator") - delegatorPrefix = []byte("delegator") - subnetValidatorPrefix = []byte("subnetValidator") - subnetDelegatorPrefix = []byte("subnetDelegator") - validatorDiffsPrefix = []byte("validatorDiffs") - txPrefix = []byte("tx") - rewardUTXOsPrefix = []byte("rewardUTXOs") - utxoPrefix = []byte("utxo") - subnetPrefix = []byte("subnet") - transformedSubnetPrefix = []byte("transformedSubnet") - supplyPrefix = []byte("supply") - chainPrefix = []byte("chain") - singletonPrefix = []byte("singleton") + _ State = (*state)(nil) + + ErrDelegatorSubset = errors.New("delegator's time range must be a subset of the validator's time range") + errMissingValidatorSet = errors.New("missing validator set") + errValidatorSetAlreadyPopulated = errors.New("validator set already populated") + errDuplicateValidatorSet = errors.New("duplicate validator set") + + blockPrefix = []byte("block") + validatorsPrefix = []byte("validators") + currentPrefix = []byte("current") + pendingPrefix = []byte("pending") + validatorPrefix = []byte("validator") + delegatorPrefix = []byte("delegator") + subnetValidatorPrefix = []byte("subnetValidator") + subnetDelegatorPrefix = []byte("subnetDelegator") + 
validatorWeightDiffsPrefix = []byte("validatorDiffs") + validatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs") + txPrefix = []byte("tx") + rewardUTXOsPrefix = []byte("rewardUTXOs") + utxoPrefix = []byte("utxo") + subnetPrefix = []byte("subnet") + transformedSubnetPrefix = []byte("transformedSubnet") + supplyPrefix = []byte("supply") + chainPrefix = []byte("chain") + singletonPrefix = []byte("singleton") timestampKey = []byte("timestamp") currentSupplyKey = []byte("current supply") @@ -79,9 +85,9 @@ var ( // execution. type Chain interface { Stakers - UTXOAdder - UTXOGetter - UTXODeleter + avax.UTXOAdder + avax.UTXOGetter + avax.UTXODeleter GetNetworkID() uint32 @@ -107,27 +113,26 @@ type Chain interface { AddTx(tx *txs.Tx, status status.Status) } -type LastAccepteder interface { +type State interface { + Chain + uptime.State + avax.UTXOReader + GetLastAccepted() ids.ID SetLastAccepted(blkID ids.ID) -} -type BlockState interface { GetStatelessBlock(blockID ids.ID) (blocks.Block, choices.Status, error) AddStatelessBlock(block blocks.Block, status choices.Status) -} -type State interface { - LastAccepteder - Chain - BlockState - uptime.State - avax.UTXOReader + // ValidatorSet adds all the validators and delegators of [subnetID] into + // [vdrs]. + ValidatorSet(subnetID ids.ID, vdrs validators.Set) error GetValidatorWeightDiffs(height uint64, subnetID ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) - // Return the current validator set of [subnetID]. - ValidatorSet(subnetID ids.ID) (validators.Set, error) + // Returns a map of node ID --> BLS Public Key for all validators + // that left the Primary Network validator set. + GetValidatorPublicKeyDiffs(height uint64) (map[ids.NodeID]*bls.PublicKey, error) SetHeight(height uint64) @@ -137,8 +142,8 @@ type State interface { // Commit changes to the base database. Commit() error - // Returns a batch of unwritten changes that, when written, will be commit - // all pending changes to the base database. 
+ // Returns a batch of unwritten changes that, when written, will commit all + // pending changes to the base database. CommitBatch() (database.Batch, error) Close() error @@ -156,13 +161,13 @@ type stateBlk struct { * | |-. current * | | |-. validator * | | | '-. list - * | | | '-- txID -> uptime + potential reward + * | | | '-- txID -> uptime + potential reward + potential delegatee reward * | | |-. delegator * | | | '-. list * | | | '-- txID -> potential reward * | | |-. subnetValidator * | | | '-. list - * | | | '-- txID -> potential reward or nil + * | | | '-- txID -> uptime + potential reward + potential delegatee reward * | | '-. subnetDelegator * | | '-. list * | | '-- txID -> potential reward @@ -179,10 +184,14 @@ type stateBlk struct { * | | '-. subnetDelegator * | | '-. list * | | '-- txID -> nil - * | '-. diffs - * | '-. height+subnet + * | |-. weight diffs + * | | '-. height+subnet + * | | '-. list + * | | '-- nodeID -> weightChange + * | '-. pub key diffs + * | '-. height * | '-. list - * | '-- nodeID -> weightChange + * | '-- nodeID -> public key * |-. blocks * | '-- blockID -> block bytes * |-. txs @@ -207,10 +216,13 @@ type stateBlk struct { * '-- lastAcceptedKey -> lastAccepted */ type state struct { - cfg *config.Config - ctx *snow.Context - metrics metrics.Metrics - rewards reward.Calculator + validatorState + + cfg *config.Config + ctx *snow.Context + metrics metrics.Metrics + rewards reward.Calculator + bootstrapped *utils.Atomic[bool] baseDB *versiondb.Database @@ -220,11 +232,10 @@ type state struct { currentHeight uint64 addedBlocks map[ids.ID]stateBlk // map of blockID -> Block - blockCache cache.Cacher // cache of blockID -> Block, if the entry is nil, it is not in the database - blockDB database.Database - - uptimes map[ids.NodeID]*uptimeAndReward // nodeID -> uptimes - updatedUptimes map[ids.NodeID]struct{} // nodeID -> nil + // cache of blockID -> Block + // If the block isn't known, nil is cached. 
+ blockCache cache.Cacher[ids.ID, *stateBlk] + blockDB database.Database validatorsDB database.Database currentValidatorsDB database.Database @@ -246,15 +257,18 @@ type state struct { pendingSubnetDelegatorBaseDB database.Database pendingSubnetDelegatorList linkeddb.LinkedDB - validatorDiffsCache cache.Cacher // cache of heightWithSubnet -> map[ids.ShortID]*ValidatorWeightDiff - validatorDiffsDB database.Database + validatorWeightDiffsCache cache.Cacher[string, map[ids.NodeID]*ValidatorWeightDiff] // cache of heightWithSubnet -> map[ids.NodeID]*ValidatorWeightDiff + validatorWeightDiffsDB database.Database - addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} - txCache cache.Cacher // cache of txID -> {*txs.Tx, Status} if the entry is nil, it is not in the database + validatorPublicKeyDiffsCache cache.Cacher[uint64, map[ids.NodeID]*bls.PublicKey] // cache of height -> map[ids.NodeID]*bls.PublicKey + validatorPublicKeyDiffsDB database.Database + + addedTxs map[ids.ID]*txAndStatus // map of txID -> {*txs.Tx, Status} + txCache cache.Cacher[ids.ID, *txAndStatus] // txID -> {*txs.Tx, Status}. 
If the entry is nil, it isn't in the database txDB database.Database - addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO - rewardUTXOsCache cache.Cacher // cache of txID -> []*UTXO + addedRewardUTXOs map[ids.ID][]*avax.UTXO // map of txID -> []*UTXO + rewardUTXOsCache cache.Cacher[ids.ID, []*avax.UTXO] // txID -> []*UTXO rewardUTXODB database.Database modifiedUTXOs map[ids.ID]*avax.UTXO // map of modified UTXOID -> *UTXO if the UTXO is nil, it has been removed @@ -266,17 +280,17 @@ type state struct { subnetBaseDB database.Database subnetDB linkeddb.LinkedDB - transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx - transformedSubnetCache cache.Cacher // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database + transformedSubnets map[ids.ID]*txs.Tx // map of subnetID -> transformSubnetTx + transformedSubnetCache cache.Cacher[ids.ID, *txs.Tx] // cache of subnetID -> transformSubnetTx if the entry is nil, it is not in the database transformedSubnetDB database.Database - modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply - supplyCache cache.Cacher // cache of subnetID -> current supply if the entry is nil, it is not in the database + modifiedSupplies map[ids.ID]uint64 // map of subnetID -> current supply + supplyCache cache.Cacher[ids.ID, *uint64] // cache of subnetID -> current supply if the entry is nil, it is not in the database supplyDB database.Database - addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet - chainCache cache.Cacher // cache of subnetID -> the chains after all local modifications []*txs.Tx - chainDBCache cache.Cacher // cache of subnetID -> linkedDB + addedChains map[ids.ID][]*txs.Tx // maps subnetID -> the newly added chains to the subnet + chainCache cache.Cacher[ids.ID, []*txs.Tx] // cache of subnetID -> the chains after all local modifications []*txs.Tx + chainDBCache cache.Cacher[ids.ID, linkeddb.LinkedDB] // cache 
of subnetID -> linkedDB chainDB database.Database // The persisted fields represent the current database value @@ -302,7 +316,7 @@ func (v *ValidatorWeightDiff) Add(negative bool, amount uint64) error { if v.Amount > amount { v.Amount -= amount } else { - v.Amount = math.Diff64(v.Amount, amount) + v.Amount = math.AbsDiff(v.Amount, amount) v.Decrease = negative } return nil @@ -323,15 +337,6 @@ type txAndStatus struct { status status.Status } -type uptimeAndReward struct { - txID ids.ID - lastUpdated time.Time - - UpDuration time.Duration `serialize:"true"` - LastUpdated uint64 `serialize:"true"` // Unix time in seconds - PotentialReward uint64 `serialize:"true"` -} - func New( db database.Database, genesisBytes []byte, @@ -340,6 +345,7 @@ func New( ctx *snow.Context, metrics metrics.Metrics, rewards reward.Calculator, + bootstrapped *utils.Atomic[bool], ) (State, error) { s, err := new( db, @@ -348,6 +354,7 @@ func New( ctx, metricsReg, rewards, + bootstrapped, ) if err != nil { return nil, err @@ -360,13 +367,6 @@ func New( return nil, err } - for _, vdr := range validators.DefaultValidatorList() { - s.uptimes[vdr.ID()] = &uptimeAndReward{ - txID: ids.Empty, - lastUpdated: s.GetTimestamp(), - } - } - return s, nil } @@ -377,11 +377,12 @@ func new( ctx *snow.Context, metricsReg prometheus.Registerer, rewards reward.Calculator, + bootstrapped *utils.Atomic[bool], ) (*state, error) { - blockCache, err := metercacher.New( + blockCache, err := metercacher.New[ids.ID, *stateBlk]( "block_cache", metricsReg, - &cache.LRU{Size: blockCacheSize}, + &cache.LRU[ids.ID, *stateBlk]{Size: blockCacheSize}, ) if err != nil { return nil, err @@ -403,31 +404,40 @@ func new( pendingSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, pendingValidatorsDB) pendingSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, pendingValidatorsDB) - validatorDiffsDB := prefixdb.New(validatorDiffsPrefix, validatorsDB) + validatorWeightDiffsDB := 
prefixdb.New(validatorWeightDiffsPrefix, validatorsDB) + validatorWeightDiffsCache, err := metercacher.New[string, map[ids.NodeID]*ValidatorWeightDiff]( + "validator_weight_diffs_cache", + metricsReg, + &cache.LRU[string, map[ids.NodeID]*ValidatorWeightDiff]{Size: validatorDiffsCacheSize}, + ) + if err != nil { + return nil, err + } - validatorDiffsCache, err := metercacher.New( - "validator_diffs_cache", + validatorPublicKeyDiffsDB := prefixdb.New(validatorPublicKeyDiffsPrefix, validatorsDB) + validatorPublicKeyDiffsCache, err := metercacher.New[uint64, map[ids.NodeID]*bls.PublicKey]( + "validator_pub_key_diffs_cache", metricsReg, - &cache.LRU{Size: validatorDiffsCacheSize}, + &cache.LRU[uint64, map[ids.NodeID]*bls.PublicKey]{Size: validatorDiffsCacheSize}, ) if err != nil { return nil, err } - txCache, err := metercacher.New( + txCache, err := metercacher.New[ids.ID, *txAndStatus]( "tx_cache", metricsReg, - &cache.LRU{Size: txCacheSize}, + &cache.LRU[ids.ID, *txAndStatus]{Size: txCacheSize}, ) if err != nil { return nil, err } rewardUTXODB := prefixdb.New(rewardUTXOsPrefix, baseDB) - rewardUTXOsCache, err := metercacher.New( + rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( "reward_utxos_cache", metricsReg, - &cache.LRU{Size: rewardUTXOsCacheSize}, + &cache.LRU[ids.ID, []*avax.UTXO]{Size: rewardUTXOsCacheSize}, ) if err != nil { return nil, err @@ -441,48 +451,51 @@ func new( subnetBaseDB := prefixdb.New(subnetPrefix, baseDB) - transformedSubnetCache, err := metercacher.New( + transformedSubnetCache, err := metercacher.New[ids.ID, *txs.Tx]( "transformed_subnet_cache", metricsReg, - &cache.LRU{Size: chainCacheSize}, + &cache.LRU[ids.ID, *txs.Tx]{Size: chainCacheSize}, ) if err != nil { return nil, err } - supplyCache, err := metercacher.New( + supplyCache, err := metercacher.New[ids.ID, *uint64]( "supply_cache", metricsReg, - &cache.LRU{Size: chainCacheSize}, + &cache.LRU[ids.ID, *uint64]{Size: chainCacheSize}, ) if err != nil { return nil, err } - 
chainCache, err := metercacher.New( + chainCache, err := metercacher.New[ids.ID, []*txs.Tx]( "chain_cache", metricsReg, - &cache.LRU{Size: chainCacheSize}, + &cache.LRU[ids.ID, []*txs.Tx]{Size: chainCacheSize}, ) if err != nil { return nil, err } - chainDBCache, err := metercacher.New( + chainDBCache, err := metercacher.New[ids.ID, linkeddb.LinkedDB]( "chain_db_cache", metricsReg, - &cache.LRU{Size: chainDBCacheSize}, + &cache.LRU[ids.ID, linkeddb.LinkedDB]{Size: chainDBCacheSize}, ) if err != nil { return nil, err } return &state{ - cfg: cfg, - ctx: ctx, - metrics: metrics, - rewards: rewards, - baseDB: baseDB, + validatorState: newValidatorState(), + + cfg: cfg, + ctx: ctx, + metrics: metrics, + rewards: rewards, + bootstrapped: bootstrapped, + baseDB: baseDB, addedBlocks: make(map[ids.ID]stateBlk), blockCache: blockCache, @@ -491,9 +504,6 @@ func new( currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), - uptimes: make(map[ids.NodeID]*uptimeAndReward), - updatedUptimes: make(map[ids.NodeID]struct{}), - validatorsDB: validatorsDB, currentValidatorsDB: currentValidatorsDB, currentValidatorBaseDB: currentValidatorBaseDB, @@ -513,8 +523,10 @@ func new( pendingSubnetValidatorList: linkeddb.NewDefault(pendingSubnetValidatorBaseDB), pendingSubnetDelegatorBaseDB: pendingSubnetDelegatorBaseDB, pendingSubnetDelegatorList: linkeddb.NewDefault(pendingSubnetDelegatorBaseDB), - validatorDiffsDB: validatorDiffsDB, - validatorDiffsCache: validatorDiffsCache, + validatorWeightDiffsDB: validatorWeightDiffsDB, + validatorWeightDiffsCache: validatorWeightDiffsCache, + validatorPublicKeyDiffsCache: validatorPublicKeyDiffsCache, + validatorPublicKeyDiffsDB: validatorPublicKeyDiffsDB, addedTxs: make(map[ids.ID]*txAndStatus), txDB: prefixdb.New(txPrefix, baseDB), @@ -658,11 +670,11 @@ func (s *state) GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) { return tx, nil } - if txIntf, cached := s.transformedSubnetCache.Get(subnetID); cached { - if txIntf == nil { + 
if tx, cached := s.transformedSubnetCache.Get(subnetID); cached { + if tx == nil { return nil, database.ErrNotFound } - return txIntf.(*txs.Tx), nil + return tx, nil } transformSubnetTxID, err := database.GetID(s.transformedSubnetDB, subnetID[:]) @@ -688,8 +700,8 @@ func (s *state) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { } func (s *state) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { - if chainsIntf, cached := s.chainCache.Get(subnetID); cached { - return chainsIntf.([]*txs.Tx), nil + if chains, cached := s.chainCache.Get(subnetID); cached { + return chains, nil } chainDB := s.getChainDB(subnetID) chainDBIt := chainDB.NewIterator() @@ -720,16 +732,15 @@ func (s *state) AddChain(createChainTxIntf *txs.Tx) { createChainTx := createChainTxIntf.Unsigned.(*txs.CreateChainTx) subnetID := createChainTx.SubnetID s.addedChains[subnetID] = append(s.addedChains[subnetID], createChainTxIntf) - if chainsIntf, cached := s.chainCache.Get(subnetID); cached { - chains := chainsIntf.([]*txs.Tx) + if chains, cached := s.chainCache.Get(subnetID); cached { chains = append(chains, createChainTxIntf) s.chainCache.Put(subnetID, chains) } } func (s *state) getChainDB(subnetID ids.ID) linkeddb.LinkedDB { - if chainDBIntf, cached := s.chainDBCache.Get(subnetID); cached { - return chainDBIntf.(linkeddb.LinkedDB) + if chainDB, cached := s.chainDBCache.Get(subnetID); cached { + return chainDB } rawChainDB := prefixdb.New(subnetID[:], s.chainDB) chainDB := linkeddb.NewDefault(rawChainDB) @@ -741,11 +752,10 @@ func (s *state) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { if tx, exists := s.addedTxs[txID]; exists { return tx.tx, tx.status, nil } - if txIntf, cached := s.txCache.Get(txID); cached { - if txIntf == nil { + if tx, cached := s.txCache.Get(txID); cached { + if tx == nil { return nil, status.Unknown, database.ErrNotFound } - tx := txIntf.(*txAndStatus) return tx.tx, tx.status, nil } txBytes, err := s.txDB.Get(txID[:]) @@ -787,7 +797,7 @@ func (s *state) 
GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { return utxos, nil } if utxos, exists := s.rewardUTXOsCache.Get(txID); exists { - return utxos.([]*avax.UTXO), nil + return utxos, nil } rawTxDB := prefixdb.New(txID[:], s.rewardUTXODB) @@ -837,39 +847,29 @@ func (s *state) DeleteUTXO(utxoID ids.ID) { s.modifiedUTXOs[utxoID] = nil } -func (s *state) GetUptime(nodeID ids.NodeID) (upDuration time.Duration, lastUpdated time.Time, err error) { - uptime, exists := s.uptimes[nodeID] - if !exists { - return 0, time.Time{}, database.ErrNotFound +func (s *state) GetStartTime(nodeID ids.NodeID, subnetID ids.ID) (time.Time, error) { + staker, err := s.currentStakers.GetValidator(subnetID, nodeID) + if err != nil { + return time.Time{}, err } - return uptime.UpDuration, uptime.lastUpdated, nil + return staker.StartTime, nil } -func (s *state) SetUptime(nodeID ids.NodeID, upDuration time.Duration, lastUpdated time.Time) error { - uptime, exists := s.uptimes[nodeID] - if !exists { - return database.ErrNotFound - } - uptime.UpDuration = upDuration - uptime.lastUpdated = lastUpdated - s.updatedUptimes[nodeID] = struct{}{} - return nil +func (s *state) GetTimestamp() time.Time { + return s.timestamp } -// Returns the Primary Network start time of current validator [nodeID]. -// Errors if [nodeID] isn't a current validator of the Primary Network. 
-func (s *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { - staker, err := s.currentStakers.GetValidator(constants.PrimaryNetworkID, nodeID) - if err != nil { - return time.Time{}, err - } - return staker.StartTime, nil +func (s *state) SetTimestamp(tm time.Time) { + s.timestamp = tm } -func (s *state) GetTimestamp() time.Time { return s.timestamp } -func (s *state) SetTimestamp(tm time.Time) { s.timestamp = tm } -func (s *state) GetLastAccepted() ids.ID { return s.lastAccepted } -func (s *state) SetLastAccepted(lastAccepted ids.ID) { s.lastAccepted = lastAccepted } +func (s *state) GetLastAccepted() ids.ID { + return s.lastAccepted +} + +func (s *state) SetLastAccepted(lastAccepted ids.ID) { + s.lastAccepted = lastAccepted +} func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) { if subnetID == constants.PrimaryNetworkID { @@ -881,12 +881,12 @@ func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) { return supply, nil } - supplyIntf, ok := s.supplyCache.Get(subnetID) + cachedSupply, ok := s.supplyCache.Get(subnetID) if ok { - if supplyIntf == nil { + if cachedSupply == nil { return 0, database.ErrNotFound } - return supplyIntf.(uint64), nil + return *cachedSupply, nil } supply, err := database.GetUInt64(s.supplyDB, subnetID[:]) @@ -898,7 +898,7 @@ func (s *state) GetCurrentSupply(subnetID ids.ID) (uint64, error) { return 0, err } - s.supplyCache.Put(subnetID, supply) + s.supplyCache.Put(subnetID, &supply) return supply, nil } @@ -910,6 +910,26 @@ func (s *state) SetCurrentSupply(subnetID ids.ID, cs uint64) { } } +func (s *state) ValidatorSet(subnetID ids.ID, vdrs validators.Set) error { + for nodeID, validator := range s.currentStakers.validators[subnetID] { + staker := validator.validator + if err := vdrs.Add(nodeID, staker.PublicKey, staker.TxID, staker.Weight); err != nil { + return err + } + + delegatorIterator := NewTreeIterator(validator.delegators) + for delegatorIterator.Next() { + staker := delegatorIterator.Value() + 
if err := vdrs.AddWeight(nodeID, staker.Weight); err != nil { + delegatorIterator.Release() + return err + } + } + delegatorIterator.Release() + } + return nil +} + func (s *state) GetValidatorWeightDiffs(height uint64, subnetID ids.ID) (map[ids.NodeID]*ValidatorWeightDiff, error) { prefixStruct := heightWithSubnet{ Height: height, @@ -921,11 +941,11 @@ func (s *state) GetValidatorWeightDiffs(height uint64, subnetID ids.ID) (map[ids } prefixStr := string(prefixBytes) - if weightDiffsIntf, ok := s.validatorDiffsCache.Get(prefixStr); ok { - return weightDiffsIntf.(map[ids.NodeID]*ValidatorWeightDiff), nil + if weightDiffs, ok := s.validatorWeightDiffsCache.Get(prefixStr); ok { + return weightDiffs, nil } - rawDiffDB := prefixdb.New(prefixBytes, s.validatorDiffsDB) + rawDiffDB := prefixdb.New(prefixBytes, s.validatorWeightDiffsDB) diffDB := linkeddb.NewDefault(rawDiffDB) diffIter := diffDB.NewIterator() defer diffIter.Release() @@ -946,31 +966,38 @@ func (s *state) GetValidatorWeightDiffs(height uint64, subnetID ids.ID) (map[ids weightDiffs[nodeID] = &weightDiff } - s.validatorDiffsCache.Put(prefixStr, weightDiffs) + s.validatorWeightDiffsCache.Put(prefixStr, weightDiffs) return weightDiffs, diffIter.Error() } -func (s *state) ValidatorSet(subnetID ids.ID) (validators.Set, error) { - vdrs := validators.NewSet() - for nodeID, validator := range s.currentStakers.validators[subnetID] { - staker := validator.validator - if staker != nil { - if err := vdrs.AddWeight(nodeID, staker.Weight); err != nil { - return nil, err - } +func (s *state) GetValidatorPublicKeyDiffs(height uint64) (map[ids.NodeID]*bls.PublicKey, error) { + if publicKeyDiffs, ok := s.validatorPublicKeyDiffsCache.Get(height); ok { + return publicKeyDiffs, nil + } + + heightBytes := database.PackUInt64(height) + rawDiffDB := prefixdb.New(heightBytes, s.validatorPublicKeyDiffsDB) + diffDB := linkeddb.NewDefault(rawDiffDB) + diffIter := diffDB.NewIterator() + defer diffIter.Release() + + pkDiffs := 
make(map[ids.NodeID]*bls.PublicKey) + for diffIter.Next() { + nodeID, err := ids.ToNodeID(diffIter.Key()) + if err != nil { + return nil, err } - delegatorIterator := NewTreeIterator(validator.delegators) - for delegatorIterator.Next() { - staker := delegatorIterator.Value() - if err := vdrs.AddWeight(nodeID, staker.Weight); err != nil { - delegatorIterator.Release() - return nil, err - } + pkBytes := diffIter.Value() + pk, err := bls.PublicKeyFromBytes(pkBytes) + if err != nil { + return nil, err } - delegatorIterator.Release() + pkDiffs[nodeID] = pk } - return vdrs, nil + + s.validatorPublicKeyDiffsCache.Put(height, pkDiffs) + return pkDiffs, diffIter.Error() } func (s *state) syncGenesis(genesisBlk blocks.Block, genesis *genesis.State) error { @@ -1009,7 +1036,11 @@ func (s *state) syncGenesis(genesisBlk blocks.Block, genesis *genesis.State) err return err } - staker := NewCurrentStaker(vdrTx.ID(), tx, potentialReward) + staker, err := NewCurrentStaker(vdrTx.ID(), tx, potentialReward) + if err != nil { + return err + } + s.PutCurrentValidator(staker) s.AddTx(vdrTx, status.Committed) s.SetCurrentSupply(constants.PrimaryNetworkID, newCurrentSupply) @@ -1031,9 +1062,10 @@ func (s *state) syncGenesis(genesisBlk blocks.Block, genesis *genesis.State) err s.AddTx(chain, status.Committed) } - validators.InitializeDefaultValidators(s.ctx.NetworkID, s.GetTimestamp()) - - return s.write(0) + // updateValidators is set to false here to maintain the invariant that the + // primary network's validator set is empty before the validator sets are + // initialized. + return s.write(false /*=updateValidators*/, 0) } // Load pulls data previously stored on disk that is expected to be in memory. 
@@ -1043,10 +1075,8 @@ func (s *state) load() error { s.loadMetadata(), s.loadCurrentValidators(), s.loadPendingValidators(), + s.initValidatorSets(), ) - - validators.InitializeDefaultValidators(s.ctx.NetworkID, s.GetTimestamp()) - return errs.Err } @@ -1090,27 +1120,32 @@ func (s *state) loadCurrentValidators() error { return err } - uptimeBytes := validatorIt.Value() - uptime := &uptimeAndReward{ + metadataBytes := validatorIt.Value() + metadata := &validatorMetadata{ txID: txID, + // Note: we don't provide [LastUpdated] here because we expect it to + // always be present on disk. } - if _, err := txs.Codec.Unmarshal(uptimeBytes, uptime); err != nil { + if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { return err } - uptime.lastUpdated = time.Unix(int64(uptime.LastUpdated), 0) stakerTx, ok := tx.Unsigned.(txs.Staker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } - staker := NewCurrentStaker(txID, stakerTx, uptime.PotentialReward) + staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) + if err != nil { + return err + } + validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) validator.validator = staker s.currentStakers.stakers.ReplaceOrInsert(staker) - s.uptimes[staker.NodeID] = uptime + s.validatorState.LoadValidatorMetadata(staker.NodeID, staker.SubnetID, metadata) } subnetValidatorIt := s.currentSubnetValidatorList.NewIterator() @@ -1126,27 +1161,32 @@ func (s *state) loadCurrentValidators() error { return err } - // Because permissioned validators originally wrote their values as nil, - // we handle empty [potentialRewardBytes] as 0. 
- var potentialReward uint64 - potentialRewardBytes := subnetValidatorIt.Value() - if len(potentialRewardBytes) > 0 { - potentialReward, err = database.ParseUInt64(potentialRewardBytes) - if err != nil { - return err - } - } - stakerTx, ok := tx.Unsigned.(txs.Staker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } - staker := NewCurrentStaker(txID, stakerTx, potentialReward) + metadataBytes := subnetValidatorIt.Value() + metadata := &validatorMetadata{ + txID: txID, + // use the start time as the fallback value + // in case it's not stored in the database + LastUpdated: uint64(stakerTx.StartTime().Unix()), + } + if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { + return err + } + + staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) + if err != nil { + return err + } validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) validator.validator = staker s.currentStakers.stakers.ReplaceOrInsert(staker) + + s.validatorState.LoadValidatorMetadata(staker.NodeID, staker.SubnetID, metadata) } delegatorIt := s.currentDelegatorList.NewIterator() @@ -1178,10 +1218,14 @@ func (s *state) loadCurrentValidators() error { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } - staker := NewCurrentStaker(txID, stakerTx, potentialReward) + staker, err := NewCurrentStaker(txID, stakerTx, potentialReward) + if err != nil { + return err + } + validator := s.currentStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) if validator.delegators == nil { - validator.delegators = btree.New(defaultTreeDegree) + validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } validator.delegators.ReplaceOrInsert(staker) @@ -1225,7 +1269,11 @@ func (s *state) loadPendingValidators() error { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } - staker := NewPendingStaker(txID, stakerTx) + staker, err := NewPendingStaker(txID, stakerTx) 
+ if err != nil { + return err + } + validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) validator.validator = staker @@ -1256,10 +1304,14 @@ func (s *state) loadPendingValidators() error { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } - staker := NewPendingStaker(txID, stakerTx) + staker, err := NewPendingStaker(txID, stakerTx) + if err != nil { + return err + } + validator := s.pendingStakers.getOrCreateValidator(staker.SubnetID, staker.NodeID) if validator.delegators == nil { - validator.delegators = btree.New(defaultTreeDegree) + validator.delegators = btree.NewG(defaultTreeDegree, (*Staker).Less) } validator.delegators.ReplaceOrInsert(staker) @@ -1277,15 +1329,52 @@ func (s *state) loadPendingValidators() error { return errs.Err } -func (s *state) write(height uint64) error { +// Invariant: initValidatorSets requires loadCurrentValidators to have already +// been called. +func (s *state) initValidatorSets() error { + primaryValidators, ok := s.cfg.Validators.Get(constants.PrimaryNetworkID) + if !ok { + return errMissingValidatorSet + } + if primaryValidators.Len() != 0 { + // Enforce the invariant that the validator set is empty here. 
+ return errValidatorSetAlreadyPopulated + } + err := s.ValidatorSet(constants.PrimaryNetworkID, primaryValidators) + if err != nil { + return err + } + + vl := validators.NewLogger(s.ctx.Log, s.bootstrapped, constants.PrimaryNetworkID, s.ctx.NodeID) + primaryValidators.RegisterCallbackListener(vl) + + s.metrics.SetLocalStake(primaryValidators.GetWeight(s.ctx.NodeID)) + s.metrics.SetTotalStake(primaryValidators.Weight()) + + for subnetID := range s.cfg.TrackedSubnets { + subnetValidators := validators.NewSet() + err := s.ValidatorSet(subnetID, subnetValidators) + if err != nil { + return err + } + + if !s.cfg.Validators.Add(subnetID, subnetValidators) { + return fmt.Errorf("%w: %s", errDuplicateValidatorSet, subnetID) + } + + vl := validators.NewLogger(s.ctx.Log, s.bootstrapped, subnetID, s.ctx.NodeID) + subnetValidators.RegisterCallbackListener(vl) + } + return nil +} + +func (s *state) write(updateValidators bool, height uint64) error { errs := wrappers.Errs{} errs.Add( s.writeBlocks(), - s.writeCurrentPrimaryNetworkStakers(height), - s.writeCurrentSubnetStakers(height), - s.writePendingPrimaryNetworkStakers(), - s.writePendingSubnetStakers(), - s.writeUptimes(), + s.writeCurrentStakers(updateValidators, height), + s.writePendingStakers(), + s.WriteValidatorMetadata(s.currentValidatorList, s.currentSubnetValidatorList), // Must be called after writeCurrentStakers s.writeTXs(), s.writeRewardUTXOs(), s.writeUTXOs(), @@ -1405,7 +1494,9 @@ func (s *state) Abort() { } func (s *state) CommitBatch() (database.Batch, error) { - if err := s.write(s.currentHeight); err != nil { + // updateValidators is set to true here so that the validator manager is + // kept up to date with the last accepted state. 
+ if err := s.write(true /*=updateValidators*/, s.currentHeight); err != nil { return nil, err } return s.baseDB.CommitBatch() @@ -1425,8 +1516,8 @@ func (s *state) writeBlocks() error { } delete(s.addedBlocks, blkID) - s.blockCache.Put(blkID, stateBlk) - if err = s.blockDB.Put(blkID[:], blockBytes); err != nil { + s.blockCache.Put(blkID, &stBlk) + if err := s.blockDB.Put(blkID[:], blockBytes); err != nil { return fmt.Errorf("failed to write block %s: %w", blkID, err) } } @@ -1434,15 +1525,13 @@ func (s *state) writeBlocks() error { } func (s *state) GetStatelessBlock(blockID ids.ID) (blocks.Block, choices.Status, error) { - if blk, exists := s.addedBlocks[blockID]; exists { + if blk, ok := s.addedBlocks[blockID]; ok { return blk.Blk, blk.Status, nil } - if blkIntf, cached := s.blockCache.Get(blockID); cached { - if blkIntf == nil { - return nil, choices.Processing, database.ErrNotFound // status does not matter here + if blkState, ok := s.blockCache.Get(blockID); ok { + if blkState == nil { + return nil, choices.Processing, database.ErrNotFound } - - blkState := blkIntf.(stateBlk) return blkState.Blk, blkState.Status, nil } @@ -1465,168 +1554,102 @@ func (s *state) GetStatelessBlock(blockID ids.ID) (blocks.Block, choices.Status, return nil, choices.Processing, err } - s.blockCache.Put(blockID, blkState) + s.blockCache.Put(blockID, &blkState) return blkState.Blk, blkState.Status, nil } -func (s *state) writeCurrentPrimaryNetworkStakers(height uint64) error { - validatorDiffs, exists := s.currentStakers.validatorDiffs[constants.PrimaryNetworkID] - if !exists { - // If there are no validator changes, we shouldn't update any diffs. 
- return nil - } +func (s *state) writeCurrentStakers(updateValidators bool, height uint64) error { + heightBytes := database.PackUInt64(height) + rawPublicKeyDiffDB := prefixdb.New(heightBytes, s.validatorPublicKeyDiffsDB) + pkDiffDB := linkeddb.NewDefault(rawPublicKeyDiffDB) + // Node ID --> BLS public key of node before it left the validator set. + pkDiffs := make(map[ids.NodeID]*bls.PublicKey) - prefixStruct := heightWithSubnet{ - Height: height, - SubnetID: constants.PrimaryNetworkID, - } - prefixBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, prefixStruct) - if err != nil { - return fmt.Errorf("failed to create prefix bytes: %w", err) - } - rawDiffDB := prefixdb.New(prefixBytes, s.validatorDiffsDB) - diffDB := linkeddb.NewDefault(rawDiffDB) + for subnetID, validatorDiffs := range s.currentStakers.validatorDiffs { + delete(s.currentStakers.validatorDiffs, subnetID) - weightDiffs := make(map[ids.NodeID]*ValidatorWeightDiff) - for nodeID, validatorDiff := range validatorDiffs { - weightDiff := &ValidatorWeightDiff{} - if validatorDiff.validatorModified { - staker := validatorDiff.validator + // Select db to write to + validatorDB := s.currentSubnetValidatorList + delegatorDB := s.currentSubnetDelegatorList + if subnetID == constants.PrimaryNetworkID { + validatorDB = s.currentValidatorList + delegatorDB = s.currentDelegatorList + } - weightDiff.Decrease = validatorDiff.validatorDeleted - weightDiff.Amount = staker.Weight + prefixStruct := heightWithSubnet{ + Height: height, + SubnetID: subnetID, + } + prefixBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, prefixStruct) + if err != nil { + return fmt.Errorf("failed to create prefix bytes: %w", err) + } + rawWeightDiffDB := prefixdb.New(prefixBytes, s.validatorWeightDiffsDB) + weightDiffDB := linkeddb.NewDefault(rawWeightDiffDB) + weightDiffs := make(map[ids.NodeID]*ValidatorWeightDiff) - if validatorDiff.validatorDeleted { - if err := s.currentValidatorList.Delete(staker.TxID[:]); err != nil { 
- return fmt.Errorf("failed to delete current staker: %w", err) - } + // Record the change in weight and/or public key for each validator. + for nodeID, validatorDiff := range validatorDiffs { + // Copy [nodeID] so it doesn't get overwritten next iteration. + nodeID := nodeID - delete(s.uptimes, nodeID) - delete(s.updatedUptimes, nodeID) - } else { - vdr := &uptimeAndReward{ + weightDiff := &ValidatorWeightDiff{ + Decrease: validatorDiff.validatorStatus == deleted, + } + switch validatorDiff.validatorStatus { + case added: + staker := validatorDiff.validator + weightDiff.Amount = staker.Weight + + // The validator is being added. + // + // Invariant: It's impossible for a delegator to have been + // rewarded in the same block that the validator was added. + metadata := &validatorMetadata{ txID: staker.TxID, lastUpdated: staker.StartTime, - UpDuration: 0, - LastUpdated: uint64(staker.StartTime.Unix()), - PotentialReward: staker.PotentialReward, + UpDuration: 0, + LastUpdated: uint64(staker.StartTime.Unix()), + PotentialReward: staker.PotentialReward, + PotentialDelegateeReward: 0, } - vdrBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, vdr) + metadataBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, metadata) if err != nil { return fmt.Errorf("failed to serialize current validator: %w", err) } - if err = s.currentValidatorList.Put(staker.TxID[:], vdrBytes); err != nil { + if err = validatorDB.Put(staker.TxID[:], metadataBytes); err != nil { return fmt.Errorf("failed to write current validator to list: %w", err) } - s.uptimes[nodeID] = vdr - } - } - - err := writeCurrentDelegatorDiff( - s.currentDelegatorList, - weightDiff, - validatorDiff, - ) - if err != nil { - return err - } - - if weightDiff.Amount == 0 { - continue - } - weightDiffs[nodeID] = weightDiff - - weightDiffBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, weightDiff) - if err != nil { - return fmt.Errorf("failed to serialize validator weight diff: %w", err) - } - - // Copy so 
value passed into [Put] doesn't get overwritten next - // iteration - nodeID := nodeID - if err := diffDB.Put(nodeID[:], weightDiffBytes); err != nil { - return err - } - - // TODO: Move the validator set management out of the state package - if weightDiff.Decrease { - err = s.cfg.Validators.RemoveWeight(constants.PrimaryNetworkID, nodeID, weightDiff.Amount) - } else { - err = s.cfg.Validators.AddWeight(constants.PrimaryNetworkID, nodeID, weightDiff.Amount) - } - if err != nil { - return fmt.Errorf("failed to update validator weight: %w", err) - } - } - s.validatorDiffsCache.Put(string(prefixBytes), weightDiffs) - - // Handle default validators - for _, v := range validators.ExpiredDefaultValidators(s.GetNetworkID(), s.GetTimestamp()) { - err := s.cfg.Validators.RemoveWeight(constants.PrimaryNetworkID, v.ID(), v.Weight()) - if err != nil { - return fmt.Errorf("failed to remove default validator weight: %w", err) - } - } - - // TODO: Move validator set management out of the state package - // - // Attempt to update the stake metrics - primaryValidators, ok := s.cfg.Validators.GetValidators(constants.PrimaryNetworkID) - if !ok { - return nil - } - weight, _ := primaryValidators.GetWeight(s.ctx.NodeID) - s.metrics.SetLocalStake(weight) - s.metrics.SetTotalStake(primaryValidators.Weight()) - return nil -} - -func (s *state) writeCurrentSubnetStakers(height uint64) error { - for subnetID, subnetValidatorDiffs := range s.currentStakers.validatorDiffs { - delete(s.currentStakers.validatorDiffs, subnetID) - - if subnetID == constants.PrimaryNetworkID { - // It is assumed that this case is handled separately before calling - // this function. 
- continue - } - - prefixStruct := heightWithSubnet{ - Height: height, - SubnetID: subnetID, - } - prefixBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, prefixStruct) - if err != nil { - return fmt.Errorf("failed to create prefix bytes: %w", err) - } - rawDiffDB := prefixdb.New(prefixBytes, s.validatorDiffsDB) - diffDB := linkeddb.NewDefault(rawDiffDB) - - weightDiffs := make(map[ids.NodeID]*ValidatorWeightDiff) - for nodeID, validatorDiff := range subnetValidatorDiffs { - weightDiff := &ValidatorWeightDiff{} - if validatorDiff.validatorModified { + s.validatorState.LoadValidatorMetadata(nodeID, subnetID, metadata) + case deleted: staker := validatorDiff.validator - - weightDiff.Decrease = validatorDiff.validatorDeleted weightDiff.Amount = staker.Weight - if validatorDiff.validatorDeleted { - err = s.currentSubnetValidatorList.Delete(staker.TxID[:]) - } else { - err = database.PutUInt64(s.currentSubnetValidatorList, staker.TxID[:], staker.PotentialReward) + // Invariant: Only the Primary Network contains non-nil + // public keys. + if staker.PublicKey != nil { + // Record the public key of the validator being removed. + pkDiffs[nodeID] = staker.PublicKey + + pkBytes := bls.PublicKeyToBytes(staker.PublicKey) + if err := pkDiffDB.Put(nodeID[:], pkBytes); err != nil { + return err + } } - if err != nil { - return fmt.Errorf("failed to update current subnet staker: %w", err) + + if err := validatorDB.Delete(staker.TxID[:]); err != nil { + return fmt.Errorf("failed to delete current staker: %w", err) } + + s.validatorState.DeleteValidatorMetadata(nodeID, subnetID) } err := writeCurrentDelegatorDiff( - s.currentSubnetDelegatorList, + delegatorDB, weightDiff, validatorDiff, ) @@ -1635,6 +1658,7 @@ func (s *state) writeCurrentSubnetStakers(height uint64) error { } if weightDiff.Amount == 0 { + // No weight change to record; go to next validator. 
continue } weightDiffs[nodeID] = weightDiff @@ -1644,27 +1668,57 @@ func (s *state) writeCurrentSubnetStakers(height uint64) error { return fmt.Errorf("failed to serialize validator weight diff: %w", err) } - // Copy so value passed into [Put] doesn't get overwritten next - // iteration - nodeID := nodeID - if err := diffDB.Put(nodeID[:], weightDiffBytes); err != nil { + if err := weightDiffDB.Put(nodeID[:], weightDiffBytes); err != nil { return err } // TODO: Move the validator set management out of the state package - if s.cfg.WhitelistedSubnets.Contains(subnetID) { - if weightDiff.Decrease { - err = s.cfg.Validators.RemoveWeight(subnetID, nodeID, weightDiff.Amount) + if !updateValidators { + continue + } + + // We only track the current validator set of tracked subnets. + if subnetID != constants.PrimaryNetworkID && !s.cfg.TrackedSubnets.Contains(subnetID) { + continue + } + + if weightDiff.Decrease { + err = validators.RemoveWeight(s.cfg.Validators, subnetID, nodeID, weightDiff.Amount) + } else { + if validatorDiff.validatorStatus == added { + staker := validatorDiff.validator + err = validators.Add( + s.cfg.Validators, + subnetID, + nodeID, + staker.PublicKey, + staker.TxID, + weightDiff.Amount, + ) } else { - err = s.cfg.Validators.AddWeight(subnetID, nodeID, weightDiff.Amount) - } - if err != nil { - return fmt.Errorf("failed to update validator weight: %w", err) + err = validators.AddWeight(s.cfg.Validators, subnetID, nodeID, weightDiff.Amount) } } + if err != nil { + return fmt.Errorf("failed to update validator weight: %w", err) + } } - s.validatorDiffsCache.Put(string(prefixBytes), weightDiffs) + s.validatorWeightDiffsCache.Put(string(prefixBytes), weightDiffs) } + s.validatorPublicKeyDiffsCache.Put(height, pkDiffs) + + // TODO: Move validator set management out of the state package + // + // Attempt to update the stake metrics + if !updateValidators { + return nil + } + primaryValidators, ok := s.cfg.Validators.Get(constants.PrimaryNetworkID) + if !ok { 
+ return nil + } + s.metrics.SetLocalStake(primaryValidators.GetWeight(s.ctx.NodeID)) + s.metrics.SetTotalStake(primaryValidators.Weight()) return nil } @@ -1699,34 +1753,21 @@ func writeCurrentDelegatorDiff( return nil } -func (s *state) writePendingPrimaryNetworkStakers() error { - for _, validatorDiff := range s.pendingStakers.validatorDiffs[constants.PrimaryNetworkID] { - err := writePendingDiff( - s.pendingValidatorList, - s.pendingDelegatorList, - validatorDiff, - ) - if err != nil { - return err - } - } - return nil -} - -func (s *state) writePendingSubnetStakers() error { +func (s *state) writePendingStakers() error { for subnetID, subnetValidatorDiffs := range s.pendingStakers.validatorDiffs { delete(s.pendingStakers.validatorDiffs, subnetID) + validatorDB := s.pendingSubnetValidatorList + delegatorDB := s.pendingSubnetDelegatorList if subnetID == constants.PrimaryNetworkID { - // It is assumed that this case is handled separately before calling - // this function. - continue + validatorDB = s.pendingValidatorList + delegatorDB = s.pendingDelegatorList } for _, validatorDiff := range subnetValidatorDiffs { err := writePendingDiff( - s.pendingSubnetValidatorList, - s.pendingSubnetDelegatorList, + validatorDB, + delegatorDB, validatorDiff, ) if err != nil { @@ -1742,17 +1783,16 @@ func writePendingDiff( pendingDelegatorList linkeddb.LinkedDB, validatorDiff *diffValidator, ) error { - if validatorDiff.validatorModified { - staker := validatorDiff.validator - - var err error - if validatorDiff.validatorDeleted { - err = pendingValidatorList.Delete(staker.TxID[:]) - } else { - err = pendingValidatorList.Put(staker.TxID[:], nil) + switch validatorDiff.validatorStatus { + case added: + err := pendingValidatorList.Put(validatorDiff.validator.TxID[:], nil) + if err != nil { + return fmt.Errorf("failed to add pending validator: %w", err) } + case deleted: + err := pendingValidatorList.Delete(validatorDiff.validator.TxID[:]) if err != nil { - return 
fmt.Errorf("failed to update pending validator: %w", err) + return fmt.Errorf("failed to delete pending validator: %w", err) } } @@ -1774,30 +1814,6 @@ func writePendingDiff( return nil } -func (s *state) writeUptimes() error { - for nodeID := range s.updatedUptimes { - delete(s.updatedUptimes, nodeID) - - // skip default validator - if validators.IsDefaultValidator(nodeID) { - continue - } - - uptime := s.uptimes[nodeID] - uptime.LastUpdated = uint64(uptime.lastUpdated.Unix()) - - uptimeBytes, err := blocks.GenesisCodec.Marshal(blocks.Version, uptime) - if err != nil { - return fmt.Errorf("failed to serialize uptime: %w", err) - } - - if err := s.currentValidatorList.Put(uptime.txID[:], uptimeBytes); err != nil { - return fmt.Errorf("failed to write uptime: %w", err) - } - } - return nil -} - func (s *state) writeTXs() error { for txID, txStatus := range s.addedTxs { txID := txID @@ -1888,8 +1904,9 @@ func (s *state) writeTransformedSubnets() error { func (s *state) writeSubnetSupplies() error { for subnetID, supply := range s.modifiedSupplies { + supply := supply delete(s.modifiedSupplies, subnetID) - s.supplyCache.Put(subnetID, supply) + s.supplyCache.Put(subnetID, &supply) if err := database.PutUInt64(s.supplyDB, subnetID[:], supply); err != nil { return fmt.Errorf("failed to write subnet supply: %w", err) } diff --git a/avalanchego/vms/platformvm/state/state_test.go b/avalanchego/vms/platformvm/state/state_test.go index 17aa5d42..ef8cdb63 100644 --- a/avalanchego/vms/platformvm/state/state_test.go +++ b/avalanchego/vms/platformvm/state/state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -17,7 +17,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -27,7 +29,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -248,7 +249,174 @@ func TestGetValidatorWeightDiffs(t *testing.T) { require.Equal(expectedValidatorWeightDiffs, validatorWeightDiffs) } - state.validatorDiffsCache.Flush() + state.validatorWeightDiffsCache.Flush() + } + } +} + +func TestGetValidatorPublicKeyDiffs(t *testing.T) { + require := require.New(t) + stateIntf, _ := newInitializedState(require) + state := stateIntf.(*state) + + var ( + numNodes = 6 + txIDs = make([]ids.ID, numNodes) + nodeIDs = make([]ids.NodeID, numNodes) + sks = make([]*bls.SecretKey, numNodes) + pks = make([]*bls.PublicKey, numNodes) + pkBytes = make([][]byte, numNodes) + err error + ) + for i := 0; i < numNodes; i++ { + txIDs[i] = ids.GenerateTestID() + nodeIDs[i] = ids.GenerateTestNodeID() + sks[i], err = bls.NewSecretKey() + require.NoError(err) + pks[i] = bls.PublicFromSecretKey(sks[i]) + pkBytes[i] = bls.PublicKeyToBytes(pks[i]) + } + + type stakerDiff struct { + validatorsToAdd []*Staker + validatorsToRemove []*Staker + expectedPublicKeyDiffs map[ids.NodeID]*bls.PublicKey + } + stakerDiffs := []*stakerDiff{ + { + // Add two validators + validatorsToAdd: []*Staker{ + { + TxID: txIDs[0], + NodeID: nodeIDs[0], + Weight: 1, + PublicKey: pks[0], + }, + { + TxID: txIDs[1], + 
NodeID: nodeIDs[1], + Weight: 10, + PublicKey: pks[1], + }, + }, + expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{}, + }, + { + // Remove a validator + validatorsToRemove: []*Staker{ + { + TxID: txIDs[0], + NodeID: nodeIDs[0], + Weight: 1, + PublicKey: pks[0], + }, + }, + expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{ + nodeIDs[0]: pks[0], + }, + }, + { + // Add 2 validators and remove a validator + validatorsToAdd: []*Staker{ + { + TxID: txIDs[2], + NodeID: nodeIDs[2], + Weight: 10, + PublicKey: pks[2], + }, + { + TxID: txIDs[3], + NodeID: nodeIDs[3], + Weight: 10, + PublicKey: pks[3], + }, + }, + validatorsToRemove: []*Staker{ + { + TxID: txIDs[1], + NodeID: nodeIDs[1], + Weight: 10, + PublicKey: pks[1], + }, + }, + expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{ + nodeIDs[1]: pks[1], + }, + }, + { + // Remove 2 validators and add a validator + validatorsToAdd: []*Staker{ + { + TxID: txIDs[4], + NodeID: nodeIDs[4], + Weight: 10, + PublicKey: pks[4], + }, + }, + validatorsToRemove: []*Staker{ + { + TxID: txIDs[2], + NodeID: nodeIDs[2], + Weight: 10, + PublicKey: pks[2], + }, + { + TxID: txIDs[3], + NodeID: nodeIDs[3], + Weight: 10, + PublicKey: pks[3], + }, + }, + expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{ + nodeIDs[2]: pks[2], + nodeIDs[3]: pks[3], + }, + }, + { + // Add a validator with no pub key + validatorsToAdd: []*Staker{ + { + TxID: txIDs[5], + NodeID: nodeIDs[5], + Weight: 10, + PublicKey: nil, + }, + }, + expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{}, + }, + { + // Remove a validator with no pub key + validatorsToRemove: []*Staker{ + { + TxID: txIDs[5], + NodeID: nodeIDs[5], + Weight: 10, + PublicKey: nil, + }, + }, + expectedPublicKeyDiffs: map[ids.NodeID]*bls.PublicKey{}, + }, + } + + for i, stakerDiff := range stakerDiffs { + for _, validator := range stakerDiff.validatorsToAdd { + state.PutCurrentValidator(validator) + } + for _, validator := range stakerDiff.validatorsToRemove { + 
state.DeleteCurrentValidator(validator) + } + state.SetHeight(uint64(i + 1)) + require.NoError(state.Commit()) + + // Calling write again should not change the state. + state.SetHeight(uint64(i + 1)) + require.NoError(state.Commit()) + + for j, stakerDiff := range stakerDiffs[:i+1] { + pkDiffs, err := state.GetValidatorPublicKeyDiffs(uint64(j + 1)) + require.NoError(err) + require.Equal(stakerDiff.expectedPublicKeyDiffs, pkDiffs) + state.validatorPublicKeyDiffsCache.Flush() } } } @@ -257,7 +425,7 @@ func newInitializedState(require *require.Assertions) (State, database.Database) s, db := newUninitializedState(require) initialValidator := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: initialNodeID, Start: uint64(initialTime.Unix()), End: uint64(initialValidatorEndTime.Unix()), @@ -275,7 +443,7 @@ func newInitializedState(require *require.Assertions) (State, database.Database) DelegationShares: reward.PercentDenominator, } initialValidatorTx := &txs.Tx{Unsigned: initialValidator} - require.NoError(initialValidatorTx.Sign(txs.Codec, nil)) + require.NoError(initialValidatorTx.Initialize(txs.Codec)) initialChain := &txs.CreateChainTx{ SubnetID: constants.PrimaryNetworkID, @@ -284,7 +452,7 @@ func newInitializedState(require *require.Assertions) (State, database.Database) SubnetAuth: &secp256k1fx.Input{}, } initialChainTx := &txs.Tx{Unsigned: initialChain} - require.NoError(initialChainTx.Sign(txs.Codec, nil)) + require.NoError(initialChainTx.Initialize(txs.Codec)) genesisBlkID := ids.GenerateTestID() genesisState := &genesis.State{ @@ -324,8 +492,8 @@ func newUninitializedState(require *require.Assertions) (State, database.Databas func newStateFromDB(require *require.Assertions, db database.Database) State { vdrs := validators.NewManager() - require.NoError(vdrs.Set(constants.PrimaryNetworkID, validators.NewSet())) - + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) state, err := 
new( db, metrics.Noop, @@ -340,6 +508,7 @@ func newStateFromDB(require *require.Assertions, db database.Database) State { MintingPeriod: 365 * 24 * time.Hour, SupplyCap: 720 * units.MegaAvax, }), + &utils.Atomic[bool]{}, ) require.NoError(err) require.NotNil(state) @@ -486,3 +655,182 @@ func TestValidatorWeightDiff(t *testing.T) { }) } } + +// Tests PutCurrentValidator, DeleteCurrentValidator, GetCurrentValidator, +// GetValidatorWeightDiffs, GetValidatorPublicKeyDiffs +func TestStateAddRemoveValidator(t *testing.T) { + require := require.New(t) + + state, _ := newInitializedState(require) + + var ( + numNodes = 3 + subnetID = ids.GenerateTestID() + startTime = time.Now() + endTime = startTime.Add(24 * time.Hour) + stakers = make([]Staker, numNodes) + ) + for i := 0; i < numNodes; i++ { + stakers[i] = Staker{ + TxID: ids.GenerateTestID(), + NodeID: ids.GenerateTestNodeID(), + Weight: uint64(i + 1), + StartTime: startTime.Add(time.Duration(i) * time.Second), + EndTime: endTime.Add(time.Duration(i) * time.Second), + PotentialReward: uint64(i + 1), + } + if i%2 == 0 { + stakers[i].SubnetID = subnetID + } else { + sk, err := bls.NewSecretKey() + require.NoError(err) + stakers[i].PublicKey = bls.PublicFromSecretKey(sk) + stakers[i].SubnetID = constants.PrimaryNetworkID + } + } + + type diff struct { + added []Staker + removed []Staker + expectedSubnetWeightDiff map[ids.NodeID]*ValidatorWeightDiff + expectedPrimaryNetworkWeightDiff map[ids.NodeID]*ValidatorWeightDiff + expectedPublicKeyDiff map[ids.NodeID]*bls.PublicKey + } + diffs := []diff{ + { + // Add a subnet validator + added: []Staker{stakers[0]}, + expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{}, + expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + stakers[0].NodeID: { + Decrease: false, + Amount: stakers[0].Weight, + }, + }, + // No diff because this is a subnet validator + expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{}, + }, + { + // Remove a subnet validator + 
removed: []Staker{stakers[0]}, + expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{}, + expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + stakers[0].NodeID: { + Decrease: true, + Amount: stakers[0].Weight, + }, + }, + // No diff because this is a subnet validator + expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{}, + }, + { // Add a primary network validator + added: []Staker{stakers[1]}, + expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + stakers[1].NodeID: { + Decrease: false, + Amount: stakers[1].Weight, + }, + }, + expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{}, + expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{}, + }, + { // Remove a primary network validator + removed: []Staker{stakers[1]}, + expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + stakers[1].NodeID: { + Decrease: true, + Amount: stakers[1].Weight, + }, + }, + expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{}, + expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{ + stakers[1].NodeID: stakers[1].PublicKey, + }, + }, + { + // Add 2 subnet validators and a primary network validator + added: []Staker{stakers[0], stakers[1], stakers[2]}, + expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + stakers[1].NodeID: { + Decrease: false, + Amount: stakers[1].Weight, + }, + }, + expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + stakers[0].NodeID: { + Decrease: false, + Amount: stakers[0].Weight, + }, + stakers[2].NodeID: { + Decrease: false, + Amount: stakers[2].Weight, + }, + }, + expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{}, + }, + { + // Remove 2 subnet validators and a primary network validator. 
+ removed: []Staker{stakers[0], stakers[1], stakers[2]}, + expectedPrimaryNetworkWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + stakers[1].NodeID: { + Decrease: true, + Amount: stakers[1].Weight, + }, + }, + expectedSubnetWeightDiff: map[ids.NodeID]*ValidatorWeightDiff{ + stakers[0].NodeID: { + Decrease: true, + Amount: stakers[0].Weight, + }, + stakers[2].NodeID: { + Decrease: true, + Amount: stakers[2].Weight, + }, + }, + expectedPublicKeyDiff: map[ids.NodeID]*bls.PublicKey{ + stakers[1].NodeID: stakers[1].PublicKey, + }, + }, + } + + for i, diff := range diffs { + for _, added := range diff.added { + added := added + state.PutCurrentValidator(&added) + } + for _, removed := range diff.removed { + removed := removed + state.DeleteCurrentValidator(&removed) + } + + newHeight := uint64(i + 1) + state.SetHeight(newHeight) + + require.NoError(state.Commit()) + + for _, added := range diff.added { + gotValidator, err := state.GetCurrentValidator(added.SubnetID, added.NodeID) + require.NoError(err) + require.Equal(added, *gotValidator) + } + + for _, removed := range diff.removed { + _, err := state.GetCurrentValidator(removed.SubnetID, removed.NodeID) + require.ErrorIs(err, database.ErrNotFound) + } + + // Assert that we get the expected weight diffs + gotSubnetWeightDiffs, err := state.GetValidatorWeightDiffs(newHeight, subnetID) + require.NoError(err) + require.Equal(diff.expectedSubnetWeightDiff, gotSubnetWeightDiffs) + + gotWeightDiffs, err := state.GetValidatorWeightDiffs(newHeight, constants.PrimaryNetworkID) + require.NoError(err) + require.Equal(diff.expectedPrimaryNetworkWeightDiff, gotWeightDiffs) + + // Assert that we get the expected public key diff + gotPublicKeyDiffs, err := state.GetValidatorPublicKeyDiffs(newHeight) + require.NoError(err) + require.Equal(diff.expectedPublicKeyDiff, gotPublicKeyDiffs) + } +} diff --git a/avalanchego/vms/platformvm/state/tree_iterator.go b/avalanchego/vms/platformvm/state/tree_iterator.go index 315cd62c..a71b35e2 
100644 --- a/avalanchego/vms/platformvm/state/tree_iterator.go +++ b/avalanchego/vms/platformvm/state/tree_iterator.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -11,7 +11,7 @@ import ( const defaultTreeDegree = 2 -var _ StakerIterator = &treeIterator{} +var _ StakerIterator = (*treeIterator)(nil) type treeIterator struct { current *Staker @@ -23,7 +23,7 @@ type treeIterator struct { // NewTreeIterator returns a new iterator of the stakers in [tree] in ascending // order. Note that it isn't safe to modify [tree] while iterating over it. -func NewTreeIterator(tree *btree.BTree) StakerIterator { +func NewTreeIterator(tree *btree.BTreeG[*Staker]) StakerIterator { if tree == nil { return EmptyIterator } @@ -34,9 +34,9 @@ func NewTreeIterator(tree *btree.BTree) StakerIterator { it.wg.Add(1) go func() { defer it.wg.Done() - tree.Ascend(func(i btree.Item) bool { + tree.Ascend(func(i *Staker) bool { select { - case it.next <- i.(*Staker): + case it.next <- i: return true case <-it.release: return false diff --git a/avalanchego/vms/platformvm/state/tree_iterator_test.go b/avalanchego/vms/platformvm/state/tree_iterator_test.go index 16b8b2ee..57fa5727 100644 --- a/avalanchego/vms/platformvm/state/tree_iterator_test.go +++ b/avalanchego/vms/platformvm/state/tree_iterator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -31,7 +31,7 @@ func TestTreeIterator(t *testing.T) { }, } - tree := btree.New(defaultTreeDegree) + tree := btree.NewG(defaultTreeDegree, (*Staker).Less) for _, staker := range stakers { require.Nil(tree.ReplaceOrInsert(staker)) } @@ -46,9 +46,8 @@ func TestTreeIterator(t *testing.T) { } func TestTreeIteratorNil(t *testing.T) { - require := require.New(t) it := NewTreeIterator(nil) - require.False(it.Next()) + require.False(t, it.Next()) it.Release() } @@ -69,7 +68,7 @@ func TestTreeIteratorEarlyRelease(t *testing.T) { }, } - tree := btree.New(defaultTreeDegree) + tree := btree.NewG(defaultTreeDegree, (*Staker).Less) for _, staker := range stakers { require.Nil(tree.ReplaceOrInsert(staker)) } diff --git a/avalanchego/vms/platformvm/state/utxos.go b/avalanchego/vms/platformvm/state/utxos.go deleted file mode 100644 index e9ccff30..00000000 --- a/avalanchego/vms/platformvm/state/utxos.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package state - -import ( - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/components/avax" -) - -type UTXOGetter interface { - GetUTXO(utxoID ids.ID) (*avax.UTXO, error) -} - -type UTXOAdder interface { - AddUTXO(utxo *avax.UTXO) -} - -type UTXODeleter interface { - DeleteUTXO(utxoID ids.ID) -} diff --git a/avalanchego/vms/platformvm/state/validator_metadata.go b/avalanchego/vms/platformvm/state/validator_metadata.go new file mode 100644 index 00000000..a14b9331 --- /dev/null +++ b/avalanchego/vms/platformvm/state/validator_metadata.go @@ -0,0 +1,265 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/vms/platformvm/genesis" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +// preDelegateeRewardSize is the size of codec marshalling +// [preDelegateeRewardMetadata]. +// +// CodecVersionLen + UpDurationLen + LastUpdatedLen + PotentialRewardLen +const preDelegateeRewardSize = wrappers.ShortLen + 3*wrappers.LongLen + +var _ validatorState = (*metadata)(nil) + +type preDelegateeRewardMetadata struct { + UpDuration time.Duration `serialize:"true"` + LastUpdated uint64 `serialize:"true"` // Unix time in seconds + PotentialReward uint64 `serialize:"true"` +} + +type validatorMetadata struct { + UpDuration time.Duration `serialize:"true"` + LastUpdated uint64 `serialize:"true"` // Unix time in seconds + PotentialReward uint64 `serialize:"true"` + PotentialDelegateeReward uint64 `serialize:"true"` + + txID ids.ID + lastUpdated time.Time +} + +// Permissioned validators originally wrote their values as nil. +// With Banff we wrote the potential reward. +// With Cortina we wrote the potential reward with the potential delegatee reward. +// We now write the uptime, reward, and delegatee reward together. 
+func parseValidatorMetadata(bytes []byte, metadata *validatorMetadata) error { + switch len(bytes) { + case 0: + // nothing was stored + + case database.Uint64Size: + // only potential reward was stored + var err error + metadata.PotentialReward, err = database.ParseUInt64(bytes) + if err != nil { + return err + } + + case preDelegateeRewardSize: + // potential reward and uptime was stored but potential delegatee reward + // was not + tmp := preDelegateeRewardMetadata{} + if _, err := txs.Codec.Unmarshal(bytes, &tmp); err != nil { + return err + } + + metadata.UpDuration = tmp.UpDuration + metadata.LastUpdated = tmp.LastUpdated + metadata.PotentialReward = tmp.PotentialReward + default: + // everything was stored + if _, err := txs.Codec.Unmarshal(bytes, metadata); err != nil { + return err + } + } + metadata.lastUpdated = time.Unix(int64(metadata.LastUpdated), 0) + return nil +} + +type validatorState interface { + // LoadValidatorMetadata sets the [metadata] of [vdrID] on [subnetID]. + // GetUptime and SetUptime will return an error if the [vdrID] and + // [subnetID] hasn't been loaded. This call will not result in a write to + // disk. + LoadValidatorMetadata( + vdrID ids.NodeID, + subnetID ids.ID, + metadata *validatorMetadata, + ) + + // GetUptime returns the current uptime measurements of [vdrID] on + // [subnetID]. + GetUptime( + vdrID ids.NodeID, + subnetID ids.ID, + ) (upDuration time.Duration, lastUpdated time.Time, err error) + + // SetUptime updates the uptime measurements of [vdrID] on [subnetID]. + // Unless these measurements are deleted first, the next call to + // WriteUptimes will write this update to disk. + SetUptime( + vdrID ids.NodeID, + subnetID ids.ID, + upDuration time.Duration, + lastUpdated time.Time, + ) error + + // GetDelegateeReward returns the current rewards accrued to [vdrID] on + // [subnetID]. 
+ GetDelegateeReward( + subnetID ids.ID, + vdrID ids.NodeID, + ) (amount uint64, err error) + + // SetDelegateeReward updates the rewards accrued to [vdrID] on [subnetID]. + // Unless these measurements are deleted first, the next call to + // WriteUptimes will write this update to disk. + SetDelegateeReward( + subnetID ids.ID, + vdrID ids.NodeID, + amount uint64, + ) error + + // DeleteValidatorMetadata removes in-memory references to the metadata of + // [vdrID] on [subnetID]. If there were staged updates from a prior call to + // SetUptime or SetDelegateeReward, the updates will be dropped. This call + // will not result in a write to disk. + DeleteValidatorMetadata(vdrID ids.NodeID, subnetID ids.ID) + + // WriteValidatorMetadata writes all staged updates from prior calls to + // SetUptime or SetDelegateeReward. + WriteValidatorMetadata( + dbPrimary database.KeyValueWriter, + dbSubnet database.KeyValueWriter, + ) error +} + +type metadata struct { + metadata map[ids.NodeID]map[ids.ID]*validatorMetadata // vdrID -> subnetID -> metadata + // updatedMetadata tracks the updates since WriteValidatorMetadata was last called + updatedMetadata map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs +} + +func newValidatorState() validatorState { + return &metadata{ + metadata: make(map[ids.NodeID]map[ids.ID]*validatorMetadata), + updatedMetadata: make(map[ids.NodeID]set.Set[ids.ID]), + } +} + +func (m *metadata) LoadValidatorMetadata( + vdrID ids.NodeID, + subnetID ids.ID, + uptime *validatorMetadata, +) { + subnetMetadata, ok := m.metadata[vdrID] + if !ok { + subnetMetadata = make(map[ids.ID]*validatorMetadata) + m.metadata[vdrID] = subnetMetadata + } + subnetMetadata[subnetID] = uptime +} + +func (m *metadata) GetUptime( + vdrID ids.NodeID, + subnetID ids.ID, +) (time.Duration, time.Time, error) { + metadata, exists := m.metadata[vdrID][subnetID] + if !exists { + return 0, time.Time{}, database.ErrNotFound + } + return metadata.UpDuration, metadata.lastUpdated, nil +} + 
+func (m *metadata) SetUptime( + vdrID ids.NodeID, + subnetID ids.ID, + upDuration time.Duration, + lastUpdated time.Time, +) error { + metadata, exists := m.metadata[vdrID][subnetID] + if !exists { + return database.ErrNotFound + } + metadata.UpDuration = upDuration + metadata.lastUpdated = lastUpdated + + m.addUpdatedMetadata(vdrID, subnetID) + return nil +} + +func (m *metadata) GetDelegateeReward( + subnetID ids.ID, + vdrID ids.NodeID, +) (uint64, error) { + metadata, exists := m.metadata[vdrID][subnetID] + if !exists { + return 0, database.ErrNotFound + } + return metadata.PotentialDelegateeReward, nil +} + +func (m *metadata) SetDelegateeReward( + subnetID ids.ID, + vdrID ids.NodeID, + amount uint64, +) error { + metadata, exists := m.metadata[vdrID][subnetID] + if !exists { + return database.ErrNotFound + } + metadata.PotentialDelegateeReward = amount + + m.addUpdatedMetadata(vdrID, subnetID) + return nil +} + +func (m *metadata) DeleteValidatorMetadata(vdrID ids.NodeID, subnetID ids.ID) { + subnetMetadata := m.metadata[vdrID] + delete(subnetMetadata, subnetID) + if len(subnetMetadata) == 0 { + delete(m.metadata, vdrID) + } + + subnetUpdatedMetadata := m.updatedMetadata[vdrID] + subnetUpdatedMetadata.Remove(subnetID) + if subnetUpdatedMetadata.Len() == 0 { + delete(m.updatedMetadata, vdrID) + } +} + +func (m *metadata) WriteValidatorMetadata( + dbPrimary database.KeyValueWriter, + dbSubnet database.KeyValueWriter, +) error { + for vdrID, updatedSubnets := range m.updatedMetadata { + for subnetID := range updatedSubnets { + metadata := m.metadata[vdrID][subnetID] + metadata.LastUpdated = uint64(metadata.lastUpdated.Unix()) + + metadataBytes, err := genesis.Codec.Marshal(txs.Version, metadata) + if err != nil { + return err + } + db := dbSubnet + if subnetID == constants.PrimaryNetworkID { + db = dbPrimary + } + if err := db.Put(metadata.txID[:], metadataBytes); err != nil { + return err + } + } + delete(m.updatedMetadata, vdrID) + } + return nil +} + +func (m 
*metadata) addUpdatedMetadata(vdrID ids.NodeID, subnetID ids.ID) { + updatedSubnetMetadata, ok := m.updatedMetadata[vdrID] + if !ok { + updatedSubnetMetadata = set.Set[ids.ID]{} + m.updatedMetadata[vdrID] = updatedSubnetMetadata + } + updatedSubnetMetadata.Add(subnetID) +} diff --git a/avalanchego/vms/platformvm/state/validator_metadata_test.go b/avalanchego/vms/platformvm/state/validator_metadata_test.go new file mode 100644 index 00000000..15fc983a --- /dev/null +++ b/avalanchego/vms/platformvm/state/validator_metadata_test.go @@ -0,0 +1,303 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package state + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" +) + +func TestValidatorUptimes(t *testing.T) { + require := require.New(t) + state := newValidatorState() + + // get non-existent uptime + nodeID := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() + _, _, err := state.GetUptime(nodeID, subnetID) + require.ErrorIs(err, database.ErrNotFound) + + // set non-existent uptime + err = state.SetUptime(nodeID, subnetID, 1, time.Now()) + require.ErrorIs(err, database.ErrNotFound) + + testMetadata := &validatorMetadata{ + UpDuration: time.Hour, + lastUpdated: time.Now(), + } + // load uptime + state.LoadValidatorMetadata(nodeID, subnetID, testMetadata) + + // get uptime + upDuration, lastUpdated, err := state.GetUptime(nodeID, subnetID) + require.NoError(err) + require.Equal(testMetadata.UpDuration, upDuration) + require.Equal(testMetadata.lastUpdated, lastUpdated) + + // set uptime + newUpDuration := testMetadata.UpDuration + 1 + newLastUpdated := testMetadata.lastUpdated.Add(time.Hour) + err = state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated) + require.NoError(err) + + // get new uptime + upDuration, lastUpdated, err = 
state.GetUptime(nodeID, subnetID) + require.NoError(err) + require.Equal(newUpDuration, upDuration) + require.Equal(newLastUpdated, lastUpdated) + + // load uptime changes uptimes + newTestMetadata := &validatorMetadata{ + UpDuration: testMetadata.UpDuration + time.Hour, + lastUpdated: testMetadata.lastUpdated.Add(time.Hour), + } + state.LoadValidatorMetadata(nodeID, subnetID, newTestMetadata) + + // get new uptime + upDuration, lastUpdated, err = state.GetUptime(nodeID, subnetID) + require.NoError(err) + require.Equal(newTestMetadata.UpDuration, upDuration) + require.Equal(newTestMetadata.lastUpdated, lastUpdated) + + // delete uptime + state.DeleteValidatorMetadata(nodeID, subnetID) + + // get deleted uptime + _, _, err = state.GetUptime(nodeID, subnetID) + require.ErrorIs(err, database.ErrNotFound) +} + +func TestWriteValidatorMetadata(t *testing.T) { + require := require.New(t) + state := newValidatorState() + + primaryDB := memdb.New() + subnetDB := memdb.New() + // write empty uptimes + err := state.WriteValidatorMetadata(primaryDB, subnetDB) + require.NoError(err) + + // load uptime + nodeID := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() + testUptimeReward := &validatorMetadata{ + UpDuration: time.Hour, + lastUpdated: time.Now(), + PotentialReward: 100, + txID: ids.GenerateTestID(), + } + state.LoadValidatorMetadata(nodeID, subnetID, testUptimeReward) + + // write state, should not reflect to DB yet + err = state.WriteValidatorMetadata(primaryDB, subnetDB) + require.NoError(err) + require.False(primaryDB.Has(testUptimeReward.txID[:])) + require.False(subnetDB.Has(testUptimeReward.txID[:])) + + // get uptime should still return the loaded value + upDuration, lastUpdated, err := state.GetUptime(nodeID, subnetID) + require.NoError(err) + require.Equal(testUptimeReward.UpDuration, upDuration) + require.Equal(testUptimeReward.lastUpdated, lastUpdated) + + // update uptimes + newUpDuration := testUptimeReward.UpDuration + 1 + newLastUpdated := 
testUptimeReward.lastUpdated.Add(time.Hour) + err = state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated) + require.NoError(err) + + // write uptimes, should reflect to subnet DB + err = state.WriteValidatorMetadata(primaryDB, subnetDB) + require.NoError(err) + require.False(primaryDB.Has(testUptimeReward.txID[:])) + require.True(subnetDB.Has(testUptimeReward.txID[:])) +} + +func TestValidatorDelegateeRewards(t *testing.T) { + require := require.New(t) + state := newValidatorState() + + // get non-existent delegatee reward + nodeID := ids.GenerateTestNodeID() + subnetID := ids.GenerateTestID() + _, err := state.GetDelegateeReward(subnetID, nodeID) + require.ErrorIs(err, database.ErrNotFound) + + // set non-existent delegatee reward + err = state.SetDelegateeReward(subnetID, nodeID, 100000) + require.ErrorIs(err, database.ErrNotFound) + + testMetadata := &validatorMetadata{ + PotentialDelegateeReward: 100000, + } + // load delegatee reward + state.LoadValidatorMetadata(nodeID, subnetID, testMetadata) + + // get delegatee reward + delegateeReward, err := state.GetDelegateeReward(subnetID, nodeID) + require.NoError(err) + require.Equal(testMetadata.PotentialDelegateeReward, delegateeReward) + + // set delegatee reward + newDelegateeReward := testMetadata.PotentialDelegateeReward + 100000 + err = state.SetDelegateeReward(subnetID, nodeID, newDelegateeReward) + require.NoError(err) + + // get new delegatee reward + delegateeReward, err = state.GetDelegateeReward(subnetID, nodeID) + require.NoError(err) + require.Equal(newDelegateeReward, delegateeReward) + + // load delegatee reward changes + newTestMetadata := &validatorMetadata{ + PotentialDelegateeReward: testMetadata.PotentialDelegateeReward + 100000, + } + state.LoadValidatorMetadata(nodeID, subnetID, newTestMetadata) + + // get new delegatee reward + delegateeReward, err = state.GetDelegateeReward(subnetID, nodeID) + require.NoError(err) + require.Equal(newTestMetadata.PotentialDelegateeReward, 
delegateeReward) + + // delete delegatee reward + state.DeleteValidatorMetadata(nodeID, subnetID) + + // get deleted delegatee reward + _, _, err = state.GetUptime(nodeID, subnetID) + require.ErrorIs(err, database.ErrNotFound) +} + +func TestParseValidatorMetadata(t *testing.T) { + type test struct { + name string + bytes []byte + expected *validatorMetadata + shouldErr bool + } + tests := []test{ + { + name: "nil", + bytes: nil, + expected: &validatorMetadata{ + lastUpdated: time.Unix(0, 0), + }, + shouldErr: false, + }, + { + name: "nil", + bytes: []byte{}, + expected: &validatorMetadata{ + lastUpdated: time.Unix(0, 0), + }, + shouldErr: false, + }, + { + name: "potential reward only", + bytes: []byte{ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, + }, + expected: &validatorMetadata{ + PotentialReward: 100000, + lastUpdated: time.Unix(0, 0), + }, + shouldErr: false, + }, + { + name: "uptime + potential reward", + bytes: []byte{ + // codec version + 0x00, 0x00, + // up duration + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, + // last updated + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, + }, + expected: &validatorMetadata{ + UpDuration: time.Duration(6000000), + LastUpdated: 900000, + PotentialReward: 100000, + lastUpdated: time.Unix(900000, 0), + }, + shouldErr: false, + }, + { + name: "uptime + potential reward + potential delegatee reward", + bytes: []byte{ + // codec version + 0x00, 0x00, + // up duration + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, + // last updated + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, + // potential delegatee reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4E, 0x20, + }, + expected: &validatorMetadata{ + UpDuration: time.Duration(6000000), + LastUpdated: 900000, + PotentialReward: 100000, + PotentialDelegateeReward: 20000, + lastUpdated: time.Unix(900000, 0), + }, + 
shouldErr: false, + }, + { + name: "invalid codec version", + bytes: []byte{ + // codec version + 0x00, 0x01, + // up duration + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, + // last updated + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, + // potential delegatee reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4E, 0x20, + }, + expected: nil, + shouldErr: true, + }, + { + name: "short byte len", + bytes: []byte{ + // codec version + 0x00, 0x00, + // up duration + 0x00, 0x00, 0x00, 0x00, 0x00, 0x5B, 0x8D, 0x80, + // last updated + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0D, 0xBB, 0xA0, + // potential reward + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x86, 0xA0, + // potential delegatee reward + 0x00, 0x00, 0x00, 0x00, 0x4E, 0x20, + }, + expected: nil, + shouldErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + var metadata validatorMetadata + err := parseValidatorMetadata(tt.bytes, &metadata) + if tt.shouldErr { + require.Error(err) + } else { + require.NoError(err) + require.Equal(tt.expected, &metadata) + } + }) + } +} diff --git a/avalanchego/vms/platformvm/state/validator_uptimes.go b/avalanchego/vms/platformvm/state/validator_uptimes.go new file mode 100644 index 00000000..30be7243 --- /dev/null +++ b/avalanchego/vms/platformvm/state/validator_uptimes.go @@ -0,0 +1,165 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package state + +import ( + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/vms/platformvm/genesis" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" +) + +var _ validatorUptimes = (*uptimes)(nil) + +type uptimeAndReward struct { + UpDuration time.Duration `serialize:"true"` + LastUpdated uint64 `serialize:"true"` // Unix time in seconds + PotentialReward uint64 `serialize:"true"` + + txID ids.ID + lastUpdated time.Time +} + +type validatorUptimes interface { + // LoadUptime sets the uptime measurements of [vdrID] on [subnetID] to + // [uptime]. GetUptime and SetUptime will return an error if the [vdrID] and + // [subnetID] hasn't been loaded. This call will not result in a write to disk. + LoadUptime( + vdrID ids.NodeID, + subnetID ids.ID, + uptime *uptimeAndReward, + ) + + // GetUptime returns the current uptime measurements of [vdrID] on + // [subnetID]. + GetUptime( + vdrID ids.NodeID, + subnetID ids.ID, + ) (upDuration time.Duration, lastUpdated time.Time, err error) + + // SetUptime updates the uptime measurements of [vdrID] on [subnetID]. + // Unless these measurements are deleted first, the next call to + // WriteUptimes will write this update to disk. + SetUptime( + vdrID ids.NodeID, + subnetID ids.ID, + upDuration time.Duration, + lastUpdated time.Time, + ) error + + // DeleteUptime removes in-memory references to the uptimes measurements of + // [vdrID] on [subnetID]. If there were staged updates from a prior call to + // SetUptime, the updates will be dropped. This call will not result in a + // write to disk. + DeleteUptime(vdrID ids.NodeID, subnetID ids.ID) + + // WriteUptimes writes all staged updates from a prior call to SetUptime. 
+ WriteUptimes( + dbPrimary database.KeyValueWriter, + dbSubnet database.KeyValueWriter, + ) error +} + +type uptimes struct { + uptimes map[ids.NodeID]map[ids.ID]*uptimeAndReward // vdrID -> subnetID -> uptimes + // updatedUptimes tracks the updates since the last call to WriteUptimes + updatedUptimes map[ids.NodeID]set.Set[ids.ID] // vdrID -> subnetIDs +} + +func newValidatorUptimes() validatorUptimes { + return &uptimes{ + uptimes: make(map[ids.NodeID]map[ids.ID]*uptimeAndReward), + updatedUptimes: make(map[ids.NodeID]set.Set[ids.ID]), + } +} + +func (u *uptimes) LoadUptime( + vdrID ids.NodeID, + subnetID ids.ID, + uptime *uptimeAndReward, +) { + subnetUptimes, ok := u.uptimes[vdrID] + if !ok { + subnetUptimes = make(map[ids.ID]*uptimeAndReward) + u.uptimes[vdrID] = subnetUptimes + } + subnetUptimes[subnetID] = uptime +} + +func (u *uptimes) GetUptime( + vdrID ids.NodeID, + subnetID ids.ID, +) (upDuration time.Duration, lastUpdated time.Time, err error) { + uptime, exists := u.uptimes[vdrID][subnetID] + if !exists { + return 0, time.Time{}, database.ErrNotFound + } + return uptime.UpDuration, uptime.lastUpdated, nil +} + +func (u *uptimes) SetUptime( + vdrID ids.NodeID, + subnetID ids.ID, + upDuration time.Duration, + lastUpdated time.Time, +) error { + uptime, exists := u.uptimes[vdrID][subnetID] + if !exists { + return database.ErrNotFound + } + uptime.UpDuration = upDuration + uptime.lastUpdated = lastUpdated + + updatedSubnetUptimes, ok := u.updatedUptimes[vdrID] + if !ok { + updatedSubnetUptimes = set.Set[ids.ID]{} + u.updatedUptimes[vdrID] = updatedSubnetUptimes + } + updatedSubnetUptimes.Add(subnetID) + return nil +} + +func (u *uptimes) DeleteUptime(vdrID ids.NodeID, subnetID ids.ID) { + subnetUptimes := u.uptimes[vdrID] + delete(subnetUptimes, subnetID) + if len(subnetUptimes) == 0 { + delete(u.uptimes, vdrID) + } + + subnetUpdatedUptimes := u.updatedUptimes[vdrID] + delete(subnetUpdatedUptimes, subnetID) + if len(subnetUpdatedUptimes) == 0 { + 
delete(u.updatedUptimes, vdrID) + } +} + +func (u *uptimes) WriteUptimes( + dbPrimary database.KeyValueWriter, + dbSubnet database.KeyValueWriter, +) error { + for vdrID, updatedSubnets := range u.updatedUptimes { + for subnetID := range updatedSubnets { + uptime := u.uptimes[vdrID][subnetID] + uptime.LastUpdated = uint64(uptime.lastUpdated.Unix()) + + uptimeBytes, err := genesis.Codec.Marshal(txs.Version, uptime) + if err != nil { + return err + } + db := dbSubnet + if subnetID == constants.PrimaryNetworkID { + db = dbPrimary + } + if err := db.Put(uptime.txID[:], uptimeBytes); err != nil { + return err + } + } + delete(u.updatedUptimes, vdrID) + } + return nil +} diff --git a/avalanchego/vms/platformvm/state/versions.go b/avalanchego/vms/platformvm/state/versions.go index 3668da30..dc2c3527 100644 --- a/avalanchego/vms/platformvm/state/versions.go +++ b/avalanchego/vms/platformvm/state/versions.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -8,5 +8,7 @@ import ( ) type Versions interface { + // GetState returns the state of the chain after [blkID] has been accepted. + // If the state is not known, `false` will be returned. GetState(blkID ids.ID) (Chain, bool) } diff --git a/avalanchego/vms/platformvm/status/blockchain_status.go b/avalanchego/vms/platformvm/status/blockchain_status.go index 6f878ab4..7866e0eb 100644 --- a/avalanchego/vms/platformvm/status/blockchain_status.go +++ b/avalanchego/vms/platformvm/status/blockchain_status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package status diff --git a/avalanchego/vms/platformvm/status/blockchain_status_test.go b/avalanchego/vms/platformvm/status/blockchain_status_test.go index ce76b266..e4828e68 100644 --- a/avalanchego/vms/platformvm/status/blockchain_status_test.go +++ b/avalanchego/vms/platformvm/status/blockchain_status_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status diff --git a/avalanchego/vms/platformvm/status/status.go b/avalanchego/vms/platformvm/status/status.go index 8af00df4..a67fb6c3 100644 --- a/avalanchego/vms/platformvm/status/status.go +++ b/avalanchego/vms/platformvm/status/status.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status diff --git a/avalanchego/vms/platformvm/status/status_test.go b/avalanchego/vms/platformvm/status/status_test.go index 0b9e4066..a97552c1 100644 --- a/avalanchego/vms/platformvm/status/status_test.go +++ b/avalanchego/vms/platformvm/status/status_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package status diff --git a/avalanchego/vms/platformvm/txs/add_delegator_test.go b/avalanchego/vms/platformvm/txs/add_delegator_test.go index fdf5f2f9..45e4872b 100644 --- a/avalanchego/vms/platformvm/txs/add_delegator_test.go +++ b/avalanchego/vms/platformvm/txs/add_delegator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -11,22 +11,21 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var preFundedKeys = crypto.BuildTestKeys() +var preFundedKeys = secp256k1.TestKeys() func TestAddDelegatorTxSyntacticVerify(t *testing.T) { require := require.New(t) clk := mockable.Clock{} ctx := snow.DefaultContextTest() ctx.AVAXAssetID = ids.GenerateTestID() - signers := [][]*crypto.PrivateKeySECP256K1R{preFundedKeys} + signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( stx *Tx @@ -83,7 +82,7 @@ func TestAddDelegatorTxSyntacticVerify(t *testing.T) { Ins: inputs, Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, }}, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), @@ -117,11 +116,11 @@ func TestAddDelegatorTxSyntacticVerify(t *testing.T) { // Case: delegator weight is not equal to total stake weight addDelegatorTx.SyntacticallyVerified = false - addDelegatorTx.Validator.Wght = 2 * validatorWeight + addDelegatorTx.Wght = 2 * validatorWeight stx, err = NewSigned(addDelegatorTx, Codec, signers) require.NoError(err) require.ErrorIs(stx.SyntacticVerify(ctx), errDelegatorWeightMismatch) - addDelegatorTx.Validator.Wght = validatorWeight + addDelegatorTx.Wght = validatorWeight } func TestAddDelegatorTxSyntacticVerifyNotAVAX(t *testing.T) { @@ -129,7 +128,7 @@ func TestAddDelegatorTxSyntacticVerifyNotAVAX(t *testing.T) { clk := mockable.Clock{} ctx := snow.DefaultContextTest() ctx.AVAXAssetID = ids.GenerateTestID() - signers := 
[][]*crypto.PrivateKeySECP256K1R{preFundedKeys} + signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( stx *Tx @@ -181,7 +180,7 @@ func TestAddDelegatorTxSyntacticVerifyNotAVAX(t *testing.T) { Ins: inputs, Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, }}, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), diff --git a/avalanchego/vms/platformvm/txs/add_delegator_tx.go b/avalanchego/vms/platformvm/txs/add_delegator_tx.go index ae206a8f..a44ddebb 100644 --- a/avalanchego/vms/platformvm/txs/add_delegator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_delegator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -6,21 +6,20 @@ package txs import ( "errors" "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) var ( - _ DelegatorTx = &AddDelegatorTx{} + _ DelegatorTx = (*AddDelegatorTx)(nil) errDelegatorWeightMismatch = errors.New("delegator weight is not equal to total stake weight") ) @@ -30,7 +29,7 @@ type AddDelegatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the delegatee - Validator validator.Validator `serialize:"true" json:"validator"` + Validator `serialize:"true" json:"validator"` // Where to send staked tokens when done validating StakeOuts []*avax.TransferableOutput `serialize:"true" json:"stake"` // Where 
to send staking rewards when done validating @@ -49,17 +48,33 @@ func (tx *AddDelegatorTx) InitCtx(ctx *snow.Context) { tx.DelegationRewardsOwner.InitCtx(ctx) } -func (tx *AddDelegatorTx) SubnetID() ids.ID { return constants.PrimaryNetworkID } -func (tx *AddDelegatorTx) NodeID() ids.NodeID { return tx.Validator.NodeID } -func (tx *AddDelegatorTx) StartTime() time.Time { return tx.Validator.StartTime() } -func (tx *AddDelegatorTx) EndTime() time.Time { return tx.Validator.EndTime() } -func (tx *AddDelegatorTx) Weight() uint64 { return tx.Validator.Wght } -func (tx *AddDelegatorTx) PendingPriority() Priority { +func (*AddDelegatorTx) SubnetID() ids.ID { + return constants.PrimaryNetworkID +} + +func (tx *AddDelegatorTx) NodeID() ids.NodeID { + return tx.Validator.NodeID +} + +func (*AddDelegatorTx) PublicKey() (*bls.PublicKey, bool, error) { + return nil, false, nil +} + +func (*AddDelegatorTx) PendingPriority() Priority { return PrimaryNetworkDelegatorApricotPendingPriority } -func (tx *AddDelegatorTx) CurrentPriority() Priority { return PrimaryNetworkDelegatorCurrentPriority } -func (tx *AddDelegatorTx) Stake() []*avax.TransferableOutput { return tx.StakeOuts } -func (tx *AddDelegatorTx) RewardsOwner() fx.Owner { return tx.DelegationRewardsOwner } + +func (*AddDelegatorTx) CurrentPriority() Priority { + return PrimaryNetworkDelegatorCurrentPriority +} + +func (tx *AddDelegatorTx) Stake() []*avax.TransferableOutput { + return tx.StakeOuts +} + +func (tx *AddDelegatorTx) RewardsOwner() fx.Owner { + return tx.DelegationRewardsOwner +} // SyntacticVerify returns nil iff [tx] is valid func (tx *AddDelegatorTx) SyntacticVerify(ctx *snow.Context) error { @@ -97,10 +112,10 @@ func (tx *AddDelegatorTx) SyntacticVerify(ctx *snow.Context) error { switch { case !avax.IsSortedTransferableOutputs(tx.StakeOuts, Codec): return errOutputsNotSorted - case totalStakeWeight != tx.Validator.Wght: + case totalStakeWeight != tx.Wght: return fmt.Errorf("%w, delegator weight %d total stake 
weight %d", errDelegatorWeightMismatch, - tx.Validator.Wght, + tx.Wght, totalStakeWeight, ) } diff --git a/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx.go b/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx.go index eadd23ee..43db685d 100644 --- a/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx.go @@ -1,31 +1,30 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ DelegatorTx = &AddPermissionlessDelegatorTx{} +var _ DelegatorTx = (*AddPermissionlessDelegatorTx)(nil) // AddPermissionlessDelegatorTx is an unsigned addPermissionlessDelegatorTx type AddPermissionlessDelegatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the validator - Validator validator.Validator `serialize:"true" json:"validator"` + Validator `serialize:"true" json:"validator"` // ID of the subnet this validator is validating Subnet ids.ID `serialize:"true" json:"subnetID"` // Where to send staked tokens when done validating @@ -46,11 +45,17 @@ func (tx *AddPermissionlessDelegatorTx) InitCtx(ctx *snow.Context) { tx.DelegationRewardsOwner.InitCtx(ctx) } -func (tx *AddPermissionlessDelegatorTx) SubnetID() ids.ID { return tx.Subnet } -func (tx *AddPermissionlessDelegatorTx) NodeID() ids.NodeID { 
return tx.Validator.NodeID } -func (tx *AddPermissionlessDelegatorTx) StartTime() time.Time { return tx.Validator.StartTime() } -func (tx *AddPermissionlessDelegatorTx) EndTime() time.Time { return tx.Validator.EndTime() } -func (tx *AddPermissionlessDelegatorTx) Weight() uint64 { return tx.Validator.Wght } +func (tx *AddPermissionlessDelegatorTx) SubnetID() ids.ID { + return tx.Subnet +} + +func (tx *AddPermissionlessDelegatorTx) NodeID() ids.NodeID { + return tx.Validator.NodeID +} + +func (*AddPermissionlessDelegatorTx) PublicKey() (*bls.PublicKey, bool, error) { + return nil, false, nil +} func (tx *AddPermissionlessDelegatorTx) PendingPriority() Priority { if tx.Subnet == constants.PrimaryNetworkID { @@ -117,10 +122,10 @@ func (tx *AddPermissionlessDelegatorTx) SyntacticVerify(ctx *snow.Context) error switch { case !avax.IsSortedTransferableOutputs(tx.StakeOuts, Codec): return errOutputsNotSorted - case totalStakeWeight != tx.Validator.Wght: + case totalStakeWeight != tx.Wght: return fmt.Errorf("%w, delegator weight %d total stake weight %d", errDelegatorWeightMismatch, - tx.Validator.Wght, + tx.Wght, totalStakeWeight, ) } diff --git a/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx_test.go b/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx_test.go index e921b585..2502cdea 100644 --- a/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx_test.go +++ b/avalanchego/vms/platformvm/txs/add_permissionless_delegator_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -17,10 +17,11 @@ import ( "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/fx" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) +var errCustom = errors.New("custom error") + func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { type test struct { name string @@ -54,8 +55,6 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { // A BaseTx that fails syntactic verification. invalidBaseTx := BaseTx{} - errCustom := errors.New("custom error") - tests := []test{ { name: "nil tx", @@ -90,7 +89,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(errCustom) return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -119,7 +118,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { stakeOut.EXPECT().Verify().Return(errCustom) return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -143,7 +142,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -178,7 +177,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -213,7 +212,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return 
&AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 1, }, Subnet: ids.GenerateTestID(), @@ -248,7 +247,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 2, }, Subnet: ids.GenerateTestID(), @@ -283,7 +282,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ Wght: 2, }, Subnet: constants.PrimaryNetworkID, @@ -314,21 +313,19 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() tx := tt.txFunc(ctrl) err := tx.SyntacticVerify(ctx) - require.ErrorIs(err, tt.err) + require.ErrorIs(t, err, tt.err) }) } t.Run("invalid BaseTx", func(t *testing.T) { - require := require.New(t) tx := &AddPermissionlessDelegatorTx{ BaseTx: invalidBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), }, StakeOuts: []*avax.TransferableOutput{ @@ -343,11 +340,10 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { }, } err := tx.SyntacticVerify(ctx) - require.Error(err) + require.Error(t, err) }) t.Run("stake overflow", func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -356,7 +352,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() tx := &AddPermissionlessDelegatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -382,7 +378,7 @@ func TestAddPermissionlessDelegatorTxSyntacticVerify(t 
*testing.T) { DelegationRewardsOwner: rewardsOwner, } err := tx.SyntacticVerify(ctx) - require.Error(err) + require.Error(t, err) }) } diff --git a/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx.go b/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx.go index 33fb6396..8f313ae0 100644 --- a/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -6,23 +6,22 @@ package txs import ( "errors" "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) var ( - _ ValidatorTx = &AddPermissionlessValidatorTx{} + _ ValidatorTx = (*AddPermissionlessValidatorTx)(nil) errEmptyNodeID = errors.New("validator nodeID cannot be empty") errNoStake = errors.New("no stake") @@ -36,7 +35,7 @@ type AddPermissionlessValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the validator - Validator validator.Validator `serialize:"true" json:"validator"` + Validator `serialize:"true" json:"validator"` // ID of the subnet this validator is validating Subnet ids.ID `serialize:"true" json:"subnetID"` // If the [Subnet] is the primary network, [Signer] is the BLS key for this @@ -71,11 +70,21 
@@ func (tx *AddPermissionlessValidatorTx) InitCtx(ctx *snow.Context) { tx.DelegatorRewardsOwner.InitCtx(ctx) } -func (tx *AddPermissionlessValidatorTx) SubnetID() ids.ID { return tx.Subnet } -func (tx *AddPermissionlessValidatorTx) NodeID() ids.NodeID { return tx.Validator.NodeID } -func (tx *AddPermissionlessValidatorTx) StartTime() time.Time { return tx.Validator.StartTime() } -func (tx *AddPermissionlessValidatorTx) EndTime() time.Time { return tx.Validator.EndTime() } -func (tx *AddPermissionlessValidatorTx) Weight() uint64 { return tx.Validator.Wght } +func (tx *AddPermissionlessValidatorTx) SubnetID() ids.ID { + return tx.Subnet +} + +func (tx *AddPermissionlessValidatorTx) NodeID() ids.NodeID { + return tx.Validator.NodeID +} + +func (tx *AddPermissionlessValidatorTx) PublicKey() (*bls.PublicKey, bool, error) { + if err := tx.Signer.Verify(); err != nil { + return nil, false, err + } + key := tx.Signer.Key() + return key, key != nil, nil +} func (tx *AddPermissionlessValidatorTx) PendingPriority() Priority { if tx.Subnet == constants.PrimaryNetworkID { @@ -165,8 +174,8 @@ func (tx *AddPermissionlessValidatorTx) SyntacticVerify(ctx *snow.Context) error switch { case !avax.IsSortedTransferableOutputs(tx.StakeOuts, Codec): return errOutputsNotSorted - case totalStakeWeight != tx.Validator.Wght: - return fmt.Errorf("%w: weight %d != stake %d", errValidatorWeightMismatch, tx.Validator.Wght, totalStakeWeight) + case totalStakeWeight != tx.Wght: + return fmt.Errorf("%w: weight %d != stake %d", errValidatorWeightMismatch, tx.Wght, totalStakeWeight) } // cache that this is valid diff --git a/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx_test.go b/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx_test.go index 41071f74..704a3d4f 100644 --- a/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx_test.go +++ b/avalanchego/vms/platformvm/txs/add_permissionless_validator_tx_test.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2022, Ava 
Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( - "errors" "math" "testing" @@ -20,7 +19,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -62,8 +60,6 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { // A BaseTx that fails syntactic verification. invalidBaseTx := BaseTx{} - errCustom := errors.New("custom error") - tests := []test{ { name: "nil tx", @@ -86,7 +82,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { txFunc: func(*gomock.Controller) *AddPermissionlessValidatorTx { return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.EmptyNodeID, }, } @@ -98,7 +94,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { txFunc: func(*gomock.Controller) *AddPermissionlessValidatorTx { return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), }, StakeOuts: nil, @@ -111,7 +107,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { txFunc: func(*gomock.Controller) *AddPermissionlessValidatorTx { return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), }, StakeOuts: []*avax.TransferableOutput{ @@ -136,7 +132,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(errCustom) return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, 
}, @@ -166,7 +162,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -199,7 +195,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { stakeOut.EXPECT().Verify().Return(errCustom) return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -227,7 +223,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { rewardsOwner.EXPECT().Verify().Return(nil).AnyTimes() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -266,7 +262,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -305,7 +301,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -344,7 +340,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 2, }, @@ -383,7 +379,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() return &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: 
Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 2, }, @@ -418,21 +414,19 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() tx := tt.txFunc(ctrl) err := tx.SyntacticVerify(ctx) - require.ErrorIs(err, tt.err) + require.ErrorIs(t, err, tt.err) }) } t.Run("invalid BaseTx", func(t *testing.T) { - require := require.New(t) tx := &AddPermissionlessValidatorTx{ BaseTx: invalidBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), }, StakeOuts: []*avax.TransferableOutput{ @@ -448,11 +442,10 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { DelegationShares: reward.PercentDenominator, } err := tx.SyntacticVerify(ctx) - require.Error(err) + require.Error(t, err) }) t.Run("stake overflow", func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -461,7 +454,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { assetID := ids.GenerateTestID() tx := &AddPermissionlessValidatorTx{ BaseTx: validBaseTx, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ids.GenerateTestNodeID(), Wght: 1, }, @@ -490,7 +483,7 @@ func TestAddPermissionlessValidatorTxSyntacticVerify(t *testing.T) { DelegationShares: reward.PercentDenominator, } err := tx.SyntacticVerify(ctx) - require.Error(err) + require.Error(t, err) }) } diff --git a/avalanchego/vms/platformvm/txs/add_subnet_validator_test.go b/avalanchego/vms/platformvm/txs/add_subnet_validator_test.go index 9ed500e0..c25fd8df 100644 --- a/avalanchego/vms/platformvm/txs/add_subnet_validator_test.go +++ b/avalanchego/vms/platformvm/txs/add_subnet_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package txs @@ -12,10 +12,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -24,7 +23,7 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { require := require.New(t) clk := mockable.Clock{} ctx := snow.DefaultContextTest() - signers := [][]*crypto.PrivateKeySECP256K1R{preFundedKeys} + signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( stx *Tx @@ -72,8 +71,8 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { Outs: outputs, Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, }}, - Validator: validator.SubnetValidator{ - Validator: validator.Validator{ + SubnetValidator: SubnetValidator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), @@ -100,21 +99,21 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { // Case: Missing Subnet ID addSubnetValidatorTx.SyntacticallyVerified = false - addSubnetValidatorTx.Validator.Subnet = ids.Empty + addSubnetValidatorTx.Subnet = ids.Empty stx, err = NewSigned(addSubnetValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) require.Error(err) - addSubnetValidatorTx.Validator.Subnet = subnetID + addSubnetValidatorTx.Subnet = subnetID // Case: No weight addSubnetValidatorTx.SyntacticallyVerified = false - addSubnetValidatorTx.Validator.Wght = 0 + addSubnetValidatorTx.Wght = 0 stx, err = NewSigned(addSubnetValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) require.Error(err) - addSubnetValidatorTx.Validator.Wght = 
validatorWeight + addSubnetValidatorTx.Wght = validatorWeight // Case: Subnet auth indices not unique addSubnetValidatorTx.SyntacticallyVerified = false @@ -129,7 +128,7 @@ func TestAddSubnetValidatorTxSyntacticVerify(t *testing.T) { // Case: adding to Primary Network addSubnetValidatorTx.SyntacticallyVerified = false - addSubnetValidatorTx.Validator.Subnet = constants.PrimaryNetworkID + addSubnetValidatorTx.Subnet = constants.PrimaryNetworkID stx, err = NewSigned(addSubnetValidatorTx, Codec, signers) require.NoError(err) err = stx.SyntacticVerify(ctx) @@ -140,7 +139,7 @@ func TestAddSubnetValidatorMarshal(t *testing.T) { require := require.New(t) clk := mockable.Clock{} ctx := snow.DefaultContextTest() - signers := [][]*crypto.PrivateKeySECP256K1R{preFundedKeys} + signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( stx *Tx @@ -183,8 +182,8 @@ func TestAddSubnetValidatorMarshal(t *testing.T) { Outs: outputs, Memo: []byte{1, 2, 3, 4, 5, 6, 7, 8}, }}, - Validator: validator.SubnetValidator{ - Validator: validator.Validator{ + SubnetValidator: SubnetValidator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), diff --git a/avalanchego/vms/platformvm/txs/add_subnet_validator_tx.go b/avalanchego/vms/platformvm/txs/add_subnet_validator_tx.go index 3cc50d40..0ac3474e 100644 --- a/avalanchego/vms/platformvm/txs/add_subnet_validator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_subnet_validator_tx.go @@ -1,21 +1,20 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs import ( "errors" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/vms/components/verify" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" ) var ( - _ StakerTx = &AddSubnetValidatorTx{} + _ StakerTx = (*AddSubnetValidatorTx)(nil) errAddPrimaryNetworkValidator = errors.New("can't add primary network validator with AddSubnetValidatorTx") ) @@ -25,22 +24,24 @@ type AddSubnetValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // The validator - Validator validator.SubnetValidator `serialize:"true" json:"validator"` + SubnetValidator `serialize:"true" json:"validator"` // Auth that will be allowing this validator into the network SubnetAuth verify.Verifiable `serialize:"true" json:"subnetAuthorization"` } -func (tx *AddSubnetValidatorTx) SubnetID() ids.ID { return tx.Validator.Subnet } -func (tx *AddSubnetValidatorTx) NodeID() ids.NodeID { return tx.Validator.NodeID } -func (tx *AddSubnetValidatorTx) StartTime() time.Time { return tx.Validator.StartTime() } -func (tx *AddSubnetValidatorTx) EndTime() time.Time { return tx.Validator.EndTime() } -func (tx *AddSubnetValidatorTx) Weight() uint64 { return tx.Validator.Wght } +func (tx *AddSubnetValidatorTx) NodeID() ids.NodeID { + return tx.SubnetValidator.NodeID +} + +func (*AddSubnetValidatorTx) PublicKey() (*bls.PublicKey, bool, error) { + return nil, false, nil +} -func (tx *AddSubnetValidatorTx) PendingPriority() Priority { +func (*AddSubnetValidatorTx) PendingPriority() Priority { return SubnetPermissionedValidatorPendingPriority } -func (tx *AddSubnetValidatorTx) CurrentPriority() Priority { +func (*AddSubnetValidatorTx) CurrentPriority() Priority { return SubnetPermissionedValidatorCurrentPriority } @@ -51,7 +52,7 @@ func (tx *AddSubnetValidatorTx) SyntacticVerify(ctx *snow.Context) error { return 
ErrNilTx case tx.SyntacticallyVerified: // already passed syntactic verification return nil - case tx.Validator.Subnet == constants.PrimaryNetworkID: + case tx.Subnet == constants.PrimaryNetworkID: return errAddPrimaryNetworkValidator } diff --git a/avalanchego/vms/platformvm/txs/add_validator_test.go b/avalanchego/vms/platformvm/txs/add_validator_test.go index 14f333c1..78eda5c9 100644 --- a/avalanchego/vms/platformvm/txs/add_validator_test.go +++ b/avalanchego/vms/platformvm/txs/add_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -11,12 +11,11 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -25,7 +24,7 @@ func TestAddValidatorTxSyntacticVerify(t *testing.T) { clk := mockable.Clock{} ctx := snow.DefaultContextTest() ctx.AVAXAssetID = ids.GenerateTestID() - signers := [][]*crypto.PrivateKeySECP256K1R{preFundedKeys} + signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( stx *Tx @@ -82,7 +81,7 @@ func TestAddValidatorTxSyntacticVerify(t *testing.T) { Ins: inputs, Outs: outputs, }}, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), @@ -147,7 +146,7 @@ func TestAddValidatorTxSyntacticVerifyNotAVAX(t *testing.T) { clk := mockable.Clock{} ctx := snow.DefaultContextTest() ctx.AVAXAssetID = 
ids.GenerateTestID() - signers := [][]*crypto.PrivateKeySECP256K1R{preFundedKeys} + signers := [][]*secp256k1.PrivateKey{preFundedKeys} var ( stx *Tx @@ -199,7 +198,7 @@ func TestAddValidatorTxSyntacticVerifyNotAVAX(t *testing.T) { Ins: inputs, Outs: outputs, }}, - Validator: validator.Validator{ + Validator: Validator{ NodeID: ctx.NodeID, Start: uint64(clk.Time().Unix()), End: uint64(clk.Time().Add(time.Hour).Unix()), diff --git a/avalanchego/vms/platformvm/txs/add_validator_tx.go b/avalanchego/vms/platformvm/txs/add_validator_tx.go index aaeb7353..d7101c37 100644 --- a/avalanchego/vms/platformvm/txs/add_validator_tx.go +++ b/avalanchego/vms/platformvm/txs/add_validator_tx.go @@ -1,26 +1,25 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) var ( - _ ValidatorTx = &AddValidatorTx{} + _ ValidatorTx = (*AddValidatorTx)(nil) errTooManyShares = fmt.Errorf("a staker can only require at most %d shares from delegators", reward.PercentDenominator) ) @@ -30,7 +29,7 @@ type AddValidatorTx struct { // Metadata, inputs and outputs BaseTx `serialize:"true"` // Describes the delegatee - Validator validator.Validator `serialize:"true" json:"validator"` + Validator `serialize:"true" json:"validator"` // Where to send staked tokens when done validating StakeOuts 
[]*avax.TransferableOutput `serialize:"true" json:"stake"` // Where to send staking rewards when done validating @@ -53,17 +52,41 @@ func (tx *AddValidatorTx) InitCtx(ctx *snow.Context) { tx.RewardsOwner.InitCtx(ctx) } -func (tx *AddValidatorTx) SubnetID() ids.ID { return constants.PrimaryNetworkID } -func (tx *AddValidatorTx) NodeID() ids.NodeID { return tx.Validator.NodeID } -func (tx *AddValidatorTx) StartTime() time.Time { return tx.Validator.StartTime() } -func (tx *AddValidatorTx) EndTime() time.Time { return tx.Validator.EndTime() } -func (tx *AddValidatorTx) Weight() uint64 { return tx.Validator.Wght } -func (tx *AddValidatorTx) PendingPriority() Priority { return PrimaryNetworkValidatorPendingPriority } -func (tx *AddValidatorTx) CurrentPriority() Priority { return PrimaryNetworkValidatorCurrentPriority } -func (tx *AddValidatorTx) Stake() []*avax.TransferableOutput { return tx.StakeOuts } -func (tx *AddValidatorTx) ValidationRewardsOwner() fx.Owner { return tx.RewardsOwner } -func (tx *AddValidatorTx) DelegationRewardsOwner() fx.Owner { return tx.RewardsOwner } -func (tx *AddValidatorTx) Shares() uint32 { return tx.DelegationShares } +func (*AddValidatorTx) SubnetID() ids.ID { + return constants.PrimaryNetworkID +} + +func (tx *AddValidatorTx) NodeID() ids.NodeID { + return tx.Validator.NodeID +} + +func (*AddValidatorTx) PublicKey() (*bls.PublicKey, bool, error) { + return nil, false, nil +} + +func (*AddValidatorTx) PendingPriority() Priority { + return PrimaryNetworkValidatorPendingPriority +} + +func (*AddValidatorTx) CurrentPriority() Priority { + return PrimaryNetworkValidatorCurrentPriority +} + +func (tx *AddValidatorTx) Stake() []*avax.TransferableOutput { + return tx.StakeOuts +} + +func (tx *AddValidatorTx) ValidationRewardsOwner() fx.Owner { + return tx.RewardsOwner +} + +func (tx *AddValidatorTx) DelegationRewardsOwner() fx.Owner { + return tx.RewardsOwner +} + +func (tx *AddValidatorTx) Shares() uint32 { + return tx.DelegationShares +} // 
SyntacticVerify returns nil iff [tx] is valid func (tx *AddValidatorTx) SyntacticVerify(ctx *snow.Context) error { @@ -103,8 +126,8 @@ func (tx *AddValidatorTx) SyntacticVerify(ctx *snow.Context) error { switch { case !avax.IsSortedTransferableOutputs(tx.StakeOuts, Codec): return errOutputsNotSorted - case totalStakeWeight != tx.Validator.Wght: - return fmt.Errorf("%w: weight %d != stake %d", errValidatorWeightMismatch, tx.Validator.Wght, totalStakeWeight) + case totalStakeWeight != tx.Wght: + return fmt.Errorf("%w: weight %d != stake %d", errValidatorWeightMismatch, tx.Wght, totalStakeWeight) } // cache that this is valid diff --git a/avalanchego/vms/platformvm/txs/advance_time_tx.go b/avalanchego/vms/platformvm/txs/advance_time_tx.go index 67f6ec67..fc889da9 100644 --- a/avalanchego/vms/platformvm/txs/advance_time_tx.go +++ b/avalanchego/vms/platformvm/txs/advance_time_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -8,10 +8,11 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" ) -var _ UnsignedTx = &AdvanceTimeTx{} +var _ UnsignedTx = (*AdvanceTimeTx)(nil) // AdvanceTimeTx is a transaction to increase the chain's timestamp. 
// When the chain's timestamp is updated (a AdvanceTimeTx is accepted and @@ -26,20 +27,32 @@ type AdvanceTimeTx struct { unsignedBytes []byte // Unsigned byte representation of this data } -func (tx *AdvanceTimeTx) Initialize(unsignedBytes []byte) { tx.unsignedBytes = unsignedBytes } +func (tx *AdvanceTimeTx) SetBytes(unsignedBytes []byte) { + tx.unsignedBytes = unsignedBytes +} -func (tx *AdvanceTimeTx) Bytes() []byte { return tx.unsignedBytes } +func (tx *AdvanceTimeTx) Bytes() []byte { + return tx.unsignedBytes +} -func (tx *AdvanceTimeTx) InitCtx(*snow.Context) {} +func (*AdvanceTimeTx) InitCtx(*snow.Context) {} // Timestamp returns the time this block is proposing the chain should be set to func (tx *AdvanceTimeTx) Timestamp() time.Time { return time.Unix(int64(tx.Time), 0) } -func (tx *AdvanceTimeTx) InputIDs() ids.Set { return nil } -func (tx *AdvanceTimeTx) Outputs() []*avax.TransferableOutput { return nil } -func (tx *AdvanceTimeTx) SyntacticVerify(*snow.Context) error { return nil } +func (*AdvanceTimeTx) InputIDs() set.Set[ids.ID] { + return nil +} + +func (*AdvanceTimeTx) Outputs() []*avax.TransferableOutput { + return nil +} + +func (*AdvanceTimeTx) SyntacticVerify(*snow.Context) error { + return nil +} func (tx *AdvanceTimeTx) Visit(visitor Visitor) error { return visitor.AdvanceTimeTx(tx) diff --git a/avalanchego/vms/platformvm/txs/base_tx.go b/avalanchego/vms/platformvm/txs/base_tx.go index dd027f56..ad8ea2f7 100644 --- a/avalanchego/vms/platformvm/txs/base_tx.go +++ b/avalanchego/vms/platformvm/txs/base_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -9,6 +9,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -31,19 +33,25 @@ type BaseTx struct { unsignedBytes []byte // Unsigned byte representation of this data } -func (tx *BaseTx) Initialize(unsignedBytes []byte) { tx.unsignedBytes = unsignedBytes } +func (tx *BaseTx) SetBytes(unsignedBytes []byte) { + tx.unsignedBytes = unsignedBytes +} -func (tx *BaseTx) Bytes() []byte { return tx.unsignedBytes } +func (tx *BaseTx) Bytes() []byte { + return tx.unsignedBytes +} -func (tx *BaseTx) InputIDs() ids.Set { - inputIDs := ids.NewSet(len(tx.Ins)) +func (tx *BaseTx) InputIDs() set.Set[ids.ID] { + inputIDs := set.NewSet[ids.ID](len(tx.Ins)) for _, in := range tx.Ins { inputIDs.Add(in.InputID()) } return inputIDs } -func (tx *BaseTx) Outputs() []*avax.TransferableOutput { return tx.Outs } +func (tx *BaseTx) Outputs() []*avax.TransferableOutput { + return tx.Outs +} // InitCtx sets the FxID fields in the inputs and outputs of this [BaseTx]. Also // sets the [ctx] to the given [vm.ctx] so that the addresses can be json @@ -82,7 +90,7 @@ func (tx *BaseTx) SyntacticVerify(ctx *snow.Context) error { switch { case !avax.IsSortedTransferableOutputs(tx.Outs, Codec): return errOutputsNotSorted - case !avax.IsSortedAndUniqueTransferableInputs(tx.Ins): + case !utils.IsSortedAndUniqueSortable(tx.Ins): return errInputsNotSortedUnique default: return nil diff --git a/avalanchego/vms/platformvm/txs/base_tx_test.go b/avalanchego/vms/platformvm/txs/base_tx_test.go index d65c8731..6e5b5ad7 100644 --- a/avalanchego/vms/platformvm/txs/base_tx_test.go +++ b/avalanchego/vms/platformvm/txs/base_tx_test.go @@ -1,13 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package txs import ( "encoding/json" - "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/components/avax" ) @@ -39,18 +40,12 @@ func TestBaseTxMarshalJSON(t *testing.T) { }} txBytes, err := json.Marshal(tx) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + asString := string(txBytes) - switch { - case !strings.Contains(asString, `"networkID":4`): - t.Fatal("should have network ID") - case !strings.Contains(asString, `"blockchainID":"SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg"`): - t.Fatal("should have blockchainID ID") - case !strings.Contains(asString, `"inputs":[{"txID":"t64jLxDRmxo8y48WjbRALPAZuSDZ6qPVaaeDzxHA4oSojhLt","outputIndex":5,"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","input":{"Err":null,"Val":100}}]`): - t.Fatal("inputs are wrong") - case !strings.Contains(asString, `"outputs":[{"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","output":{"Err":null,"Val":100}}]`): - t.Fatal("outputs are wrong") - } + + require.Contains(t, asString, `"networkID":4`) + require.Contains(t, asString, `"blockchainID":"SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg"`) + require.Contains(t, asString, `"inputs":[{"txID":"t64jLxDRmxo8y48WjbRALPAZuSDZ6qPVaaeDzxHA4oSojhLt","outputIndex":5,"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","input":{"Err":null,"Val":100}}]`) + require.Contains(t, asString, `"outputs":[{"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","output":{"Err":null,"Val":100}}]`) } diff --git a/avalanchego/vms/platformvm/txs/builder/builder.go b/avalanchego/vms/platformvm/txs/builder/builder.go index 
9e1bbb9c..19aa1e37 100644 --- a/avalanchego/vms/platformvm/txs/builder/builder.go +++ b/avalanchego/vms/platformvm/txs/builder/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package builder @@ -10,7 +10,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -19,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -27,7 +27,7 @@ import ( const MaxPageSize = 1024 var ( - _ Builder = &builder{} + _ Builder = (*builder)(nil) errNoFunds = errors.New("no spendable funds were found") ) @@ -46,7 +46,7 @@ type AtomicTxBuilder interface { NewImportTx( chainID ids.ID, to ids.ShortID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) @@ -59,7 +59,7 @@ type AtomicTxBuilder interface { amount uint64, chainID ids.ID, to ids.ShortID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) } @@ -78,7 +78,7 @@ type DecisionTxBuilder interface { vmID ids.ID, fxIDs []ids.ID, chainName string, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) @@ -89,7 +89,7 @@ type DecisionTxBuilder interface { NewCreateSubnetTx( threshold uint32, ownerAddrs []ids.ShortID, - keys 
[]*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) } @@ -110,7 +110,7 @@ type ProposalTxBuilder interface { nodeID ids.NodeID, rewardAddress ids.ShortID, shares uint32, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) @@ -127,7 +127,7 @@ type ProposalTxBuilder interface { endTime uint64, nodeID ids.NodeID, rewardAddress ids.ShortID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) @@ -144,7 +144,7 @@ type ProposalTxBuilder interface { endTime uint64, nodeID ids.NodeID, subnetID ids.ID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) @@ -155,7 +155,7 @@ type ProposalTxBuilder interface { NewRemoveSubnetValidatorTx( nodeID ids.NodeID, subnetID ids.ID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) @@ -173,7 +173,7 @@ func New( cfg *config.Config, clk *mockable.Clock, fx fx.Fx, - state state.Chain, + state state.State, atomicUTXOManager avax.AtomicUTXOManager, utxoSpender utxo.Spender, ) Builder { @@ -191,7 +191,7 @@ func New( type builder struct { avax.AtomicUTXOManager utxo.Spender - state state.Chain + state state.State cfg *config.Config ctx *snow.Context @@ -202,7 +202,7 @@ type builder struct { func (b *builder) NewImportTx( from ids.ID, to ids.ShortID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) { kc := secp256k1fx.NewKeychain(keys...) 
@@ -213,7 +213,7 @@ func (b *builder) NewImportTx( } importedInputs := []*avax.TransferableInput{} - signers := [][]*crypto.PrivateKeySECP256K1R{} + signers := [][]*secp256k1.PrivateKey{} importedAmounts := make(map[ids.ID]uint64) now := b.clk.Unix() @@ -250,8 +250,8 @@ func (b *builder) NewImportTx( outs := []*avax.TransferableOutput{} switch { case importedAVAX < b.cfg.TxFee: // imported amount goes toward paying tx fee - var baseSigners [][]*crypto.PrivateKeySECP256K1R - ins, outs, _, baseSigners, err = b.Spend(keys, 0, b.cfg.TxFee-importedAVAX, changeAddr) + var baseSigners [][]*secp256k1.PrivateKey + ins, outs, _, baseSigners, err = b.Spend(b.state, keys, 0, b.cfg.TxFee-importedAVAX, changeAddr) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } @@ -302,14 +302,14 @@ func (b *builder) NewExportTx( amount uint64, chainID ids.ID, to ids.ShortID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) { toBurn, err := math.Add64(amount, b.cfg.TxFee) if err != nil { return nil, fmt.Errorf("amount (%d) + tx fee(%d) overflows", amount, b.cfg.TxFee) } - ins, outs, _, signers, err := b.Spend(keys, 0, toBurn, changeAddr) + ins, outs, _, signers, err := b.Spend(b.state, keys, 0, toBurn, changeAddr) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } @@ -348,12 +348,12 @@ func (b *builder) NewCreateChainTx( vmID ids.ID, fxIDs []ids.ID, chainName string, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) { timestamp := b.state.GetTimestamp() createBlockchainTxFee := b.cfg.GetCreateBlockchainTxFee(timestamp) - ins, outs, _, signers, err := b.Spend(keys, 0, createBlockchainTxFee, changeAddr) + ins, outs, _, signers, err := b.Spend(b.state, keys, 0, createBlockchainTxFee, changeAddr) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } @@ -365,7 
+365,7 @@ func (b *builder) NewCreateChainTx( signers = append(signers, subnetSigners) // Sort the provided fxIDs - ids.SortIDs(fxIDs) + utils.Sort(fxIDs) // Create the tx utx := &txs.CreateChainTx{ @@ -392,18 +392,18 @@ func (b *builder) NewCreateChainTx( func (b *builder) NewCreateSubnetTx( threshold uint32, ownerAddrs []ids.ShortID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) { timestamp := b.state.GetTimestamp() createSubnetTxFee := b.cfg.GetCreateSubnetTxFee(timestamp) - ins, outs, _, signers, err := b.Spend(keys, 0, createSubnetTxFee, changeAddr) + ins, outs, _, signers, err := b.Spend(b.state, keys, 0, createSubnetTxFee, changeAddr) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } // Sort control addresses - ids.SortShortIDs(ownerAddrs) + utils.Sort(ownerAddrs) // Create the tx utx := &txs.CreateSubnetTx{ @@ -432,10 +432,10 @@ func (b *builder) NewAddValidatorTx( nodeID ids.NodeID, rewardAddress ids.ShortID, shares uint32, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) { - ins, unstakedOuts, stakedOuts, signers, err := b.Spend(keys, stakeAmount, b.cfg.AddPrimaryNetworkValidatorFee, changeAddr) + ins, unstakedOuts, stakedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkValidatorFee, changeAddr) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } @@ -447,7 +447,7 @@ func (b *builder) NewAddValidatorTx( Ins: ins, Outs: unstakedOuts, }}, - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: nodeID, Start: startTime, End: endTime, @@ -474,10 +474,10 @@ func (b *builder) NewAddDelegatorTx( endTime uint64, nodeID ids.NodeID, rewardAddress ids.ShortID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) { - ins, unlockedOuts, lockedOuts, signers, 
err := b.Spend(keys, stakeAmount, b.cfg.AddPrimaryNetworkDelegatorFee, changeAddr) + ins, unlockedOuts, lockedOuts, signers, err := b.Spend(b.state, keys, stakeAmount, b.cfg.AddPrimaryNetworkDelegatorFee, changeAddr) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } @@ -489,7 +489,7 @@ func (b *builder) NewAddDelegatorTx( Ins: ins, Outs: unlockedOuts, }}, - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: nodeID, Start: startTime, End: endTime, @@ -515,10 +515,10 @@ func (b *builder) NewAddSubnetValidatorTx( endTime uint64, nodeID ids.NodeID, subnetID ids.ID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) { - ins, outs, _, signers, err := b.Spend(keys, 0, b.cfg.TxFee, changeAddr) + ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TxFee, changeAddr) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } @@ -537,8 +537,8 @@ func (b *builder) NewAddSubnetValidatorTx( Ins: ins, Outs: outs, }}, - Validator: validator.SubnetValidator{ - Validator: validator.Validator{ + SubnetValidator: txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: nodeID, Start: startTime, End: endTime, @@ -558,10 +558,10 @@ func (b *builder) NewAddSubnetValidatorTx( func (b *builder) NewRemoveSubnetValidatorTx( nodeID ids.NodeID, subnetID ids.ID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, changeAddr ids.ShortID, ) (*txs.Tx, error) { - ins, outs, _, signers, err := b.Spend(keys, 0, b.cfg.TxFee, changeAddr) + ins, outs, _, signers, err := b.Spend(b.state, keys, 0, b.cfg.TxFee, changeAddr) if err != nil { return nil, fmt.Errorf("couldn't generate tx inputs/outputs: %w", err) } diff --git a/avalanchego/vms/platformvm/txs/builder/mock_builder.go b/avalanchego/vms/platformvm/txs/builder/mock_builder.go index 6e7d8350..1f7c4f3d 100644 --- a/avalanchego/vms/platformvm/txs/builder/mock_builder.go +++ 
b/avalanchego/vms/platformvm/txs/builder/mock_builder.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/txs/builder (interfaces: Builder) @@ -9,7 +12,7 @@ import ( time "time" ids "github.com/ava-labs/avalanchego/ids" - crypto "github.com/ava-labs/avalanchego/utils/crypto" + secp256k1 "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" gomock "github.com/golang/mock/gomock" ) @@ -38,7 +41,7 @@ func (m *MockBuilder) EXPECT() *MockBuilderMockRecorder { } // NewAddDelegatorTx mocks base method. -func (m *MockBuilder) NewAddDelegatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ShortID, arg5 []*crypto.PrivateKeySECP256K1R, arg6 ids.ShortID) (*txs.Tx, error) { +func (m *MockBuilder) NewAddDelegatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ShortID, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewAddDelegatorTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6) ret0, _ := ret[0].(*txs.Tx) @@ -53,7 +56,7 @@ func (mr *MockBuilderMockRecorder) NewAddDelegatorTx(arg0, arg1, arg2, arg3, arg } // NewAddSubnetValidatorTx mocks base method. -func (m *MockBuilder) NewAddSubnetValidatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ID, arg5 []*crypto.PrivateKeySECP256K1R, arg6 ids.ShortID) (*txs.Tx, error) { +func (m *MockBuilder) NewAddSubnetValidatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ID, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewAddSubnetValidatorTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6) ret0, _ := ret[0].(*txs.Tx) @@ -68,7 +71,7 @@ func (mr *MockBuilderMockRecorder) NewAddSubnetValidatorTx(arg0, arg1, arg2, arg } // NewAddValidatorTx mocks base method. 
-func (m *MockBuilder) NewAddValidatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ShortID, arg5 uint32, arg6 []*crypto.PrivateKeySECP256K1R, arg7 ids.ShortID) (*txs.Tx, error) { +func (m *MockBuilder) NewAddValidatorTx(arg0, arg1, arg2 uint64, arg3 ids.NodeID, arg4 ids.ShortID, arg5 uint32, arg6 []*secp256k1.PrivateKey, arg7 ids.ShortID) (*txs.Tx, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewAddValidatorTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(*txs.Tx) @@ -98,7 +101,7 @@ func (mr *MockBuilderMockRecorder) NewAdvanceTimeTx(arg0 interface{}) *gomock.Ca } // NewCreateChainTx mocks base method. -func (m *MockBuilder) NewCreateChainTx(arg0 ids.ID, arg1 []byte, arg2 ids.ID, arg3 []ids.ID, arg4 string, arg5 []*crypto.PrivateKeySECP256K1R, arg6 ids.ShortID) (*txs.Tx, error) { +func (m *MockBuilder) NewCreateChainTx(arg0 ids.ID, arg1 []byte, arg2 ids.ID, arg3 []ids.ID, arg4 string, arg5 []*secp256k1.PrivateKey, arg6 ids.ShortID) (*txs.Tx, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewCreateChainTx", arg0, arg1, arg2, arg3, arg4, arg5, arg6) ret0, _ := ret[0].(*txs.Tx) @@ -113,7 +116,7 @@ func (mr *MockBuilderMockRecorder) NewCreateChainTx(arg0, arg1, arg2, arg3, arg4 } // NewCreateSubnetTx mocks base method. -func (m *MockBuilder) NewCreateSubnetTx(arg0 uint32, arg1 []ids.ShortID, arg2 []*crypto.PrivateKeySECP256K1R, arg3 ids.ShortID) (*txs.Tx, error) { +func (m *MockBuilder) NewCreateSubnetTx(arg0 uint32, arg1 []ids.ShortID, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewCreateSubnetTx", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*txs.Tx) @@ -128,7 +131,7 @@ func (mr *MockBuilderMockRecorder) NewCreateSubnetTx(arg0, arg1, arg2, arg3 inte } // NewExportTx mocks base method. 
-func (m *MockBuilder) NewExportTx(arg0 uint64, arg1 ids.ID, arg2 ids.ShortID, arg3 []*crypto.PrivateKeySECP256K1R, arg4 ids.ShortID) (*txs.Tx, error) { +func (m *MockBuilder) NewExportTx(arg0 uint64, arg1 ids.ID, arg2 ids.ShortID, arg3 []*secp256k1.PrivateKey, arg4 ids.ShortID) (*txs.Tx, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewExportTx", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(*txs.Tx) @@ -143,7 +146,7 @@ func (mr *MockBuilderMockRecorder) NewExportTx(arg0, arg1, arg2, arg3, arg4 inte } // NewImportTx mocks base method. -func (m *MockBuilder) NewImportTx(arg0 ids.ID, arg1 ids.ShortID, arg2 []*crypto.PrivateKeySECP256K1R, arg3 ids.ShortID) (*txs.Tx, error) { +func (m *MockBuilder) NewImportTx(arg0 ids.ID, arg1 ids.ShortID, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewImportTx", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*txs.Tx) @@ -158,7 +161,7 @@ func (mr *MockBuilderMockRecorder) NewImportTx(arg0, arg1, arg2, arg3 interface{ } // NewRemoveSubnetValidatorTx mocks base method. -func (m *MockBuilder) NewRemoveSubnetValidatorTx(arg0 ids.NodeID, arg1 ids.ID, arg2 []*crypto.PrivateKeySECP256K1R, arg3 ids.ShortID) (*txs.Tx, error) { +func (m *MockBuilder) NewRemoveSubnetValidatorTx(arg0 ids.NodeID, arg1 ids.ID, arg2 []*secp256k1.PrivateKey, arg3 ids.ShortID) (*txs.Tx, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "NewRemoveSubnetValidatorTx", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*txs.Tx) diff --git a/avalanchego/vms/platformvm/txs/codec.go b/avalanchego/vms/platformvm/txs/codec.go index f2ccde3e..d3667b94 100644 --- a/avalanchego/vms/platformvm/txs/codec.go +++ b/avalanchego/vms/platformvm/txs/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs diff --git a/avalanchego/vms/platformvm/txs/create_chain_test.go b/avalanchego/vms/platformvm/txs/create_chain_test.go index cd9eb3cc..60a01269 100644 --- a/avalanchego/vms/platformvm/txs/create_chain_test.go +++ b/avalanchego/vms/platformvm/txs/create_chain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -6,10 +6,12 @@ package txs import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -17,7 +19,7 @@ import ( func TestUnsignedCreateChainTxVerify(t *testing.T) { ctx := snow.DefaultContextTest() testSubnet1ID := ids.GenerateTestID() - testSubnet1ControlKeys := []*crypto.PrivateKeySECP256K1R{ + testSubnet1ControlKeys := []*secp256k1.PrivateKey{ preFundedKeys[0], preFundedKeys[1], } @@ -30,7 +32,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { vmID ids.ID fxIDs []ids.ID chainName string - keys []*crypto.PrivateKeySECP256K1R + keys []*secp256k1.PrivateKey setup func(*CreateChainTx) *CreateChainTx } @@ -43,8 +45,10 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { vmID: constants.AVMID, fxIDs: nil, chainName: "yeet", - keys: []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - setup: func(*CreateChainTx) *CreateChainTx { return nil }, + keys: []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + setup: func(*CreateChainTx) *CreateChainTx { + return nil + }, }, { description: "vm ID is empty", @@ -54,8 +58,11 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { vmID: 
constants.AVMID, fxIDs: nil, chainName: "yeet", - keys: []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - setup: func(tx *CreateChainTx) *CreateChainTx { tx.VMID = ids.ID{}; return tx }, + keys: []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + setup: func(tx *CreateChainTx) *CreateChainTx { + tx.VMID = ids.ID{} + return tx + }, }, { description: "subnet ID is empty", @@ -65,7 +72,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { vmID: constants.AVMID, fxIDs: nil, chainName: "yeet", - keys: []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + keys: []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, setup: func(tx *CreateChainTx) *CreateChainTx { tx.SubnetID = ids.ID{} return tx @@ -79,7 +86,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { vmID: constants.AVMID, fxIDs: nil, chainName: "yeet", - keys: []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + keys: []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, setup: func(tx *CreateChainTx) *CreateChainTx { tx.SubnetID = ctx.ChainID return tx @@ -93,7 +100,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { vmID: constants.AVMID, fxIDs: nil, chainName: "yeet", - keys: []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + keys: []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, setup: func(tx *CreateChainTx) *CreateChainTx { tx.ChainName = string(make([]byte, MaxNameLen+1)) return tx @@ -107,7 +114,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { vmID: constants.AVMID, fxIDs: nil, chainName: "yeet", - keys: []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + keys: []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, setup: func(tx *CreateChainTx) *CreateChainTx { 
tx.ChainName = "⌘" return tx @@ -121,7 +128,7 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { vmID: constants.AVMID, fxIDs: nil, chainName: "yeet", - keys: []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + keys: []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, setup: func(tx *CreateChainTx) *CreateChainTx { tx.GenesisData = make([]byte, MaxGenesisLen+1) return tx @@ -170,18 +177,18 @@ func TestUnsignedCreateChainTxVerify(t *testing.T) { SubnetAuth: subnetAuth, } - signers := [][]*crypto.PrivateKeySECP256K1R{preFundedKeys} + signers := [][]*secp256k1.PrivateKey{preFundedKeys} stx, err := NewSigned(createChainTx, Codec, signers) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) createChainTx.SyntacticallyVerified = false stx.Unsigned = test.setup(createChainTx) - if err := stx.SyntacticVerify(ctx); err != nil && !test.shouldErr { - t.Fatalf("test '%s' shouldn't have erred but got: %s", test.description, err) - } else if err == nil && test.shouldErr { - t.Fatalf("test '%s' didn't error but should have", test.description) + + err = stx.SyntacticVerify(ctx) + if !test.shouldErr { + require.NoError(t, err) + } else { + require.Error(t, err) } } } diff --git a/avalanchego/vms/platformvm/txs/create_chain_tx.go b/avalanchego/vms/platformvm/txs/create_chain_tx.go index 75679513..b329279c 100644 --- a/avalanchego/vms/platformvm/txs/create_chain_tx.go +++ b/avalanchego/vms/platformvm/txs/create_chain_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -20,7 +21,7 @@ const ( ) var ( - _ UnsignedTx = &CreateChainTx{} + _ UnsignedTx = (*CreateChainTx)(nil) ErrCantValidatePrimaryNetwork = errors.New("new blockchain can't be validated by primary network") @@ -61,7 +62,7 @@ func (tx *CreateChainTx) SyntacticVerify(ctx *snow.Context) error { return errNameTooLong case tx.VMID == ids.Empty: return errInvalidVMID - case !ids.IsSortedAndUniqueIDs(tx.FxIDs): + case !utils.IsSortedAndUniqueSortable(tx.FxIDs): return errFxIDsNotSortedAndUnique case len(tx.GenesisData) > MaxGenesisLen: return errGenesisTooLong diff --git a/avalanchego/vms/platformvm/txs/create_subnet_tx.go b/avalanchego/vms/platformvm/txs/create_subnet_tx.go index a119000c..02f41fae 100644 --- a/avalanchego/vms/platformvm/txs/create_subnet_tx.go +++ b/avalanchego/vms/platformvm/txs/create_subnet_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/fx" ) -var _ UnsignedTx = &CreateSubnetTx{} +var _ UnsignedTx = (*CreateSubnetTx)(nil) // CreateSubnetTx is an unsigned proposal to create a new subnet type CreateSubnetTx struct { diff --git a/avalanchego/vms/platformvm/txs/executor/advance_time_test.go b/avalanchego/vms/platformvm/txs/executor/advance_time_test.go index a284757f..2bacbbab 100644 --- a/avalanchego/vms/platformvm/txs/executor/advance_time_test.go +++ b/avalanchego/vms/platformvm/txs/executor/advance_time_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -12,8 +12,9 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -24,7 +25,7 @@ import ( // for the primary network func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { require := require.New(t) - env := newEnvironment() + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -36,7 +37,7 @@ func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() - addPendingValidatorTx, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}) + addPendingValidatorTx, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) require.NoError(err) tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) @@ -72,35 +73,29 @@ func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { require.Equal(addPendingValidatorTx.ID(), validatorStaker.TxID) // Test VM validators - executor.OnCommitState.Apply(env.state) + require.NoError(executor.OnCommitState.Apply(env.state)) + env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - 
require.True(env.config.Validators.Contains(constants.PrimaryNetworkID, nodeID)) + require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, nodeID)) } // Ensure semantic verification fails when proposed timestamp is at or before current timestamp func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -109,14 +104,13 @@ func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should've failed verification because proposed timestamp same as current timestamp") - } + require.Error(err, "should've failed verification because proposed timestamp same as current timestamp") } // Ensure semantic verification fails when proposed timestamp is after next validator set change time func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() // Case: Timestamp is after next validator start time @@ -124,24 +118,18 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() - _, err := 
addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}) - require.NoError(t, err) + _, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) + require.NoError(err) { tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime.Add(1 * time.Second)) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -150,22 +138,17 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should've failed verification because proposed timestamp is after pending validator start time") - } + require.Error(err, "should've failed verification because proposed timestamp is after pending validator start time") } - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + err = shutdownEnvironment(env) + require.NoError(err) // Case: Timestamp is after next validator end time - env = newEnvironment() + env = newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() // fast forward clock to 10 seconds before genesis validators stop validating @@ -174,19 +157,13 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { { // Proposes advancing timestamp to 1 second after genesis validators stop validating tx, err := env.txBuilder.NewAdvanceTimeTx(defaultValidateEndTime.Add(1 * time.Second)) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := 
state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -195,9 +172,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should've failed verification because proposed timestamp is after pending validator start time") - } + require.Error(err, "should've failed verification because proposed timestamp is after pending validator start time") } } @@ -370,23 +345,27 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { } for _, test := range tests { - t.Run(test.description, func(ts *testing.T) { - require := require.New(ts) - env := newEnvironment() + t.Run(test.description, func(t *testing.T) { + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) }() - env.config.WhitelistedSubnets.Add(testSubnet1.ID()) + dummyHeight := uint64(1) + subnetID := testSubnet1.ID() + env.config.TrackedSubnets.Add(subnetID) + env.config.Validators.Add(subnetID, validators.NewSet()) + for _, staker := range test.stakers { _, err := addPendingValidator( env, staker.startTime, staker.endTime, staker.nodeID, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ) require.NoError(err) } @@ -396,17 +375,18 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { 10, // Weight uint64(staker.startTime.Unix()), uint64(staker.endTime.Unix()), - staker.nodeID, // validator ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + staker.nodeID, // validator ID + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - staker := 
state.NewPendingStaker( + staker, err := state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) @@ -433,7 +413,7 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { } require.NoError(tx.Unsigned.Visit(&executor)) - executor.OnCommitState.Apply(env.state) + require.NoError(executor.OnCommitState.Apply(env.state)) } env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) @@ -443,20 +423,20 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { case pending: _, err := env.state.GetPendingValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.False(env.config.Validators.Contains(constants.PrimaryNetworkID, stakerNodeID)) + require.False(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) case current: _, err := env.state.GetCurrentValidator(constants.PrimaryNetworkID, stakerNodeID) require.NoError(err) - require.True(env.config.Validators.Contains(constants.PrimaryNetworkID, stakerNodeID)) + require.True(validators.Contains(env.config.Validators, constants.PrimaryNetworkID, stakerNodeID)) } } for stakerNodeID, status := range test.expectedSubnetStakers { switch status { case pending: - require.False(env.config.Validators.Contains(testSubnet1.ID(), stakerNodeID)) + require.False(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) case current: - require.True(env.config.Validators.Contains(testSubnet1.ID(), stakerNodeID)) + require.True(validators.Contains(env.config.Validators, subnetID, stakerNodeID)) } } }) @@ -469,12 +449,16 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { // is after the new timestamp func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment() + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { 
require.NoError(shutdownEnvironment(env)) }() - env.config.WhitelistedSubnets.Add(testSubnet1.ID()) + + subnetID := testSubnet1.ID() + env.config.TrackedSubnets.Add(subnetID) + env.config.Validators.Add(subnetID, validators.NewSet()) + dummyHeight := uint64(1) // Add a subnet validator to the staker set subnetValidatorNodeID := ids.NodeID(preFundedKeys[0].PublicKey().Address()) @@ -486,17 +470,18 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { uint64(subnetVdr1StartTime.Unix()), // Start time uint64(subnetVdr1EndTime.Unix()), // end time subnetValidatorNodeID, // Node ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) require.NoError(err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) @@ -512,16 +497,17 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { uint64(subnetVdr1EndTime.Add(time.Second).Unix()), // Start time uint64(subnetVdr1EndTime.Add(time.Second).Add(defaultMinStakingDuration).Unix()), // end time subnetVdr2NodeID, // Node ID - testSubnet1.ID(), // Subnet ID - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, // Keys + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, // Keys ids.ShortEmpty, // reward address ) require.NoError(err) - staker = state.NewPendingStaker( + staker, err = state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) @@ -549,31 +535,35 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { } require.NoError(tx.Unsigned.Visit(&executor)) - _, err = 
executor.OnCommitState.GetCurrentValidator(testSubnet1.ID(), subnetValidatorNodeID) + _, err = executor.OnCommitState.GetCurrentValidator(subnetID, subnetValidatorNodeID) require.ErrorIs(err, database.ErrNotFound) // Check VM Validators are removed successfully - executor.OnCommitState.Apply(env.state) + require.NoError(executor.OnCommitState.Apply(env.state)) + env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) - require.False(env.config.Validators.Contains(testSubnet1.ID(), subnetVdr2NodeID)) - require.False(env.config.Validators.Contains(testSubnet1.ID(), subnetValidatorNodeID)) + require.False(validators.Contains(env.config.Validators, subnetID, subnetVdr2NodeID)) + require.False(validators.Contains(env.config.Validators, subnetID, subnetValidatorNodeID)) } -func TestWhitelistedSubnet(t *testing.T) { - for _, whitelist := range []bool{true, false} { - t.Run(fmt.Sprintf("whitelisted %t", whitelist), func(t *testing.T) { - env := newEnvironment() +func TestTrackedSubnet(t *testing.T) { + for _, tracked := range []bool{true, false} { + t.Run(fmt.Sprintf("tracked %t", tracked), func(t *testing.T) { + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() dummyHeight := uint64(1) - if whitelist { - env.config.WhitelistedSubnets.Add(testSubnet1.ID()) + + subnetID := testSubnet1.ID() + if tracked { + env.config.TrackedSubnets.Add(subnetID) + env.config.Validators.Add(subnetID, validators.NewSet()) } + // Add a subnet validator to the staker set subnetValidatorNodeID := preFundedKeys[0].PublicKey().Address() @@ -584,42 +574,34 @@ func TestWhitelistedSubnet(t *testing.T) { uint64(subnetVdr1StartTime.Unix()), // Start time uint64(subnetVdr1EndTime.Unix()), // end time ids.NodeID(subnetValidatorNodeID), // Node ID - testSubnet1.ID(), // Subnet ID - 
[]*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + subnetID, // Subnet ID + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( tx.ID(), tx.Unsigned.(*txs.AddSubnetValidatorTx), ) + require.NoError(err) env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) // Advance time to the staker's start time. env.clk.Set(subnetVdr1StartTime) tx, err = env.txBuilder.NewAdvanceTimeTx(subnetVdr1StartTime) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -628,25 +610,23 @@ func TestWhitelistedSubnet(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + require.NoError(executor.OnCommitState.Apply(env.state)) - executor.OnCommitState.Apply(env.state) env.state.SetHeight(dummyHeight) - require.NoError(t, env.state.Commit()) - require.Equal(t, whitelist, env.config.Validators.Contains(testSubnet1.ID(), ids.NodeID(subnetValidatorNodeID))) + require.NoError(env.state.Commit()) + require.Equal(tracked, validators.Contains(env.config.Validators, subnetID, ids.NodeID(subnetValidatorNodeID))) }) } } func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - 
t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() dummyHeight := uint64(1) @@ -660,22 +640,18 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { pendingValidatorStartTime, pendingValidatorEndTime, nodeID, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ) - require.NoError(t, err) + require.NoError(err) tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) - require.NoError(t, err) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -684,17 +660,18 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.NoError(t, err) + require.NoError(err) + + require.NoError(executor.OnCommitState.Apply(env.state)) - executor.OnCommitState.Apply(env.state) env.state.SetHeight(dummyHeight) - require.NoError(t, env.state.Commit()) + require.NoError(env.state.Commit()) // Test validator weight before delegation - primarySet, ok := env.config.Validators.GetValidators(constants.PrimaryNetworkID) - require.True(t, ok) - vdrWeight, _ := primarySet.GetWeight(nodeID) - require.Equal(t, env.config.MinValidatorStake, vdrWeight) + primarySet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) + require.True(ok) + vdrWeight := primarySet.GetWeight(nodeID) + require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) @@ -706,38 +683,35 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { uint64(pendingDelegatorEndTime.Unix()), nodeID, preFundedKeys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{ + []*secp256k1.PrivateKey{ preFundedKeys[0], preFundedKeys[1], 
preFundedKeys[4], }, ids.ShortEmpty, ) - require.NoError(t, err) + require.NoError(err) - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( addDelegatorTx.ID(), addDelegatorTx.Unsigned.(*txs.AddDelegatorTx), ) + require.NoError(err) env.state.PutPendingDelegator(staker) env.state.AddTx(addDelegatorTx, status.Committed) env.state.SetHeight(dummyHeight) - require.NoError(t, env.state.Commit()) + require.NoError(env.state.Commit()) // Advance Time tx, err = env.txBuilder.NewAdvanceTimeTx(pendingDelegatorStartTime) - require.NoError(t, err) + require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor = ProposalTxExecutor{ OnCommitState: onCommitState, @@ -746,24 +720,24 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.NoError(t, err) + require.NoError(err) + + require.NoError(executor.OnCommitState.Apply(env.state)) - executor.OnCommitState.Apply(env.state) env.state.SetHeight(dummyHeight) - require.NoError(t, env.state.Commit()) + require.NoError(env.state.Commit()) // Test validator weight after delegation - vdrWeight, _ = primarySet.GetWeight(nodeID) - require.Equal(t, env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) + vdrWeight = primarySet.GetWeight(nodeID) + require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() dummyHeight := uint64(1) @@ -772,21 +746,17 @@ func 
TestAdvanceTimeTxDelegatorStakers(t *testing.T) { pendingValidatorStartTime := defaultGenesisTime.Add(1 * time.Second) pendingValidatorEndTime := pendingValidatorStartTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() - _, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}) - require.NoError(t, err) + _, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) + require.NoError(err) tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) - require.NoError(t, err) + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -795,17 +765,18 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.NoError(t, err) + require.NoError(err) + + require.NoError(executor.OnCommitState.Apply(env.state)) - executor.OnCommitState.Apply(env.state) env.state.SetHeight(dummyHeight) - require.NoError(t, env.state.Commit()) + require.NoError(env.state.Commit()) // Test validator weight before delegation - primarySet, ok := env.config.Validators.GetValidators(constants.PrimaryNetworkID) - require.True(t, ok) - vdrWeight, _ := primarySet.GetWeight(nodeID) - require.Equal(t, env.config.MinValidatorStake, vdrWeight) + primarySet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) + require.True(ok) + vdrWeight := primarySet.GetWeight(nodeID) + require.Equal(env.config.MinValidatorStake, vdrWeight) // Add delegator pendingDelegatorStartTime := pendingValidatorStartTime.Add(1 * time.Second) @@ -816,34 +787,31 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { 
uint64(pendingDelegatorEndTime.Unix()), nodeID, preFundedKeys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]}, + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1], preFundedKeys[4]}, ids.ShortEmpty, ) - require.NoError(t, err) + require.NoError(err) - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( addDelegatorTx.ID(), addDelegatorTx.Unsigned.(*txs.AddDelegatorTx), ) + require.NoError(err) env.state.PutPendingDelegator(staker) env.state.AddTx(addDelegatorTx, status.Committed) env.state.SetHeight(dummyHeight) - require.NoError(t, env.state.Commit()) + require.NoError(env.state.Commit()) // Advance Time tx, err = env.txBuilder.NewAdvanceTimeTx(pendingDelegatorStartTime) - require.NoError(t, err) + require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor = ProposalTxExecutor{ OnCommitState: onCommitState, @@ -852,43 +820,37 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.NoError(t, err) + require.NoError(err) + + require.NoError(executor.OnCommitState.Apply(env.state)) - executor.OnCommitState.Apply(env.state) env.state.SetHeight(dummyHeight) - require.NoError(t, env.state.Commit()) + require.NoError(env.state.Commit()) // Test validator weight after delegation - vdrWeight, _ = primarySet.GetWeight(nodeID) - require.Equal(t, env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) + vdrWeight = primarySet.GetWeight(nodeID) + require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } // Test method InitiallyPrefersCommit func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false 
/*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.clk.Set(defaultGenesisTime) // VM's clock reads the genesis time // Proposed advancing timestamp to 1 second after sync bound tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(SyncBound)) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -897,39 +859,30 @@ func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.NoError(t, err) + require.NoError(err) - if !executor.PrefersCommit { - t.Fatal("should prefer to commit this tx because its proposed timestamp it's within sync bound") - } + require.True(executor.PrefersCommit, "should prefer to commit this tx because its proposed timestamp it's within sync bound") } func TestAdvanceTimeTxAfterBanff(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() env.clk.Set(defaultGenesisTime) // VM's clock reads the genesis time env.config.BanffTime = defaultGenesisTime.Add(SyncBound) // Proposed advancing timestamp to the banff timestamp tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(SyncBound)) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := 
state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -938,35 +891,32 @@ func TestAdvanceTimeTxAfterBanff(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.ErrorIs(t, err, errAdvanceTimeTxIssuedAfterBanff) + require.ErrorIs(err, errAdvanceTimeTxIssuedAfterBanff) } // Ensure marshaling/unmarshaling works func TestAdvanceTimeTxUnmarshal(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() tx, err := env.txBuilder.NewAdvanceTimeTx(defaultGenesisTime) - if err != nil { - t.Fatal(err) - } + require.NoError(err) bytes, err := txs.Codec.Marshal(txs.Version, tx) - if err != nil { - t.Fatal(err) - } + require.NoError(err) var unmarshaledTx txs.Tx - if _, err := txs.Codec.Unmarshal(bytes, &unmarshaledTx); err != nil { - t.Fatal(err) - } else if tx.Unsigned.(*txs.AdvanceTimeTx).Time != unmarshaledTx.Unsigned.(*txs.AdvanceTimeTx).Time { - t.Fatal("should have same timestamp") - } + _, err = txs.Codec.Unmarshal(bytes, &unmarshaledTx) + require.NoError(err) + + require.Equal( + tx.Unsigned.(*txs.AdvanceTimeTx).Time, + unmarshaledTx.Unsigned.(*txs.AdvanceTimeTx).Time, + ) } func addPendingValidator( @@ -974,7 +924,7 @@ func addPendingValidator( startTime time.Time, endTime time.Time, nodeID ids.NodeID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, ) (*txs.Tx, error) { addPendingValidatorTx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, @@ -990,10 +940,13 @@ func addPendingValidator( return nil, err } - staker := state.NewPendingStaker( + staker, err := state.NewPendingStaker( addPendingValidatorTx.ID(), addPendingValidatorTx.Unsigned.(*txs.AddValidatorTx), ) + if err != 
nil { + return nil, err + } env.state.PutPendingValidator(staker) env.state.AddTx(addPendingValidatorTx, status.Committed) diff --git a/avalanchego/vms/platformvm/txs/executor/atomic_tx_executor.go b/avalanchego/vms/platformvm/txs/executor/atomic_tx_executor.go index ade0dc51..266ba4b2 100644 --- a/avalanchego/vms/platformvm/txs/executor/atomic_tx_executor.go +++ b/avalanchego/vms/platformvm/txs/executor/atomic_tx_executor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -6,11 +6,12 @@ package executor import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ txs.Visitor = &AtomicTxExecutor{} +var _ txs.Visitor = (*AtomicTxExecutor)(nil) // atomicTxExecutor is used to execute atomic transactions pre-AP5. After AP5 // the execution was moved to be performed inside of the standardTxExecutor. 
@@ -23,21 +24,45 @@ type AtomicTxExecutor struct { // outputs of visitor execution OnAccept state.Diff - Inputs ids.Set + Inputs set.Set[ids.ID] AtomicRequests map[ids.ID]*atomic.Requests } -func (*AtomicTxExecutor) AddValidatorTx(*txs.AddValidatorTx) error { return errWrongTxType } -func (*AtomicTxExecutor) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { return errWrongTxType } -func (*AtomicTxExecutor) AddDelegatorTx(*txs.AddDelegatorTx) error { return errWrongTxType } -func (*AtomicTxExecutor) CreateChainTx(*txs.CreateChainTx) error { return errWrongTxType } -func (*AtomicTxExecutor) CreateSubnetTx(*txs.CreateSubnetTx) error { return errWrongTxType } -func (*AtomicTxExecutor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { return errWrongTxType } -func (*AtomicTxExecutor) RewardValidatorTx(*txs.RewardValidatorTx) error { return errWrongTxType } +func (*AtomicTxExecutor) AddValidatorTx(*txs.AddValidatorTx) error { + return errWrongTxType +} + +func (*AtomicTxExecutor) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { + return errWrongTxType +} + +func (*AtomicTxExecutor) AddDelegatorTx(*txs.AddDelegatorTx) error { + return errWrongTxType +} + +func (*AtomicTxExecutor) CreateChainTx(*txs.CreateChainTx) error { + return errWrongTxType +} + +func (*AtomicTxExecutor) CreateSubnetTx(*txs.CreateSubnetTx) error { + return errWrongTxType +} + +func (*AtomicTxExecutor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { + return errWrongTxType +} + +func (*AtomicTxExecutor) RewardValidatorTx(*txs.RewardValidatorTx) error { + return errWrongTxType +} + func (*AtomicTxExecutor) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { return errWrongTxType } -func (*AtomicTxExecutor) TransformSubnetTx(*txs.TransformSubnetTx) error { return errWrongTxType } + +func (*AtomicTxExecutor) TransformSubnetTx(*txs.TransformSubnetTx) error { + return errWrongTxType +} func (*AtomicTxExecutor) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { return 
errWrongTxType diff --git a/avalanchego/vms/platformvm/txs/executor/backend.go b/avalanchego/vms/platformvm/txs/executor/backend.go index 4f7ac74c..f043521a 100644 --- a/avalanchego/vms/platformvm/txs/executor/backend.go +++ b/avalanchego/vms/platformvm/txs/executor/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -22,5 +22,5 @@ type Backend struct { FlowChecker utxo.Verifier Uptimes uptime.Manager Rewards reward.Calculator - Bootstrapped *utils.AtomicBool + Bootstrapped *utils.Atomic[bool] } diff --git a/avalanchego/vms/platformvm/txs/executor/create_chain_test.go b/avalanchego/vms/platformvm/txs/executor/create_chain_test.go index 551c997f..f451c540 100644 --- a/avalanchego/vms/platformvm/txs/executor/create_chain_test.go +++ b/avalanchego/vms/platformvm/txs/executor/create_chain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -22,12 +22,11 @@ import ( // Ensure Execute fails when there are not enough control sigs func TestCreateChainTxInsufficientControlSigs(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() tx, err := env.txBuilder.NewCreateChainTx( @@ -36,20 +35,16 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { constants.AVMID, nil, "chain name", - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{preFundedKeys[0], preFundedKeys[1]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Remove a signature tx.Creds[0].(*secp256k1fx.Credential).Sigs = tx.Creds[0].(*secp256k1fx.Credential).Sigs[1:] stateDiff, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -57,19 +52,16 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have erred because a sig is missing") - } + require.Error(err, "should have erred because a sig is missing") } // Ensure Execute fails when an incorrect control signature is given func TestCreateChainTxWrongControlSig(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) 
env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() tx, err := env.txBuilder.NewCreateChainTx( @@ -78,31 +70,23 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { constants.AVMID, nil, "chain name", - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Generate new, random key to sign tx with - factory := crypto.FactorySECP256K1R{} + factory := secp256k1.Factory{} key, err := factory.NewPrivateKey() - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Replace a valid signature with one from another key sig, err := key.SignHash(hashing.ComputeHash256(tx.Unsigned.Bytes())) - if err != nil { - t.Fatal(err) - } + require.NoError(err) copy(tx.Creds[0].(*secp256k1fx.Credential).Sigs[0][:], sig) stateDiff, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -110,20 +94,17 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because a sig is invalid") - } + require.Error(err, "should have failed verification because a sig is invalid") } // Ensure Execute fails when the Subnet the blockchain specifies as // its validator set doesn't exist func TestCreateChainTxNoSuchSubnet(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() tx, err := env.txBuilder.NewCreateChainTx( @@ -132,19 +113,15 @@ func 
TestCreateChainTxNoSuchSubnet(t *testing.T) { constants.AVMID, nil, "chain name", - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) tx.Unsigned.(*txs.CreateChainTx).SubnetID = ids.GenerateTestID() stateDiff, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -152,19 +129,16 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because subnet doesn't exist") - } + require.Error(err, "should have failed because subnet doesn't exist") } // Ensure valid tx passes semanticVerify func TestCreateChainTxValid(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() tx, err := env.txBuilder.NewCreateChainTx( @@ -173,17 +147,13 @@ func TestCreateChainTxValid(t *testing.T) { constants.AVMID, nil, "chain name", - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -191,9 +161,7 @@ func TestCreateChainTxValid(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err != nil { - t.Fatalf("expected tx to pass verification but got error: %v", err) - } + require.NoError(err) } func 
TestCreateChainTxAP3FeeChange(t *testing.T) { @@ -227,15 +195,13 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - env := newEnvironment() + env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) env.config.ApricotPhase3Time = ap3Time defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() - ins, outs, _, signers, err := env.utxosHandler.Spend(preFundedKeys, 0, test.fee, ids.ShortEmpty) + ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) require.NoError(err) subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.ID(), preFundedKeys) diff --git a/avalanchego/vms/platformvm/txs/executor/create_subnet_test.go b/avalanchego/vms/platformvm/txs/executor/create_subnet_test.go index c6300c8d..2446d841 100644 --- a/avalanchego/vms/platformvm/txs/executor/create_subnet_test.go +++ b/avalanchego/vms/platformvm/txs/executor/create_subnet_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -48,14 +48,14 @@ func TestCreateSubnetTxAP3FeeChange(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - env := newEnvironment() + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.config.ApricotPhase3Time = ap3Time env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) }() - ins, outs, _, signers, err := env.utxosHandler.Spend(preFundedKeys, 0, test.fee, ids.ShortEmpty) + ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) require.NoError(err) // Create the tx diff --git a/avalanchego/vms/platformvm/txs/executor/export_test.go b/avalanchego/vms/platformvm/txs/executor/export_test.go index 4f6a93b3..ca10b3c1 100644 --- a/avalanchego/vms/platformvm/txs/executor/export_test.go +++ b/avalanchego/vms/platformvm/txs/executor/export_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -12,23 +12,21 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/platformvm/state" ) func TestNewExportTx(t *testing.T) { - env := newEnvironment() + env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(t, shutdownEnvironment(env)) }() type test struct { description string destinationChainID ids.ID - sourceKeys []*crypto.PrivateKeySECP256K1R + sourceKeys []*secp256k1.PrivateKey timestamp time.Time shouldErr bool shouldVerify bool @@ -40,7 +38,7 @@ func TestNewExportTx(t *testing.T) { { description: "P->X export", destinationChainID: xChainID, - sourceKeys: []*crypto.PrivateKeySECP256K1R{sourceKey}, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: defaultValidateStartTime, shouldErr: false, shouldVerify: true, @@ -48,7 +46,7 @@ func TestNewExportTx(t *testing.T) { { description: "P->C export", destinationChainID: cChainID, - sourceKeys: []*crypto.PrivateKeySECP256K1R{sourceKey}, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: env.config.ApricotPhase5Time, shouldErr: false, shouldVerify: true, diff --git a/avalanchego/vms/platformvm/txs/executor/helpers_test.go b/avalanchego/vms/platformvm/txs/executor/helpers_test.go index 0deab948..6fc814d8 100644 --- a/avalanchego/vms/platformvm/txs/executor/helpers_test.go +++ b/avalanchego/vms/platformvm/txs/executor/helpers_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( + "context" "errors" "fmt" "math" @@ -25,7 +26,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" @@ -61,7 +62,7 @@ var ( defaultValidateEndTime = defaultValidateStartTime.Add(10 * defaultMinStakingDuration) defaultMinValidatorStake = 5 * units.MilliAvax defaultBalance = 100 * defaultMinValidatorStake - preFundedKeys = crypto.BuildTestKeys() + preFundedKeys = secp256k1.TestKeys() avaxAssetID = ids.ID{'y', 'e', 'e', 't'} defaultTxFee = uint64(100) xChainID = ids.Empty.Prefix(0) @@ -72,7 +73,10 @@ var ( testSubnet1ControlKeys = preFundedKeys[0:3] // Used to create and use keys. - testKeyfactory crypto.FactorySECP256K1R + testKeyfactory secp256k1.Factory + + errMissingPrimaryValidators = errors.New("missing primary validator set") + errMissing = errors.New("missing") ) type mutableSharedMemory struct { @@ -80,7 +84,7 @@ type mutableSharedMemory struct { } type environment struct { - isBootstrapped *utils.AtomicBool + isBootstrapped *utils.Atomic[bool] config *config.Config clk *mockable.Clock baseDB *versiondb.Database @@ -108,38 +112,25 @@ func (e *environment) SetState(blkID ids.ID, chainState state.Chain) { e.states[blkID] = chainState } -// TODO: snLookup currently duplicated in vm_test.go. 
Remove duplication -type snLookup struct { - chainsToSubnet map[ids.ID]ids.ID -} - -func (sn *snLookup) SubnetID(chainID ids.ID) (ids.ID, error) { - subnetID, ok := sn.chainsToSubnet[chainID] - if !ok { - return ids.ID{}, errors.New("") - } - return subnetID, nil -} - -func newEnvironment() *environment { - var isBootstrapped utils.AtomicBool - isBootstrapped.SetValue(true) +func newEnvironment(postBanff, postCortina bool) *environment { + var isBootstrapped utils.Atomic[bool] + isBootstrapped.Set(true) - config := defaultConfig() - clk := defaultClock() + config := defaultConfig(postBanff, postCortina) + clk := defaultClock(postBanff || postCortina) baseDBManager := manager.NewMemDB(version.CurrentDatabase) baseDB := versiondb.New(baseDBManager.Current().Database) ctx, msm := defaultCtx(baseDB) - fx := defaultFx(&clk, ctx.Log, isBootstrapped.GetValue()) + fx := defaultFx(&clk, ctx.Log, isBootstrapped.Get()) rewards := reward.NewCalculator(config.RewardConfig) baseState := defaultState(&config, ctx, baseDB, rewards) atomicUTXOs := avax.NewAtomicUTXOManager(ctx.SharedMemory, txs.Codec) uptimes := uptime.NewManager(baseState) - utxoHandler := utxo.NewHandler(ctx, &clk, baseState, fx) + utxoHandler := utxo.NewHandler(ctx, &clk, fx) txBuilder := builder.New( ctx, @@ -197,7 +188,7 @@ func addSubnet( preFundedKeys[1].PublicKey().Address(), preFundedKeys[2].PublicKey().Address(), }, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, preFundedKeys[0].PublicKey().Address(), ) if err != nil { @@ -221,7 +212,9 @@ func addSubnet( } stateDiff.AddTx(testSubnet1, status.Committed) - stateDiff.Apply(env.state) + if err := stateDiff.Apply(env.state); err != nil { + panic(err) + } } func defaultState( @@ -239,6 +232,7 @@ func defaultState( ctx, metrics.Noop, rewards, + &utils.Atomic[bool]{}, ) if err != nil { panic(err) @@ -261,6 +255,7 @@ func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { ctx := 
snow.DefaultContextTest() ctx.NetworkID = 10 ctx.XChainID = xChainID + ctx.CChainID = cChainID ctx.AVAXAssetID = avaxAssetID atomicDB := prefixdb.New([]byte{1}, db) @@ -271,22 +266,40 @@ func defaultCtx(db database.Database) (*snow.Context, *mutableSharedMemory) { } ctx.SharedMemory = msm - ctx.SNLookup = &snLookup{ - chainsToSubnet: map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, + ctx.ValidatorState = &validators.TestState{ + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + xChainID: constants.PrimaryNetworkID, + cChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errMissing + } + return subnetID, nil }, } return ctx, msm } -func defaultConfig() config.Config { +func defaultConfig(postBanff, postCortina bool) config.Config { + banffTime := mockable.MaxTime + if postBanff { + banffTime = defaultValidateEndTime.Add(-2 * time.Second) + } + cortinaTime := mockable.MaxTime + if postCortina { + cortinaTime = defaultValidateStartTime.Add(-2 * time.Second) + } + + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) return config.Config{ - Chains: chains.MockManager{}, + Chains: chains.TestManager, UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: validators.NewManager(), + Validators: vdrs, TxFee: defaultTxFee, CreateSubnetTxFee: 100 * defaultTxFee, CreateBlockchainTxFee: 100 * defaultTxFee, @@ -303,13 +316,19 @@ func defaultConfig() config.Config { }, ApricotPhase3Time: defaultValidateEndTime, ApricotPhase5Time: defaultValidateEndTime, - BanffTime: mockable.MaxTime, + BanffTime: banffTime, + CortinaTime: cortinaTime, } } -func defaultClock() mockable.Clock { +func defaultClock(postFork bool) mockable.Clock { + now := 
defaultGenesisTime + if postFork { + // 1 second after Banff fork + now = defaultValidateEndTime.Add(-2 * time.Second) + } clk := mockable.Clock{} - clk.Set(defaultGenesisTime) + clk.Set(now) return clk } @@ -319,11 +338,19 @@ type fxVMInt struct { log logging.Logger } -func (fvi *fxVMInt) CodecRegistry() codec.Registry { return fvi.registry } -func (fvi *fxVMInt) Clock() *mockable.Clock { return fvi.clk } -func (fvi *fxVMInt) Logger() logging.Logger { return fvi.log } +func (fvi *fxVMInt) CodecRegistry() codec.Registry { + return fvi.registry +} func (fvi *fxVMInt) EthVerificationEnabled() bool { return false } +func (fvi *fxVMInt) Clock() *mockable.Clock { + return fvi.clk +} + +func (fvi *fxVMInt) Logger() logging.Logger { + return fvi.log +} + func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { fxVMInt := &fxVMInt{ registry: linearcodec.NewDefault(), @@ -396,7 +423,7 @@ func buildGenesisTest(ctx *snow.Context) []byte { buildGenesisResponse := api.BuildGenesisReply{} platformvmSS := api.StaticService{} if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { - panic(fmt.Errorf("problem while building platform chain's genesis state: %v", err)) + panic(fmt.Errorf("problem while building platform chain's genesis state: %w", err)) } genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) @@ -408,21 +435,36 @@ func buildGenesisTest(ctx *snow.Context) []byte { } func shutdownEnvironment(env *environment) error { - if env.isBootstrapped.GetValue() { - primaryValidatorSet, exist := env.config.Validators.GetValidators(constants.PrimaryNetworkID) + if env.isBootstrapped.Get() { + primaryValidatorSet, exist := env.config.Validators.Get(constants.PrimaryNetworkID) if !exist { - return errors.New("no default subnet validators") + return errMissingPrimaryValidators } primaryValidators := primaryValidatorSet.List() validatorIDs := make([]ids.NodeID, 
len(primaryValidators)) for i, vdr := range primaryValidators { - validatorIDs[i] = vdr.ID() + validatorIDs[i] = vdr.NodeID } - - if err := env.uptimes.Shutdown(validatorIDs); err != nil { + if err := env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { return err } + + for subnetID := range env.config.TrackedSubnets { + vdrs, exist := env.config.Validators.Get(subnetID) + if !exist { + return nil + } + validators := vdrs.List() + + validatorIDs := make([]ids.NodeID, len(validators)) + for i, vdr := range validators { + validatorIDs[i] = vdr.NodeID + } + if err := env.uptimes.StopTracking(validatorIDs, subnetID); err != nil { + return err + } + } env.state.SetHeight( /*height*/ math.MaxUint64) if err := env.state.Commit(); err != nil { return err diff --git a/avalanchego/vms/platformvm/txs/executor/import_test.go b/avalanchego/vms/platformvm/txs/executor/import_test.go index bc5c3738..bb2bdd15 100644 --- a/avalanchego/vms/platformvm/txs/executor/import_test.go +++ b/avalanchego/vms/platformvm/txs/executor/import_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -13,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -21,29 +21,24 @@ import ( ) func TestNewImportTx(t *testing.T) { - env := newEnvironment() + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(t, shutdownEnvironment(env)) }() type test struct { description string sourceChainID ids.ID sharedMemory atomic.SharedMemory - sourceKeys []*crypto.PrivateKeySECP256K1R + sourceKeys []*secp256k1.PrivateKey timestamp time.Time shouldErr bool shouldVerify bool } - factory := crypto.FactorySECP256K1R{} - sourceKeyIntf, err := factory.NewPrivateKey() - if err != nil { - t.Fatal(err) - } - sourceKey := sourceKeyIntf.(*crypto.PrivateKeySECP256K1R) + factory := secp256k1.Factory{} + sourceKey, err := factory.NewPrivateKey() + require.NoError(t, err) cnt := new(byte) @@ -74,19 +69,24 @@ func TestNewImportTx(t *testing.T) { }, } utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + inputID := utxo.InputID() - if err := peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{env.ctx.ChainID: {PutRequests: []*atomic.Element{{ - Key: inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - sourceKey.PublicKey().Address().Bytes(), + err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + env.ctx.ChainID: { + PutRequests: []*atomic.Element{ + { + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + sourceKey.PublicKey().Address().Bytes(), + }, + }, + }, }, - }}}}); err != nil { - t.Fatal(err) - } + }, + ) + 
require.NoError(t, err) } return sm @@ -104,7 +104,7 @@ func TestNewImportTx(t *testing.T) { env.ctx.AVAXAssetID: env.config.TxFee - 1, }, ), - sourceKeys: []*crypto.PrivateKeySECP256K1R{sourceKey}, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, shouldErr: true, }, { @@ -116,7 +116,7 @@ func TestNewImportTx(t *testing.T) { env.ctx.AVAXAssetID: env.config.TxFee, }, ), - sourceKeys: []*crypto.PrivateKeySECP256K1R{sourceKey}, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, shouldErr: false, shouldVerify: true, }, @@ -129,28 +129,13 @@ func TestNewImportTx(t *testing.T) { env.ctx.AVAXAssetID: env.config.TxFee, }, ), - sourceKeys: []*crypto.PrivateKeySECP256K1R{sourceKey}, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: env.config.ApricotPhase5Time, shouldErr: false, shouldVerify: true, }, { - description: "attempting to import non-avax from X-chain pre-banff", - sourceChainID: env.ctx.XChainID, - sharedMemory: fundedSharedMemory( - env.ctx.XChainID, - map[ids.ID]uint64{ - env.ctx.AVAXAssetID: env.config.TxFee, - customAssetID: 1, - }, - ), - sourceKeys: []*crypto.PrivateKeySECP256K1R{sourceKey}, - timestamp: env.config.BanffTime.Add(-time.Second), - shouldErr: false, - shouldVerify: false, - }, - { - description: "attempting to import non-avax from X-chain post-banff", + description: "attempting to import non-avax from X-chain", sourceChainID: env.ctx.XChainID, sharedMemory: fundedSharedMemory( env.ctx.XChainID, @@ -159,7 +144,7 @@ func TestNewImportTx(t *testing.T) { customAssetID: 1, }, ), - sourceKeys: []*crypto.PrivateKeySECP256K1R{sourceKey}, + sourceKeys: []*secp256k1.PrivateKey{sourceKey}, timestamp: env.config.BanffTime, shouldErr: false, shouldVerify: true, @@ -200,7 +185,7 @@ func TestNewImportTx(t *testing.T) { totalOut += out.Out.Amount() } - require.Equal(env.config.TxFee, totalIn-totalOut, "burned too much") + require.Equal(env.config.TxFee, totalIn-totalOut) fakedState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff 
--git a/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor.go b/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor.go index 1f80c08f..af0122e8 100644 --- a/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor.go +++ b/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -17,7 +17,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/utxo" ) const ( @@ -31,7 +30,7 @@ const ( ) var ( - _ txs.Visitor = &ProposalTxExecutor{} + _ txs.Visitor = (*ProposalTxExecutor)(nil) errChildBlockNotAfterParent = errors.New("proposed timestamp not after current chain time") errInvalidState = errors.New("generated output isn't valid state") @@ -51,10 +50,11 @@ type ProposalTxExecutor struct { *Backend Tx *txs.Tx // [OnCommitState] is the state used for validation. - // In practice, both [OnCommitState] and [onAbortState] are - // identical when passed into this struct, so we could use either. // [OnCommitState] is modified by this struct's methods to // reflect changes made to the state if the proposal is committed. + // + // Invariant: Both [OnCommitState] and [OnAbortState] represent the same + // state when provided to this struct. OnCommitState state.Diff // [OnAbortState] is modified by this struct's methods to // reflect changes made to the state if the proposal is aborted. 
@@ -67,14 +67,29 @@ type ProposalTxExecutor struct { PrefersCommit bool } -func (*ProposalTxExecutor) CreateChainTx(*txs.CreateChainTx) error { return errWrongTxType } -func (*ProposalTxExecutor) CreateSubnetTx(*txs.CreateSubnetTx) error { return errWrongTxType } -func (*ProposalTxExecutor) ImportTx(*txs.ImportTx) error { return errWrongTxType } -func (*ProposalTxExecutor) ExportTx(*txs.ExportTx) error { return errWrongTxType } +func (*ProposalTxExecutor) CreateChainTx(*txs.CreateChainTx) error { + return errWrongTxType +} + +func (*ProposalTxExecutor) CreateSubnetTx(*txs.CreateSubnetTx) error { + return errWrongTxType +} + +func (*ProposalTxExecutor) ImportTx(*txs.ImportTx) error { + return errWrongTxType +} + +func (*ProposalTxExecutor) ExportTx(*txs.ExportTx) error { + return errWrongTxType +} + func (*ProposalTxExecutor) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { return errWrongTxType } -func (*ProposalTxExecutor) TransformSubnetTx(*txs.TransformSubnetTx) error { return errWrongTxType } + +func (*ProposalTxExecutor) TransformSubnetTx(*txs.TransformSubnetTx) error { + return errWrongTxType +} func (*ProposalTxExecutor) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { return errWrongTxType @@ -112,18 +127,22 @@ func (e *ProposalTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { // Set up the state if this tx is committed // Consume the UTXOs - utxo.Consume(e.OnCommitState, tx.Ins) + avax.Consume(e.OnCommitState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnCommitState, txID, tx.Outs) + avax.Produce(e.OnCommitState, txID, tx.Outs) + + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return err + } - newStaker := state.NewPendingStaker(txID, tx) e.OnCommitState.PutPendingValidator(newStaker) // Set up the state if this tx is aborted // Consume the UTXOs - utxo.Consume(e.OnAbortState, tx.Ins) + avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnAbortState, txID, 
onAbortOuts) + avax.Produce(e.OnAbortState, txID, onAbortOuts) e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil @@ -156,18 +175,22 @@ func (e *ProposalTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) // Set up the state if this tx is committed // Consume the UTXOs - utxo.Consume(e.OnCommitState, tx.Ins) + avax.Consume(e.OnCommitState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnCommitState, txID, tx.Outs) + avax.Produce(e.OnCommitState, txID, tx.Outs) + + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return err + } - newStaker := state.NewPendingStaker(txID, tx) e.OnCommitState.PutPendingValidator(newStaker) // Set up the state if this tx is aborted // Consume the UTXOs - utxo.Consume(e.OnAbortState, tx.Ins) + avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnAbortState, txID, tx.Outs) + avax.Produce(e.OnAbortState, txID, tx.Outs) e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil @@ -201,18 +224,22 @@ func (e *ProposalTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { // Set up the state if this tx is committed // Consume the UTXOs - utxo.Consume(e.OnCommitState, tx.Ins) + avax.Consume(e.OnCommitState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnCommitState, txID, tx.Outs) + avax.Produce(e.OnCommitState, txID, tx.Outs) + + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return err + } - newStaker := state.NewPendingStaker(txID, tx) e.OnCommitState.PutPendingDelegator(newStaker) // Set up the state if this tx is aborted // Consume the UTXOs - utxo.Consume(e.OnAbortState, tx.Ins) + avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs - utxo.Produce(e.OnAbortState, txID, onAbortOuts) + avax.Produce(e.OnAbortState, txID, onAbortOuts) e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil @@ -355,6 +382,8 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error e.OnAbortState.AddUTXO(utxo) } + 
offset := 0 + // Provide the reward here if stakerToRemove.PotentialReward > 0 { validationRewardsOwner := uStakerTx.ValidationRewardsOwner() @@ -378,8 +407,54 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error e.OnCommitState.AddUTXO(utxo) e.OnCommitState.AddRewardUTXO(tx.TxID, utxo) + + offset++ + } + + // Provide the accrued delegatee rewards from successful delegations here. + delegateeReward, err := e.OnCommitState.GetDelegateeReward( + stakerToRemove.SubnetID, + stakerToRemove.NodeID, + ) + if err != nil { + return fmt.Errorf("failed to fetch accrued delegatee rewards: %w", err) } + if delegateeReward > 0 { + delegationRewardsOwner := uStakerTx.DelegationRewardsOwner() + outIntf, err := e.Fx.CreateOutput(delegateeReward, delegationRewardsOwner) + if err != nil { + return fmt.Errorf("failed to create output: %w", err) + } + out, ok := outIntf.(verify.State) + if !ok { + return errInvalidState + } + + onCommitUtxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: tx.TxID, + OutputIndex: uint32(len(outputs) + len(stake) + offset), + }, + Asset: stakeAsset, + Out: out, + } + e.OnCommitState.AddUTXO(onCommitUtxo) + e.OnCommitState.AddRewardUTXO(tx.TxID, onCommitUtxo) + + onAbortUtxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: tx.TxID, + // Note: There is no [offset] if the RewardValidatorTx is + // aborted, because the validator reward is not awarded. + OutputIndex: uint32(len(outputs) + len(stake)), + }, + Asset: stakeAsset, + Out: out, + } + e.OnAbortState.AddUTXO(onAbortUtxo) + e.OnAbortState.AddRewardUTXO(tx.TxID, onAbortUtxo) + } // Invariant: A [txs.DelegatorTx] does not also implement the // [txs.ValidatorTx] interface. 
case txs.DelegatorTx: @@ -477,26 +552,54 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error // Reward the delegatee here if delegateeReward > 0 { - delegationRewardsOwner := vdrTx.DelegationRewardsOwner() - outIntf, err := e.Fx.CreateOutput(delegateeReward, delegationRewardsOwner) - if err != nil { - return fmt.Errorf("failed to create output: %w", err) - } - out, ok := outIntf.(verify.State) - if !ok { - return errInvalidState - } - utxo := &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: tx.TxID, - OutputIndex: uint32(len(outputs) + len(stake) + offset), - }, - Asset: stakeAsset, - Out: out, + if vdrStaker.StartTime.After(e.Config.CortinaTime) { + previousDelegateeReward, err := e.OnCommitState.GetDelegateeReward( + vdrStaker.SubnetID, + vdrStaker.NodeID, + ) + if err != nil { + return fmt.Errorf("failed to get delegatee reward: %w", err) + } + + // Invariant: The rewards calculator can never return a + // [potentialReward] that would overflow the + // accumulated rewards. + newDelegateeReward := previousDelegateeReward + delegateeReward + + // For any validators starting after [CortinaTime], we defer rewarding the + // [delegateeReward] until their staking period is over. + err = e.OnCommitState.SetDelegateeReward( + vdrStaker.SubnetID, + vdrStaker.NodeID, + newDelegateeReward, + ) + if err != nil { + return fmt.Errorf("failed to update delegatee reward: %w", err) + } + } else { + // For any validators who started prior to [CortinaTime], we issue the + // [delegateeReward] immediately. 
+ delegationRewardsOwner := vdrTx.DelegationRewardsOwner() + outIntf, err := e.Fx.CreateOutput(delegateeReward, delegationRewardsOwner) + if err != nil { + return fmt.Errorf("failed to create output: %w", err) + } + out, ok := outIntf.(verify.State) + if !ok { + return errInvalidState + } + utxo := &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: tx.TxID, + OutputIndex: uint32(len(outputs) + len(stake) + offset), + }, + Asset: stakeAsset, + Out: out, + } + + e.OnCommitState.AddUTXO(utxo) + e.OnCommitState.AddRewardUTXO(tx.TxID, utxo) } - - e.OnCommitState.AddUTXO(utxo) - e.OnCommitState.AddRewardUTXO(tx.TxID, utxo) } default: // Invariant: Permissioned stakers are removed by the advancement of @@ -511,7 +614,7 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error if err != nil { return err } - newSupply, err := math.Sub64(currentSupply, stakerToRemove.PotentialReward) + newSupply, err := math.Sub(currentSupply, stakerToRemove.PotentialReward) if err != nil { return err } @@ -536,6 +639,7 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error // TODO: calculate subnet uptimes uptime, err := e.Uptimes.CalculateUptimePercentFrom( primaryNetworkValidator.NodeID, + constants.PrimaryNetworkID, primaryNetworkValidator.StartTime, ) if err != nil { @@ -695,14 +799,14 @@ func GetMaxWeight( if !delegator.NextTime.Before(startTime) { // We have advanced time to be at the inside of the delegation // window. Make sure that the max weight is updated accordingly. - currentMax = math.Max64(currentMax, currentWeight) + currentMax = math.Max(currentMax, currentWeight) } var op func(uint64, uint64) (uint64, error) if isAdded { op = math.Add64 } else { - op = math.Sub64 + op = math.Sub[uint64] } currentWeight, err = op(currentWeight, delegator.Weight) if err != nil { @@ -712,5 +816,5 @@ func GetMaxWeight( // Because we assume [startTime] < [endTime], we have advanced time to // be at the end of the delegation window. 
Make sure that the max weight is // updated accordingly. - return math.Max64(currentMax, currentWeight), nil + return math.Max(currentMax, currentWeight), nil } diff --git a/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor_test.go index 5567ffe4..5ea50eb8 100644 --- a/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/avalanchego/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -9,8 +9,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" @@ -38,25 +40,23 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { newValidatorID, // node ID rewardAddress, // Reward Address reward.PercentDenominator, // Shares - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(t, err) target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - if err := target.state.Commit(); err != nil { - t.Fatal(err) - } + err = target.state.Commit() + require.NoError(t, err) } // [addMaxStakeValidator] adds a new validator to the primary network's @@ -69,28 +69,26 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { 
newValidatorID, // node ID rewardAddress, // Reward Address reward.PercentDenominator, // Shared - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(t, err) target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - if err := target.state.Commit(); err != nil { - t.Fatal(err) - } + err = target.state.Commit() + require.NoError(t, err) } - dummyH := newEnvironment() + dummyH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) currentTimestamp := dummyH.state.GetTimestamp() type test struct { @@ -99,7 +97,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime uint64 nodeID ids.NodeID rewardAddress ids.ShortID - feeKeys []*crypto.PrivateKeySECP256K1R + feeKeys []*secp256k1.PrivateKey setup func(*environment) AP3Time time.Time shouldErr bool @@ -113,7 +111,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime: uint64(defaultValidateEndTime.Unix()) + 1, nodeID: nodeID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, @@ -125,7 +123,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime: uint64(currentTimestamp.Add(MaxFutureStartTime * 2).Unix()), nodeID: nodeID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, @@ -137,7 +135,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime: uint64(defaultValidateEndTime.Unix()) + 1, nodeID: nodeID, rewardAddress: rewardAddress, - feeKeys: 
[]*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, @@ -149,7 +147,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime: uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()), nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, @@ -161,7 +159,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime: newValidatorEndTime, nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, shouldErr: true, @@ -173,7 +171,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime: newValidatorEndTime + 1, // stop validating subnet after stopping validating primary network nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, shouldErr: true, @@ -185,46 +183,44 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime: newValidatorEndTime, // same end time as for primary network nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, shouldErr: false, description: "valid", }, { - stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(currentTimestamp.Unix()), // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time - nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: 
[]*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, // tx fee payer + stakeAmount: dummyH.config.MinDelegatorStake, // weight + startTime: uint64(currentTimestamp.Unix()), // start time + endTime: uint64(defaultValidateEndTime.Unix()), // end time + nodeID: nodeID, // node ID + rewardAddress: rewardAddress, // Reward Address + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, // tx fee payer setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, description: "starts validating at current timestamp", }, { - stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(defaultValidateStartTime.Unix()), // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time - nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[1]}, // tx fee payer + stakeAmount: dummyH.config.MinDelegatorStake, // weight + startTime: uint64(defaultValidateStartTime.Unix()), // start time + endTime: uint64(defaultValidateEndTime.Unix()), // end time + nodeID: nodeID, // node ID + rewardAddress: rewardAddress, // Reward Address + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer setup: func(target *environment) { // Remove all UTXOs owned by keys[1] utxoIDs, err := target.state.UTXOIDs( preFundedKeys[1].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + for _, utxoID := range utxoIDs { target.state.DeleteUTXO(utxoID) } target.state.SetHeight(dummyHeight) - if err := target.state.Commit(); err != nil { - t.Fatal(err) - } + err = target.state.Commit() + require.NoError(t, err) }, AP3Time: defaultGenesisTime, shouldErr: true, @@ -236,7 +232,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime: newValidatorEndTime, // same end time as for primary network nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: 
[]*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMaxStakeValidator, AP3Time: defaultValidateEndTime, shouldErr: false, @@ -248,7 +244,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { endTime: newValidatorEndTime, // same end time as for primary network nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMaxStakeValidator, AP3Time: defaultGenesisTime, shouldErr: true, @@ -258,12 +254,11 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - freshTH := newEnvironment() + require := require.New(t) + freshTH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) freshTH.config.ApricotPhase3Time = tt.AP3Time defer func() { - if err := shutdownEnvironment(freshTH); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(freshTH)) }() tx, err := freshTH.txBuilder.NewAddDelegatorTx( @@ -275,22 +270,17 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { tt.feeKeys, ids.ShortEmpty, ) - if err != nil { - t.Fatalf("couldn't build tx: %s", err) - } + require.NoError(err) + if tt.setup != nil { tt.setup(freshTH) } onCommitState, err := state.NewDiff(lastAcceptedID, freshTH) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, freshTH) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -299,35 +289,21 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err != nil && !tt.shouldErr { - t.Fatalf("shouldn't have errored but got %s", err) - } else if err == nil && tt.shouldErr { - t.Fatalf("expected test to error but got none") - } - - mempoolExecutor := MempoolTxVerifier{ - Backend: &freshTH.backend, - ParentID: lastAcceptedID, - StateVersions: freshTH, - 
Tx: tx, - } - err = tx.Unsigned.Visit(&mempoolExecutor) - if err != nil && !tt.shouldErr { - t.Fatalf("shouldn't have errored but got %s", err) - } else if err == nil && tt.shouldErr { - t.Fatalf("expected test to error but got none") + if tt.shouldErr { + require.Error(err) + } else { + require.NoError(err) } }) } } func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() nodeID := preFundedKeys[0].PublicKey().Address() @@ -342,22 +318,16 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(defaultValidateEndTime.Unix())+1, ids.NodeID(nodeID), testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -366,9 +336,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator stops validating primary network earlier than subnet") - } + require.Error(err, "should have failed because validator stops validating primary network earlier than subnet") } { @@ -382,22 +350,16 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(defaultValidateEndTime.Unix()), ids.NodeID(nodeID), testSubnet1.ID(), - 
[]*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -406,60 +368,48 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err != nil { - t.Fatal(err) - } + require.NoError(err) } // Add a validator to pending validator set of primary network key, err := testKeyfactory.NewPrivateKey() - if err != nil { - t.Fatal(err) - } + require.NoError(err) pendingDSValidatorID := ids.NodeID(key.PublicKey().Address()) // starts validating primary network 10 seconds after genesis - DSStartTime := defaultGenesisTime.Add(10 * time.Second) - DSEndTime := DSStartTime.Add(5 * defaultMinStakingDuration) + dsStartTime := defaultGenesisTime.Add(10 * time.Second) + dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) addDSTx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, // stake amount - uint64(DSStartTime.Unix()), // start time - uint64(DSEndTime.Unix()), // end time + uint64(dsStartTime.Unix()), // start time + uint64(dsEndTime.Unix()), // end time pendingDSValidatorID, // node ID nodeID, // reward address reward.PercentDenominator, // shares - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) { // Case: Proposed validator isn't in pending or current validator sets tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix()), // start validating 
subnet before primary network - uint64(DSEndTime.Unix()), + uint64(dsStartTime.Unix()), // start validating subnet before primary network + uint64(dsEndTime.Unix()), pendingDSValidatorID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -468,24 +418,22 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator not in the current or pending validator sets of the primary network") - } + require.Error(err, "should have failed because validator not in the current or pending validator sets of the primary network") } - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( addDSTx.ID(), addDSTx.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(addDSTx, status.Committed) dummyHeight := uint64(1) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) // Node with ID key.PublicKey().Address() now a pending validator for primary network @@ -494,26 +442,20 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // but starts validating subnet before primary network tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix())-1, // start validating subnet before primary network - uint64(DSEndTime.Unix()), + 
uint64(dsStartTime.Unix())-1, // start validating subnet before primary network + uint64(dsEndTime.Unix()), pendingDSValidatorID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -522,9 +464,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator starts validating primary network before starting to validate primary network") - } + require.Error(err, "should have failed because validator starts validating primary network before starting to validate primary network") } { @@ -532,26 +472,20 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // but stops validating subnet after primary network tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix()), - uint64(DSEndTime.Unix())+1, // stop validating subnet after stopping validating primary network + uint64(dsStartTime.Unix()), + uint64(dsEndTime.Unix())+1, // stop validating subnet after stopping validating primary network pendingDSValidatorID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) 
onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -560,9 +494,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator stops validating primary network after stops validating primary network") - } + require.Error(err, "should have failed because validator stops validating primary network after stops validating primary network") } { @@ -570,26 +502,20 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { // period validating subnet is subset of time validating primary network tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix()), // same start time as for primary network - uint64(DSEndTime.Unix()), // same end time as for primary network + uint64(dsStartTime.Unix()), // same start time as for primary network + uint64(dsEndTime.Unix()), // same end time as for primary network pendingDSValidatorID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -598,9 +524,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err != nil { - t.Fatal(err) - } + require.NoError(err) } // Case: Proposed validator start validating at/before current timestamp @@ -615,22 +539,16 @@ func 
TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -639,9 +557,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because starts validating at current timestamp") - } + require.Error(err, "should have failed verification because starts validating at current timestamp") } // reset the timestamp @@ -655,25 +571,23 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(defaultValidateEndTime.Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - staker = state.NewCurrentStaker( + staker, err = state.NewCurrentStaker( subnetTx.ID(), subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(subnetTx, status.Committed) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) { // Node with ID 
nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID @@ -683,22 +597,16 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(defaultValidateEndTime.Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -707,16 +615,13 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: duplicateSubnetTx, } err = duplicateSubnetTx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because validator already validating the specified subnet") - } + require.Error(err, "should have failed verification because validator already validating the specified subnet") } env.state.DeleteCurrentValidator(staker) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) { // Case: Too many signatures @@ -726,22 +631,16 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) 
onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -750,9 +649,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because tx has 3 signatures but only 2 needed") - } + require.Error(err, "should have failed verification because tx has 3 signatures but only 2 needed") } { @@ -763,12 +660,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Remove a signature addSubnetValidatorTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) @@ -778,14 +673,10 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { addSubnetValidatorTx.SyntacticallyVerified = false onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -794,9 +685,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because not enough control sigs") - } + require.Error(err, "should have failed verification because not enough control sigs") } { @@ -807,28 +696,21 @@ func 
TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + // Replace a valid signature with one from keys[3] sig, err := preFundedKeys[3].SignHash(hashing.ComputeHash256(tx.Unsigned.Bytes())) - if err != nil { - t.Fatal(err) - } + require.NoError(err) copy(tx.Creds[0].(*secp256k1fx.Credential).Sigs[0][:], sig) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -837,9 +719,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because a control sig is invalid") - } + require.Error(err, "should have failed verification because a control sig is invalid") } { @@ -851,35 +731,29 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - staker = state.NewCurrentStaker( + staker, err = state.NewCurrentStaker( subnetTx.ID(), subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) 
env.state.AddTx(tx, status.Committed) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -888,19 +762,16 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because validator already in pending validator set of the specified subnet") - } + require.Error(err, "should have failed verification because validator already in pending validator set of the specified subnet") } } func TestProposalTxExecuteAddValidator(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() nodeID := ids.GenerateTestNodeID() @@ -914,22 +785,16 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -938,9 +803,7 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == 
nil { - t.Fatal("should've errored because start time too early") - } + require.Error(err, "should've errored because start time too early") } { @@ -952,22 +815,16 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -976,9 +833,7 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should've errored because start time too far in the future") - } + require.Error(err, "should've errored because start time too far in the future") } { @@ -990,22 +845,16 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -1014,9 +863,7 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should've errored because validator already validating") - } + require.Error(err, "should've errored because validator already validating") } { @@ -1029,36 +876,30 @@ func 
TestProposalTxExecuteAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, // shares - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) dummyHeight := uint64(1) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -1067,9 +908,7 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator in pending validator set") - } + require.Error(err, "should have failed because validator in pending validator set") } { @@ -1081,31 +920,24 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Remove all UTXOs owned by preFundedKeys[0] utxoIDs, err := env.state.UTXOIDs(preFundedKeys[0].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + for _, utxoID := range utxoIDs { env.state.DeleteUTXO(utxoID) } onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + 
require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -1114,8 +946,6 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because tx fee paying key has no funds") - } + require.Error(err, "should have failed because tx fee paying key has no funds") } } diff --git a/avalanchego/vms/platformvm/txs/executor/reward_validator_test.go b/avalanchego/vms/platformvm/txs/executor/reward_validator_test.go index 39da0656..24de2a0c 100644 --- a/avalanchego/vms/platformvm/txs/executor/reward_validator_test.go +++ b/avalanchego/vms/platformvm/txs/executor/reward_validator_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -9,10 +9,12 @@ import ( "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" @@ -23,7 +25,7 @@ import ( func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { require := require.New(t) - env := newEnvironment() + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) defer func() { require.NoError(shutdownEnvironment(env)) }() @@ -45,14 +47,10 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) txExecutor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -70,14 +68,10 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) txExecutor = ProposalTxExecutor{ OnCommitState: onCommitState, @@ -92,14 +86,10 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) txExecutor = ProposalTxExecutor{ 
OnCommitState: onCommitState, @@ -124,7 +114,8 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { oldBalance, err := avax.GetBalance(env.state, stakeOwners) require.NoError(err) - txExecutor.OnCommitState.Apply(env.state) + require.NoError(txExecutor.OnCommitState.Apply(env.state)) + env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) @@ -135,7 +126,7 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) - env := newEnvironment() + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) defer func() { require.NoError(shutdownEnvironment(env)) }() @@ -157,14 +148,10 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) txExecutor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -194,14 +181,10 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err = state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) txExecutor = ProposalTxExecutor{ OnCommitState: onCommitState, @@ -226,7 +209,8 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { oldBalance, err := avax.GetBalance(env.state, stakeOwners) require.NoError(err) - txExecutor.OnAbortState.Apply(env.state) + require.NoError(txExecutor.OnAbortState.Apply(env.state)) + env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) @@ -235,13 +219,517 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { require.Equal(oldBalance+stakerToRemove.Weight, onAbortBalance) } +func 
TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + defer func() { + require.NoError(shutdownEnvironment(env)) + }() + dummyHeight := uint64(1) + + vdrRewardAddress := ids.GenerateTestShortID() + delRewardAddress := ids.GenerateTestShortID() + + vdrStartTime := uint64(defaultValidateStartTime.Unix()) + 1 + vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) + vdrNodeID := ids.GenerateTestNodeID() + + vdrTx, err := env.txBuilder.NewAddValidatorTx( + env.config.MinValidatorStake, // stakeAmt + vdrStartTime, + vdrEndTime, + vdrNodeID, // node ID + vdrRewardAddress, // reward address + reward.PercentDenominator/4, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + ids.ShortEmpty, + ) + require.NoError(err) + + delStartTime := vdrStartTime + delEndTime := vdrEndTime + + delTx, err := env.txBuilder.NewAddDelegatorTx( + env.config.MinDelegatorStake, + delStartTime, + delEndTime, + vdrNodeID, + delRewardAddress, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + ids.ShortEmpty, // Change address + ) + require.NoError(err) + + vdrStaker, err := state.NewCurrentStaker( + vdrTx.ID(), + vdrTx.Unsigned.(*txs.AddValidatorTx), + 0, + ) + require.NoError(err) + + delStaker, err := state.NewCurrentStaker( + delTx.ID(), + delTx.Unsigned.(*txs.AddDelegatorTx), + 1000000, + ) + require.NoError(err) + + env.state.PutCurrentValidator(vdrStaker) + env.state.AddTx(vdrTx, status.Committed) + env.state.PutCurrentDelegator(delStaker) + env.state.AddTx(delTx, status.Committed) + env.state.SetTimestamp(time.Unix(int64(delEndTime), 0)) + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) + + // test validator stake + vdrSet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) + require.True(ok) + + stake := vdrSet.GetWeight(vdrNodeID) + require.Equal(env.config.MinValidatorStake+env.config.MinDelegatorStake, stake) + + 
tx, err := env.txBuilder.NewRewardValidatorTx(delTx.ID()) + require.NoError(err) + + onCommitState, err := state.NewDiff(lastAcceptedID, env) + require.NoError(err) + + onAbortState, err := state.NewDiff(lastAcceptedID, env) + require.NoError(err) + + txExecutor := ProposalTxExecutor{ + OnCommitState: onCommitState, + OnAbortState: onAbortState, + Backend: &env.backend, + Tx: tx, + } + err = tx.Unsigned.Visit(&txExecutor) + require.NoError(err) + + vdrDestSet := set.Set[ids.ShortID]{} + vdrDestSet.Add(vdrRewardAddress) + delDestSet := set.Set[ids.ShortID]{} + delDestSet.Add(delRewardAddress) + + expectedReward := uint64(1000000) + + oldVdrBalance, err := avax.GetBalance(env.state, vdrDestSet) + require.NoError(err) + oldDelBalance, err := avax.GetBalance(env.state, delDestSet) + require.NoError(err) + + require.NoError(txExecutor.OnCommitState.Apply(env.state)) + + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) + + // Since the tx was committed, the delegator and the delegatee should be rewarded. + // The delegator reward should be higher since the delegatee's share is 25%. 
+ commitVdrBalance, err := avax.GetBalance(env.state, vdrDestSet) + require.NoError(err) + vdrReward, err := math.Sub(commitVdrBalance, oldVdrBalance) + require.NoError(err) + require.NotZero(vdrReward, "expected delegatee balance to increase because of reward") + + commitDelBalance, err := avax.GetBalance(env.state, delDestSet) + require.NoError(err) + delReward, err := math.Sub(commitDelBalance, oldDelBalance) + require.NoError(err) + require.NotZero(delReward, "expected delegator balance to increase because of reward") + + require.Less(vdrReward, delReward, "the delegator's reward should be greater than the delegatee's because the delegatee's share is 25%") + require.Equal(expectedReward, delReward+vdrReward, "expected total reward to be %d but is %d", expectedReward, delReward+vdrReward) + + require.Equal(env.config.MinValidatorStake, vdrSet.GetWeight(vdrNodeID)) +} + +func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { + require := require.New(t) + env := newEnvironment(true /*=postBanff*/, true /*=postCortina*/) + defer func() { + require.NoError(shutdownEnvironment(env)) + }() + dummyHeight := uint64(1) + + vdrRewardAddress := ids.GenerateTestShortID() + delRewardAddress := ids.GenerateTestShortID() + + vdrStartTime := uint64(defaultValidateStartTime.Unix()) + 1 + vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) + vdrNodeID := ids.GenerateTestNodeID() + + vdrTx, err := env.txBuilder.NewAddValidatorTx( + env.config.MinValidatorStake, + vdrStartTime, + vdrEndTime, + vdrNodeID, + vdrRewardAddress, + reward.PercentDenominator/4, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + ids.ShortEmpty, /*=changeAddr*/ + ) + require.NoError(err) + + delStartTime := vdrStartTime + delEndTime := vdrEndTime + + delTx, err := env.txBuilder.NewAddDelegatorTx( + env.config.MinDelegatorStake, + delStartTime, + delEndTime, + vdrNodeID, + delRewardAddress, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + ids.ShortEmpty, 
/*=changeAddr*/ + ) + require.NoError(err) + + vdrRewardAmt := uint64(2000000) + vdrStaker, err := state.NewCurrentStaker( + vdrTx.ID(), + vdrTx.Unsigned.(*txs.AddValidatorTx), + vdrRewardAmt, + ) + require.NoError(err) + + delRewardAmt := uint64(1000000) + delStaker, err := state.NewCurrentStaker( + delTx.ID(), + delTx.Unsigned.(*txs.AddDelegatorTx), + delRewardAmt, + ) + require.NoError(err) + + env.state.PutCurrentValidator(vdrStaker) + env.state.AddTx(vdrTx, status.Committed) + env.state.PutCurrentDelegator(delStaker) + env.state.AddTx(delTx, status.Committed) + env.state.SetTimestamp(time.Unix(int64(vdrEndTime), 0)) + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) + + vdrDestSet := set.Set[ids.ShortID]{} + vdrDestSet.Add(vdrRewardAddress) + delDestSet := set.Set[ids.ShortID]{} + delDestSet.Add(delRewardAddress) + + oldVdrBalance, err := avax.GetBalance(env.state, vdrDestSet) + require.NoError(err) + oldDelBalance, err := avax.GetBalance(env.state, delDestSet) + require.NoError(err) + + // test validator stake + vdrSet, ok := env.config.Validators.Get(constants.PrimaryNetworkID) + require.True(ok) + + stake := vdrSet.GetWeight(vdrNodeID) + require.Equal(env.config.MinValidatorStake+env.config.MinDelegatorStake, stake) + + tx, err := env.txBuilder.NewRewardValidatorTx(delTx.ID()) + require.NoError(err) + + // Create Delegator Diff + onCommitState, err := state.NewDiff(lastAcceptedID, env) + require.NoError(err) + + onAbortState, err := state.NewDiff(lastAcceptedID, env) + require.NoError(err) + + txExecutor := ProposalTxExecutor{ + OnCommitState: onCommitState, + OnAbortState: onAbortState, + Backend: &env.backend, + Tx: tx, + } + err = tx.Unsigned.Visit(&txExecutor) + require.NoError(err) + + // The delegator should be rewarded if the ProposalTx is committed. Since the + // delegatee's share is 25%, we expect the delegator to receive 75% of the reward. 
+ // Since this is post [CortinaTime], the delegatee should not be rewarded until a + // RewardValidatorTx is issued for the delegatee. + numDelStakeUTXOs := uint32(len(delTx.Unsigned.InputIDs())) + delRewardUTXOID := &avax.UTXOID{ + TxID: delTx.ID(), + OutputIndex: numDelStakeUTXOs + 1, + } + + utxo, err := onCommitState.GetUTXO(delRewardUTXOID.InputID()) + require.NoError(err) + castUTXO, ok := utxo.Out.(*secp256k1fx.TransferOutput) + require.True(ok) + require.Equal(delRewardAmt*3/4, castUTXO.Amt, "expected delegator balance to increase by 3/4 of reward amount") + require.True(delDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to delDestSet") + + preCortinaVdrRewardUTXOID := &avax.UTXOID{ + TxID: delTx.ID(), + OutputIndex: numDelStakeUTXOs + 2, + } + _, err = onCommitState.GetUTXO(preCortinaVdrRewardUTXOID.InputID()) + require.ErrorIs(err, database.ErrNotFound) + + // Commit Delegator Diff + require.NoError(txExecutor.OnCommitState.Apply(env.state)) + + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) + + tx, err = env.txBuilder.NewRewardValidatorTx(vdrStaker.TxID) + require.NoError(err) + + // Create Validator Diff + onCommitState, err = state.NewDiff(lastAcceptedID, env) + require.NoError(err) + + onAbortState, err = state.NewDiff(lastAcceptedID, env) + require.NoError(err) + + txExecutor = ProposalTxExecutor{ + OnCommitState: onCommitState, + OnAbortState: onAbortState, + Backend: &env.backend, + Tx: tx, + } + require.NoError(tx.Unsigned.Visit(&txExecutor)) + + require.NotEqual(vdrStaker.TxID, delStaker.TxID) + + numVdrStakeUTXOs := uint32(len(delTx.Unsigned.InputIDs())) + + // check for validator reward here + vdrRewardUTXOID := &avax.UTXOID{ + TxID: vdrTx.ID(), + OutputIndex: numVdrStakeUTXOs + 1, + } + + utxo, err = onCommitState.GetUTXO(vdrRewardUTXOID.InputID()) + require.NoError(err) + castUTXO, ok = utxo.Out.(*secp256k1fx.TransferOutput) + require.True(ok) + require.Equal(vdrRewardAmt, castUTXO.Amt, 
"expected validator to be rewarded") + require.True(vdrDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to vdrDestSet") + + // check for validator's batched delegator rewards here + onCommitVdrDelRewardUTXOID := &avax.UTXOID{ + TxID: vdrTx.ID(), + OutputIndex: numVdrStakeUTXOs + 2, + } + + utxo, err = onCommitState.GetUTXO(onCommitVdrDelRewardUTXOID.InputID()) + require.NoError(err) + castUTXO, ok = utxo.Out.(*secp256k1fx.TransferOutput) + require.True(ok) + require.Equal(delRewardAmt/4, castUTXO.Amt, "expected validator to be rewarded with accrued delegator rewards") + require.True(vdrDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to vdrDestSet") + + // aborted validator tx should still distribute accrued delegator rewards + onAbortVdrDelRewardUTXOID := &avax.UTXOID{ + TxID: vdrTx.ID(), + OutputIndex: numVdrStakeUTXOs + 1, + } + + utxo, err = onAbortState.GetUTXO(onAbortVdrDelRewardUTXOID.InputID()) + require.NoError(err) + castUTXO, ok = utxo.Out.(*secp256k1fx.TransferOutput) + require.True(ok) + require.Equal(delRewardAmt/4, castUTXO.Amt, "expected validator to be rewarded with accrued delegator rewards") + require.True(vdrDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to vdrDestSet") + + _, err = onCommitState.GetUTXO(preCortinaVdrRewardUTXOID.InputID()) + require.ErrorIs(err, database.ErrNotFound) + + // Commit Validator Diff + require.NoError(txExecutor.OnCommitState.Apply(env.state)) + + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) + + // Since the tx was committed, the delegator and the delegatee should be rewarded. + // The delegator reward should be higher since the delegatee's share is 25%. 
+ commitVdrBalance, err := avax.GetBalance(env.state, vdrDestSet) + require.NoError(err) + vdrReward, err := math.Sub(commitVdrBalance, oldVdrBalance) + require.NoError(err) + delegateeReward, err := math.Sub(vdrReward, 2000000) + require.NoError(err) + require.NotZero(delegateeReward, "expected delegatee balance to increase because of reward") + + commitDelBalance, err := avax.GetBalance(env.state, delDestSet) + require.NoError(err) + delReward, err := math.Sub(commitDelBalance, oldDelBalance) + require.NoError(err) + require.NotZero(delReward, "expected delegator balance to increase because of reward") + + require.Less(delegateeReward, delReward, "the delegator's reward should be greater than the delegatee's because the delegatee's share is 25%") + require.Equal(delRewardAmt, delReward+delegateeReward, "expected total reward to be %d but is %d", delRewardAmt, delReward+vdrReward) +} + +func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { + require := require.New(t) + env := newEnvironment(true /*=postBanff*/, true /*=postCortina*/) + defer func() { + require.NoError(shutdownEnvironment(env)) + }() + dummyHeight := uint64(1) + + vdrRewardAddress := ids.GenerateTestShortID() + delRewardAddress := ids.GenerateTestShortID() + + vdrStartTime := uint64(defaultValidateStartTime.Unix()) + 1 + vdrEndTime := uint64(defaultValidateStartTime.Add(2 * defaultMinStakingDuration).Unix()) + vdrNodeID := ids.GenerateTestNodeID() + + vdrTx, err := env.txBuilder.NewAddValidatorTx( + env.config.MinValidatorStake, // stakeAmt + vdrStartTime, + vdrEndTime, + vdrNodeID, // node ID + vdrRewardAddress, // reward address + reward.PercentDenominator/4, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + ids.ShortEmpty, + ) + require.NoError(err) + + delStartTime := vdrStartTime + delEndTime := vdrEndTime + + delTx, err := env.txBuilder.NewAddDelegatorTx( + env.config.MinDelegatorStake, + delStartTime, + delEndTime, + vdrNodeID, + delRewardAddress, + 
[]*secp256k1.PrivateKey{preFundedKeys[0]}, + ids.ShortEmpty, // Change address + ) + require.NoError(err) + + vdrRewardAmt := uint64(2000000) + vdrStaker, err := state.NewCurrentStaker( + vdrTx.ID(), + vdrTx.Unsigned.(*txs.AddValidatorTx), + vdrRewardAmt, + ) + require.NoError(err) + + delRewardAmt := uint64(1000000) + delStaker, err := state.NewCurrentStaker( + delTx.ID(), + delTx.Unsigned.(*txs.AddDelegatorTx), + delRewardAmt, + ) + require.NoError(err) + + env.state.PutCurrentValidator(vdrStaker) + env.state.AddTx(vdrTx, status.Committed) + env.state.PutCurrentDelegator(delStaker) + env.state.AddTx(delTx, status.Committed) + env.state.SetTimestamp(time.Unix(int64(vdrEndTime), 0)) + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) + + vdrDestSet := set.Set[ids.ShortID]{} + vdrDestSet.Add(vdrRewardAddress) + delDestSet := set.Set[ids.ShortID]{} + delDestSet.Add(delRewardAddress) + + oldVdrBalance, err := avax.GetBalance(env.state, vdrDestSet) + require.NoError(err) + oldDelBalance, err := avax.GetBalance(env.state, delDestSet) + require.NoError(err) + + tx, err := env.txBuilder.NewRewardValidatorTx(delTx.ID()) + require.NoError(err) + + // Create Delegator Diffs + delOnCommitState, err := state.NewDiff(lastAcceptedID, env) + require.NoError(err) + + delOnAbortState, err := state.NewDiff(lastAcceptedID, env) + require.NoError(err) + + txExecutor := ProposalTxExecutor{ + OnCommitState: delOnCommitState, + OnAbortState: delOnAbortState, + Backend: &env.backend, + Tx: tx, + } + require.NoError(tx.Unsigned.Visit(&txExecutor)) + + // Create Validator Diffs + testID := ids.GenerateTestID() + env.SetState(testID, delOnCommitState) + + vdrOnCommitState, err := state.NewDiff(testID, env) + require.NoError(err) + + vdrOnAbortState, err := state.NewDiff(testID, env) + require.NoError(err) + + tx, err = env.txBuilder.NewRewardValidatorTx(vdrTx.ID()) + require.NoError(err) + + txExecutor = ProposalTxExecutor{ + OnCommitState: vdrOnCommitState, + 
OnAbortState: vdrOnAbortState, + Backend: &env.backend, + Tx: tx, + } + require.NoError(tx.Unsigned.Visit(&txExecutor)) + + // aborted validator tx should still distribute accrued delegator rewards + numVdrStakeUTXOs := uint32(len(delTx.Unsigned.InputIDs())) + onAbortVdrDelRewardUTXOID := &avax.UTXOID{ + TxID: vdrTx.ID(), + OutputIndex: numVdrStakeUTXOs + 1, + } + + utxo, err := vdrOnAbortState.GetUTXO(onAbortVdrDelRewardUTXOID.InputID()) + require.NoError(err) + castUTXO, ok := utxo.Out.(*secp256k1fx.TransferOutput) + require.True(ok) + require.Equal(delRewardAmt/4, castUTXO.Amt, "expected validator to be rewarded with accrued delegator rewards") + require.True(vdrDestSet.Equals(castUTXO.AddressesSet()), "expected reward UTXO to be issued to vdrDestSet") + + // Commit Delegator Diff + require.NoError(delOnCommitState.Apply(env.state)) + + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) + + // Commit Validator Diff + require.NoError(vdrOnCommitState.Apply(env.state)) + + env.state.SetHeight(dummyHeight) + require.NoError(env.state.Commit()) + + // Since the tx was committed, the delegator and the delegatee should be rewarded. + // The delegator reward should be higher since the delegatee's share is 25%. 
+ commitVdrBalance, err := avax.GetBalance(env.state, vdrDestSet) + require.NoError(err) + vdrReward, err := math.Sub(commitVdrBalance, oldVdrBalance) + require.NoError(err) + delegateeReward, err := math.Sub(vdrReward, vdrRewardAmt) + require.NoError(err) + require.NotZero(delegateeReward, "expected delegatee balance to increase because of reward") + + commitDelBalance, err := avax.GetBalance(env.state, delDestSet) + require.NoError(err) + delReward, err := math.Sub(commitDelBalance, oldDelBalance) + require.NoError(err) + require.NotZero(delReward, "expected delegator balance to increase because of reward") + + require.Less(delegateeReward, delReward, "the delegator's reward should be greater than the delegatee's because the delegatee's share is 25%") + require.Equal(delRewardAmt, delReward+delegateeReward, "expected total reward to be %d but is %d", delRewardAmt, delReward+vdrReward) +} + func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) - env := newEnvironment() + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() dummyHeight := uint64(1) @@ -262,7 +750,7 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { vdrNodeID, // node ID vdrRewardAddress, // reward address reward.PercentDenominator/4, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) require.NoError(err) @@ -275,22 +763,24 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { delEndTime, vdrNodeID, delRewardAddress, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) require.NoError(err) - vdrStaker := state.NewCurrentStaker( + vdrStaker, err := state.NewCurrentStaker( vdrTx.ID(), vdrTx.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) - delStaker := state.NewCurrentStaker( + 
delStaker, err := state.NewCurrentStaker( delTx.ID(), delTx.Unsigned.(*txs.AddDelegatorTx), 1000000, ) + require.NoError(err) env.state.PutCurrentValidator(vdrStaker) env.state.AddTx(vdrTx, status.Committed) @@ -304,14 +794,10 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAbortState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) txExecutor := ProposalTxExecutor{ OnCommitState: onCommitState, @@ -322,9 +808,9 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { err = tx.Unsigned.Visit(&txExecutor) require.NoError(err) - vdrDestSet := ids.ShortSet{} + vdrDestSet := set.Set[ids.ShortID]{} vdrDestSet.Add(vdrRewardAddress) - delDestSet := ids.ShortSet{} + delDestSet := set.Set[ids.ShortID]{} delDestSet.Add(delRewardAddress) expectedReward := uint64(1000000) @@ -334,20 +820,21 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { oldDelBalance, err := avax.GetBalance(env.state, delDestSet) require.NoError(err) - txExecutor.OnAbortState.Apply(env.state) + require.NoError(txExecutor.OnAbortState.Apply(env.state)) + env.state.SetHeight(dummyHeight) require.NoError(env.state.Commit()) // If tx is aborted, delegator and delegatee shouldn't get reward newVdrBalance, err := avax.GetBalance(env.state, vdrDestSet) require.NoError(err) - vdrReward, err := math.Sub64(newVdrBalance, oldVdrBalance) + vdrReward, err := math.Sub(newVdrBalance, oldVdrBalance) require.NoError(err) require.Zero(vdrReward, "expected delegatee balance not to increase") newDelBalance, err := avax.GetBalance(env.state, delDestSet) require.NoError(err) - delReward, err := math.Sub64(newDelBalance, oldDelBalance) + delReward, err := math.Sub(newDelBalance, oldDelBalance) require.NoError(err) require.Zero(delReward, "expected delegator balance not to increase") diff --git 
a/avalanchego/vms/platformvm/txs/executor/staker_tx_verification.go b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification.go index c1c84c64..4a3a49b8 100644 --- a/avalanchego/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -86,7 +86,7 @@ func verifyAddValidatorTx( copy(outs, tx.Outs) copy(outs[len(tx.Outs):], tx.StakeOuts) - if !backend.Bootstrapped.GetValue() { + if !backend.Bootstrapped.Get() { return outs, nil } @@ -135,7 +135,7 @@ func verifyAddValidatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkValidatorFee, }, ); err != nil { - return nil, fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return nil, fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. 
This is done last @@ -184,7 +184,7 @@ func verifyAddSubnetValidatorTx( return errStakeTooLong } - if !backend.Bootstrapped.GetValue() { + if !backend.Bootstrapped.Get() { return nil } @@ -200,7 +200,7 @@ func verifyAddSubnetValidatorTx( ) } - _, err := GetValidator(chainState, tx.Validator.Subnet, tx.Validator.NodeID) + _, err := GetValidator(chainState, tx.SubnetValidator.Subnet, tx.Validator.NodeID) if err == nil { return fmt.Errorf( "attempted to issue duplicate subnet validation for %s", @@ -230,7 +230,7 @@ func verifyAddSubnetValidatorTx( return errValidatorSubset } - baseTxCreds, err := verifyPoASubnetAuthorization(backend, chainState, sTx, tx.Validator.Subnet, tx.SubnetAuth) + baseTxCreds, err := verifyPoASubnetAuthorization(backend, chainState, sTx, tx.SubnetValidator.Subnet, tx.SubnetAuth) if err != nil { return err } @@ -246,7 +246,7 @@ func verifyAddSubnetValidatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddSubnetValidatorFee, }, ); err != nil { - return fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last @@ -287,7 +287,7 @@ func removeSubnetValidatorValidation( if err != nil { // It isn't a current or pending validator. return nil, false, fmt.Errorf( - "%s %w of %s: %s", + "%s %w of %s: %v", tx.NodeID, errNotValidator, tx.Subnet, @@ -300,7 +300,7 @@ func removeSubnetValidatorValidation( return nil, false, errRemovePermissionlessValidator } - if !backend.Bootstrapped.GetValue() { + if !backend.Bootstrapped.Get() { // Not bootstrapped yet -- don't need to do full verification. 
return vdr, isCurrentValidator, nil } @@ -321,7 +321,7 @@ func removeSubnetValidatorValidation( backend.Ctx.AVAXAssetID: backend.Config.TxFee, }, ); err != nil { - return nil, false, fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return nil, false, fmt.Errorf("%w: %v", errFlowCheckFailed, err) } return vdr, isCurrentValidator, nil @@ -366,7 +366,7 @@ func verifyAddDelegatorTx( copy(outs, tx.Outs) copy(outs[len(tx.Outs):], tx.StakeOuts) - if !backend.Bootstrapped.GetValue() { + if !backend.Bootstrapped.Get() { return outs, nil } @@ -396,12 +396,15 @@ func verifyAddDelegatorTx( } if backend.Config.IsApricotPhase3Activated(currentTimestamp) { - maximumWeight = math.Min64(maximumWeight, maxValidatorStake) + maximumWeight = math.Min(maximumWeight, maxValidatorStake) } txID := sTx.ID() + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return nil, err + } - newStaker := state.NewPendingStaker(txID, tx) canDelegate, err := canDelegate(chainState, primaryNetworkValidator, maximumWeight, newStaker) if err != nil { return nil, err @@ -421,7 +424,7 @@ func verifyAddDelegatorTx( backend.Ctx.AVAXAssetID: backend.Config.AddPrimaryNetworkDelegatorFee, }, ); err != nil { - return nil, fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return nil, fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. 
This is done last @@ -454,12 +457,7 @@ func verifyAddPermissionlessValidatorTx( return err } - // Flare does not (yet) allow adding permissionless validator tx - if constants.IsFlareNetworkID(backend.Ctx.NetworkID) || constants.IsSgbNetworkID(backend.Ctx.NetworkID) { - return errWrongTxType - } - - if !backend.Bootstrapped.GetValue() { + if !backend.Bootstrapped.Get() { return nil } @@ -576,7 +574,7 @@ func verifyAddPermissionlessValidatorTx( backend.Ctx.AVAXAssetID: txFee, }, ); err != nil { - return fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. This is done last @@ -640,12 +638,7 @@ func verifyAddPermissionlessDelegatorTx( return err } - // Flare does not (yet) allow adding permissionless delegator tx - if constants.IsFlareNetworkID(backend.Ctx.NetworkID) || constants.IsSgbNetworkID(backend.Ctx.NetworkID) { - return errWrongTxType - } - - if !backend.Bootstrapped.GetValue() { + if !backend.Bootstrapped.Get() { return nil } @@ -707,10 +700,14 @@ func verifyAddPermissionlessDelegatorTx( if err != nil { maximumWeight = stdmath.MaxUint64 } - maximumWeight = math.Min64(maximumWeight, delegatorRules.maxValidatorStake) + maximumWeight = math.Min(maximumWeight, delegatorRules.maxValidatorStake) txID := sTx.ID() - newStaker := state.NewPendingStaker(txID, tx) + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return err + } + canDelegate, err := canDelegate(chainState, validator, maximumWeight, newStaker) if err != nil { return err @@ -752,7 +749,7 @@ func verifyAddPermissionlessDelegatorTx( backend.Ctx.AVAXAssetID: txFee, }, ); err != nil { - return fmt.Errorf("%w: %s", errFlowCheckFailed, err) + return fmt.Errorf("%w: %v", errFlowCheckFailed, err) } // Make sure the tx doesn't start too far in the future. 
This is done last diff --git a/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_test.go b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_test.go index a7a178f5..4c9d32af 100644 --- a/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_test.go +++ b/avalanchego/vms/platformvm/txs/executor/staker_tx_verification_test.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor import ( - "errors" "testing" "time" @@ -24,7 +23,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -64,7 +62,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { Ins: []*avax.TransferableInput{}, }, }, - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.GenerateTestNodeID(), Start: 1, End: 1 + uint64(unsignedTransformTx.MinStakeDuration), @@ -93,7 +91,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { Creds: []verify.Verifiable{}, } ) - verifiedSignedTx.Initialize([]byte{1}, []byte{2}) + verifiedSignedTx.SetBytes([]byte{1}, []byte{2}) tests := []test{ { @@ -106,8 +104,12 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { stateF: func(*gomock.Controller) state.Chain { return nil }, - sTxF: func() *txs.Tx { return nil }, - txF: func() *txs.AddPermissionlessValidatorTx { return nil }, + sTxF: func() *txs.Tx { + return nil + }, + txF: func() *txs.AddPermissionlessValidatorTx { + return nil + }, expectedErr: txs.ErrNilSignedTx, }, { @@ -115,21 +117,25 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { backendF: func(*gomock.Controller) *Backend { return &Backend{ Ctx: snow.DefaultContextTest(), - 
Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, } }, stateF: func(ctrl *gomock.Controller) state.Chain { return nil }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, - txF: func() *txs.AddPermissionlessValidatorTx { return nil }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, + txF: func() *txs.AddPermissionlessValidatorTx { + return nil + }, expectedErr: nil, }, { name: "start time too early", backendF: func(*gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) return &Backend{ Ctx: snow.DefaultContextTest(), Bootstrapped: bootstrapped, @@ -140,15 +146,19 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { state.EXPECT().GetTimestamp().Return(verifiedTx.StartTime()) return state }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, - txF: func() *txs.AddPermissionlessValidatorTx { return &verifiedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, + txF: func() *txs.AddPermissionlessValidatorTx { + return &verifiedTx + }, expectedErr: errTimestampNotBeforeStartTime, }, { name: "weight too low", backendF: func(*gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) return &Backend{ Ctx: snow.DefaultContextTest(), Bootstrapped: bootstrapped, @@ -160,7 +170,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { tx := verifiedTx // Note that this copies [verifiedTx] tx.Validator.Wght = unsignedTransformTx.MinValidatorStake - 1 @@ -171,8 +183,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "weight too high", 
backendF: func(*gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) return &Backend{ Ctx: snow.DefaultContextTest(), Bootstrapped: bootstrapped, @@ -184,7 +196,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { tx := verifiedTx // Note that this copies [verifiedTx] tx.Validator.Wght = unsignedTransformTx.MaxValidatorStake + 1 @@ -195,8 +209,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "insufficient delegation fee", backendF: func(*gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) return &Backend{ Ctx: snow.DefaultContextTest(), Bootstrapped: bootstrapped, @@ -208,7 +222,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { tx := verifiedTx // Note that this copies [verifiedTx] tx.Validator.Wght = unsignedTransformTx.MaxValidatorStake @@ -220,8 +236,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "duration too short", backendF: func(*gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) return &Backend{ Ctx: snow.DefaultContextTest(), Bootstrapped: bootstrapped, @@ -233,7 +249,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { 
state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { tx := verifiedTx // Note that this copies [verifiedTx] tx.Validator.Wght = unsignedTransformTx.MaxValidatorStake @@ -248,8 +266,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "duration too long", backendF: func(*gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) return &Backend{ Ctx: snow.DefaultContextTest(), Bootstrapped: bootstrapped, @@ -261,7 +279,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { tx := verifiedTx // Note that this copies [verifiedTx] tx.Validator.Wght = unsignedTransformTx.MaxValidatorStake @@ -276,8 +296,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "wrong assetID", backendF: func(*gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) return &Backend{ Ctx: snow.DefaultContextTest(), Bootstrapped: bootstrapped, @@ -289,7 +309,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { state.EXPECT().GetSubnetTransformation(subnetID).Return(&transformTx, nil) return state }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { tx := verifiedTx // Note that this copies [verifiedTx] tx.StakeOuts = []*avax.TransferableOutput{ @@ -306,8 +328,8 @@ func 
TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "duplicate validator", backendF: func(*gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) return &Backend{ Ctx: snow.DefaultContextTest(), Bootstrapped: bootstrapped, @@ -321,7 +343,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { state.EXPECT().GetCurrentValidator(subnetID, verifiedTx.NodeID()).Return(nil, nil) return state }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { return &verifiedTx }, @@ -330,8 +354,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "validator not subset of primary network validator", backendF: func(*gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) return &Backend{ Ctx: snow.DefaultContextTest(), Bootstrapped: bootstrapped, @@ -351,7 +375,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { return &verifiedTx }, @@ -360,8 +386,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "flow check fails", backendF: func(ctrl *gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) flowChecker := utxo.NewMockVerifier(ctrl) flowChecker.EXPECT().VerifySpend( @@ -371,7 +397,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { gomock.Any(), gomock.Any(), gomock.Any(), - 
).Return(errors.New("flow check failed")) + ).Return(errFlowCheckFailed) return &Backend{ FlowChecker: flowChecker, @@ -395,7 +421,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { return &verifiedTx }, @@ -404,8 +432,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "starts too far in the future", backendF: func(ctrl *gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) flowChecker := utxo.NewMockVerifier(ctrl) flowChecker.EXPECT().VerifySpend( @@ -439,7 +467,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return &verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { // Note this copies [verifiedTx] tx := verifiedTx @@ -452,8 +482,8 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { { name: "success", backendF: func(ctrl *gomock.Controller) *Backend { - bootstrapped := &utils.AtomicBool{} - bootstrapped.SetValue(true) + bootstrapped := &utils.Atomic[bool]{} + bootstrapped.Set(true) flowChecker := utxo.NewMockVerifier(ctrl) flowChecker.EXPECT().VerifySpend( @@ -487,7 +517,9 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { mockState.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, verifiedTx.NodeID()).Return(primaryNetworkVdr, nil) return mockState }, - sTxF: func() *txs.Tx { return &verifiedSignedTx }, + sTxF: func() *txs.Tx { + return 
&verifiedSignedTx + }, txF: func() *txs.AddPermissionlessValidatorTx { return &verifiedTx }, @@ -497,7 +529,6 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -509,7 +540,7 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { ) err := verifyAddPermissionlessValidatorTx(backend, state, sTx, tx) - require.ErrorIs(err, tt.expectedErr) + require.ErrorIs(t, err, tt.expectedErr) }) } } @@ -535,7 +566,6 @@ func TestGetValidatorRules(t *testing.T) { avaxAssetID = ids.GenerateTestID() customAssetID = ids.GenerateTestID() subnetID = ids.GenerateTestID() - testErr = errors.New("an error") ) tests := []test{ @@ -567,11 +597,11 @@ func TestGetValidatorRules(t *testing.T) { backend: nil, chainStateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetSubnetTransformation(subnetID).Return(nil, testErr) + state.EXPECT().GetSubnetTransformation(subnetID).Return(nil, errTest) return state }, expectedRules: &addValidatorRules{}, - expectedErr: testErr, + expectedErr: errTest, }, { name: "invalid transformation tx", @@ -628,7 +658,7 @@ func TestGetValidatorRules(t *testing.T) { chainState := tt.chainStateF(ctrl) rules, err := getValidatorRules(time.Time{}, tt.backend, chainState, tt.subnetID) if tt.expectedErr != nil { - require.ErrorIs(tt.expectedErr, err) + require.ErrorIs(err, tt.expectedErr) return } require.NoError(err) @@ -656,7 +686,6 @@ func TestGetDelegatorRules(t *testing.T) { avaxAssetID = ids.GenerateTestID() customAssetID = ids.GenerateTestID() subnetID = ids.GenerateTestID() - testErr = errors.New("an error") ) tests := []test{ { @@ -686,11 +715,11 @@ func TestGetDelegatorRules(t *testing.T) { backend: nil, chainStateF: func(ctrl *gomock.Controller) state.Chain { state := state.NewMockChain(ctrl) - state.EXPECT().GetSubnetTransformation(subnetID).Return(nil, 
testErr) + state.EXPECT().GetSubnetTransformation(subnetID).Return(nil, errTest) return state }, expectedRules: &addDelegatorRules{}, - expectedErr: testErr, + expectedErr: errTest, }, { name: "invalid transformation tx", @@ -748,7 +777,7 @@ func TestGetDelegatorRules(t *testing.T) { chainState := tt.chainStateF(ctrl) rules, err := getDelegatorRules(time.Time{}, tt.backend, chainState, tt.subnetID) if tt.expectedErr != nil { - require.ErrorIs(tt.expectedErr, err) + require.ErrorIs(err, tt.expectedErr) return } require.NoError(err) diff --git a/avalanchego/vms/platformvm/txs/executor/standard_tx_executor.go b/avalanchego/vms/platformvm/txs/executor/standard_tx_executor.go index b62eb424..b498b4bb 100644 --- a/avalanchego/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/avalanchego/vms/platformvm/txs/executor/standard_tx_executor.go @@ -1,31 +1,28 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( + "context" "errors" "fmt" "time" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/utxo" ) var ( - _ txs.Visitor = &StandardTxExecutor{} - - errEmptyNodeID = errors.New("validator nodeID cannot be empty") - errIssuedAddStakerTxBeforeBanff = errors.New("staker transaction issued before Banff") - errCustomAssetBeforeBanff = errors.New("custom assets can only be imported after Banff") - errRemoveSubnetValidatorTxBeforeBanff = errors.New("RemoveSubnetValidatorTx issued before Banff") - errTransformSubnetTxBeforeBanff = errors.New("TransformSubnetTx issued before Banff") - errMaxStakeDurationTooLarge = errors.New("max stake duration must be less than or equal to the global max stake duration") + _ txs.Visitor = (*StandardTxExecutor)(nil) + + errEmptyNodeID = errors.New("validator nodeID cannot be empty") + errMaxStakeDurationTooLarge = errors.New("max stake duration must be less than or equal to the global max stake duration") ) type StandardTxExecutor struct { @@ -36,12 +33,17 @@ type StandardTxExecutor struct { // outputs of visitor execution OnAccept func() // may be nil - Inputs ids.Set + Inputs set.Set[ids.ID] AtomicRequests map[ids.ID]*atomic.Requests // may be nil } -func (*StandardTxExecutor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { return errWrongTxType } -func (*StandardTxExecutor) RewardValidatorTx(*txs.RewardValidatorTx) error { return errWrongTxType } +func (*StandardTxExecutor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { + return errWrongTxType +} + +func (*StandardTxExecutor) RewardValidatorTx(*txs.RewardValidatorTx) error { + return errWrongTxType +} func (e *StandardTxExecutor) 
CreateChainTx(tx *txs.CreateChainTx) error { if err := e.Tx.SyntacticVerify(e.Ctx); err != nil { @@ -72,15 +74,17 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) // Add the new chain to the database e.State.AddChain(e.Tx) // If this proposal is committed and this node is a member of the subnet // that validates the blockchain, create the blockchain - e.OnAccept = func() { e.Config.CreateChain(txID, tx) } + e.OnAccept = func() { + e.Config.CreateChain(txID, tx) + } return nil } @@ -109,9 +113,9 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) // Add the new subnet to the database e.State.AddSubnet(e.Tx) return nil @@ -122,31 +126,17 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { return err } - currentChainTime := e.State.GetTimestamp() - - e.Inputs = ids.NewSet(len(tx.ImportedInputs)) + e.Inputs = set.NewSet[ids.ID](len(tx.ImportedInputs)) utxoIDs := make([][]byte, len(tx.ImportedInputs)) for i, in := range tx.ImportedInputs { utxoID := in.UTXOID.InputID() e.Inputs.Add(utxoID) utxoIDs[i] = utxoID[:] - - if !e.Config.IsBanffActivated(currentChainTime) { - // TODO: Remove this check once the Banff network upgrade is - // complete. - // - // Banff network upgrade allows exporting of all assets to the - // P-chain. 
- assetID := in.AssetID() - if assetID != e.Ctx.AVAXAssetID { - return errCustomAssetBeforeBanff - } - } } - if e.Bootstrapped.GetValue() { - if err := verify.SameSubnet(e.Ctx, tx.SourceChain); err != nil { + if e.Bootstrapped.Get() { + if err := verify.SameSubnet(context.TODO(), e.Ctx, tx.SourceChain); err != nil { return err } @@ -192,9 +182,9 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) e.AtomicRequests = map[ids.ID]*atomic.Requests{ tx.SourceChain: { @@ -213,8 +203,8 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { copy(outs, tx.Outs) copy(outs[len(tx.Outs):], tx.ExportedOutputs) - if e.Bootstrapped.GetValue() { - if err := verify.SameSubnet(e.Ctx, tx.DestinationChain); err != nil { + if e.Bootstrapped.Get() { + if err := verify.SameSubnet(context.TODO(), e.Ctx, tx.DestinationChain); err != nil { return err } } @@ -236,9 +226,9 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) elems := make([]*atomic.Element, len(tx.ExportedOutputs)) for i, out := range tx.ExportedOutputs { @@ -275,19 +265,6 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { } func (e *StandardTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { - // AddValidatorTx is a proposal transaction until the Banff fork - // activation. Following the activation, AddValidatorTxs must be issued into - // StandardBlocks. 
- currentTimestamp := e.State.GetTimestamp() - if !e.Config.IsBanffActivated(currentTimestamp) { - return fmt.Errorf( - "%w: timestamp (%s) < Banff fork time (%s)", - errIssuedAddStakerTxBeforeBanff, - currentTimestamp, - e.Config.BanffTime, - ) - } - if tx.Validator.NodeID == ids.EmptyNodeID { return errEmptyNodeID } @@ -302,29 +279,19 @@ func (e *StandardTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { } txID := e.Tx.ID() + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return err + } - newStaker := state.NewPendingStaker(txID, tx) e.State.PutPendingValidator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } func (e *StandardTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error { - // AddSubnetValidatorTx is a proposal transaction until the Banff fork - // activation. Following the activation, AddSubnetValidatorTxs must be - // issued into StandardBlocks. - currentTimestamp := e.State.GetTimestamp() - if !e.Config.IsBanffActivated(currentTimestamp) { - return fmt.Errorf( - "%w: timestamp (%s) < Banff fork time (%s)", - errIssuedAddStakerTxBeforeBanff, - currentTimestamp, - e.Config.BanffTime, - ) - } - if err := verifyAddSubnetValidatorTx( e.Backend, e.State, @@ -335,29 +302,19 @@ func (e *StandardTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) } txID := e.Tx.ID() + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return err + } - newStaker := state.NewPendingStaker(txID, tx) e.State.PutPendingValidator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } func (e *StandardTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { - // AddDelegatorTx is a proposal transaction until the Banff fork - // activation. 
Following the activation, AddDelegatorTxs must be issued into - // StandardBlocks. - currentTimestamp := e.State.GetTimestamp() - if !e.Config.IsBanffActivated(currentTimestamp) { - return fmt.Errorf( - "%w: timestamp (%s) < Banff fork time (%s)", - errIssuedAddStakerTxBeforeBanff, - currentTimestamp, - e.Config.BanffTime, - ) - } - if _, err := verifyAddDelegatorTx( e.Backend, e.State, @@ -368,10 +325,14 @@ func (e *StandardTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { } txID := e.Tx.ID() - newStaker := state.NewPendingStaker(txID, tx) + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return err + } + e.State.PutPendingDelegator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } @@ -382,16 +343,6 @@ func (e *StandardTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { // [tx.SubnetID]. // Note: [tx.NodeID] may be either a current or pending validator. func (e *StandardTxExecutor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { - currentTimestamp := e.State.GetTimestamp() - if !e.Config.IsBanffActivated(currentTimestamp) { - return fmt.Errorf( - "%w: timestamp (%s) < Banff fork time (%s)", - errRemoveSubnetValidatorTxBeforeBanff, - currentTimestamp, - e.Config.BanffTime, - ) - } - staker, isCurrentValidator, err := removeSubnetValidatorValidation( e.Backend, e.State, @@ -411,27 +362,13 @@ func (e *StandardTxExecutor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidat // Invariant: There are no permissioned subnet delegators to remove. txID := e.Tx.ID() - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error { - // TODO: Remove this check once the Banff network upgrade is complete. 
- // - // Banff network upgrade allows transforming a permissioned subnet into - // a permissionless subnet. - currentTimestamp := e.State.GetTimestamp() - if !e.Config.IsBanffActivated(currentTimestamp) { - return fmt.Errorf( - "%w: timestamp (%s) < Banff fork time (%s)", - errTransformSubnetTxBeforeBanff, - currentTimestamp, - e.Config.BanffTime, - ) - } - if err := e.Tx.SyntacticVerify(e.Ctx); err != nil { return err } @@ -468,9 +405,9 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error txID := e.Tx.ID() // Consume the UTXOS - utxo.Consume(e.State, tx.Ins) + avax.Consume(e.State, tx.Ins) // Produce the UTXOS - utxo.Produce(e.State, txID, tx.Outs) + avax.Produce(e.State, txID, tx.Outs) // Transform the new subnet in the database e.State.AddSubnetTransformation(e.Tx) e.State.SetCurrentSupply(tx.Subnet, tx.InitialSupply) @@ -478,17 +415,6 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error } func (e *StandardTxExecutor) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { - // TODO: Remove this check once the Banff network upgrade is complete. 
- currentTimestamp := e.State.GetTimestamp() - if !e.Config.IsBanffActivated(currentTimestamp) { - return fmt.Errorf( - "%w: timestamp (%s) < Banff fork time (%s)", - errIssuedAddStakerTxBeforeBanff, - currentTimestamp, - e.Config.BanffTime, - ) - } - if err := verifyAddPermissionlessValidatorTx( e.Backend, e.State, @@ -499,27 +425,19 @@ func (e *StandardTxExecutor) AddPermissionlessValidatorTx(tx *txs.AddPermissionl } txID := e.Tx.ID() + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return err + } - newStaker := state.NewPendingStaker(txID, tx) e.State.PutPendingValidator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } func (e *StandardTxExecutor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { - // TODO: Remove this check once the Banff network upgrade is complete. - currentTimestamp := e.State.GetTimestamp() - if !e.Config.IsBanffActivated(currentTimestamp) { - return fmt.Errorf( - "%w: timestamp (%s) < Banff fork time (%s)", - errIssuedAddStakerTxBeforeBanff, - currentTimestamp, - e.Config.BanffTime, - ) - } - if err := verifyAddPermissionlessDelegatorTx( e.Backend, e.State, @@ -530,11 +448,14 @@ func (e *StandardTxExecutor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionl } txID := e.Tx.ID() + newStaker, err := state.NewPendingStaker(txID, tx) + if err != nil { + return err + } - newStaker := state.NewPendingStaker(txID, tx) e.State.PutPendingDelegator(newStaker) - utxo.Consume(e.State, tx.Ins) - utxo.Produce(e.State, txID, tx.Outs) + avax.Consume(e.State, tx.Ins) + avax.Produce(e.State, txID, tx.Outs) return nil } diff --git a/avalanchego/vms/platformvm/txs/executor/standard_tx_executor_test.go b/avalanchego/vms/platformvm/txs/executor/standard_tx_executor_test.go index 718c6f92..5a2581e4 100644 --- a/avalanchego/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ 
b/avalanchego/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -37,13 +37,14 @@ import ( // never overflow const _ time.Duration = math.MaxUint32 * time.Second +var errTest = errors.New("non-nil error") + func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() chainTime := env.state.GetTimestamp() @@ -55,7 +56,7 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { }{ { // Case: Before banff banffTime: chainTime.Add(1), - expectedError: errIssuedAddStakerTxBeforeBanff, + expectedError: errEmptyNodeID, }, { // Case: At banff banffTime: chainTime, @@ -77,17 +78,13 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { ids.EmptyNodeID, ids.GenerateTestShortID(), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) stateDiff, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: 
&env.backend, @@ -95,7 +92,7 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.ErrorIs(t, err, test.expectedError) + require.ErrorIs(err, test.expectedError) } } @@ -118,25 +115,23 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { newValidatorID, // node ID rewardAddress, // Reward Address reward.PercentDenominator, // Shares - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(t, err) target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - if err := target.state.Commit(); err != nil { - t.Fatal(err) - } + err = target.state.Commit() + require.NoError(t, err) } // [addMaxStakeValidator] adds a new validator to the primary network's @@ -149,28 +144,26 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { newValidatorID, // node ID rewardAddress, // Reward Address reward.PercentDenominator, // Shared - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(t, err) target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - if err := target.state.Commit(); err != nil { - t.Fatal(err) - } + err = target.state.Commit() + require.NoError(t, err) } - dummyH := newEnvironment() + dummyH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) currentTimestamp := dummyH.state.GetTimestamp() type test struct { @@ -179,7 +172,7 @@ func 
TestStandardTxExecutorAddDelegator(t *testing.T) { endTime uint64 nodeID ids.NodeID rewardAddress ids.ShortID - feeKeys []*crypto.PrivateKeySECP256K1R + feeKeys []*secp256k1.PrivateKey setup func(*environment) AP3Time time.Time shouldErr bool @@ -193,7 +186,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { endTime: uint64(defaultValidateEndTime.Unix()) + 1, nodeID: nodeID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, @@ -205,7 +198,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { endTime: uint64(currentTimestamp.Add(MaxFutureStartTime * 2).Unix()), nodeID: nodeID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, @@ -217,7 +210,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { endTime: uint64(defaultValidateEndTime.Unix()) + 1, nodeID: nodeID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, @@ -229,7 +222,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { endTime: uint64(defaultValidateEndTime.Add(-5 * time.Second).Unix()), nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, @@ -241,7 +234,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { endTime: newValidatorEndTime, nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: 
defaultGenesisTime, shouldErr: true, @@ -253,7 +246,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { endTime: newValidatorEndTime + 1, // stop validating subnet after stopping validating primary network nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, shouldErr: true, @@ -265,46 +258,44 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { endTime: newValidatorEndTime, // same end time as for primary network nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMinStakeValidator, AP3Time: defaultGenesisTime, shouldErr: false, description: "valid", }, { - stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(currentTimestamp.Unix()), // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time - nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, // tx fee payer + stakeAmount: dummyH.config.MinDelegatorStake, // weight + startTime: uint64(currentTimestamp.Unix()), // start time + endTime: uint64(defaultValidateEndTime.Unix()), // end time + nodeID: nodeID, // node ID + rewardAddress: rewardAddress, // Reward Address + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, // tx fee payer setup: nil, AP3Time: defaultGenesisTime, shouldErr: true, description: "starts validating at current timestamp", }, { - stakeAmount: dummyH.config.MinDelegatorStake, // weight - startTime: uint64(defaultValidateStartTime.Unix()), // start time - endTime: uint64(defaultValidateEndTime.Unix()), // end time - nodeID: nodeID, // node ID - rewardAddress: rewardAddress, // Reward Address - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[1]}, // tx fee 
payer + stakeAmount: dummyH.config.MinDelegatorStake, // weight + startTime: uint64(defaultValidateStartTime.Unix()), // start time + endTime: uint64(defaultValidateEndTime.Unix()), // end time + nodeID: nodeID, // node ID + rewardAddress: rewardAddress, // Reward Address + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[1]}, // tx fee payer setup: func(target *environment) { // Remove all UTXOs owned by keys[1] utxoIDs, err := target.state.UTXOIDs( preFundedKeys[1].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + for _, utxoID := range utxoIDs { target.state.DeleteUTXO(utxoID) } target.state.SetHeight(dummyHeight) - if err := target.state.Commit(); err != nil { - t.Fatal(err) - } + err = target.state.Commit() + require.NoError(t, err) }, AP3Time: defaultGenesisTime, shouldErr: true, @@ -316,7 +307,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { endTime: newValidatorEndTime, // same end time as for primary network nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMaxStakeValidator, AP3Time: defaultValidateEndTime, shouldErr: false, @@ -328,7 +319,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { endTime: newValidatorEndTime, // same end time as for primary network nodeID: newValidatorID, rewardAddress: rewardAddress, - feeKeys: []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + feeKeys: []*secp256k1.PrivateKey{preFundedKeys[0]}, setup: addMaxStakeValidator, AP3Time: defaultGenesisTime, shouldErr: true, @@ -338,12 +329,11 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - freshTH := newEnvironment() + require := require.New(t) + freshTH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) freshTH.config.ApricotPhase3Time = tt.AP3Time defer func() { - if 
err := shutdownEnvironment(freshTH); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(freshTH)) }() tx, err := freshTH.txBuilder.NewAddDelegatorTx( @@ -355,17 +345,14 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { tt.feeKeys, ids.ShortEmpty, ) - if err != nil { - t.Fatalf("couldn't build tx: %s", err) - } + require.NoError(err) + if tt.setup != nil { tt.setup(freshTH) } onAcceptState, err := state.NewDiff(lastAcceptedID, freshTH) - if err != nil { - t.Fatal(err) - } + require.NoError(err) freshTH.config.BanffTime = onAcceptState.GetTimestamp() @@ -375,10 +362,10 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err != nil && !tt.shouldErr { - t.Fatalf("shouldn't have errored but got %s", err) - } else if err == nil && tt.shouldErr { - t.Fatalf("expected test to error but got none") + if tt.shouldErr { + require.Error(err) + } else { + require.NoError(err) } mempoolExecutor := MempoolTxVerifier{ @@ -388,22 +375,21 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&mempoolExecutor) - if err != nil && !tt.shouldErr { - t.Fatalf("shouldn't have errored but got %s", err) - } else if err == nil && tt.shouldErr { - t.Fatalf("expected test to error but got none") + if tt.shouldErr { + require.Error(err) + } else { + require.NoError(err) } }) } } func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() nodeID := preFundedKeys[0].PublicKey().Address() @@ -419,17 +405,13 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(defaultValidateEndTime.Unix())+1, ids.NodeID(nodeID), testSubnet1.ID(), - 
[]*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -437,9 +419,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator stops validating primary network earlier than subnet") - } + require.Error(err, "should have failed because validator stops validating primary network earlier than subnet") } { @@ -453,17 +433,13 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(defaultValidateEndTime.Unix()), ids.NodeID(nodeID), testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -471,55 +447,46 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err != nil { - t.Fatal(err) - } + require.NoError(err) } // Add a validator to pending validator set of primary network key, err := testKeyfactory.NewPrivateKey() - if err != nil { - t.Fatal(err) - } + require.NoError(err) + pendingDSValidatorID := ids.NodeID(key.PublicKey().Address()) // starts validating primary network 10 seconds after genesis - DSStartTime := defaultGenesisTime.Add(10 * time.Second) - DSEndTime := DSStartTime.Add(5 * defaultMinStakingDuration) 
+ dsStartTime := defaultGenesisTime.Add(10 * time.Second) + dsEndTime := dsStartTime.Add(5 * defaultMinStakingDuration) addDSTx, err := env.txBuilder.NewAddValidatorTx( env.config.MinValidatorStake, // stake amount - uint64(DSStartTime.Unix()), // start time - uint64(DSEndTime.Unix()), // end time + uint64(dsStartTime.Unix()), // start time + uint64(dsEndTime.Unix()), // end time pendingDSValidatorID, // node ID nodeID, // reward address reward.PercentDenominator, // shares - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) { // Case: Proposed validator isn't in pending or current validator sets tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix()), // start validating subnet before primary network - uint64(DSEndTime.Unix()), + uint64(dsStartTime.Unix()), // start validating subnet before primary network + uint64(dsEndTime.Unix()), pendingDSValidatorID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -527,24 +494,22 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator not in the current or pending validator sets of the primary network") - } + require.Error(err, "should have failed because validator not in the current or pending validator sets of the primary network") } - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( addDSTx.ID(), 
addDSTx.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(addDSTx, status.Committed) dummyHeight := uint64(1) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) // Node with ID key.PublicKey().Address() now a pending validator for primary network @@ -553,21 +518,17 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { // but starts validating subnet before primary network tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix())-1, // start validating subnet before primary network - uint64(DSEndTime.Unix()), + uint64(dsStartTime.Unix())-1, // start validating subnet before primary network + uint64(dsEndTime.Unix()), pendingDSValidatorID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -575,9 +536,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator starts validating primary network before starting to validate primary network") - } + require.Error(err, "should have failed because validator starts validating primary network before starting to validate primary network") } { @@ -585,21 +544,17 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { // but stops validating subnet after primary network tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix()), - uint64(DSEndTime.Unix())+1, // stop 
validating subnet after stopping validating primary network + uint64(dsStartTime.Unix()), + uint64(dsEndTime.Unix())+1, // stop validating subnet after stopping validating primary network pendingDSValidatorID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -607,9 +562,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator stops validating primary network after stops validating primary network") - } + require.Error(err, "should have failed because validator stops validating primary network after stops validating primary network") } { @@ -617,31 +570,24 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { // period validating subnet is subset of time validating primary network tx, err := env.txBuilder.NewAddSubnetValidatorTx( defaultWeight, - uint64(DSStartTime.Unix()), // same start time as for primary network - uint64(DSEndTime.Unix()), // same end time as for primary network + uint64(dsStartTime.Unix()), // same start time as for primary network + uint64(dsEndTime.Unix()), // same end time as for primary network pendingDSValidatorID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } - + require.NoError(err) 
executor := StandardTxExecutor{ Backend: &env.backend, State: onAcceptState, Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err != nil { - t.Fatal(err) - } + require.NoError(err) } // Case: Proposed validator start validating at/before current timestamp @@ -656,17 +602,13 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(newTimestamp.Add(defaultMinStakingDuration).Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -674,9 +616,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because starts validating at current timestamp") - } + require.Error(err, "should have failed verification because starts validating at current timestamp") } // reset the timestamp @@ -690,25 +630,23 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(defaultValidateEndTime.Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - staker = state.NewCurrentStaker( + staker, err = state.NewCurrentStaker( subnetTx.ID(), subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(subnetTx, status.Committed) 
env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) { // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID @@ -718,17 +656,13 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(defaultValidateEndTime.Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -736,16 +670,13 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: duplicateSubnetTx, } err = duplicateSubnetTx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because validator already validating the specified subnet") - } + require.Error(err, "should have failed verification because validator already validating the specified subnet") } env.state.DeleteCurrentValidator(staker) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) { // Case: Too many signatures @@ -755,17 +686,13 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) - if err != nil { - 
t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -773,9 +700,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because tx has 3 signatures but only 2 needed") - } + require.Error(err, "should have failed verification because tx has 3 signatures but only 2 needed") } { @@ -786,12 +711,10 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Remove a signature addSubnetValidatorTx := tx.Unsigned.(*txs.AddSubnetValidatorTx) @@ -801,9 +724,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { addSubnetValidatorTx.SyntacticallyVerified = false onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -811,9 +732,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because not enough control sigs") - } + require.Error(err, "should have failed verification because not enough control sigs") } { @@ -824,23 +743,18 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix()), // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - 
[]*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], preFundedKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], preFundedKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + // Replace a valid signature with one from keys[3] sig, err := preFundedKeys[3].SignHash(hashing.ComputeHash256(tx.Unsigned.Bytes())) - if err != nil { - t.Fatal(err) - } + require.NoError(err) copy(tx.Creds[0].(*secp256k1fx.Credential).Sigs[0][:], sig) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -848,9 +762,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because a control sig is invalid") - } + require.Error(err, "should have failed verification because a control sig is invalid") } { @@ -862,30 +774,26 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { uint64(defaultGenesisTime.Add(defaultMinStakingDuration).Unix())+1, // end time ids.NodeID(nodeID), // node ID testSubnet1.ID(), // subnet ID - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - staker = state.NewCurrentStaker( + staker, err = state.NewCurrentStaker( subnetTx.ID(), subnetTx.Unsigned.(*txs.AddSubnetValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := 
StandardTxExecutor{ Backend: &env.backend, @@ -893,19 +801,16 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed verification because validator already in pending validator set of the specified subnet") - } + require.Error(err, "should have failed verification because validator already in pending validator set of the specified subnet") } } func TestStandardTxExecutorAddValidator(t *testing.T) { - env := newEnvironment() + require := require.New(t) + env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { - if err := shutdownEnvironment(env); err != nil { - t.Fatal(err) - } + require.NoError(shutdownEnvironment(env)) }() nodeID := ids.GenerateTestNodeID() @@ -921,17 +826,13 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -939,9 +840,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should've errored because start time too early") - } + require.Error(err, "should've errored because start time too early") } { @@ -953,17 +852,13 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } 
+ require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -971,9 +866,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should've errored because start time too far in the future") - } + require.Error(err, "should've errored because start time too far in the future") } { @@ -985,17 +878,13 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -1003,9 +892,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should've errored because validator already validating") - } + require.Error(err, "should've errored because validator already validating") } { @@ -1018,31 +905,27 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, // shares - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr // key ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - staker := state.NewCurrentStaker( + staker, err := state.NewCurrentStaker( tx.ID(), tx.Unsigned.(*txs.AddValidatorTx), 0, ) + require.NoError(err) env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) dummyHeight := uint64(1) env.state.SetHeight(dummyHeight) - if err := env.state.Commit(); err != nil { - t.Fatal(err) - } + err = env.state.Commit() + require.NoError(err) onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + 
require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -1050,9 +933,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because validator in pending validator set") - } + require.Error(err, "should have failed because validator in pending validator set") } { @@ -1064,26 +945,21 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { nodeID, ids.ShortEmpty, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{preFundedKeys[0]}, + []*secp256k1.PrivateKey{preFundedKeys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // Remove all UTXOs owned by preFundedKeys[0] utxoIDs, err := env.state.UTXOIDs(preFundedKeys[0].PublicKey().Address().Bytes(), ids.Empty, math.MaxInt32) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + for _, utxoID := range utxoIDs { env.state.DeleteUTXO(utxoID) } onAcceptState, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - t.Fatal(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, @@ -1091,9 +967,7 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - if err == nil { - t.Fatal("should have failed because tx fee paying key has no funds") - } + require.Error(err, "should have failed because tx fee paying key has no funds") } } @@ -1153,9 +1027,7 @@ func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *tx Unsigned: unsignedTx, Creds: creds, } - if err := tx.Sign(txs.Codec, nil); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Initialize(txs.Codec)) return unsignedTx, tx } @@ -1211,7 +1083,6 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) // Set dependency expectations. 
- env.state.EXPECT().GetTimestamp().Return(env.banffTime.Add(time.Second)).Times(1) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil).Times(1) subnetOwner := fx.NewMockOwner(ctrl) subnetTx := &txs.Tx{ @@ -1232,7 +1103,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1240,32 +1111,11 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, shouldErr: false, }, - { - name: "not yet banff time", - newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { - env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) - env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime.Add(-1 * time.Second)).Times(1) - e := &StandardTxExecutor{ - Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.banffTime, - }, - Ctx: &snow.Context{}, - }, - State: env.state, - Tx: env.tx, - } - return env.unsignedTx, e - }, - shouldErr: true, - expectedErr: errRemoveSubnetValidatorTxBeforeBanff, - }, { name: "tx fails syntactic verification", newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { @@ -1273,13 +1123,12 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { // Setting the subnet ID to the Primary Network ID makes the tx fail syntactic verification env.tx.Unsigned.(*txs.RemoveSubnetValidatorTx).Subnet = constants.PrimaryNetworkID env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: 
&utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1287,7 +1136,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, shouldErr: true, @@ -1297,7 +1146,6 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(nil, database.ErrNotFound) env.state.EXPECT().GetPendingValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(nil, database.ErrNotFound) e := &StandardTxExecutor{ @@ -1305,7 +1153,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1313,7 +1161,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, shouldErr: true, @@ -1328,14 +1176,13 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { staker.Priority = txs.SubnetPermissionlessValidatorCurrentPriority // Set dependency expectations. 
- env.state.EXPECT().GetTimestamp().Return(env.banffTime.Add(time.Second)).Times(1) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(&staker, nil).Times(1) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1343,7 +1190,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, shouldErr: true, @@ -1356,14 +1203,13 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { // Remove credentials env.tx.Creds = nil env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1371,7 +1217,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, shouldErr: true, @@ -1382,7 +1228,6 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) env.state.EXPECT().GetTx(env.unsignedTx.Subnet).Return(nil, status.Unknown, 
database.ErrNotFound) e := &StandardTxExecutor{ @@ -1390,7 +1235,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1398,7 +1243,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, shouldErr: true, @@ -1409,7 +1254,6 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) subnetOwner := fx.NewMockOwner(ctrl) subnetTx := &txs.Tx{ @@ -1418,13 +1262,13 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { }, } env.state.EXPECT().GetTx(env.unsignedTx.Subnet).Return(subnetTx, status.Committed, nil) - env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(errors.New("")) + env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(errTest) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1432,7 +1276,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, shouldErr: true, @@ 
-1443,7 +1287,6 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) subnetOwner := fx.NewMockOwner(ctrl) subnetTx := &txs.Tx{ @@ -1455,13 +1298,13 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil) env.flowChecker.EXPECT().VerifySpend( gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), - ).Return(errors.New("")) + ).Return(errTest) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1469,7 +1312,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, shouldErr: true, @@ -1565,9 +1408,7 @@ func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { Unsigned: unsignedTx, Creds: creds, } - if err := tx.Sign(txs.Codec, nil); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.Initialize(txs.Codec)) return unsignedTx, tx } @@ -1615,26 +1456,6 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { } tests := []test{ - { - name: "not yet banff time", - newExecutor: func(ctrl *gomock.Controller) (*txs.TransformSubnetTx, *StandardTxExecutor) { - env := newValidTransformSubnetTxVerifyEnv(t, ctrl) - env.state = state.NewMockDiff(ctrl) - 
env.state.EXPECT().GetTimestamp().Return(env.banffTime.Add(-1 * time.Second)).Times(1) - e := &StandardTxExecutor{ - Backend: &Backend{ - Config: &config.Config{ - BanffTime: env.banffTime, - }, - Ctx: &snow.Context{}, - }, - State: env.state, - Tx: env.tx, - } - return env.unsignedTx, e - }, - err: errTransformSubnetTxBeforeBanff, - }, { name: "tx fails syntactic verification", newExecutor: func(ctrl *gomock.Controller) (*txs.TransformSubnetTx, *StandardTxExecutor) { @@ -1642,13 +1463,12 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { // Setting the tx to nil makes the tx fail syntactic verification env.tx.Unsigned = (*txs.TransformSubnetTx)(nil) env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1656,7 +1476,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, err: txs.ErrNilTx, @@ -1667,13 +1487,12 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) env.unsignedTx.MaxStakeDuration = math.MaxUint32 env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ BanffTime: env.banffTime, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1681,7 +1500,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, err: errMaxStakeDurationTooLarge, @@ -1693,14 
+1512,13 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { // Remove credentials env.tx.Creds = nil env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ BanffTime: env.banffTime, MaxStakeDuration: math.MaxInt64, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1708,7 +1526,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, err: errWrongNumberOfCredentials, @@ -1718,7 +1536,6 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.TransformSubnetTx, *StandardTxExecutor) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) - env.state.EXPECT().GetTimestamp().Return(env.banffTime).Times(1) subnetOwner := fx.NewMockOwner(ctrl) subnetTx := &txs.Tx{ Unsigned: &txs.CreateSubnetTx{ @@ -1737,7 +1554,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { BanffTime: env.banffTime, MaxStakeDuration: math.MaxInt64, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1745,7 +1562,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, err: errFlowCheckFailed, @@ -1756,7 +1573,6 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) // Set dependency expectations. 
- env.state.EXPECT().GetTimestamp().Return(env.banffTime.Add(time.Second)).Times(1) subnetOwner := fx.NewMockOwner(ctrl) subnetTx := &txs.Tx{ Unsigned: &txs.CreateSubnetTx{ @@ -1779,7 +1595,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { BanffTime: env.banffTime, MaxStakeDuration: math.MaxInt64, }, - Bootstrapped: &utils.AtomicBool{}, + Bootstrapped: &utils.Atomic[bool]{}, Fx: env.fx, FlowChecker: env.flowChecker, Ctx: &snow.Context{}, @@ -1787,7 +1603,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { Tx: env.tx, State: env.state, } - e.Bootstrapped.SetValue(true) + e.Bootstrapped.Set(true) return env.unsignedTx, e }, err: nil, @@ -1796,13 +1612,12 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() unsignedTx, executor := tt.newExecutor(ctrl) err := executor.TransformSubnetTx(unsignedTx) - require.ErrorIs(err, tt.err) + require.ErrorIs(t, err, tt.err) }) } } diff --git a/avalanchego/vms/platformvm/txs/executor/state_changes.go b/avalanchego/vms/platformvm/txs/executor/state_changes.go index d45f1938..ccf87bce 100644 --- a/avalanchego/vms/platformvm/txs/executor/state_changes.go +++ b/avalanchego/vms/platformvm/txs/executor/state_changes.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package executor @@ -119,6 +119,16 @@ func AdvanceTimeTo( // Add to the staker set any pending stakers whose start time is at or // before the new timestamp + + // Note: we process pending stakers ready to be promoted to current ones and + // then we process current stakers to be demoted out of stakers set. It is + // guaranteed that no promoted stakers would be demoted immediately. 
A + // failure of this invariant would cause a staker to be added to + // StateChanges and be persisted among current stakers even if it already + // expired. The following invariants ensure this does not happens: + // Invariant: minimum stake duration is > 0, so staker.StartTime != staker.EndTime. + // Invariant: [newChainTime] does not skip stakers set change times. + for pendingStakerIterator.Next() { stakerToRemove := pendingStakerIterator.Value() if stakerToRemove.StartTime.After(newChainTime) { @@ -130,12 +140,6 @@ func AdvanceTimeTo( stakerToAdd.Priority = txs.PendingToCurrentPriorities[stakerToRemove.Priority] if stakerToRemove.Priority == txs.SubnetPermissionedValidatorPendingPriority { - // Invariant: [txTimestamp] <= [nextStakerChangeTime]. - // Invariant: minimum stake duration is > 0. - // - // Both of the above invariants ensure the staker we are adding here - // should never be attempted to be removed in the following loop. - changes.currentValidatorsToAdd = append(changes.currentValidatorsToAdd, &stakerToAdd) changes.pendingValidatorsToRemove = append(changes.pendingValidatorsToRemove, stakerToRemove) continue diff --git a/avalanchego/vms/platformvm/txs/executor/subnet_tx_verification.go b/avalanchego/vms/platformvm/txs/executor/subnet_tx_verification.go index 276481be..defcc608 100644 --- a/avalanchego/vms/platformvm/txs/executor/subnet_tx_verification.go +++ b/avalanchego/vms/platformvm/txs/executor/subnet_tx_verification.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor @@ -70,7 +70,7 @@ func verifySubnetAuthorization( subnetIntf, _, err := chainState.GetTx(subnetID) if err != nil { return nil, fmt.Errorf( - "%w %q: %s", + "%w %q: %v", errCantFindSubnet, subnetID, err, @@ -83,7 +83,7 @@ func verifySubnetAuthorization( } if err := backend.Fx.VerifyPermission(sTx.Unsigned, subnetAuth, subnetCred, subnet.Owner); err != nil { - return nil, fmt.Errorf("%w: %s", errUnauthorizedSubnetModification, err) + return nil, fmt.Errorf("%w: %v", errUnauthorizedSubnetModification, err) } return sTx.Creds[:baseTxCredsLen], nil diff --git a/avalanchego/vms/platformvm/txs/executor/tx_mempool_verifier.go b/avalanchego/vms/platformvm/txs/executor/tx_mempool_verifier.go index 31ca6c36..71a1a61e 100644 --- a/avalanchego/vms/platformvm/txs/executor/tx_mempool_verifier.go +++ b/avalanchego/vms/platformvm/txs/executor/tx_mempool_verifier.go @@ -1,17 +1,19 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package executor import ( "errors" + "fmt" + "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ txs.Visitor = &MempoolTxVerifier{} +var _ txs.Visitor = (*MempoolTxVerifier)(nil) type MempoolTxVerifier struct { *Backend @@ -20,19 +22,24 @@ type MempoolTxVerifier struct { Tx *txs.Tx } -func (*MempoolTxVerifier) AdvanceTimeTx(*txs.AdvanceTimeTx) error { return errWrongTxType } -func (*MempoolTxVerifier) RewardValidatorTx(*txs.RewardValidatorTx) error { return errWrongTxType } +func (*MempoolTxVerifier) AdvanceTimeTx(*txs.AdvanceTimeTx) error { + return errWrongTxType +} + +func (*MempoolTxVerifier) RewardValidatorTx(*txs.RewardValidatorTx) error { + return errWrongTxType +} func (v *MempoolTxVerifier) AddValidatorTx(tx *txs.AddValidatorTx) error { - return v.proposalTx(tx) + return v.standardTx(tx) } func (v *MempoolTxVerifier) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error { - return v.proposalTx(tx) + return v.standardTx(tx) } func (v *MempoolTxVerifier) AddDelegatorTx(tx *txs.AddDelegatorTx) error { - return v.proposalTx(tx) + return v.standardTx(tx) } func (v *MempoolTxVerifier) CreateChainTx(tx *txs.CreateChainTx) error { @@ -67,35 +74,16 @@ func (v *MempoolTxVerifier) AddPermissionlessDelegatorTx(tx *txs.AddPermissionle return v.standardTx(tx) } -// TODO: simplify this function after Banff is activated. -func (v *MempoolTxVerifier) proposalTx(tx txs.StakerTx) error { - startTime := tx.StartTime() - maxLocalStartTime := v.Clk.Time().Add(MaxFutureStartTime) - if startTime.After(maxLocalStartTime) { - return errFutureStakeTime - } - - onCommitState, err := state.NewDiff(v.ParentID, v.StateVersions) - if err != nil { - return err - } - - // Make sure that the Banff fork check will pass. 
- currentChainTime := onCommitState.GetTimestamp() - if v.Backend.Config.IsBanffActivated(currentChainTime) { - return v.standardTx(tx) - } - - onAbortState, err := state.NewDiff(v.ParentID, v.StateVersions) +func (v *MempoolTxVerifier) standardTx(tx txs.UnsignedTx) error { + baseState, err := v.standardBaseState() if err != nil { return err } - executor := ProposalTxExecutor{ - OnCommitState: onCommitState, - OnAbortState: onAbortState, - Backend: v.Backend, - Tx: v.Tx, + executor := StandardTxExecutor{ + Backend: v.Backend, + State: baseState, + Tx: v.Tx, } err = tx.Visit(&executor) // We ignore [errFutureStakeTime] here because the time will be advanced @@ -106,25 +94,52 @@ func (v *MempoolTxVerifier) proposalTx(tx txs.StakerTx) error { return err } -func (v *MempoolTxVerifier) standardTx(tx txs.UnsignedTx) error { - state, err := state.NewDiff( - v.ParentID, - v.StateVersions, - ) +// Upon Banff activation, txs are not verified against current chain time +// but against the block timestamp. [baseTime] calculates +// the right timestamp to be used to mempool tx verification +func (v *MempoolTxVerifier) standardBaseState() (state.Diff, error) { + state, err := state.NewDiff(v.ParentID, v.StateVersions) if err != nil { - return err + return nil, err } - executor := StandardTxExecutor{ - Backend: v.Backend, - State: state, - Tx: v.Tx, + nextBlkTime, err := v.nextBlockTime(state) + if err != nil { + return nil, err } - err = tx.Visit(&executor) - // We ignore [errFutureStakeTime] here because the time will be advanced - // when this transaction is issued. 
- if errors.Is(err, errFutureStakeTime) { - return nil + + if !v.Backend.Config.IsBanffActivated(nextBlkTime) { + // next tx would be included into an Apricot block + // so we verify it against current chain state + return state, nil } - return err + + // next tx would be included into a Banff block + // so we verify it against duly updated chain state + changes, err := AdvanceTimeTo(v.Backend, state, nextBlkTime) + if err != nil { + return nil, err + } + changes.Apply(state) + state.SetTimestamp(nextBlkTime) + + return state, nil +} + +func (v *MempoolTxVerifier) nextBlockTime(state state.Diff) (time.Time, error) { + var ( + parentTime = state.GetTimestamp() + nextBlkTime = v.Clk.Time() + ) + if parentTime.After(nextBlkTime) { + nextBlkTime = parentTime + } + nextStakerChangeTime, err := GetNextStakerChangeTime(state) + if err != nil { + return time.Time{}, fmt.Errorf("could not calculate next staker change time: %w", err) + } + if !nextBlkTime.Before(nextStakerChangeTime) { + nextBlkTime = nextStakerChangeTime + } + return nextBlkTime, nil } diff --git a/avalanchego/vms/platformvm/txs/export_tx.go b/avalanchego/vms/platformvm/txs/export_tx.go index e05177ac..b124263a 100644 --- a/avalanchego/vms/platformvm/txs/export_tx.go +++ b/avalanchego/vms/platformvm/txs/export_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -15,7 +15,7 @@ import ( ) var ( - _ UnsignedTx = &ExportTx{} + _ UnsignedTx = (*ExportTx)(nil) ErrWrongLocktime = errors.New("wrong locktime reported") errNoExportOutputs = errors.New("no export outputs") diff --git a/avalanchego/vms/platformvm/txs/import_tx.go b/avalanchego/vms/platformvm/txs/import_tx.go index 45cff351..70a79edf 100644 --- a/avalanchego/vms/platformvm/txs/import_tx.go +++ b/avalanchego/vms/platformvm/txs/import_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -9,12 +9,14 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) var ( - _ UnsignedTx = &ImportTx{} + _ UnsignedTx = (*ImportTx)(nil) errNoImportInputs = errors.New("tx has no imported inputs") ) @@ -41,15 +43,15 @@ func (tx *ImportTx) InitCtx(ctx *snow.Context) { } // InputUTXOs returns the UTXOIDs of the imported funds -func (tx *ImportTx) InputUTXOs() ids.Set { - set := ids.NewSet(len(tx.ImportedInputs)) +func (tx *ImportTx) InputUTXOs() set.Set[ids.ID] { + set := set.NewSet[ids.ID](len(tx.ImportedInputs)) for _, in := range tx.ImportedInputs { set.Add(in.InputID()) } return set } -func (tx *ImportTx) InputIDs() ids.Set { +func (tx *ImportTx) InputIDs() set.Set[ids.ID] { inputs := tx.BaseTx.InputIDs() atomicInputs := tx.InputUTXOs() inputs.Union(atomicInputs) @@ -76,7 +78,7 @@ func (tx *ImportTx) SyntacticVerify(ctx *snow.Context) error { return fmt.Errorf("input failed verification: %w", err) } } - if !avax.IsSortedAndUniqueTransferableInputs(tx.ImportedInputs) { + if !utils.IsSortedAndUniqueSortable(tx.ImportedInputs) { return errInputsNotSortedUnique } diff --git 
a/avalanchego/vms/platformvm/txs/mempool/issuer.go b/avalanchego/vms/platformvm/txs/mempool/issuer.go index 0a7d8177..aa5e5c70 100644 --- a/avalanchego/vms/platformvm/txs/mempool/issuer.go +++ b/avalanchego/vms/platformvm/txs/mempool/issuer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package mempool @@ -10,7 +10,7 @@ import ( ) var ( - _ txs.Visitor = &issuer{} + _ txs.Visitor = (*issuer)(nil) errCantIssueAdvanceTimeTx = errors.New("can not issue an advance time tx") errCantIssueRewardValidatorTx = errors.New("can not issue a reward validator tx") @@ -21,11 +21,11 @@ type issuer struct { tx *txs.Tx } -func (i *issuer) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { +func (*issuer) AdvanceTimeTx(*txs.AdvanceTimeTx) error { return errCantIssueAdvanceTimeTx } -func (i *issuer) RewardValidatorTx(tx *txs.RewardValidatorTx) error { +func (*issuer) RewardValidatorTx(*txs.RewardValidatorTx) error { return errCantIssueRewardValidatorTx } @@ -34,52 +34,52 @@ func (i *issuer) AddValidatorTx(*txs.AddValidatorTx) error { return nil } -func (i *issuer) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error { +func (i *issuer) AddSubnetValidatorTx(*txs.AddSubnetValidatorTx) error { i.m.addStakerTx(i.tx) return nil } -func (i *issuer) AddDelegatorTx(tx *txs.AddDelegatorTx) error { +func (i *issuer) AddDelegatorTx(*txs.AddDelegatorTx) error { i.m.addStakerTx(i.tx) return nil } -func (i *issuer) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { +func (i *issuer) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { i.m.addDecisionTx(i.tx) return nil } -func (i *issuer) CreateChainTx(tx *txs.CreateChainTx) error { +func (i *issuer) CreateChainTx(*txs.CreateChainTx) error { i.m.addDecisionTx(i.tx) return nil } -func (i *issuer) CreateSubnetTx(tx *txs.CreateSubnetTx) error { +func (i *issuer) 
CreateSubnetTx(*txs.CreateSubnetTx) error { i.m.addDecisionTx(i.tx) return nil } -func (i *issuer) ImportTx(tx *txs.ImportTx) error { +func (i *issuer) ImportTx(*txs.ImportTx) error { i.m.addDecisionTx(i.tx) return nil } -func (i *issuer) ExportTx(tx *txs.ExportTx) error { +func (i *issuer) ExportTx(*txs.ExportTx) error { i.m.addDecisionTx(i.tx) return nil } -func (i *issuer) TransformSubnetTx(tx *txs.TransformSubnetTx) error { +func (i *issuer) TransformSubnetTx(*txs.TransformSubnetTx) error { i.m.addDecisionTx(i.tx) return nil } -func (i *issuer) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { +func (i *issuer) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { i.m.addStakerTx(i.tx) return nil } -func (i *issuer) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { +func (i *issuer) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegatorTx) error { i.m.addStakerTx(i.tx) return nil } diff --git a/avalanchego/vms/platformvm/txs/mempool/mempool.go b/avalanchego/vms/platformvm/txs/mempool/mempool.go index c7787af0..cec810fe 100644 --- a/avalanchego/vms/platformvm/txs/mempool/mempool.go +++ b/avalanchego/vms/platformvm/txs/mempool/mempool.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package mempool @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/txheap" @@ -31,7 +32,7 @@ const ( ) var ( - _ Mempool = &mempool{} + _ Mempool = (*mempool)(nil) errMempoolFull = errors.New("mempool is full") ) @@ -60,7 +61,6 @@ type Mempool interface { HasTxs() bool // PeekTxs returns the next txs for Banff blocks // up to maxTxsBytes without removing them from the mempool. - // It returns nil if !HasTxs() PeekTxs(maxTxsBytes int) []*txs.Tx HasStakerTx() bool @@ -73,18 +73,8 @@ type Mempool interface { // not evicted from unissued decision/staker txs. // This allows previously dropped txs to be possibly // reissued. - MarkDropped(txID ids.ID, reason string) - GetDropReason(txID ids.ID) (string, bool) - - // TODO: following Banff, these methods can be removed - - // Pre Banff activation, decision transactions are included into - // standard blocks. - HasApricotDecisionTxs() bool - // PeekApricotDecisionTxs returns the next decisionTxs, up to maxTxsBytes, - // without removing them from the mempool. - // It returns nil if !HasApricotDecisionTxs() - PeekApricotDecisionTxs(maxTxsBytes int) []*txs.Tx + MarkDropped(txID ids.ID, reason error) + GetDropReason(txID ids.ID) error } // Transactions from clients that have not yet been put into blocks and added to @@ -100,10 +90,10 @@ type mempool struct { unissuedStakerTxs txheap.Heap // Key: Tx ID - // Value: String repr. 
of the verification error - droppedTxIDs *cache.LRU + // Value: Verification error + droppedTxIDs *cache.LRU[ids.ID, error] - consumedUTXOs ids.Set + consumedUTXOs set.Set[ids.ID] blkTimer BlockTimer } @@ -146,15 +136,20 @@ func NewMempool( bytesAvailable: maxMempoolSize, unissuedDecisionTxs: unissuedDecisionTxs, unissuedStakerTxs: unissuedStakerTxs, - droppedTxIDs: &cache.LRU{Size: droppedTxIDsCacheSize}, - consumedUTXOs: ids.NewSet(initialConsumedUTXOsSize), + droppedTxIDs: &cache.LRU[ids.ID, error]{Size: droppedTxIDsCacheSize}, + consumedUTXOs: set.NewSet[ids.ID](initialConsumedUTXOsSize), dropIncoming: false, // enable tx adding by default blkTimer: blkTimer, }, nil } -func (m *mempool) EnableAdding() { m.dropIncoming = false } -func (m *mempool) DisableAdding() { m.dropIncoming = true } +func (m *mempool) EnableAdding() { + m.dropIncoming = false +} + +func (m *mempool) DisableAdding() { + m.dropIncoming = true +} func (m *mempool) Add(tx *txs.Tx) error { if m.dropIncoming { @@ -229,16 +224,16 @@ func (m *mempool) HasTxs() bool { } func (m *mempool) PeekTxs(maxTxsBytes int) []*txs.Tx { - txs, size := m.peekApricotDecisionTxs(maxTxsBytes) + txs := m.unissuedDecisionTxs.List() + txs = append(txs, m.unissuedStakerTxs.List()...) 
- for _, tx := range m.unissuedStakerTxs.List() { + size := 0 + for i, tx := range txs { size += len(tx.Bytes()) if size > maxTxsBytes { - break + return txs[:i] } - txs = append(txs, tx) } - return txs } @@ -252,9 +247,9 @@ func (m *mempool) addStakerTx(tx *txs.Tx) { m.register(tx) } -func (m *mempool) HasApricotDecisionTxs() bool { return m.unissuedDecisionTxs.Len() > 0 } - -func (m *mempool) HasStakerTx() bool { return m.unissuedStakerTxs.Len() > 0 } +func (m *mempool) HasStakerTx() bool { + return m.unissuedStakerTxs.Len() > 0 +} func (m *mempool) removeDecisionTxs(txs []*txs.Tx) { for _, tx := range txs { @@ -272,27 +267,6 @@ func (m *mempool) removeStakerTx(tx *txs.Tx) { } } -func (m *mempool) PeekApricotDecisionTxs(maxTxsBytes int) []*txs.Tx { - txs, _ := m.peekApricotDecisionTxs(maxTxsBytes) - return txs -} - -func (m *mempool) peekApricotDecisionTxs(maxTxsBytes int) ([]*txs.Tx, int) { - list := m.unissuedDecisionTxs.List() - - totalBytes, txsToKeep := 0, 0 - for _, tx := range list { - totalBytes += len(tx.Bytes()) - if totalBytes > maxTxsBytes { - break - } - txsToKeep++ - } - - list = list[:txsToKeep] - return list, totalBytes -} - func (m *mempool) PeekStakerTx() *txs.Tx { if m.unissuedStakerTxs.Len() == 0 { return nil @@ -301,16 +275,13 @@ func (m *mempool) PeekStakerTx() *txs.Tx { return m.unissuedStakerTxs.Peek() } -func (m *mempool) MarkDropped(txID ids.ID, reason string) { +func (m *mempool) MarkDropped(txID ids.ID, reason error) { m.droppedTxIDs.Put(txID, reason) } -func (m *mempool) GetDropReason(txID ids.ID) (string, bool) { - reason, exist := m.droppedTxIDs.Get(txID) - if !exist { - return "", false - } - return reason.(string), true +func (m *mempool) GetDropReason(txID ids.ID) error { + err, _ := m.droppedTxIDs.Get(txID) + return err } func (m *mempool) register(tx *txs.Tx) { diff --git a/avalanchego/vms/platformvm/txs/mempool/mempool_test.go b/avalanchego/vms/platformvm/txs/mempool/mempool_test.go index 931f6ece..ba25fac2 100644 --- 
a/avalanchego/vms/platformvm/txs/mempool/mempool_test.go +++ b/avalanchego/vms/platformvm/txs/mempool/mempool_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package mempool @@ -14,21 +14,20 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ BlockTimer = &noopBlkTimer{} +var _ BlockTimer = (*noopBlkTimer)(nil) type noopBlkTimer struct{} -func (bt *noopBlkTimer) ResetBlockTimer() {} +func (*noopBlkTimer) ResetBlockTimer() {} -var preFundedKeys = crypto.BuildTestKeys() +var preFundedKeys = secp256k1.TestKeys() // shows that valid tx is not added to mempool if this would exceed its maximum // size @@ -67,7 +66,7 @@ func TestDecisionTxsInMempool(t *testing.T) { require.NoError(err) // txs must not already there before we start - require.False(mpool.HasApricotDecisionTxs()) + require.False(mpool.HasTxs()) for _, tx := range decisionTxs { // tx not already there @@ -84,7 +83,7 @@ func TestDecisionTxsInMempool(t *testing.T) { require.Equal(tx, retrieved) // we can peek it - peeked := mpool.PeekApricotDecisionTxs(math.MaxInt) + peeked := mpool.PeekTxs(math.MaxInt) // tx will be among those peeked, // in NO PARTICULAR ORDER @@ -226,7 +225,7 @@ func createTestProposalTxs(count int) ([]*txs.Tx, error) { for i := 0; i < count; i++ { utx := &txs.AddValidatorTx{ BaseTx: txs.BaseTx{}, - Validator: validator.Validator{ + Validator: txs.Validator{ Start: uint64(clk.Time().Add(time.Duration(count-i) * 
time.Second).Unix()), }, StakeOuts: nil, diff --git a/avalanchego/vms/platformvm/txs/mempool/mock_mempool.go b/avalanchego/vms/platformvm/txs/mempool/mock_mempool.go index 6ed0eefd..b4ce5e4b 100644 --- a/avalanchego/vms/platformvm/txs/mempool/mock_mempool.go +++ b/avalanchego/vms/platformvm/txs/mempool/mock_mempool.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool (interfaces: Mempool) @@ -88,12 +91,11 @@ func (mr *MockMempoolMockRecorder) Get(arg0 interface{}) *gomock.Call { } // GetDropReason mocks base method. -func (m *MockMempool) GetDropReason(arg0 ids.ID) (string, bool) { +func (m *MockMempool) GetDropReason(arg0 ids.ID) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetDropReason", arg0) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(bool) - return ret0, ret1 + ret0, _ := ret[0].(error) + return ret0 } // GetDropReason indicates an expected call of GetDropReason. @@ -116,20 +118,6 @@ func (mr *MockMempoolMockRecorder) Has(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Has", reflect.TypeOf((*MockMempool)(nil).Has), arg0) } -// HasApricotDecisionTxs mocks base method. -func (m *MockMempool) HasApricotDecisionTxs() bool { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasApricotDecisionTxs") - ret0, _ := ret[0].(bool) - return ret0 -} - -// HasApricotDecisionTxs indicates an expected call of HasApricotDecisionTxs. -func (mr *MockMempoolMockRecorder) HasApricotDecisionTxs() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasApricotDecisionTxs", reflect.TypeOf((*MockMempool)(nil).HasApricotDecisionTxs)) -} - // HasStakerTx mocks base method. 
func (m *MockMempool) HasStakerTx() bool { m.ctrl.T.Helper() @@ -159,7 +147,7 @@ func (mr *MockMempoolMockRecorder) HasTxs() *gomock.Call { } // MarkDropped mocks base method. -func (m *MockMempool) MarkDropped(arg0 ids.ID, arg1 string) { +func (m *MockMempool) MarkDropped(arg0 ids.ID, arg1 error) { m.ctrl.T.Helper() m.ctrl.Call(m, "MarkDropped", arg0, arg1) } @@ -170,20 +158,6 @@ func (mr *MockMempoolMockRecorder) MarkDropped(arg0, arg1 interface{}) *gomock.C return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MarkDropped", reflect.TypeOf((*MockMempool)(nil).MarkDropped), arg0, arg1) } -// PeekApricotDecisionTxs mocks base method. -func (m *MockMempool) PeekApricotDecisionTxs(arg0 int) []*txs.Tx { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PeekApricotDecisionTxs", arg0) - ret0, _ := ret[0].([]*txs.Tx) - return ret0 -} - -// PeekApricotDecisionTxs indicates an expected call of PeekApricotDecisionTxs. -func (mr *MockMempoolMockRecorder) PeekApricotDecisionTxs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PeekApricotDecisionTxs", reflect.TypeOf((*MockMempool)(nil).PeekApricotDecisionTxs), arg0) -} - // PeekStakerTx mocks base method. func (m *MockMempool) PeekStakerTx() *txs.Tx { m.ctrl.T.Helper() diff --git a/avalanchego/vms/platformvm/txs/mempool/remover.go b/avalanchego/vms/platformvm/txs/mempool/remover.go index decdd712..fcdeca38 100644 --- a/avalanchego/vms/platformvm/txs/mempool/remover.go +++ b/avalanchego/vms/platformvm/txs/mempool/remover.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package mempool import "github.com/ava-labs/avalanchego/vms/platformvm/txs" -var _ txs.Visitor = &remover{} +var _ txs.Visitor = (*remover)(nil) type remover struct { m *mempool @@ -27,7 +27,7 @@ func (r *remover) AddDelegatorTx(*txs.AddDelegatorTx) error { return nil } -func (r *remover) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { +func (r *remover) RemoveSubnetValidatorTx(*txs.RemoveSubnetValidatorTx) error { r.m.removeDecisionTxs([]*txs.Tx{r.tx}) return nil } @@ -52,27 +52,27 @@ func (r *remover) ExportTx(*txs.ExportTx) error { return nil } -func (r *remover) TransformSubnetTx(tx *txs.TransformSubnetTx) error { +func (r *remover) TransformSubnetTx(*txs.TransformSubnetTx) error { r.m.removeDecisionTxs([]*txs.Tx{r.tx}) return nil } -func (r *remover) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { +func (r *remover) AddPermissionlessValidatorTx(*txs.AddPermissionlessValidatorTx) error { r.m.removeStakerTx(r.tx) return nil } -func (r *remover) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { +func (r *remover) AddPermissionlessDelegatorTx(*txs.AddPermissionlessDelegatorTx) error { r.m.removeStakerTx(r.tx) return nil } -func (r *remover) AdvanceTimeTx(*txs.AdvanceTimeTx) error { +func (*remover) AdvanceTimeTx(*txs.AdvanceTimeTx) error { // this tx is never in mempool return nil } -func (r *remover) RewardValidatorTx(*txs.RewardValidatorTx) error { +func (*remover) RewardValidatorTx(*txs.RewardValidatorTx) error { // this tx is never in mempool return nil } diff --git a/avalanchego/vms/platformvm/txs/mock_staker.go b/avalanchego/vms/platformvm/txs/mock_staker.go index 4e9f5307..0fbaea26 100644 --- a/avalanchego/vms/platformvm/txs/mock_staker.go +++ b/avalanchego/vms/platformvm/txs/mock_staker.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/vms/platformvm/txs (interfaces: Staker) @@ -9,6 +12,7 @@ import ( time "time" ids "github.com/ava-labs/avalanchego/ids" + bls "github.com/ava-labs/avalanchego/utils/crypto/bls" gomock "github.com/golang/mock/gomock" ) @@ -91,6 +95,22 @@ func (mr *MockStakerMockRecorder) PendingPriority() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingPriority", reflect.TypeOf((*MockStaker)(nil).PendingPriority)) } +// PublicKey mocks base method. +func (m *MockStaker) PublicKey() (*bls.PublicKey, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublicKey") + ret0, _ := ret[0].(*bls.PublicKey) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// PublicKey indicates an expected call of PublicKey. +func (mr *MockStakerMockRecorder) PublicKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockStaker)(nil).PublicKey)) +} + // StartTime mocks base method. func (m *MockStaker) StartTime() time.Time { m.ctrl.T.Helper() diff --git a/avalanchego/vms/platformvm/txs/mock_unsigned_tx.go b/avalanchego/vms/platformvm/txs/mock_unsigned_tx.go index 8c4c22c9..8d95b09b 100644 --- a/avalanchego/vms/platformvm/txs/mock_unsigned_tx.go +++ b/avalanchego/vms/platformvm/txs/mock_unsigned_tx.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. 
// Source: github.com/ava-labs/avalanchego/vms/platformvm/txs (interfaces: UnsignedTx) @@ -5,37 +8,39 @@ package txs import ( + reflect "reflect" + ids "github.com/ava-labs/avalanchego/ids" snow "github.com/ava-labs/avalanchego/snow" + set "github.com/ava-labs/avalanchego/utils/set" avax "github.com/ava-labs/avalanchego/vms/components/avax" gomock "github.com/golang/mock/gomock" - reflect "reflect" ) -// MockUnsignedTx is a mock of UnsignedTx interface +// MockUnsignedTx is a mock of UnsignedTx interface. type MockUnsignedTx struct { ctrl *gomock.Controller recorder *MockUnsignedTxMockRecorder } -// MockUnsignedTxMockRecorder is the mock recorder for MockUnsignedTx +// MockUnsignedTxMockRecorder is the mock recorder for MockUnsignedTx. type MockUnsignedTxMockRecorder struct { mock *MockUnsignedTx } -// NewMockUnsignedTx creates a new mock instance +// NewMockUnsignedTx creates a new mock instance. func NewMockUnsignedTx(ctrl *gomock.Controller) *MockUnsignedTx { mock := &MockUnsignedTx{ctrl: ctrl} mock.recorder = &MockUnsignedTxMockRecorder{mock} return mock } -// EXPECT returns an object that allows the caller to indicate expected use +// EXPECT returns an object that allows the caller to indicate expected use. func (m *MockUnsignedTx) EXPECT() *MockUnsignedTxMockRecorder { return m.recorder } -// Bytes mocks base method +// Bytes mocks base method. func (m *MockUnsignedTx) Bytes() []byte { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Bytes") @@ -43,51 +48,39 @@ func (m *MockUnsignedTx) Bytes() []byte { return ret0 } -// Bytes indicates an expected call of Bytes +// Bytes indicates an expected call of Bytes. func (mr *MockUnsignedTxMockRecorder) Bytes() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bytes", reflect.TypeOf((*MockUnsignedTx)(nil).Bytes)) } -// InitCtx mocks base method +// InitCtx mocks base method. 
func (m *MockUnsignedTx) InitCtx(arg0 *snow.Context) { m.ctrl.T.Helper() m.ctrl.Call(m, "InitCtx", arg0) } -// InitCtx indicates an expected call of InitCtx +// InitCtx indicates an expected call of InitCtx. func (mr *MockUnsignedTxMockRecorder) InitCtx(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InitCtx", reflect.TypeOf((*MockUnsignedTx)(nil).InitCtx), arg0) } -// Initialize mocks base method -func (m *MockUnsignedTx) Initialize(arg0 []byte) { - m.ctrl.T.Helper() - m.ctrl.Call(m, "Initialize", arg0) -} - -// Initialize indicates an expected call of Initialize -func (mr *MockUnsignedTxMockRecorder) Initialize(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Initialize", reflect.TypeOf((*MockUnsignedTx)(nil).Initialize), arg0) -} - -// InputIDs mocks base method -func (m *MockUnsignedTx) InputIDs() ids.Set { +// InputIDs mocks base method. +func (m *MockUnsignedTx) InputIDs() set.Set[ids.ID] { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "InputIDs") - ret0, _ := ret[0].(ids.Set) + ret0, _ := ret[0].(set.Set[ids.ID]) return ret0 } -// InputIDs indicates an expected call of InputIDs +// InputIDs indicates an expected call of InputIDs. func (mr *MockUnsignedTxMockRecorder) InputIDs() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "InputIDs", reflect.TypeOf((*MockUnsignedTx)(nil).InputIDs)) } -// Outputs mocks base method +// Outputs mocks base method. func (m *MockUnsignedTx) Outputs() []*avax.TransferableOutput { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Outputs") @@ -95,13 +88,25 @@ func (m *MockUnsignedTx) Outputs() []*avax.TransferableOutput { return ret0 } -// Outputs indicates an expected call of Outputs +// Outputs indicates an expected call of Outputs. 
func (mr *MockUnsignedTxMockRecorder) Outputs() *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Outputs", reflect.TypeOf((*MockUnsignedTx)(nil).Outputs)) } -// SyntacticVerify mocks base method +// SetBytes mocks base method. +func (m *MockUnsignedTx) SetBytes(arg0 []byte) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "SetBytes", arg0) +} + +// SetBytes indicates an expected call of SetBytes. +func (mr *MockUnsignedTxMockRecorder) SetBytes(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetBytes", reflect.TypeOf((*MockUnsignedTx)(nil).SetBytes), arg0) +} + +// SyntacticVerify mocks base method. func (m *MockUnsignedTx) SyntacticVerify(arg0 *snow.Context) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "SyntacticVerify", arg0) @@ -109,13 +114,13 @@ func (m *MockUnsignedTx) SyntacticVerify(arg0 *snow.Context) error { return ret0 } -// SyntacticVerify indicates an expected call of SyntacticVerify +// SyntacticVerify indicates an expected call of SyntacticVerify. func (mr *MockUnsignedTxMockRecorder) SyntacticVerify(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyntacticVerify", reflect.TypeOf((*MockUnsignedTx)(nil).SyntacticVerify), arg0) } -// Visit mocks base method +// Visit mocks base method. func (m *MockUnsignedTx) Visit(arg0 Visitor) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "Visit", arg0) @@ -123,7 +128,7 @@ func (m *MockUnsignedTx) Visit(arg0 Visitor) error { return ret0 } -// Visit indicates an expected call of Visit +// Visit indicates an expected call of Visit. 
func (mr *MockUnsignedTxMockRecorder) Visit(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Visit", reflect.TypeOf((*MockUnsignedTx)(nil).Visit), arg0) diff --git a/avalanchego/vms/platformvm/txs/priorities.go b/avalanchego/vms/platformvm/txs/priorities.go index 2e7e0161..fdd65d74 100644 --- a/avalanchego/vms/platformvm/txs/priorities.go +++ b/avalanchego/vms/platformvm/txs/priorities.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx.go b/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx.go index d2d829fb..fc6126aa 100644 --- a/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx.go +++ b/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -13,7 +13,7 @@ import ( ) var ( - _ UnsignedTx = &RemoveSubnetValidatorTx{} + _ UnsignedTx = (*RemoveSubnetValidatorTx)(nil) errRemovePrimaryNetworkValidator = errors.New("can't remove primary network validator with RemoveSubnetValidatorTx") ) diff --git a/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx_test.go b/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx_test.go index 5fdb9f25..6ad782af 100644 --- a/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx_test.go +++ b/avalanchego/vms/platformvm/txs/remove_subnet_validator_tx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -18,6 +18,8 @@ import ( "github.com/ava-labs/avalanchego/vms/components/verify" ) +var errInvalidSubnetAuth = errors.New("invalid subnet auth") + func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { type test struct { name string @@ -29,9 +31,8 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { } var ( - networkID = uint32(1337) - chainID = ids.GenerateTestID() - errInvalidSubnetAuth = errors.New("invalid subnet auth") + networkID = uint32(1337) + chainID = ids.GenerateTestID() ) ctx := &snow.Context{ @@ -65,8 +66,10 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { tests := []test{ { - name: "nil tx", - txFunc: func(*gomock.Controller) *RemoveSubnetValidatorTx { return nil }, + name: "nil tx", + txFunc: func(*gomock.Controller) *RemoveSubnetValidatorTx { + return nil + }, shouldErr: true, }, { diff --git a/avalanchego/vms/platformvm/txs/reward_validator_tx.go b/avalanchego/vms/platformvm/txs/reward_validator_tx.go index 1c03bcb6..d4b579f1 100644 --- a/avalanchego/vms/platformvm/txs/reward_validator_tx.go +++ b/avalanchego/vms/platformvm/txs/reward_validator_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -6,10 +6,11 @@ package txs import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" ) -var _ UnsignedTx = &RewardValidatorTx{} +var _ UnsignedTx = (*RewardValidatorTx)(nil) // RewardValidatorTx is a transaction that represents a proposal to // remove a validator that is currently validating from the validator set. 
@@ -31,14 +32,27 @@ type RewardValidatorTx struct { unsignedBytes []byte // Unsigned byte representation of this data } -func (tx *RewardValidatorTx) Initialize(unsignedBytes []byte) { +func (tx *RewardValidatorTx) SetBytes(unsignedBytes []byte) { tx.unsignedBytes = unsignedBytes } -func (tx *RewardValidatorTx) InitCtx(*snow.Context) {} -func (tx *RewardValidatorTx) Bytes() []byte { return tx.unsignedBytes } -func (tx *RewardValidatorTx) InputIDs() ids.Set { return nil } -func (tx *RewardValidatorTx) Outputs() []*avax.TransferableOutput { return nil } -func (tx *RewardValidatorTx) SyntacticVerify(*snow.Context) error { return nil } + +func (*RewardValidatorTx) InitCtx(*snow.Context) {} + +func (tx *RewardValidatorTx) Bytes() []byte { + return tx.unsignedBytes +} + +func (*RewardValidatorTx) InputIDs() set.Set[ids.ID] { + return nil +} + +func (*RewardValidatorTx) Outputs() []*avax.TransferableOutput { + return nil +} + +func (*RewardValidatorTx) SyntacticVerify(*snow.Context) error { + return nil +} func (tx *RewardValidatorTx) Visit(visitor Visitor) error { return visitor.RewardValidatorTx(tx) diff --git a/avalanchego/vms/platformvm/txs/staker_tx.go b/avalanchego/vms/platformvm/txs/staker_tx.go index 7039a42d..049d3519 100644 --- a/avalanchego/vms/platformvm/txs/staker_tx.go +++ b/avalanchego/vms/platformvm/txs/staker_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -7,6 +7,7 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/fx" ) @@ -15,20 +16,6 @@ import ( // delegation. 
type ValidatorTx interface { UnsignedTx - Validator -} - -type DelegatorTx interface { - UnsignedTx - Delegator -} - -type StakerTx interface { - UnsignedTx - Staker -} - -type Validator interface { PermissionlessStaker ValidationRewardsOwner() fx.Owner @@ -36,12 +23,18 @@ type Validator interface { Shares() uint32 } -type Delegator interface { +type DelegatorTx interface { + UnsignedTx PermissionlessStaker RewardsOwner() fx.Owner } +type StakerTx interface { + UnsignedTx + Staker +} + type PermissionlessStaker interface { Staker @@ -52,6 +45,9 @@ type PermissionlessStaker interface { type Staker interface { SubnetID() ids.ID NodeID() ids.NodeID + // PublicKey returns the BLS public key registered by this transaction. If + // there was no key registered by this transaction, it will return false. + PublicKey() (*bls.PublicKey, bool, error) StartTime() time.Time EndTime() time.Time Weight() uint64 diff --git a/avalanchego/vms/platformvm/validator/subnet_validator.go b/avalanchego/vms/platformvm/txs/subnet_validator.go similarity index 81% rename from avalanchego/vms/platformvm/validator/subnet_validator.go rename to avalanchego/vms/platformvm/txs/subnet_validator.go index 88045d1b..d9da9d31 100644 --- a/avalanchego/vms/platformvm/validator/subnet_validator.go +++ b/avalanchego/vms/platformvm/txs/subnet_validator.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package validator +package txs import ( "github.com/ava-labs/avalanchego/ids" @@ -17,7 +17,9 @@ type SubnetValidator struct { } // SubnetID is the ID of the subnet this validator is validating -func (v *SubnetValidator) SubnetID() ids.ID { return v.Subnet } +func (v *SubnetValidator) SubnetID() ids.ID { + return v.Subnet +} // Verify this validator is valid func (v *SubnetValidator) Verify() error { diff --git a/avalanchego/vms/platformvm/txs/subnet_validator_test.go b/avalanchego/vms/platformvm/txs/subnet_validator_test.go new file mode 100644 index 00000000..a38e2d8e --- /dev/null +++ b/avalanchego/vms/platformvm/txs/subnet_validator_test.go @@ -0,0 +1,38 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package txs + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/constants" +) + +func TestSubnetValidatorVerifySubnetID(t *testing.T) { + require := require.New(t) + + // Error path + { + vdr := &SubnetValidator{ + Subnet: constants.PrimaryNetworkID, + } + + require.ErrorIs(vdr.Verify(), errBadSubnetID) + } + + // Happy path + { + vdr := &SubnetValidator{ + Subnet: ids.GenerateTestID(), + Validator: Validator{ + Wght: 1, + }, + } + + require.NoError(vdr.Verify()) + } +} diff --git a/avalanchego/vms/platformvm/txs/transform_subnet_tx.go b/avalanchego/vms/platformvm/txs/transform_subnet_tx.go index 40e560b8..f540ea67 100644 --- a/avalanchego/vms/platformvm/txs/transform_subnet_tx.go +++ b/avalanchego/vms/platformvm/txs/transform_subnet_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txs @@ -15,7 +15,7 @@ import ( ) var ( - _ UnsignedTx = &TransformSubnetTx{} + _ UnsignedTx = (*TransformSubnetTx)(nil) errCantTransformPrimaryNetwork = errors.New("cannot transform primary network") errEmptyAssetID = errors.New("empty asset ID is not valid") diff --git a/avalanchego/vms/platformvm/txs/transform_subnet_tx_test.go b/avalanchego/vms/platformvm/txs/transform_subnet_tx_test.go index 1a1abd01..82128bfc 100644 --- a/avalanchego/vms/platformvm/txs/transform_subnet_tx_test.go +++ b/avalanchego/vms/platformvm/txs/transform_subnet_tx_test.go @@ -1,10 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs import ( - "errors" "testing" "github.com/golang/mock/gomock" @@ -27,9 +26,8 @@ func TestTransformSubnetTxSyntacticVerify(t *testing.T) { } var ( - networkID = uint32(1337) - chainID = ids.GenerateTestID() - errInvalidSubnetAuth = errors.New("invalid subnet auth") + networkID = uint32(1337) + chainID = ids.GenerateTestID() ) ctx := &snow.Context{ @@ -405,18 +403,16 @@ func TestTransformSubnetTxSyntacticVerify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() tx := tt.txFunc(ctrl) err := tx.SyntacticVerify(ctx) - require.ErrorIs(err, tt.err) + require.ErrorIs(t, err, tt.err) }) } t.Run("invalid BaseTx", func(t *testing.T) { - require := require.New(t) tx := &TransformSubnetTx{ BaseTx: invalidBaseTx, Subnet: ids.GenerateTestID(), @@ -435,6 +431,6 @@ func TestTransformSubnetTxSyntacticVerify(t *testing.T) { UptimeRequirement: reward.PercentDenominator, } err := tx.SyntacticVerify(ctx) - require.Error(err) + require.Error(t, err) }) } diff --git a/avalanchego/vms/platformvm/txs/tx.go b/avalanchego/vms/platformvm/txs/tx.go index fac73c71..a6df33b8 100644 --- a/avalanchego/vms/platformvm/txs/tx.go +++ 
b/avalanchego/vms/platformvm/txs/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -38,37 +38,60 @@ type Tx struct { func NewSigned( unsigned UnsignedTx, c codec.Manager, - signers [][]*crypto.PrivateKeySECP256K1R, + signers [][]*secp256k1.PrivateKey, ) (*Tx, error) { res := &Tx{Unsigned: unsigned} return res, res.Sign(c, signers) } +func (tx *Tx) Initialize(c codec.Manager) error { + signedBytes, err := c.Marshal(Version, tx) + if err != nil { + return fmt.Errorf("couldn't marshal ProposalTx: %w", err) + } + + unsignedBytesLen, err := c.Size(Version, &tx.Unsigned) + if err != nil { + return fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) + } + + unsignedBytes := signedBytes[:unsignedBytesLen] + tx.SetBytes(unsignedBytes, signedBytes) + return nil +} + +func (tx *Tx) SetBytes(unsignedBytes, signedBytes []byte) { + tx.Unsigned.SetBytes(unsignedBytes) + tx.bytes = signedBytes + tx.id = hashing.ComputeHash256Array(signedBytes) +} + // Parse signed tx starting from its byte representation. // Note: We explicitly pass the codec in Parse since we may need to parse -// P-Chain genesis txs whose length exceed the max length of txs.Codec. +// P-Chain genesis txs whose length exceed the max length of txs.Codec. 
func Parse(c codec.Manager, signedBytes []byte) (*Tx, error) { tx := &Tx{} if _, err := c.Unmarshal(signedBytes, tx); err != nil { return nil, fmt.Errorf("couldn't parse tx: %w", err) } - unsignedBytes, err := c.Marshal(Version, &tx.Unsigned) + + unsignedBytesLen, err := c.Size(Version, &tx.Unsigned) if err != nil { - return nil, fmt.Errorf("couldn't marshal UnsignedTx: %w", err) + return nil, fmt.Errorf("couldn't calculate UnsignedTx marshal length: %w", err) } - tx.Initialize(unsignedBytes, signedBytes) + + unsignedBytes := signedBytes[:unsignedBytesLen] + tx.SetBytes(unsignedBytes, signedBytes) return tx, nil } -func (tx *Tx) Initialize(unsignedBytes, signedBytes []byte) { - tx.Unsigned.Initialize(unsignedBytes) - - tx.bytes = signedBytes - tx.id = hashing.ComputeHash256Array(signedBytes) +func (tx *Tx) Bytes() []byte { + return tx.bytes } -func (tx *Tx) Bytes() []byte { return tx.bytes } -func (tx *Tx) ID() ids.ID { return tx.id } +func (tx *Tx) ID() ids.ID { + return tx.id +} // UTXOs returns the UTXOs transaction is producing. func (tx *Tx) UTXOs() []*avax.UTXO { @@ -100,8 +123,8 @@ func (tx *Tx) SyntacticVerify(ctx *snow.Context) error { // Sign this transaction with the provided signers // Note: We explicitly pass the codec in Sign since we may need to sign P-Chain -// genesis txs whose length exceed the max length of txs.Codec. -func (tx *Tx) Sign(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R) error { +// genesis txs whose length exceed the max length of txs.Codec. 
+func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { unsignedBytes, err := c.Marshal(Version, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't marshal UnsignedTx: %w", err) @@ -111,7 +134,7 @@ func (tx *Tx) Sign(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R) er hash := hashing.ComputeHash256(unsignedBytes) for _, keys := range signers { cred := &secp256k1fx.Credential{ - Sigs: make([][crypto.SECP256K1RSigLen]byte, len(keys)), + Sigs: make([][secp256k1.SignatureLen]byte, len(keys)), } for i, key := range keys { sig, err := key.SignHash(hash) // Sign hash @@ -127,6 +150,6 @@ func (tx *Tx) Sign(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R) er if err != nil { return fmt.Errorf("couldn't marshal ProposalTx: %w", err) } - tx.Initialize(unsignedBytes, signedBytes) + tx.SetBytes(unsignedBytes, signedBytes) return nil } diff --git a/avalanchego/vms/platformvm/txs/txheap/by_age.go b/avalanchego/vms/platformvm/txs/txheap/by_age.go index a4da6778..a445822d 100644 --- a/avalanchego/vms/platformvm/txs/txheap/by_age.go +++ b/avalanchego/vms/platformvm/txs/txheap/by_age.go @@ -1,9 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txheap -var _ Heap = &byAge{} +var _ Heap = (*byAge)(nil) type byAge struct { txHeap diff --git a/avalanchego/vms/platformvm/txs/txheap/by_end_time.go b/avalanchego/vms/platformvm/txs/txheap/by_end_time.go index 9bc4e3f2..2b0cbd8d 100644 --- a/avalanchego/vms/platformvm/txs/txheap/by_end_time.go +++ b/avalanchego/vms/platformvm/txs/txheap/by_end_time.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txheap @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ TimedHeap = &byEndTime{} +var _ TimedHeap = (*byEndTime)(nil) type byEndTime struct { txHeap diff --git a/avalanchego/vms/platformvm/txs/txheap/by_end_time_test.go b/avalanchego/vms/platformvm/txs/txheap/by_end_time_test.go index 1c8d484a..05995683 100644 --- a/avalanchego/vms/platformvm/txs/txheap/by_end_time_test.go +++ b/avalanchego/vms/platformvm/txs/txheap/by_end_time_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txheap @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -23,7 +22,7 @@ func TestByStopTime(t *testing.T) { baseTime := time.Now() utx0 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{0}, Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 1, @@ -31,11 +30,11 @@ func TestByStopTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx0 := &txs.Tx{Unsigned: utx0} - err := tx0.Sign(txs.Codec, nil) + err := tx0.Initialize(txs.Codec) require.NoError(err) utx1 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{1}, Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 2, @@ -43,11 +42,11 @@ func TestByStopTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx1 := &txs.Tx{Unsigned: utx1} - err = tx1.Sign(txs.Codec, nil) + err = tx1.Initialize(txs.Codec) require.NoError(err) utx2 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{1}, Start: uint64(baseTime.Unix()), End: uint64(baseTime.Unix()) + 3, @@ -55,7 +54,7 @@ 
func TestByStopTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx2 := &txs.Tx{Unsigned: utx2} - err = tx2.Sign(txs.Codec, nil) + err = tx2.Initialize(txs.Codec) require.NoError(err) txHeap.Add(tx2) diff --git a/avalanchego/vms/platformvm/txs/txheap/by_start_time.go b/avalanchego/vms/platformvm/txs/txheap/by_start_time.go index a816ecff..31834cf0 100644 --- a/avalanchego/vms/platformvm/txs/txheap/by_start_time.go +++ b/avalanchego/vms/platformvm/txs/txheap/by_start_time.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txheap @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ TimedHeap = &byStartTime{} +var _ TimedHeap = (*byStartTime)(nil) type TimedHeap interface { Heap diff --git a/avalanchego/vms/platformvm/txs/txheap/by_start_time_test.go b/avalanchego/vms/platformvm/txs/txheap/by_start_time_test.go index d8135346..fe9180d0 100644 --- a/avalanchego/vms/platformvm/txs/txheap/by_start_time_test.go +++ b/avalanchego/vms/platformvm/txs/txheap/by_start_time_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txheap @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -23,7 +22,7 @@ func TestByStartTime(t *testing.T) { baseTime := time.Now() utx0 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{0}, Start: uint64(baseTime.Unix()) + 1, End: uint64(baseTime.Unix()) + 1, @@ -31,11 +30,11 @@ func TestByStartTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx0 := &txs.Tx{Unsigned: utx0} - err := tx0.Sign(txs.Codec, nil) + err := tx0.Initialize(txs.Codec) require.NoError(err) utx1 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{1}, Start: uint64(baseTime.Unix()) + 2, End: uint64(baseTime.Unix()) + 2, @@ -43,11 +42,11 @@ func TestByStartTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx1 := &txs.Tx{Unsigned: utx1} - err = tx1.Sign(txs.Codec, nil) + err = tx1.Initialize(txs.Codec) require.NoError(err) utx2 := &txs.AddValidatorTx{ - Validator: validator.Validator{ + Validator: txs.Validator{ NodeID: ids.NodeID{1}, Start: uint64(baseTime.Unix()) + 3, End: uint64(baseTime.Unix()) + 3, @@ -55,7 +54,7 @@ func TestByStartTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx2 := &txs.Tx{Unsigned: utx2} - err = tx2.Sign(txs.Codec, nil) + err = tx2.Initialize(txs.Codec) require.NoError(err) txHeap.Add(tx2) diff --git a/avalanchego/vms/platformvm/txs/txheap/heap.go b/avalanchego/vms/platformvm/txs/txheap/heap.go index 62d94f42..4b6ba686 100644 --- a/avalanchego/vms/platformvm/txs/txheap/heap.go +++ b/avalanchego/vms/platformvm/txs/txheap/heap.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txheap @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ Heap = &txHeap{} +var _ Heap = (*txHeap)(nil) type Heap interface { Add(tx *txs.Tx) @@ -41,7 +41,9 @@ func (h *txHeap) initialize(self heap.Interface) { h.txIDToIndex = make(map[ids.ID]int) } -func (h *txHeap) Add(tx *txs.Tx) { heap.Push(h.self, tx) } +func (h *txHeap) Add(tx *txs.Tx) { + heap.Push(h.self, tx) +} func (h *txHeap) Get(txID ids.ID) *txs.Tx { index, exists := h.txIDToIndex[txID] @@ -67,11 +69,17 @@ func (h *txHeap) Remove(txID ids.ID) *txs.Tx { return heap.Remove(h.self, index).(*txs.Tx) } -func (h *txHeap) Peek() *txs.Tx { return h.txs[0].tx } +func (h *txHeap) Peek() *txs.Tx { + return h.txs[0].tx +} -func (h *txHeap) RemoveTop() *txs.Tx { return heap.Pop(h.self).(*txs.Tx) } +func (h *txHeap) RemoveTop() *txs.Tx { + return heap.Pop(h.self).(*txs.Tx) +} -func (h *txHeap) Len() int { return len(h.txs) } +func (h *txHeap) Len() int { + return len(h.txs) +} func (h *txHeap) Swap(i, j int) { // The follow "i"s and "j"s are intentionally swapped to perform the actual diff --git a/avalanchego/vms/platformvm/txs/txheap/with_metrics.go b/avalanchego/vms/platformvm/txs/txheap/with_metrics.go index d652c116..60ab4f93 100644 --- a/avalanchego/vms/platformvm/txs/txheap/with_metrics.go +++ b/avalanchego/vms/platformvm/txs/txheap/with_metrics.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package txheap @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ Heap = &withMetrics{} +var _ Heap = (*withMetrics)(nil) type withMetrics struct { Heap diff --git a/avalanchego/vms/platformvm/txs/unsigned_tx.go b/avalanchego/vms/platformvm/txs/unsigned_tx.go index 3a40aa03..7fe1702b 100644 --- a/avalanchego/vms/platformvm/txs/unsigned_tx.go +++ b/avalanchego/vms/platformvm/txs/unsigned_tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs @@ -6,6 +6,7 @@ package txs import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -16,10 +17,10 @@ type UnsignedTx interface { // avm. snow.ContextInitializable secp256k1fx.UnsignedTx - Initialize(unsignedBytes []byte) + SetBytes(unsignedBytes []byte) // InputIDs returns the set of inputs this transaction consumes - InputIDs() ids.Set + InputIDs() set.Set[ids.ID] Outputs() []*avax.TransferableOutput diff --git a/avalanchego/vms/platformvm/validator/validator.go b/avalanchego/vms/platformvm/txs/validator.go similarity index 75% rename from avalanchego/vms/platformvm/validator/validator.go rename to avalanchego/vms/platformvm/txs/validator.go index 9ae95eac..79163392 100644 --- a/avalanchego/vms/platformvm/validator/validator.go +++ b/avalanchego/vms/platformvm/txs/validator.go @@ -1,7 +1,7 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package validator +package txs import ( "errors" @@ -30,20 +30,25 @@ type Validator struct { Wght uint64 `serialize:"true" json:"weight"` } -// ID returns the node ID of the validator -func (v *Validator) ID() ids.NodeID { return v.NodeID } - // StartTime is the time that this validator will enter the validator set -func (v *Validator) StartTime() time.Time { return time.Unix(int64(v.Start), 0) } +func (v *Validator) StartTime() time.Time { + return time.Unix(int64(v.Start), 0) +} // EndTime is the time that this validator will leave the validator set -func (v *Validator) EndTime() time.Time { return time.Unix(int64(v.End), 0) } +func (v *Validator) EndTime() time.Time { + return time.Unix(int64(v.End), 0) +} // Duration is the amount of time that this validator will be in the validator set -func (v *Validator) Duration() time.Duration { return v.EndTime().Sub(v.StartTime()) } +func (v *Validator) Duration() time.Duration { + return v.EndTime().Sub(v.StartTime()) +} // Weight is this validator's weight when sampling -func (v *Validator) Weight() uint64 { return v.Wght } +func (v *Validator) Weight() uint64 { + return v.Wght +} // Verify validates the ID for this validator func (v *Validator) Verify() error { diff --git a/avalanchego/vms/platformvm/txs/validator_test.go b/avalanchego/vms/platformvm/txs/validator_test.go new file mode 100644 index 00000000..047c7180 --- /dev/null +++ b/avalanchego/vms/platformvm/txs/validator_test.go @@ -0,0 +1,91 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package txs + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" +) + +const defaultWeight = 10000 + +// each key controls an address that has [defaultBalance] AVAX at genesis +var keys = secp256k1.TestKeys() + +func TestValidatorBoundedBy(t *testing.T) { + require := require.New(t) + + // case 1: a starts, a finishes, b starts, b finishes + aStartTime := uint64(0) + aEndTIme := uint64(1) + a := &Validator{ + NodeID: ids.NodeID(keys[0].PublicKey().Address()), + Start: aStartTime, + End: aEndTIme, + Wght: defaultWeight, + } + + bStartTime := uint64(2) + bEndTime := uint64(3) + b := &Validator{ + NodeID: ids.NodeID(keys[0].PublicKey().Address()), + Start: bStartTime, + End: bEndTime, + Wght: defaultWeight, + } + require.False(a.BoundedBy(b.StartTime(), b.EndTime())) + require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + + // case 2: a starts, b starts, a finishes, b finishes + a.Start = 0 + b.Start = 1 + a.End = 2 + b.End = 3 + require.False(a.BoundedBy(b.StartTime(), b.EndTime())) + require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + + // case 3: a starts, b starts, b finishes, a finishes + a.Start = 0 + b.Start = 1 + b.End = 2 + a.End = 3 + require.False(a.BoundedBy(b.StartTime(), b.EndTime())) + require.True(b.BoundedBy(a.StartTime(), a.EndTime())) + + // case 4: b starts, a starts, a finishes, b finishes + b.Start = 0 + a.Start = 1 + a.End = 2 + b.End = 3 + require.True(a.BoundedBy(b.StartTime(), b.EndTime())) + require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + + // case 5: b starts, b finishes, a starts, a finishes + b.Start = 0 + b.End = 1 + a.Start = 2 + a.End = 3 + require.False(a.BoundedBy(b.StartTime(), b.EndTime())) + require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + + // case 6: b starts, a starts, b finishes, a finishes + b.Start = 0 + a.Start = 1 + b.End = 2 + a.End = 3 + require.False(a.BoundedBy(b.StartTime(), 
b.EndTime())) + require.False(b.BoundedBy(a.StartTime(), a.EndTime())) + + // case 3: a starts, b starts, b finishes, a finishes + a.Start = 0 + b.Start = 0 + b.End = 1 + a.End = 1 + require.True(a.BoundedBy(b.StartTime(), b.EndTime())) + require.True(b.BoundedBy(a.StartTime(), a.EndTime())) +} diff --git a/avalanchego/vms/platformvm/txs/visitor.go b/avalanchego/vms/platformvm/txs/visitor.go index b7398c43..18455d81 100644 --- a/avalanchego/vms/platformvm/txs/visitor.go +++ b/avalanchego/vms/platformvm/txs/visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package txs diff --git a/avalanchego/vms/platformvm/utxo/handler.go b/avalanchego/vms/platformvm/utxo/handler.go index bf61386a..90206bae 100644 --- a/avalanchego/vms/platformvm/utxo/handler.go +++ b/avalanchego/vms/platformvm/utxo/handler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package utxo @@ -11,9 +11,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -25,40 +26,14 @@ import ( ) var ( - _ Handler = &handler{} + _ Handler = (*handler)(nil) errCantSign = errors.New("can't sign") errLockedFundsNotMarkedAsLocked = errors.New("locked funds not marked as locked") ) -// Removes the UTXOs consumed by [ins] from the UTXO set -func Consume(utxoDB state.UTXODeleter, ins []*avax.TransferableInput) { - for _, input := range ins { - utxoDB.DeleteUTXO(input.InputID()) - } -} - -// Adds the UTXOs created by [outs] to the UTXO set. -// [txID] is the ID of the tx that created [outs]. -func Produce( - utxoDB state.UTXOAdder, - txID ids.ID, - outs []*avax.TransferableOutput, -) { - for index, out := range outs { - utxoDB.AddUTXO(&avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: uint32(index), - }, - Asset: out.Asset, - Out: out.Output(), - }) - } -} - // TODO: Stake and Authorize should be replaced by similar methods in the -// P-chain wallet +// P-chain wallet type Spender interface { // Spend the provided amount while deducting the provided fee. 
// Arguments: @@ -74,7 +49,8 @@ type Spender interface { // the staking period // - [signers] the proof of ownership of the funds being moved Spend( - keys []*crypto.PrivateKeySECP256K1R, + utxoReader avax.UTXOReader, + keys []*secp256k1.PrivateKey, amount uint64, fee uint64, changeAddr ids.ShortID, @@ -82,7 +58,7 @@ type Spender interface { []*avax.TransferableInput, // inputs []*avax.TransferableOutput, // returnedOutputs []*avax.TransferableOutput, // stakedOutputs - [][]*crypto.PrivateKeySECP256K1R, // signers + [][]*secp256k1.PrivateKey, // signers error, ) @@ -91,10 +67,10 @@ type Spender interface { Authorize( state state.Chain, subnetID ids.ID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, ) ( verify.Verifiable, // Input that names owners - []*crypto.PrivateKeySECP256K1R, // Keys that prove ownership + []*secp256k1.PrivateKey, // Keys that prove ownership error, ) } @@ -112,7 +88,7 @@ type Verifier interface { // Note: [unlockedProduced] is modified by this method. 
VerifySpend( tx txs.UnsignedTx, - utxoDB state.UTXOGetter, + utxoDB avax.UTXOGetter, ins []*avax.TransferableInput, outs []*avax.TransferableOutput, creds []verify.Verifiable, @@ -148,26 +124,24 @@ type Handler interface { func NewHandler( ctx *snow.Context, clk *mockable.Clock, - utxoReader avax.UTXOReader, fx fx.Fx, ) Handler { return &handler{ - ctx: ctx, - clk: clk, - utxosReader: utxoReader, - fx: fx, + ctx: ctx, + clk: clk, + fx: fx, } } type handler struct { - ctx *snow.Context - clk *mockable.Clock - utxosReader avax.UTXOReader - fx fx.Fx + ctx *snow.Context + clk *mockable.Clock + fx fx.Fx } func (h *handler) Spend( - keys []*crypto.PrivateKeySECP256K1R, + utxoReader avax.UTXOReader, + keys []*secp256k1.PrivateKey, amount uint64, fee uint64, changeAddr ids.ShortID, @@ -175,14 +149,14 @@ func (h *handler) Spend( []*avax.TransferableInput, // inputs []*avax.TransferableOutput, // returnedOutputs []*avax.TransferableOutput, // stakedOutputs - [][]*crypto.PrivateKeySECP256K1R, // signers + [][]*secp256k1.PrivateKey, // signers error, ) { - addrs := ids.NewShortSet(len(keys)) // The addresses controlled by [keys] + addrs := set.NewSet[ids.ShortID](len(keys)) // The addresses controlled by [keys] for _, key := range keys { addrs.Add(key.PublicKey().Address()) } - utxos, err := avax.GetAllUTXOs(h.utxosReader, addrs) // The UTXOs controlled by [keys] + utxos, err := avax.GetAllUTXOs(utxoReader, addrs) // The UTXOs controlled by [keys] if err != nil { return nil, nil, nil, nil, fmt.Errorf("couldn't get UTXOs: %w", err) } @@ -195,7 +169,7 @@ func (h *handler) Spend( ins := []*avax.TransferableInput{} returnedOuts := []*avax.TransferableOutput{} stakedOuts := []*avax.TransferableOutput{} - signers := [][]*crypto.PrivateKeySECP256K1R{} + signers := [][]*secp256k1.PrivateKey{} // Amount of AVAX that has been staked amountStaked := uint64(0) @@ -248,7 +222,7 @@ func (h *handler) Spend( remainingValue := in.Amount() // Stake any value that should be staked - 
amountToStake := math.Min64( + amountToStake := math.Min( amount-amountStaked, // Amount we still need to stake remainingValue, // Amount available to stake ) @@ -300,9 +274,9 @@ func (h *handler) Spend( amountBurned := uint64(0) for _, utxo := range utxos { - // If we have consumed more AVAX than we are trying to stake, and we - // have burned more AVAX then we need to, then we have no need to - // consume more AVAX + // If we have consumed more AVAX than we are trying to stake, + // and we have burned more AVAX than we need to, + // then we have no need to consume more AVAX if amountBurned >= fee && amountStaked >= amount { break } @@ -339,7 +313,7 @@ func (h *handler) Spend( remainingValue := in.Amount() // Burn any value that should be burned - amountToBurn := math.Min64( + amountToBurn := math.Min( fee-amountBurned, // Amount we still need to burn remainingValue, // Amount available to burn ) @@ -347,7 +321,7 @@ func (h *handler) Spend( remainingValue -= amountToBurn // Stake any value that should be staked - amountToStake := math.Min64( + amountToStake := math.Min( amount-amountStaked, // Amount we still need to stake remainingValue, // Amount available to stake ) @@ -411,10 +385,10 @@ func (h *handler) Spend( func (h *handler) Authorize( state state.Chain, subnetID ids.ID, - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, ) ( verify.Verifiable, // Input that names owners - []*crypto.PrivateKeySECP256K1R, // Keys that prove ownership + []*secp256k1.PrivateKey, // Keys that prove ownership error, ) { subnetTx, _, err := state.GetTx(subnetID) @@ -453,7 +427,7 @@ func (h *handler) Authorize( func (h *handler) VerifySpend( tx txs.UnsignedTx, - utxoDB state.UTXOGetter, + utxoDB avax.UTXOGetter, ins []*avax.TransferableInput, outs []*avax.TransferableOutput, creds []verify.Verifiable, diff --git a/avalanchego/vms/platformvm/utxo/handler_test.go b/avalanchego/vms/platformvm/utxo/handler_test.go index da538ab3..d5a2759e 100644 --- 
a/avalanchego/vms/platformvm/utxo/handler_test.go +++ b/avalanchego/vms/platformvm/utxo/handler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package utxo @@ -8,10 +8,11 @@ import ( "testing" "time" - "github.com/ava-labs/avalanchego/database/memdb" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -20,34 +21,26 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ txs.UnsignedTx = &dummyUnsignedTx{} +var _ txs.UnsignedTx = (*dummyUnsignedTx)(nil) type dummyUnsignedTx struct { txs.BaseTx } -func (du *dummyUnsignedTx) Visit(txs.Visitor) error { +func (*dummyUnsignedTx) Visit(txs.Visitor) error { return nil } func TestVerifySpendUTXOs(t *testing.T) { fx := &secp256k1fx.Fx{} - if err := fx.InitializeVM(&secp256k1fx.TestVM{}); err != nil { - t.Fatal(err) - } - if err := fx.Bootstrapped(); err != nil { - t.Fatal(err) - } + require.NoError(t, fx.InitializeVM(&secp256k1fx.TestVM{})) + require.NoError(t, fx.Bootstrapped()) h := &handler{ ctx: snow.DefaultContextTest(), clk: &mockable.Clock{}, - utxosReader: avax.NewUTXOState( - memdb.New(), - txs.Codec, - ), - fx: fx, + fx: fx, } // The handler time during a test, unless [chainTimestamp] is set @@ -56,7 +49,7 @@ func TestVerifySpendUTXOs(t *testing.T) { unsignedTx := dummyUnsignedTx{ BaseTx: txs.BaseTx{}, } - unsignedTx.Initialize([]byte{0}) + unsignedTx.SetBytes([]byte{0}) customAssetID := ids.GenerateTestID() @@ -323,7 +316,7 @@ func TestVerifySpendUTXOs(t *testing.T) { outs: []*avax.TransferableOutput{}, creds: 
[]verify.Verifiable{ &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ {}, }, }, @@ -1094,6 +1087,7 @@ func TestVerifySpendUTXOs(t *testing.T) { h.clk.Set(now) t.Run(test.description, func(t *testing.T) { + require := require.New(t) err := h.VerifySpendUTXOs( &unsignedTx, test.utxos, @@ -1103,10 +1097,10 @@ func TestVerifySpendUTXOs(t *testing.T) { test.producedAmounts, ) - if err == nil && test.shouldErr { - t.Fatalf("expected error but got none") - } else if err != nil && !test.shouldErr { - t.Fatalf("unexpected error: %s", err) + if test.shouldErr { + require.Error(err) + } else { + require.NoError(err) } }) } diff --git a/avalanchego/vms/platformvm/utxo/mock_verifier.go b/avalanchego/vms/platformvm/utxo/mock_verifier.go index 6dc2b57a..904a1185 100644 --- a/avalanchego/vms/platformvm/utxo/mock_verifier.go +++ b/avalanchego/vms/platformvm/utxo/mock_verifier.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/platformvm/utxo (interfaces: Verifier) @@ -10,7 +13,6 @@ import ( ids "github.com/ava-labs/avalanchego/ids" avax "github.com/ava-labs/avalanchego/vms/components/avax" verify "github.com/ava-labs/avalanchego/vms/components/verify" - state "github.com/ava-labs/avalanchego/vms/platformvm/state" txs "github.com/ava-labs/avalanchego/vms/platformvm/txs" gomock "github.com/golang/mock/gomock" ) @@ -39,7 +41,7 @@ func (m *MockVerifier) EXPECT() *MockVerifierMockRecorder { } // VerifySpend mocks base method. 
-func (m *MockVerifier) VerifySpend(arg0 txs.UnsignedTx, arg1 state.UTXOGetter, arg2 []*avax.TransferableInput, arg3 []*avax.TransferableOutput, arg4 []verify.Verifiable, arg5 map[ids.ID]uint64) error { +func (m *MockVerifier) VerifySpend(arg0 txs.UnsignedTx, arg1 avax.UTXOGetter, arg2 []*avax.TransferableInput, arg3 []*avax.TransferableOutput, arg4 []verify.Verifiable, arg5 map[ids.ID]uint64) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "VerifySpend", arg0, arg1, arg2, arg3, arg4, arg5) ret0, _ := ret[0].(error) diff --git a/avalanchego/vms/platformvm/validator/validator_test.go b/avalanchego/vms/platformvm/validator/validator_test.go deleted file mode 100644 index 8175c5a7..00000000 --- a/avalanchego/vms/platformvm/validator/validator_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package validator - -import ( - "errors" - "testing" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" -) - -const defaultWeight = 10000 - -var ( - errCalculatedSubsetWrong = errors.New("incorrectly calculated whether one duration was subset of other") - - // each key controls an address that has [defaultBalance] AVAX at genesis - keys = crypto.BuildTestKeys() -) - -func TestValidatorBoundedBy(t *testing.T) { - // case 1: a starts, a finishes, b starts, b finishes - aStartTime := uint64(0) - aEndTIme := uint64(1) - a := &Validator{ - NodeID: ids.NodeID(keys[0].PublicKey().Address()), - Start: aStartTime, - End: aEndTIme, - Wght: defaultWeight, - } - - bStartTime := uint64(2) - bEndTime := uint64(3) - b := &Validator{ - NodeID: ids.NodeID(keys[0].PublicKey().Address()), - Start: bStartTime, - End: bEndTime, - Wght: defaultWeight, - } - - if a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { - t.Fatal(errCalculatedSubsetWrong) - } - - // case 2: a starts, b starts, a finishes, b finishes - a.Start = 0 - 
b.Start = 1 - a.End = 2 - b.End = 3 - if a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { - t.Fatal(errCalculatedSubsetWrong) - } - - // case 3: a starts, b starts, b finishes, a finishes - a.Start = 0 - b.Start = 1 - b.End = 2 - a.End = 3 - if a.BoundedBy(b.StartTime(), b.EndTime()) || !b.BoundedBy(a.StartTime(), a.EndTime()) { - t.Fatal(errCalculatedSubsetWrong) - } - - // case 4: b starts, a starts, a finishes, b finishes - b.Start = 0 - a.Start = 1 - a.End = 2 - b.End = 3 - if !a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { - t.Fatal(errCalculatedSubsetWrong) - } - - // case 5: b starts, b finishes, a starts, a finishes - b.Start = 0 - b.End = 1 - a.Start = 2 - a.End = 3 - if a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { - t.Fatal(errCalculatedSubsetWrong) - } - - // case 6: b starts, a starts, b finishes, a finishes - b.Start = 0 - a.Start = 1 - b.End = 2 - a.End = 3 - if a.BoundedBy(b.StartTime(), b.EndTime()) || b.BoundedBy(a.StartTime(), a.EndTime()) { - t.Fatal(errCalculatedSubsetWrong) - } - - // case 3: a starts, b starts, b finishes, a finishes - a.Start = 0 - b.Start = 0 - b.End = 1 - a.End = 1 - if !a.BoundedBy(b.StartTime(), b.EndTime()) || !b.BoundedBy(a.StartTime(), a.EndTime()) { - t.Fatal(errCalculatedSubsetWrong) - } -} diff --git a/avalanchego/vms/platformvm/vm.go b/avalanchego/vms/platformvm/vm.go index 68709008..26aed61d 100644 --- a/avalanchego/vms/platformvm/vm.go +++ b/avalanchego/vms/platformvm/vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( + "context" "errors" "fmt" "time" @@ -38,6 +39,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" + "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" "github.com/ava-labs/avalanchego/vms/platformvm/reward" @@ -54,22 +56,23 @@ import ( ) const ( - validatorSetsCacheSize = 64 + validatorSetsCacheSize = 512 maxRecentlyAcceptedWindowSize = 256 recentlyAcceptedWindowTTL = 5 * time.Minute ) var ( - _ block.ChainVM = &VM{} - _ secp256k1fx.VM = &VM{} - _ validators.State = &VM{} + _ block.ChainVM = (*VM)(nil) + _ secp256k1fx.VM = (*VM)(nil) + _ validators.State = (*VM)(nil) + _ validators.SubnetConnector = (*VM)(nil) - errWrongCacheType = errors.New("unexpectedly cached type") errMissingValidatorSet = errors.New("missing validator set") + errMissingValidator = errors.New("missing validator") ) type VM struct { - Factory + config.Config blockbuilder.Builder metrics metrics.Metrics @@ -90,48 +93,48 @@ type VM struct { codecRegistry codec.Registry // Bootstrapped remembers if this chain has finished bootstrapping or not - bootstrapped utils.AtomicBool + bootstrapped utils.Atomic[bool] - // Maps caches for each subnet that is currently whitelisted. + // Maps caches for each subnet that is currently tracked. // Key: Subnet ID // Value: cache mapping height -> validator set map - validatorSetCaches map[ids.ID]cache.Cacher + validatorSetCaches map[ids.ID]cache.Cacher[uint64, map[ids.NodeID]*validators.GetValidatorOutput] // sliding window of blocks that were recently accepted recentlyAccepted window.Window[ids.ID] - txBuilder txbuilder.Builder - txExecutorBackend *txexecutor.Backend - manager blockexecutor.Manager + txBuilder txbuilder.Builder + manager blockexecutor.Manager } // Initialize this blockchain. 
// [vm.ChainManager] and [vm.vdrMgr] must be set before this function is called. func (vm *VM) Initialize( - ctx *snow.Context, + ctx context.Context, + chainCtx *snow.Context, dbManager manager.Manager, genesisBytes []byte, - upgradeBytes []byte, - configBytes []byte, + _ []byte, + _ []byte, toEngine chan<- common.Message, _ []*common.Fx, appSender common.AppSender, ) error { - ctx.Log.Verbo("initializing platform chain") + chainCtx.Log.Verbo("initializing platform chain") registerer := prometheus.NewRegistry() - if err := ctx.Metrics.Register(registerer); err != nil { + if err := chainCtx.Metrics.Register(registerer); err != nil { return err } // Initialize metrics as soon as possible var err error - vm.metrics, err = metrics.New("", registerer, vm.WhitelistedSubnets) + vm.metrics, err = metrics.New("", registerer, vm.TrackedSubnets) if err != nil { return fmt.Errorf("failed to initialize metrics: %w", err) } - vm.ctx = ctx + vm.ctx = chainCtx vm.dbManager = dbManager vm.codecRegistry = linearcodec.NewDefault() @@ -140,7 +143,7 @@ func (vm *VM) Initialize( return err } - vm.validatorSetCaches = make(map[ids.ID]cache.Cacher) + vm.validatorSetCaches = make(map[ids.ID]cache.Cacher[uint64, map[ids.NodeID]*validators.GetValidatorOutput]) vm.recentlyAccepted = window.New[ids.ID]( window.Config{ Clock: &vm.clock, @@ -158,15 +161,16 @@ func (vm *VM) Initialize( vm.ctx, vm.metrics, rewards, + &vm.bootstrapped, ) if err != nil { return err } - vm.atomicUtxosManager = avax.NewAtomicUTXOManager(ctx.SharedMemory, txs.Codec) - utxoHandler := utxo.NewHandler(vm.ctx, &vm.clock, vm.state, vm.fx) + vm.atomicUtxosManager = avax.NewAtomicUTXOManager(chainCtx.SharedMemory, txs.Codec) + utxoHandler := utxo.NewHandler(vm.ctx, &vm.clock, vm.fx) vm.uptimeManager = uptime.NewManager(vm.state) - vm.UptimeLockedCalculator.SetCalculator(&vm.bootstrapped, &ctx.Lock, vm.uptimeManager) + vm.UptimeLockedCalculator.SetCalculator(&vm.bootstrapped, &chainCtx.Lock, vm.uptimeManager) vm.txBuilder = 
txbuilder.New( vm.ctx, @@ -178,7 +182,7 @@ func (vm *VM) Initialize( utxoHandler, ) - vm.txExecutorBackend = &txexecutor.Backend{ + txExecutorBackend := &txexecutor.Backend{ Config: &vm.Config, Ctx: vm.ctx, Clk: &vm.clock, @@ -200,22 +204,18 @@ func (vm *VM) Initialize( mempool, vm.metrics, vm.state, - vm.txExecutorBackend, + txExecutorBackend, vm.recentlyAccepted, ) vm.Builder = blockbuilder.New( mempool, vm.txBuilder, - vm.txExecutorBackend, + txExecutorBackend, vm.manager, toEngine, appSender, ) - if err := vm.updateValidators(); err != nil { - return fmt.Errorf("failed to update validator sets: %w", err) - } - // Create all of the chains that the database says exist if err := vm.initBlockchains(); err != nil { return fmt.Errorf( @@ -225,10 +225,10 @@ func (vm *VM) Initialize( } lastAcceptedID := vm.state.GetLastAccepted() - ctx.Log.Info("initializing last accepted", + chainCtx.Log.Info("initializing last accepted", zap.Stringer("blkID", lastAcceptedID), ) - return vm.SetPreference(lastAcceptedID) + return vm.SetPreference(ctx, lastAcceptedID) } // Create all chains that exist that this node validates. 
@@ -238,7 +238,7 @@ func (vm *VM) initBlockchains() error { } if vm.StakingEnabled { - for subnetID := range vm.WhitelistedSubnets { + for subnetID := range vm.TrackedSubnets { if err := vm.createSubnet(subnetID); err != nil { return err } @@ -275,35 +275,39 @@ func (vm *VM) createSubnet(subnetID ids.ID) error { // onBootstrapStarted marks this VM as bootstrapping func (vm *VM) onBootstrapStarted() error { - vm.bootstrapped.SetValue(false) + vm.bootstrapped.Set(false) return vm.fx.Bootstrapping() } // onNormalOperationsStarted marks this VM as bootstrapped func (vm *VM) onNormalOperationsStarted() error { - if vm.bootstrapped.GetValue() { + if vm.bootstrapped.Get() { return nil } - vm.bootstrapped.SetValue(true) + vm.bootstrapped.Set(true) if err := vm.fx.Bootstrapped(); err != nil { return err } - primaryValidatorSet, exist := vm.Validators.GetValidators(constants.PrimaryNetworkID) - if !exist { - return errNoPrimaryValidators + primaryVdrIDs, exists := vm.getValidatorIDs(constants.PrimaryNetworkID) + if !exists { + return errMissingValidatorSet } - primaryValidators := primaryValidatorSet.List() - - validatorIDs := make([]ids.NodeID, len(primaryValidators)) - for i, vdr := range primaryValidators { - validatorIDs[i] = vdr.ID() + if err := vm.uptimeManager.StartTracking(primaryVdrIDs, constants.PrimaryNetworkID); err != nil { + return err } - if err := vm.uptimeManager.StartTracking(validatorIDs); err != nil { - return err + for subnetID := range vm.TrackedSubnets { + vdrIDs, exists := vm.getValidatorIDs(subnetID) + if !exists { + return errMissingValidatorSet + } + if err := vm.uptimeManager.StartTracking(vdrIDs, subnetID); err != nil { + return err + } } + if err := vm.state.Commit(); err != nil { return err } @@ -313,7 +317,7 @@ func (vm *VM) onNormalOperationsStarted() error { return nil } -func (vm *VM) SetState(state snow.State) error { +func (vm *VM) SetState(_ context.Context, state snow.State) error { switch state { case snow.Bootstrapping: return 
vm.onBootstrapStarted() @@ -325,28 +329,32 @@ func (vm *VM) SetState(state snow.State) error { } // Shutdown this blockchain -func (vm *VM) Shutdown() error { +func (vm *VM) Shutdown(context.Context) error { if vm.dbManager == nil { return nil } vm.Builder.Shutdown() - if vm.bootstrapped.GetValue() { - primaryValidatorSet, exist := vm.Validators.GetValidators(constants.PrimaryNetworkID) - if !exist { - return errNoPrimaryValidators + if vm.bootstrapped.Get() { + primaryVdrIDs, exists := vm.getValidatorIDs(constants.PrimaryNetworkID) + if !exists { + return errMissingValidatorSet } - primaryValidators := primaryValidatorSet.List() - - validatorIDs := make([]ids.NodeID, len(primaryValidators)) - for i, vdr := range primaryValidators { - validatorIDs[i] = vdr.ID() + if err := vm.uptimeManager.StopTracking(primaryVdrIDs, constants.PrimaryNetworkID); err != nil { + return err } - if err := vm.uptimeManager.Shutdown(validatorIDs); err != nil { - return err + for subnetID := range vm.TrackedSubnets { + vdrIDs, exists := vm.getValidatorIDs(subnetID) + if !exists { + return errMissingValidatorSet + } + if err := vm.uptimeManager.StopTracking(vdrIDs, subnetID); err != nil { + return err + } } + if err := vm.state.Commit(); err != nil { return err } @@ -360,7 +368,22 @@ func (vm *VM) Shutdown() error { return errs.Err } -func (vm *VM) ParseBlock(b []byte) (snowman.Block, error) { +func (vm *VM) getValidatorIDs(subnetID ids.ID) ([]ids.NodeID, bool) { + validatorSet, exist := vm.Validators.Get(subnetID) + if !exist { + return nil, false + } + validators := validatorSet.List() + + validatorIDs := make([]ids.NodeID, len(validators)) + for i, vdr := range validators { + validatorIDs[i] = vdr.NodeID + } + + return validatorIDs, true +} + +func (vm *VM) ParseBlock(_ context.Context, b []byte) (snowman.Block, error) { // Note: blocks to be parsed are not verified, so we must used blocks.Codec // rather than blocks.GenesisCodec statelessBlk, err := blocks.Parse(blocks.Codec, b) @@ 
-370,29 +393,29 @@ func (vm *VM) ParseBlock(b []byte) (snowman.Block, error) { return vm.manager.NewBlock(statelessBlk), nil } -func (vm *VM) GetBlock(blkID ids.ID) (snowman.Block, error) { +func (vm *VM) GetBlock(_ context.Context, blkID ids.ID) (snowman.Block, error) { return vm.manager.GetBlock(blkID) } // LastAccepted returns the block most recently accepted -func (vm *VM) LastAccepted() (ids.ID, error) { +func (vm *VM) LastAccepted(context.Context) (ids.ID, error) { return vm.manager.LastAccepted(), nil } // SetPreference sets the preferred block to be the one with ID [blkID] -func (vm *VM) SetPreference(blkID ids.ID) error { +func (vm *VM) SetPreference(_ context.Context, blkID ids.ID) error { vm.Builder.SetPreference(blkID) return nil } -func (vm *VM) Version() (string, error) { +func (*VM) Version(context.Context) (string, error) { return version.Current.String(), nil } // CreateHandlers returns a map where: // * keys are API endpoint extensions // * values are API handlers -func (vm *VM) CreateHandlers() (map[string]*common.HTTPHandler, error) { +func (vm *VM) CreateHandlers(context.Context) (map[string]*common.HTTPHandler, error) { server := rpc.NewServer() server.RegisterCodec(json.NewCodec(), "application/json") server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") @@ -402,6 +425,9 @@ func (vm *VM) CreateHandlers() (map[string]*common.HTTPHandler, error) { &Service{ vm: vm, addrManager: avax.NewAddressManager(vm.ctx), + stakerAttributesCache: &cache.LRU[ids.ID, *stakerAttributes]{ + Size: stakerAttributesCacheSize, + }, }, "platform", ); err != nil { @@ -418,7 +444,7 @@ func (vm *VM) CreateHandlers() (map[string]*common.HTTPHandler, error) { // CreateStaticHandlers returns a map where: // * keys are API endpoint extensions // * values are API handlers -func (vm *VM) CreateStaticHandlers() (map[string]*common.HTTPHandler, error) { +func (*VM) CreateStaticHandlers(context.Context) (map[string]*common.HTTPHandler, error) { server := 
rpc.NewServer() server.RegisterCodec(json.NewCodec(), "application/json") server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") @@ -434,12 +460,16 @@ func (vm *VM) CreateStaticHandlers() (map[string]*common.HTTPHandler, error) { }, nil } -func (vm *VM) Connected(vdrID ids.NodeID, _ *version.Application) error { - return vm.uptimeManager.Connect(vdrID) +func (vm *VM) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { + return vm.uptimeManager.Connect(nodeID, constants.PrimaryNetworkID) +} + +func (vm *VM) ConnectedSubnet(_ context.Context, nodeID ids.NodeID, subnetID ids.ID) error { + return vm.uptimeManager.Connect(nodeID, subnetID) } -func (vm *VM) Disconnected(vdrID ids.NodeID) error { - if err := vm.uptimeManager.Disconnect(vdrID); err != nil { +func (vm *VM) Disconnected(_ context.Context, nodeID ids.NodeID) error { + if err := vm.uptimeManager.Disconnect(nodeID); err != nil { return err } return vm.state.Commit() @@ -447,26 +477,22 @@ func (vm *VM) Disconnected(vdrID ids.NodeID) error { // GetValidatorSet returns the validator set at the specified height for the // provided subnetID. 
-func (vm *VM) GetValidatorSet(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { +func (vm *VM) GetValidatorSet(ctx context.Context, height uint64, subnetID ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { validatorSetsCache, exists := vm.validatorSetCaches[subnetID] if !exists { - validatorSetsCache = &cache.LRU{Size: validatorSetsCacheSize} - // Only cache whitelisted subnets - if vm.WhitelistedSubnets.Contains(subnetID) || subnetID == constants.PrimaryNetworkID { + validatorSetsCache = &cache.LRU[uint64, map[ids.NodeID]*validators.GetValidatorOutput]{Size: validatorSetsCacheSize} + // Only cache tracked subnets + if subnetID == constants.PrimaryNetworkID || vm.TrackedSubnets.Contains(subnetID) { vm.validatorSetCaches[subnetID] = validatorSetsCache } } - if validatorSetIntf, ok := validatorSetsCache.Get(height); ok { - validatorSet, ok := validatorSetIntf.(map[ids.NodeID]uint64) - if !ok { - return nil, errWrongCacheType - } + if validatorSet, ok := validatorSetsCache.Get(height); ok { vm.metrics.IncValidatorSetsCached() return validatorSet, nil } - lastAcceptedHeight, err := vm.GetCurrentHeight() + lastAcceptedHeight, err := vm.GetCurrentHeight(ctx) if err != nil { return nil, err } @@ -477,43 +503,88 @@ func (vm *VM) GetValidatorSet(height uint64, subnetID ids.ID) (map[ids.NodeID]ui // get the start time to track metrics startTime := vm.Clock().Time() - currentValidators, ok := vm.Validators.GetValidators(subnetID) + currentSubnetValidators, ok := vm.Validators.Get(subnetID) if !ok { + currentSubnetValidators = validators.NewSet() + if err := vm.state.ValidatorSet(subnetID, currentSubnetValidators); err != nil { + return nil, err + } + } + currentPrimaryNetworkValidators, ok := vm.Validators.Get(constants.PrimaryNetworkID) + if !ok { + // This should never happen return nil, errMissingValidatorSet } - currentValidatorList := currentValidators.List() - vdrSet := make(map[ids.NodeID]uint64, len(currentValidatorList)) - for _, vdr := 
range currentValidatorList { - vdrSet[vdr.ID()] = vdr.Weight() + currentSubnetValidatorList := currentSubnetValidators.List() + vdrSet := make(map[ids.NodeID]*validators.GetValidatorOutput, len(currentSubnetValidatorList)) + for _, vdr := range currentSubnetValidatorList { + primaryVdr, ok := currentPrimaryNetworkValidators.Get(vdr.NodeID) + if !ok { + // This should never happen + return nil, fmt.Errorf("%w: %s", errMissingValidator, vdr.NodeID) + } + vdrSet[vdr.NodeID] = &validators.GetValidatorOutput{ + NodeID: vdr.NodeID, + PublicKey: primaryVdr.PublicKey, + Weight: vdr.Weight, + } } for i := lastAcceptedHeight; i > height; i-- { - diffs, err := vm.state.GetValidatorWeightDiffs(i, subnetID) + weightDiffs, err := vm.state.GetValidatorWeightDiffs(i, subnetID) if err != nil { return nil, err } - for nodeID, diff := range diffs { + for nodeID, weightDiff := range weightDiffs { + vdr, ok := vdrSet[nodeID] + if !ok { + // This node isn't in the current validator set. + vdr = &validators.GetValidatorOutput{ + NodeID: nodeID, + } + vdrSet[nodeID] = vdr + } + + // The weight of this node changed at this block. var op func(uint64, uint64) (uint64, error) - if diff.Decrease { + if weightDiff.Decrease { // The validator's weight was decreased at this block, so in the // prior block it was higher. op = math.Add64 } else { // The validator's weight was increased at this block, so in the // prior block it was lower. - op = math.Sub64 + op = math.Sub[uint64] } - newWeight, err := op(vdrSet[nodeID], diff.Amount) + // Apply the weight change. + vdr.Weight, err = op(vdr.Weight, weightDiff.Amount) if err != nil { return nil, err } - if newWeight == 0 { + + if vdr.Weight == 0 { + // The validator's weight was 0 before this block so + // they weren't in the validator set. 
delete(vdrSet, nodeID) - } else { - vdrSet[nodeID] = newWeight + } + } + + pkDiffs, err := vm.state.GetValidatorPublicKeyDiffs(i) + if err != nil { + return nil, err + } + + for nodeID, pk := range pkDiffs { + // pkDiffs includes all primary network key diffs, if we are + // fetching a subnet's validator set, we should ignore non-subnet + // validators. + if vdr, ok := vdrSet[nodeID]; ok { + // The validator's public key was removed at this block, so it + // was in the validator set before. + vdr.PublicKey = pk + } + } + } @@ -528,6 +599,27 @@ func (vm *VM) GetValidatorSet(height uint64, subnetID ids.ID) (map[ids.NodeID]ui return vdrSet, nil } +// GetSubnetID returns the ID of the subnet that validates the provided chain +func (vm *VM) GetSubnetID(_ context.Context, chainID ids.ID) (ids.ID, error) { + if chainID == constants.PlatformChainID { + return constants.PrimaryNetworkID, nil + } + + chainTx, _, err := vm.state.GetTx(chainID) + if err != nil { + return ids.Empty, fmt.Errorf( + "problem retrieving blockchain %q: %w", + chainID, + err, + ) + } + chain, ok := chainTx.Unsigned.(*txs.CreateChainTx) + if !ok { + return ids.Empty, fmt.Errorf("%q is not a blockchain", chainID) + } + return chain.SubnetID, nil +} + // GetMinimumHeight returns the height of the most recent block beyond the // horizon of our recentlyAccepted window. // @@ -541,75 +633,60 @@ func (vm *VM) GetValidatorSet(height uint64, subnetID ids.ID) (map[ids.NodeID]ui // in the case of a process restart, we default to the lastAccepted block's // height which is likely (but not guaranteed) to also be older than the // window's configured TTL. -func (vm *VM) GetMinimumHeight() (uint64, error) { +// +// If [UseCurrentHeight] is true, we will always return the last accepted block +// height as the minimum. This is used to trigger the proposervm on recently +// created subnets before [recentlyAcceptedWindowTTL]. 
+func (vm *VM) GetMinimumHeight(ctx context.Context) (uint64, error) { + if vm.Config.UseCurrentHeight { + return vm.GetCurrentHeight(ctx) + } + oldest, ok := vm.recentlyAccepted.Oldest() if !ok { - return vm.GetCurrentHeight() + return vm.GetCurrentHeight(ctx) } - blk, err := vm.GetBlock(oldest) + blk, err := vm.manager.GetBlock(oldest) if err != nil { return 0, err } + // We subtract 1 from the height of [oldest] because we want the height of + // the last block accepted before the [recentlyAccepted] window. + // + // There is guaranteed to be a block accepted before this window because the + // first block added to [recentlyAccepted] window is >= height 1. return blk.Height() - 1, nil } // GetCurrentHeight returns the height of the last accepted block -func (vm *VM) GetCurrentHeight() (uint64, error) { - lastAccepted, err := vm.GetBlock(vm.state.GetLastAccepted()) +func (vm *VM) GetCurrentHeight(context.Context) (uint64, error) { + lastAccepted, err := vm.manager.GetBlock(vm.state.GetLastAccepted()) if err != nil { return 0, err } return lastAccepted.Height(), nil } -func (vm *VM) updateValidators() error { - primaryValidators, err := vm.state.ValidatorSet(constants.PrimaryNetworkID) - if err != nil { - return err - } - - // Songbird (coston, local) has a fixed set of validators in genesis - for _, v := range validators.DefaultValidatorList() { - err := primaryValidators.AddWeight(v.ID(), v.Weight()) - if err != nil { - return err - } - } - - if err := vm.Validators.Set(constants.PrimaryNetworkID, primaryValidators); err != nil { - return err - } - - weight, _ := primaryValidators.GetWeight(vm.ctx.NodeID) - vm.metrics.SetLocalStake(weight) - vm.metrics.SetTotalStake(primaryValidators.Weight()) - - for subnetID := range vm.WhitelistedSubnets { - subnetValidators, err := vm.state.ValidatorSet(subnetID) - if err != nil { - return err - } - if err := vm.Validators.Set(subnetID, subnetValidators); err != nil { - return err - } - } - return nil +func (vm *VM) 
CodecRegistry() codec.Registry { + return vm.codecRegistry } -func (vm *VM) CodecRegistry() codec.Registry { return vm.codecRegistry } - -func (vm *VM) Clock() *mockable.Clock { return &vm.clock } +func (vm *VM) Clock() *mockable.Clock { + return &vm.clock +} -func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } +func (vm *VM) Logger() logging.Logger { + return vm.ctx.Log +} // Returns the percentage of the total stake of the subnet connected to this // node. func (vm *VM) getPercentConnected(subnetID ids.ID) (float64, error) { - vdrSet, exists := vm.Validators.GetValidators(subnetID) + vdrSet, exists := vm.Validators.Get(subnetID) if !exists { - return 0, errNoValidators + return 0, errMissingValidatorSet } vdrSetWeight := vdrSet.Weight() @@ -622,10 +699,10 @@ func (vm *VM) getPercentConnected(subnetID ids.ID) (float64, error) { err error ) for _, vdr := range vdrSet.List() { - if !vm.uptimeManager.IsConnected(vdr.ID()) { + if !vm.uptimeManager.IsConnected(vdr.NodeID, subnetID) { continue // not connected to us --> don't include } - connectedStake, err = math.Add64(connectedStake, vdr.Weight()) + connectedStake, err = math.Add64(connectedStake, vdr.Weight) if err != nil { return 0, err } diff --git a/avalanchego/vms/platformvm/vm_regression_test.go b/avalanchego/vms/platformvm/vm_regression_test.go index 0083fcfe..a4512077 100644 --- a/avalanchego/vms/platformvm/vm_regression_test.go +++ b/avalanchego/vms/platformvm/vm_regression_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( + "context" "testing" "time" @@ -22,9 +23,9 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/avalanchego/utils/timer/mockable" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/blocks" @@ -35,7 +36,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" - blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/blocks/executor" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) @@ -44,11 +44,11 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() - validatorStartTime := defaultGenesisTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) nodeID := ids.GenerateTestNodeID() @@ -62,7 +62,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { nodeID, changeAddr, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, changeAddr, ) require.NoError(err) @@ -70,17 +70,19 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { // trigger block creation require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) - addValidatorBlock, err := vm.Builder.BuildBlock() + addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) 
require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addValidatorBlock) + require.NoError(addValidatorBlock.Verify(context.Background())) + require.NoError(addValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) vm.clock.Set(validatorStartTime) - firstAdvanceTimeBlock, err := vm.Builder.BuildBlock() + firstAdvanceTimeBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, firstAdvanceTimeBlock) + require.NoError(firstAdvanceTimeBlock.Verify(context.Background())) + require.NoError(firstAdvanceTimeBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) firstDelegatorStartTime := validatorStartTime.Add(txexecutor.SyncBound).Add(1 * time.Second) firstDelegatorEndTime := firstDelegatorStartTime.Add(vm.MinStakeDuration) @@ -92,7 +94,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { uint64(firstDelegatorEndTime.Unix()), nodeID, changeAddr, - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1]}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) @@ -100,17 +102,19 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { // trigger block creation require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) - addFirstDelegatorBlock, err := vm.Builder.BuildBlock() + addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addFirstDelegatorBlock) + require.NoError(addFirstDelegatorBlock.Verify(context.Background())) + require.NoError(addFirstDelegatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) vm.clock.Set(firstDelegatorStartTime) - secondAdvanceTimeBlock, err := vm.Builder.BuildBlock() + secondAdvanceTimeBlock, err := 
vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, secondAdvanceTimeBlock) + require.NoError(secondAdvanceTimeBlock.Verify(context.Background())) + require.NoError(secondAdvanceTimeBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) secondDelegatorStartTime := firstDelegatorEndTime.Add(2 * time.Second) secondDelegatorEndTime := secondDelegatorStartTime.Add(vm.MinStakeDuration) @@ -124,7 +128,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { uint64(secondDelegatorEndTime.Unix()), nodeID, changeAddr, - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1], keys[3]}, + []*secp256k1.PrivateKey{keys[0], keys[1], keys[3]}, changeAddr, ) require.NoError(err) @@ -132,10 +136,11 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { // trigger block creation require.NoError(vm.Builder.AddUnverifiedTx(addSecondDelegatorTx)) - addSecondDelegatorBlock, err := vm.Builder.BuildBlock() + addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addSecondDelegatorBlock) + require.NoError(addSecondDelegatorBlock.Verify(context.Background())) + require.NoError(addSecondDelegatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) thirdDelegatorStartTime := firstDelegatorEndTime.Add(-time.Second) thirdDelegatorEndTime := thirdDelegatorStartTime.Add(vm.MinStakeDuration) @@ -147,7 +152,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { uint64(thirdDelegatorEndTime.Unix()), nodeID, changeAddr, - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1], keys[4]}, + []*secp256k1.PrivateKey{keys[0], keys[1], keys[4]}, changeAddr, ) require.NoError(err) @@ -158,7 +163,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { } func 
TestAddDelegatorTxHeapCorruption(t *testing.T) { - validatorStartTime := defaultGenesisTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) validatorStake := defaultMaxValidatorStake / 5 @@ -204,7 +209,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) require.NoError(err) vm.ctx.Lock.Unlock() @@ -224,7 +229,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { ids.NodeID(id), id, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1]}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) @@ -234,10 +239,11 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // trigger block creation for the validator tx - addValidatorBlock, err := vm.Builder.BuildBlock() + addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addValidatorBlock) + require.NoError(addValidatorBlock.Verify(context.Background())) + require.NoError(addValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx addFirstDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( @@ -246,7 +252,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { uint64(delegator1EndTime.Unix()), ids.NodeID(id), keys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1]}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) @@ -256,10 +262,11 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // trigger block creation for the first add delegator tx - addFirstDelegatorBlock, err := vm.Builder.BuildBlock() + addFirstDelegatorBlock, err := 
vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addFirstDelegatorBlock) + require.NoError(addFirstDelegatorBlock.Verify(context.Background())) + require.NoError(addFirstDelegatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx addSecondDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( @@ -268,7 +275,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { uint64(delegator2EndTime.Unix()), ids.NodeID(id), keys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1]}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) @@ -278,10 +285,11 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // trigger block creation for the second add delegator tx - addSecondDelegatorBlock, err := vm.Builder.BuildBlock() + addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addSecondDelegatorBlock) + require.NoError(addSecondDelegatorBlock.Verify(context.Background())) + require.NoError(addSecondDelegatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx addThirdDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( @@ -290,7 +298,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { uint64(delegator3EndTime.Unix()), ids.NodeID(id), keys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1]}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) @@ -300,10 +308,11 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // trigger block creation for the third add delegator tx - addThirdDelegatorBlock, err := vm.Builder.BuildBlock() + addThirdDelegatorBlock, err := 
vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addThirdDelegatorBlock) + require.NoError(addThirdDelegatorBlock.Verify(context.Background())) + require.NoError(addThirdDelegatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx addFourthDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( @@ -312,7 +321,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { uint64(delegator4EndTime.Unix()), ids.NodeID(id), keys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1]}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) @@ -322,7 +331,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // trigger block creation for the fourth add delegator tx - addFourthDelegatorBlock, err := vm.Builder.BuildBlock() + addFourthDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) if test.shouldFail { require.Error(err, "should have failed to allow new delegator") @@ -330,8 +339,9 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { } require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addFourthDelegatorBlock) + require.NoError(addFourthDelegatorBlock.Verify(context.Background())) + require.NoError(addFourthDelegatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) }) } } @@ -339,41 +349,53 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { // Test that calling Verify on a block with an unverified parent doesn't cause a // panic. 
func TestUnverifiedParentPanicRegression(t *testing.T) { + require := require.New(t) _, genesisBytes := defaultGenesis() baseDBManager := manager.NewMemDB(version.Semantic1_0_0) atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: mockable.MaxTime, // Banff not yet active - }, + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} - vm.clock.Set(defaultGenesisTime) ctx := defaultContext() ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() msgChan := make(chan common.Message, 1) - if err := vm.Initialize(ctx, baseDBManager, genesisBytes, nil, nil, msgChan, nil, nil); err != nil { - t.Fatal(err) - } + err := vm.Initialize( + context.Background(), + ctx, + baseDBManager, + genesisBytes, + nil, + nil, + msgChan, + nil, + nil, + ) + require.NoError(err) m := atomic.NewMemory(atomicDB) vm.ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) + // set time to post Banff fork + vm.clock.Set(banffForkTime.Add(time.Second)) + vm.state.SetTimestamp(banffForkTime.Add(time.Second)) + key0 := keys[0] key1 := keys[1] addr0 := key0.PublicKey().Address() @@ -382,88 +404,75 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { addSubnetTx0, err := 
vm.txBuilder.NewCreateSubnetTx( 1, []ids.ShortID{addr0}, - []*crypto.PrivateKeySECP256K1R{key0}, + []*secp256k1.PrivateKey{key0}, addr0, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) addSubnetTx1, err := vm.txBuilder.NewCreateSubnetTx( 1, []ids.ShortID{addr1}, - []*crypto.PrivateKeySECP256K1R{key1}, + []*secp256k1.PrivateKey{key1}, addr1, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) addSubnetTx2, err := vm.txBuilder.NewCreateSubnetTx( 1, []ids.ShortID{addr1}, - []*crypto.PrivateKeySECP256K1R{key1}, + []*secp256k1.PrivateKey{key1}, addr0, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) preferred, err := vm.Builder.Preferred() - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + preferredChainTime := preferred.Timestamp() preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessStandardBlk, err := blocks.NewApricotStandardBlock( + statelessStandardBlk, err := blocks.NewBanffStandardBlock( + preferredChainTime, preferredID, preferredHeight+1, []*txs.Tx{addSubnetTx0}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) addSubnetBlk0 := vm.manager.NewBlock(statelessStandardBlk) - statelessStandardBlk, err = blocks.NewApricotStandardBlock( + statelessStandardBlk, err = blocks.NewBanffStandardBlock( + preferredChainTime, preferredID, preferredHeight+1, []*txs.Tx{addSubnetTx1}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) addSubnetBlk1 := vm.manager.NewBlock(statelessStandardBlk) - statelessStandardBlk, err = blocks.NewApricotStandardBlock( + statelessStandardBlk, err = blocks.NewBanffStandardBlock( + preferredChainTime, addSubnetBlk1.ID(), preferredHeight+2, []*txs.Tx{addSubnetTx2}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) addSubnetBlk2 := vm.manager.NewBlock(statelessStandardBlk) - if _, err := vm.ParseBlock(addSubnetBlk0.Bytes()); err != nil { - t.Fatal(err) - } - if _, err := vm.ParseBlock(addSubnetBlk1.Bytes()); err != nil { - t.Fatal(err) - 
} - if _, err := vm.ParseBlock(addSubnetBlk2.Bytes()); err != nil { - t.Fatal(err) - } + _, err = vm.ParseBlock(context.Background(), addSubnetBlk0.Bytes()) + require.NoError(err) + + _, err = vm.ParseBlock(context.Background(), addSubnetBlk1.Bytes()) + require.NoError(err) + + _, err = vm.ParseBlock(context.Background(), addSubnetBlk2.Bytes()) + require.NoError(err) + + require.NoError(addSubnetBlk0.Verify(context.Background())) + require.NoError(addSubnetBlk0.Accept(context.Background())) - if err := addSubnetBlk0.Verify(); err != nil { - t.Fatal(err) - } - if err := addSubnetBlk0.Accept(); err != nil { - t.Fatal(err) - } // Doesn't matter what verify returns as long as it's not panicking. - _ = addSubnetBlk2.Verify() + _ = addSubnetBlk2.Verify(context.Background()) } func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { @@ -472,13 +481,13 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { vm, baseDB, mutableSharedMemory := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) require.NoError(err) vm.ctx.Lock.Unlock() }() - newValidatorStartTime := defaultGenesisTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + newValidatorStartTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) newValidatorEndTime := newValidatorStartTime.Add(defaultMinStakingDuration) key, err := testKeyFactory.NewPrivateKey() @@ -494,44 +503,34 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { nodeID, ids.ShortID(nodeID), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, ) require.NoError(err) - // Create the proposal block to add the new validator + // Create the standard block to add the new validator preferred, err := vm.Builder.Preferred() require.NoError(err) + preferredChainTime := preferred.Timestamp() preferredID := preferred.ID() preferredHeight := 
preferred.Height() - statelessBlk, err := blocks.NewApricotProposalBlock( + statelessBlk, err := blocks.NewBanffStandardBlock( + preferredChainTime, preferredID, preferredHeight+1, - addValidatorTx, + []*txs.Tx{addValidatorTx}, ) require.NoError(err) - addValidatorProposalBlk := vm.manager.NewBlock(statelessBlk) - - err = addValidatorProposalBlk.Verify() - require.NoError(err) - - // Get the commit block to add the new validator - addValidatorProposalOptions, err := addValidatorProposalBlk.(snowman.OracleBlock).Options() - require.NoError(err) - - addValidatorProposalCommitIntf := addValidatorProposalOptions[0] - addValidatorProposalCommit, ok := addValidatorProposalCommitIntf.(*blockexecutor.Block) - require.True(ok) - - err = addValidatorProposalCommit.Verify() + addValidatorStandardBlk := vm.manager.NewBlock(statelessBlk) + err = addValidatorStandardBlk.Verify(context.Background()) require.NoError(err) // Verify that the new validator now in pending validator set { - onAccept, found := vm.manager.GetState(addValidatorProposalCommit.ID()) + onAccept, found := vm.manager.GetState(addValidatorStandardBlk.ID()) require.True(found) _, err := onAccept.GetPendingValidator(constants.PrimaryNetworkID, nodeID) @@ -570,17 +569,19 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { }, } signedImportTx := &txs.Tx{Unsigned: unsignedImportTx} - err = signedImportTx.Sign(txs.Codec, [][]*crypto.PrivateKeySECP256K1R{ + err = signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ {}, // There is one input, with no required signers }) require.NoError(err) // Create the standard block that will fail verification, and then be // re-verified. 
- preferredID = addValidatorProposalCommit.ID() - preferredHeight = addValidatorProposalCommit.Height() + preferredChainTime = addValidatorStandardBlk.Timestamp() + preferredID = addValidatorStandardBlk.ID() + preferredHeight = addValidatorStandardBlk.Height() - statelessImportBlk, err := blocks.NewApricotStandardBlock( + statelessImportBlk, err := blocks.NewBanffStandardBlock( + preferredChainTime, preferredID, preferredHeight+1, []*txs.Tx{signedImportTx}, @@ -591,7 +592,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Because the shared memory UTXO hasn't been populated, this block is // currently invalid. - err = importBlk.Verify() + err = importBlk.Verify(context.Background()) require.Error(err) // Because we no longer ever reject a block in verification, the status @@ -625,60 +626,42 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Because the shared memory UTXO has now been populated, the block should // pass verification. - err = importBlk.Verify() + err = importBlk.Verify(context.Background()) require.NoError(err) // The status shouldn't have been changed during a successful verification. importBlkStatus = importBlk.Status() require.Equal(choices.Processing, importBlkStatus) - // Create the tx that would have moved the new validator from the pending + // Move chain time ahead to bring the new validator from the pending // validator set into the current validator set. vm.clock.Set(newValidatorStartTime) - advanceTimeTx, err := vm.txBuilder.NewAdvanceTimeTx(newValidatorStartTime) - require.NoError(err) // Create the proposal block that should have moved the new validator from // the pending validator set into the current validator set. 
preferredID = importBlk.ID() preferredHeight = importBlk.Height() - statelessAdvanceTimeProposalBlk, err := blocks.NewApricotProposalBlock( + statelessAdvanceTimeStandardBlk, err := blocks.NewBanffStandardBlock( + newValidatorStartTime, preferredID, preferredHeight+1, - advanceTimeTx, + nil, ) require.NoError(err) - advanceTimeProposalBlk := vm.manager.NewBlock(statelessAdvanceTimeProposalBlk) - err = advanceTimeProposalBlk.Verify() - require.NoError(err) - - // Get the commit block that advances the timestamp to the point that the - // validator should be moved from the pending validator set into the current - // validator set. - advanceTimeProposalOptions, err := advanceTimeProposalBlk.(snowman.OracleBlock).Options() - require.NoError(err) - - advanceTimeProposalCommitIntf := advanceTimeProposalOptions[0] - advanceTimeProposalCommit, ok := advanceTimeProposalCommitIntf.(*blockexecutor.Block) - require.True(ok) - _, ok = advanceTimeProposalCommit.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - err = advanceTimeProposalCommit.Verify() + advanceTimeStandardBlk := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk) + err = advanceTimeStandardBlk.Verify(context.Background()) require.NoError(err) // Accept all the blocks allBlocks := []snowman.Block{ - addValidatorProposalBlk, - addValidatorProposalCommit, + addValidatorStandardBlk, importBlk, - advanceTimeProposalBlk, - advanceTimeProposalCommit, + advanceTimeStandardBlk, } for _, blk := range allBlocks { - err = blk.Accept() + err = blk.Accept(context.Background()) require.NoError(err) status := blk.Status() @@ -686,6 +669,8 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { } // Force a reload of the state from the database. 
+ vm.Config.Validators = validators.NewManager() + vm.Config.Validators.Add(constants.PrimaryNetworkID, validators.NewSet()) is, err := state.New( vm.dbManager.Current().Database, nil, @@ -694,6 +679,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), + &utils.Atomic[bool]{}, ) require.NoError(err) vm.state = is @@ -717,7 +703,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { vm, baseDB, mutableSharedMemory := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) require.NoError(err) vm.ctx.Lock.Unlock() @@ -725,7 +711,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { vm.state.SetCurrentSupply(constants.PrimaryNetworkID, defaultRewardConfig.SupplyCap/2) - newValidatorStartTime0 := defaultGenesisTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + newValidatorStartTime0 := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) newValidatorEndTime0 := newValidatorStartTime0.Add(defaultMaxStakingDuration) nodeID0 := ids.NodeID(ids.GenerateTestShortID()) @@ -738,92 +724,64 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { nodeID0, ids.ShortID(nodeID0), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, ) require.NoError(err) - // Create the proposal block to add the first new validator + // Create the standard block to add the first new validator preferred, err := vm.Builder.Preferred() require.NoError(err) + preferredChainTime := preferred.Timestamp() preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessAddValidatorProposalBlk0, err := blocks.NewApricotProposalBlock( + statelessAddValidatorStandardBlk0, err := blocks.NewBanffStandardBlock( + preferredChainTime, preferredID, preferredHeight+1, - addValidatorTx0, + 
[]*txs.Tx{addValidatorTx0}, ) require.NoError(err) - addValidatorProposalBlk0 := vm.manager.NewBlock(statelessAddValidatorProposalBlk0) - err = addValidatorProposalBlk0.Verify() - require.NoError(err) - - // Get the commit block to add the first new validator - addValidatorProposalOptions0, err := addValidatorProposalBlk0.(snowman.OracleBlock).Options() - require.NoError(err) - - addValidatorProposalCommitIntf0 := addValidatorProposalOptions0[0] - addValidatorProposalCommit0, ok := addValidatorProposalCommitIntf0.(*blockexecutor.Block) - require.True(ok) - _, ok = addValidatorProposalCommit0.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - err = addValidatorProposalCommit0.Verify() + addValidatorStandardBlk0 := vm.manager.NewBlock(statelessAddValidatorStandardBlk0) + err = addValidatorStandardBlk0.Verify(context.Background()) require.NoError(err) // Verify that first new validator now in pending validator set { - onAccept, ok := vm.manager.GetState(addValidatorProposalCommit0.ID()) + onAccept, ok := vm.manager.GetState(addValidatorStandardBlk0.ID()) require.True(ok) _, err := onAccept.GetPendingValidator(constants.PrimaryNetworkID, nodeID0) require.NoError(err) } - // Create the tx that moves the first new validator from the pending + // Move chain time to bring the first new validator from the pending // validator set into the current validator set. vm.clock.Set(newValidatorStartTime0) - advanceTimeTx0, err := vm.txBuilder.NewAdvanceTimeTx(newValidatorStartTime0) - require.NoError(err) // Create the proposal block that moves the first new validator from the // pending validator set into the current validator set. 
- preferredID = addValidatorProposalCommit0.ID() - preferredHeight = addValidatorProposalCommit0.Height() + preferredID = addValidatorStandardBlk0.ID() + preferredHeight = addValidatorStandardBlk0.Height() - statelessAdvanceTimeProposalBlk0, err := blocks.NewApricotProposalBlock( + statelessAdvanceTimeStandardBlk0, err := blocks.NewBanffStandardBlock( + newValidatorStartTime0, preferredID, preferredHeight+1, - advanceTimeTx0, + nil, ) require.NoError(err) - advanceTimeProposalBlk0 := vm.manager.NewBlock(statelessAdvanceTimeProposalBlk0) - - err = advanceTimeProposalBlk0.Verify() - require.NoError(err) - - // Get the commit block that advances the timestamp to the point that the - // first new validator should be moved from the pending validator set into - // the current validator set. - advanceTimeProposalOptions0, err := advanceTimeProposalBlk0.(snowman.OracleBlock).Options() - require.NoError(err) - - advanceTimeProposalCommitIntf0 := advanceTimeProposalOptions0[0] - advanceTimeProposalCommit0, ok := advanceTimeProposalCommitIntf0.(*blockexecutor.Block) - require.True(ok) - _, ok = advanceTimeProposalCommit0.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - err = advanceTimeProposalCommit0.Verify() + advanceTimeStandardBlk0 := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk0) + err = advanceTimeStandardBlk0.Verify(context.Background()) require.NoError(err) // Verify that the first new validator is now in the current validator set. 
{ - onAccept, ok := vm.manager.GetState(advanceTimeProposalCommit0.ID()) + onAccept, ok := vm.manager.GetState(advanceTimeStandardBlk0.ID()) require.True(ok) _, err := onAccept.GetCurrentValidator(constants.PrimaryNetworkID, nodeID0) @@ -868,17 +826,19 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { }, } signedImportTx := &txs.Tx{Unsigned: unsignedImportTx} - err = signedImportTx.Sign(txs.Codec, [][]*crypto.PrivateKeySECP256K1R{ + err = signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ {}, // There is one input, with no required signers }) require.NoError(err) // Create the standard block that will fail verification, and then be // re-verified. - preferredID = advanceTimeProposalCommit0.ID() - preferredHeight = advanceTimeProposalCommit0.Height() + preferredChainTime = advanceTimeStandardBlk0.Timestamp() + preferredID = advanceTimeStandardBlk0.ID() + preferredHeight = advanceTimeStandardBlk0.Height() - statelessImportBlk, err := blocks.NewApricotStandardBlock( + statelessImportBlk, err := blocks.NewBanffStandardBlock( + preferredChainTime, preferredID, preferredHeight+1, []*txs.Tx{signedImportTx}, @@ -888,7 +848,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { importBlk := vm.manager.NewBlock(statelessImportBlk) // Because the shared memory UTXO hasn't been populated, this block is // currently invalid. - err = importBlk.Verify() + err = importBlk.Verify(context.Background()) require.Error(err) // Because we no longer ever reject a block in verification, the status @@ -922,7 +882,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { // Because the shared memory UTXO has now been populated, the block should // pass verification. - err = importBlk.Verify() + err = importBlk.Verify(context.Background()) require.NoError(err) // The status shouldn't have been changed during a successful verification. 
@@ -942,90 +902,62 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { nodeID1, ids.ShortID(nodeID1), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{keys[1]}, + []*secp256k1.PrivateKey{keys[1]}, ids.ShortEmpty, ) require.NoError(err) - // Create the proposal block to add the second new validator + // Create the standard block to add the second new validator + preferredChainTime = importBlk.Timestamp() preferredID = importBlk.ID() preferredHeight = importBlk.Height() - statelessAddValidatorProposalBlk1, err := blocks.NewApricotProposalBlock( + statelessAddValidatorStandardBlk1, err := blocks.NewBanffStandardBlock( + preferredChainTime, preferredID, preferredHeight+1, - addValidatorTx1, + []*txs.Tx{addValidatorTx1}, ) require.NoError(err) - addValidatorProposalBlk1 := vm.manager.NewBlock(statelessAddValidatorProposalBlk1) - - err = addValidatorProposalBlk1.Verify() - require.NoError(err) - - // Get the commit block to add the second new validator - addValidatorProposalOptions1, err := addValidatorProposalBlk1.(snowman.OracleBlock).Options() - require.NoError(err) - - addValidatorProposalCommitIntf1 := addValidatorProposalOptions1[0] - addValidatorProposalCommit1, ok := addValidatorProposalCommitIntf1.(*blockexecutor.Block) - require.True(ok) - _, ok = addValidatorProposalCommit1.Block.(*blocks.ApricotCommitBlock) - require.True(ok) + addValidatorStandardBlk1 := vm.manager.NewBlock(statelessAddValidatorStandardBlk1) - err = addValidatorProposalCommit1.Verify() + err = addValidatorStandardBlk1.Verify(context.Background()) require.NoError(err) // Verify that the second new validator now in pending validator set { - onAccept, ok := vm.manager.GetState(addValidatorProposalCommit1.ID()) + onAccept, ok := vm.manager.GetState(addValidatorStandardBlk1.ID()) require.True(ok) _, err := onAccept.GetPendingValidator(constants.PrimaryNetworkID, nodeID1) require.NoError(err) } - // Create the tx that moves the second new validator from the pending 
+ // Move chain time to bring the second new validator from the pending // validator set into the current validator set. vm.clock.Set(newValidatorStartTime1) - advanceTimeTx1, err := vm.txBuilder.NewAdvanceTimeTx(newValidatorStartTime1) - require.NoError(err) // Create the proposal block that moves the second new validator from the // pending validator set into the current validator set. - preferredID = addValidatorProposalCommit1.ID() - preferredHeight = addValidatorProposalCommit1.Height() + preferredID = addValidatorStandardBlk1.ID() + preferredHeight = addValidatorStandardBlk1.Height() - statelessAdvanceTimeProposalBlk1, err := blocks.NewApricotProposalBlock( + statelessAdvanceTimeStandardBlk1, err := blocks.NewBanffStandardBlock( + newValidatorStartTime1, preferredID, preferredHeight+1, - advanceTimeTx1, + nil, ) require.NoError(err) - advanceTimeProposalBlk1 := vm.manager.NewBlock(statelessAdvanceTimeProposalBlk1) - - err = advanceTimeProposalBlk1.Verify() - require.NoError(err) - - // Get the commit block that advances the timestamp to the point that the - // second new validator should be moved from the pending validator set into - // the current validator set. - advanceTimeProposalOptions1, err := advanceTimeProposalBlk1.(snowman.OracleBlock).Options() - require.NoError(err) - - advanceTimeProposalCommitIntf1 := advanceTimeProposalOptions1[0] - advanceTimeProposalCommit1, ok := advanceTimeProposalCommitIntf1.(*blockexecutor.Block) - require.True(ok) - _, ok = advanceTimeProposalCommit1.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - err = advanceTimeProposalCommit1.Verify() + advanceTimeStandardBlk1 := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk1) + err = advanceTimeStandardBlk1.Verify(context.Background()) require.NoError(err) // Verify that the second new validator is now in the current validator set. 
{ - onAccept, ok := vm.manager.GetState(advanceTimeProposalCommit1.ID()) + onAccept, ok := vm.manager.GetState(advanceTimeStandardBlk1.ID()) require.True(ok) _, err := onAccept.GetCurrentValidator(constants.PrimaryNetworkID, nodeID1) @@ -1040,18 +972,14 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { // Accept all the blocks allBlocks := []snowman.Block{ - addValidatorProposalBlk0, - addValidatorProposalCommit0, - advanceTimeProposalBlk0, - advanceTimeProposalCommit0, + addValidatorStandardBlk0, + advanceTimeStandardBlk0, importBlk, - addValidatorProposalBlk1, - addValidatorProposalCommit1, - advanceTimeProposalBlk1, - advanceTimeProposalCommit1, + addValidatorStandardBlk1, + advanceTimeStandardBlk1, } for _, blk := range allBlocks { - err = blk.Accept() + err = blk.Accept(context.Background()) require.NoError(err) status := blk.Status() @@ -1059,6 +987,8 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { } // Force a reload of the state from the database. 
+ vm.Config.Validators = validators.NewManager() + vm.Config.Validators.Add(constants.PrimaryNetworkID, validators.NewSet()) is, err := state.New( vm.dbManager.Current().Database, nil, @@ -1067,6 +997,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { vm.ctx, metrics.Noop, reward.NewCalculator(vm.Config.RewardConfig), + &utils.Atomic[bool]{}, ) require.NoError(err) vm.state = is @@ -1099,7 +1030,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) require.NoError(err) vm.ctx.Lock.Unlock() @@ -1111,7 +1042,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { nodeID3 := ids.NodeID(keys[3].PublicKey().Address()) nodeID4 := ids.NodeID(keys[4].PublicKey().Address()) - currentHeight, err := vm.GetCurrentHeight() + currentHeight, err := vm.GetCurrentHeight(context.Background()) require.NoError(err) require.EqualValues(1, currentHeight) @@ -1122,11 +1053,13 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { nodeID3: defaultWeight, nodeID4: defaultWeight, } - validators, err := vm.GetValidatorSet(1, constants.PrimaryNetworkID) + validators, err := vm.GetValidatorSet(context.Background(), 1, constants.PrimaryNetworkID) require.NoError(err) - require.Equal(expectedValidators1, validators) + for nodeID, weight := range expectedValidators1 { + require.Equal(weight, validators[nodeID].Weight) + } - newValidatorStartTime0 := defaultGenesisTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + newValidatorStartTime0 := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) newValidatorEndTime0 := newValidatorStartTime0.Add(defaultMaxStakingDuration) nodeID5 := ids.GenerateTestNodeID() @@ -1139,70 +1072,76 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { nodeID5, ids.GenerateTestShortID(), reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + 
[]*secp256k1.PrivateKey{keys[0]}, ids.GenerateTestShortID(), ) require.NoError(err) - // Create the proposal block to add the first new validator + // Create the standard block to add the first new validator preferred, err := vm.Builder.Preferred() require.NoError(err) + preferredChainTime := preferred.Timestamp() preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessProposalBlk, err := blocks.NewApricotProposalBlock( + statelessStandardBlk, err := blocks.NewBanffStandardBlock( + preferredChainTime, preferredID, preferredHeight+1, - addValidatorTx0, + []*txs.Tx{addValidatorTx0}, ) require.NoError(err) - addValidatorProposalBlk0 := vm.manager.NewBlock(statelessProposalBlk) - - verifyAndAcceptProposalCommitment(require, vm, addValidatorProposalBlk0) + addValidatorProposalBlk0 := vm.manager.NewBlock(statelessStandardBlk) + require.NoError(addValidatorProposalBlk0.Verify(context.Background())) + require.NoError(addValidatorProposalBlk0.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - currentHeight, err = vm.GetCurrentHeight() + currentHeight, err = vm.GetCurrentHeight(context.Background()) require.NoError(err) - require.EqualValues(3, currentHeight) + require.EqualValues(2, currentHeight) - for i := uint64(1); i <= 3; i++ { - validators, err = vm.GetValidatorSet(i, constants.PrimaryNetworkID) + for i := uint64(1); i <= 2; i++ { + validators, err = vm.GetValidatorSet(context.Background(), i, constants.PrimaryNetworkID) require.NoError(err) - require.Equal(expectedValidators1, validators) + for nodeID, weight := range expectedValidators1 { + require.Equal(weight, validators[nodeID].Weight) + } } - // Create the tx that moves the first new validator from the pending + // Advance chain time to move the first new validator from the pending // validator set into the current validator set. 
vm.clock.Set(newValidatorStartTime0) - advanceTimeTx0, err := vm.txBuilder.NewAdvanceTimeTx(newValidatorStartTime0) - require.NoError(err) - // Create the proposal block that moves the first new validator from the + // Create the standard block that moves the first new validator from the // pending validator set into the current validator set. preferred, err = vm.Builder.Preferred() require.NoError(err) - preferredID = preferred.ID() preferredHeight = preferred.Height() - statelessProposalBlk, err = blocks.NewApricotProposalBlock( + statelessStandardBlk, err = blocks.NewBanffStandardBlock( + newValidatorStartTime0, preferredID, preferredHeight+1, - advanceTimeTx0, + nil, ) require.NoError(err) - advanceTimeProposalBlk0 := vm.manager.NewBlock(statelessProposalBlk) - - verifyAndAcceptProposalCommitment(require, vm, advanceTimeProposalBlk0) + advanceTimeProposalBlk0 := vm.manager.NewBlock(statelessStandardBlk) + require.NoError(advanceTimeProposalBlk0.Verify(context.Background())) + require.NoError(advanceTimeProposalBlk0.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - currentHeight, err = vm.GetCurrentHeight() + currentHeight, err = vm.GetCurrentHeight(context.Background()) require.NoError(err) - require.EqualValues(5, currentHeight) + require.EqualValues(3, currentHeight) - for i := uint64(1); i <= 4; i++ { - validators, err = vm.GetValidatorSet(i, constants.PrimaryNetworkID) + for i := uint64(1); i <= 2; i++ { + validators, err = vm.GetValidatorSet(context.Background(), i, constants.PrimaryNetworkID) require.NoError(err) - require.Equal(expectedValidators1, validators) + for nodeID, weight := range expectedValidators1 { + require.Equal(weight, validators[nodeID].Weight) + } } expectedValidators2 := map[ids.NodeID]uint64{ @@ -1213,15 +1152,17 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { nodeID4: defaultWeight, nodeID5: vm.MaxValidatorStake, } - validators, err = 
vm.GetValidatorSet(5, constants.PrimaryNetworkID) + validators, err = vm.GetValidatorSet(context.Background(), 3, constants.PrimaryNetworkID) require.NoError(err) - require.Equal(expectedValidators2, validators) + for nodeID, weight := range expectedValidators2 { + require.Equal(weight, validators[nodeID].Weight) + } } func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require := require.New(t) - validatorStartTime := defaultGenesisTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) validatorStake := defaultMaxValidatorStake / 5 @@ -1237,7 +1178,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) require.NoError(err) vm.ctx.Lock.Unlock() @@ -1257,7 +1198,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { ids.NodeID(id), id, reward.PercentDenominator, - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1]}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) @@ -1267,10 +1208,11 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(err) // trigger block creation for the validator tx - addValidatorBlock, err := vm.Builder.BuildBlock() + addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addValidatorBlock) + require.NoError(addValidatorBlock.Verify(context.Background())) + require.NoError(addValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx addFirstDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( @@ -1279,7 +1221,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { uint64(delegator1EndTime.Unix()), ids.NodeID(id), keys[0].PublicKey().Address(), - 
[]*crypto.PrivateKeySECP256K1R{keys[0], keys[1]}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) @@ -1289,10 +1231,11 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(err) // trigger block creation for the first add delegator tx - addFirstDelegatorBlock, err := vm.Builder.BuildBlock() + addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - - verifyAndAcceptProposalCommitment(require, vm, addFirstDelegatorBlock) + require.NoError(addFirstDelegatorBlock.Verify(context.Background())) + require.NoError(addFirstDelegatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // create valid tx addSecondDelegatorTx, err := vm.txBuilder.NewAddDelegatorTx( @@ -1301,7 +1244,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { uint64(delegator2EndTime.Unix()), ids.NodeID(id), keys[0].PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{keys[0], keys[1]}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, changeAddr, ) require.NoError(err) @@ -1311,31 +1254,238 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.Error(vm.Builder.AddUnverifiedTx(addSecondDelegatorTx)) } -func verifyAndAcceptProposalCommitment(require *require.Assertions, vm *VM, blk snowman.Block) { - // Verify the proposed block - require.NoError(blk.Verify()) +func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t *testing.T) { + require := require.New(t) + + validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) + + vm, _, _ := defaultVM() + + vm.ctx.Lock.Lock() + defer func() { + err := vm.Shutdown(context.Background()) + require.NoError(err) + + vm.ctx.Lock.Unlock() + }() - // Assert preferences are correct - proposalBlk := blk.(snowman.OracleBlock) - options, err := proposalBlk.Options() + key, err := 
testKeyFactory.NewPrivateKey() require.NoError(err) - // verify the preferences - commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) - require.True(ok, "expected commit block to be preferred") + id := key.PublicKey().Address() + changeAddr := keys[0].PublicKey().Address() - abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) - require.True(ok, "expected abort block to be issued") + addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( + defaultMaxValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + ids.NodeID(id), + id, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0], keys[1]}, + changeAddr, + ) + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(addValidatorTx) + require.NoError(err) + + // trigger block creation for the validator tx + addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(addValidatorBlock.Verify(context.Background())) + require.NoError(addValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( + 1, + []ids.ShortID{changeAddr}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, + changeAddr, + ) + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(createSubnetTx) + require.NoError(err) + + // trigger block creation for the subnet tx + createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(createSubnetBlock.Verify(context.Background())) + require.NoError(createSubnetBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( + defaultMaxValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + 
ids.NodeID(id), + createSubnetTx.ID(), + []*secp256k1.PrivateKey{keys[0], keys[1]}, + changeAddr, + ) + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(addSubnetValidatorTx) + require.NoError(err) + + // trigger block creation for the validator tx + addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(addSubnetValidatorBlock.Verify(context.Background())) + require.NoError(addSubnetValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + emptyValidatorSet, err := vm.GetValidatorSet( + context.Background(), + addSubnetValidatorBlock.Height(), + createSubnetTx.ID(), + ) + require.NoError(err) + require.Empty(emptyValidatorSet) + + removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( + ids.NodeID(id), + createSubnetTx.ID(), + []*secp256k1.PrivateKey{keys[0], keys[1]}, + changeAddr, + ) + require.NoError(err) + + // Set the clock so that the validator will be moved from the pending + // validator set into the current validator set. 
+ vm.clock.Set(validatorStartTime) + + err = vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx) + require.NoError(err) + + // trigger block creation for the validator tx + removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(removeSubnetValidatorBlock.Verify(context.Background())) + require.NoError(removeSubnetValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + emptyValidatorSet, err = vm.GetValidatorSet( + context.Background(), + addSubnetValidatorBlock.Height(), + createSubnetTx.ID(), + ) + require.NoError(err) + require.Empty(emptyValidatorSet) +} + +func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *testing.T) { + require := require.New(t) + + validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) + + vm, _, _ := defaultVM() + + vm.ctx.Lock.Lock() + defer func() { + err := vm.Shutdown(context.Background()) + require.NoError(err) - // Verify the options - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) + vm.ctx.Lock.Unlock() + }() + + key, err := testKeyFactory.NewPrivateKey() + require.NoError(err) + + id := key.PublicKey().Address() + changeAddr := keys[0].PublicKey().Address() + + addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( + defaultMaxValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + ids.NodeID(id), + id, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0], keys[1]}, + changeAddr, + ) + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(addValidatorTx) + require.NoError(err) + + // trigger block creation for the validator tx + addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(addValidatorBlock.Verify(context.Background())) + 
require.NoError(addValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( + 1, + []ids.ShortID{changeAddr}, + []*secp256k1.PrivateKey{keys[0], keys[1]}, + changeAddr, + ) + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(createSubnetTx) + require.NoError(err) + + // trigger block creation for the subnet tx + createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(createSubnetBlock.Verify(context.Background())) + require.NoError(createSubnetBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + vm.TrackedSubnets.Add(createSubnetTx.ID()) + subnetValidators := validators.NewSet() + err = vm.state.ValidatorSet(createSubnetTx.ID(), subnetValidators) + require.NoError(err) + + added := vm.Validators.Add(createSubnetTx.ID(), subnetValidators) + require.True(added) + + addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( + defaultMaxValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + ids.NodeID(id), + createSubnetTx.ID(), + []*secp256k1.PrivateKey{keys[0], keys[1]}, + changeAddr, + ) + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(addSubnetValidatorTx) + require.NoError(err) - // Accept the proposal block and the commit block - require.NoError(proposalBlk.Accept()) - require.NoError(commit.Accept()) - require.NoError(abort.Reject()) - require.NoError(vm.SetPreference(vm.manager.LastAccepted())) + // trigger block creation for the validator tx + addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(addSubnetValidatorBlock.Verify(context.Background())) + require.NoError(addSubnetValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), 
vm.manager.LastAccepted())) + + removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( + ids.NodeID(id), + createSubnetTx.ID(), + []*secp256k1.PrivateKey{keys[0], keys[1]}, + changeAddr, + ) + require.NoError(err) + + // Set the clock so that the validator will be moved from the pending + // validator set into the current validator set. + vm.clock.Set(validatorStartTime) + + err = vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx) + require.NoError(err) + + // trigger block creation for the validator tx + removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(removeSubnetValidatorBlock.Verify(context.Background())) + require.NoError(removeSubnetValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) } diff --git a/avalanchego/vms/platformvm/vm_test.go b/avalanchego/vms/platformvm/vm_test.go index 35bdcac8..539fd386 100644 --- a/avalanchego/vms/platformvm/vm_test.go +++ b/avalanchego/vms/platformvm/vm_test.go @@ -1,15 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package platformvm import ( "bytes" + "context" "errors" "fmt" "testing" "time" + "github.com/golang/mock/gomock" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" @@ -21,6 +24,7 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/proto/pb/p2p" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowball" @@ -35,14 +39,17 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/timeout" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/subnets" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math/meter" "github.com/ava-labs/avalanchego/utils/resource" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" @@ -53,6 +60,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/blocks" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -95,8 +103,10 @@ var ( // time that genesis validators stop validating defaultValidateEndTime = 
defaultValidateStartTime.Add(10 * defaultMinStakingDuration) + banffForkTime = defaultValidateEndTime.Add(-5 * defaultMinStakingDuration) + // each key controls an address that has [defaultBalance] AVAX at genesis - keys = crypto.BuildTestKeys() + keys = secp256k1.TestKeys() defaultMinValidatorStake = 5 * units.MilliAvax defaultMaxValidatorStake = 500 * units.MilliAvax @@ -115,20 +125,10 @@ var ( cChainID = ids.Empty.Prefix(1) // Used to create and use keys. - testKeyFactory crypto.FactorySECP256K1R -) - -type snLookup struct { - chainsToSubnet map[ids.ID]ids.ID -} + testKeyFactory secp256k1.Factory -func (sn *snLookup) SubnetID(chainID ids.ID) (ids.ID, error) { - subnetID, ok := sn.chainsToSubnet[chainID] - if !ok { - return ids.ID{}, errors.New("missing subnet associated with requested chainID") - } - return subnetID, nil -} + errMissing = errors.New("missing") +) type mutableSharedMemory struct { atomic.SharedMemory @@ -138,6 +138,7 @@ func defaultContext() *snow.Context { ctx := snow.DefaultContextTest() ctx.NetworkID = testNetworkID ctx.XChainID = xChainID + ctx.CChainID = cChainID ctx.AVAXAssetID = avaxAssetID aliaser := ids.NewAliaser() @@ -155,11 +156,17 @@ func defaultContext() *snow.Context { } ctx.BCLookup = aliaser - ctx.SNLookup = &snLookup{ - chainsToSubnet: map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - xChainID: constants.PrimaryNetworkID, - cChainID: constants.PrimaryNetworkID, + ctx.ValidatorState = &validators.TestState{ + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + xChainID: constants.PrimaryNetworkID, + cChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errMissing + } + return subnetID, nil }, } return ctx @@ -244,14 +251,14 @@ func BuildGenesisTest(t *testing.T) (*api.BuildGenesisArgs, []byte) { // 1) The genesis state // 2) The byte representation of the 
default genesis for tests func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.BuildGenesisArgs, []byte) { + require := require.New(t) genesisUTXOs := make([]api.UTXO, len(keys)) hrp := constants.NetworkIDToHRP[testNetworkID] for i, key := range keys { id := key.PublicKey().Address() addr, err := address.FormatBech32(hrp, id.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + genesisUTXOs[i] = api.UTXO{ Amount: json.Uint64(defaultBalance), Address: addr, @@ -262,9 +269,8 @@ func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.Bu for i, key := range keys { nodeID := ids.NodeID(key.PublicKey().Address()) addr, err := address.FormatBech32(hrp, nodeID.Bytes()) - if err != nil { - panic(err) - } + require.NoError(err) + genesisValidators[i] = api.PermissionlessValidator{ Staker: api.Staker{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), @@ -300,45 +306,44 @@ func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.Bu buildGenesisResponse := api.BuildGenesisReply{} platformvmSS := api.StaticService{} - if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { - t.Fatalf("problem while building platform chain's genesis state: %v", err) - } + err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse) + require.NoError(err) genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) return &buildGenesisArgs, genesisBytes } func defaultVM() (*VM, database.Database, *mutableSharedMemory) { - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - UptimeLockedCalculator: uptime.NewLockedCalculator(), - Validators: validators.NewManager(), - TxFee: defaultTxFee, - CreateSubnetTxFee: 100 * defaultTxFee, - TransformSubnetTxFee: 100 * defaultTxFee, - CreateBlockchainTxFee: 100 * defaultTxFee, - 
MinValidatorStake: defaultMinValidatorStake, - MaxValidatorStake: defaultMaxValidatorStake, - MinDelegatorStake: defaultMinDelegatorStake, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - ApricotPhase3Time: defaultValidateEndTime, - ApricotPhase5Time: defaultValidateEndTime, - BanffTime: mockable.MaxTime, - }, + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + StakingEnabled: true, + Validators: vdrs, + TxFee: defaultTxFee, + CreateSubnetTxFee: 100 * defaultTxFee, + TransformSubnetTxFee: 100 * defaultTxFee, + CreateBlockchainTxFee: 100 * defaultTxFee, + MinValidatorStake: defaultMinValidatorStake, + MaxValidatorStake: defaultMaxValidatorStake, + MinDelegatorStake: defaultMinDelegatorStake, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + ApricotPhase3Time: defaultValidateEndTime, + ApricotPhase5Time: defaultValidateEndTime, + BanffTime: banffForkTime, }} baseDBManager := manager.NewMemDB(version.Semantic1_0_0) chainDBManager := baseDBManager.NewPrefixDBManager([]byte{0}) atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) - vm.clock.Set(defaultGenesisTime) + vm.clock.Set(banffForkTime.Add(time.Second)) msgChan := make(chan common.Message, 1) ctx := defaultContext() @@ -353,35 +358,51 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { _, genesisBytes := defaultGenesis() appSender := &common.SenderTest{} appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func([]byte) error { return nil } + appSender.SendAppGossipF = func(context.Context, []byte) error { + return nil + } - if err := vm.Initialize(ctx, chainDBManager, genesisBytes, nil, nil, msgChan, nil, 
appSender); err != nil { + err := vm.Initialize( + context.Background(), + ctx, + chainDBManager, + genesisBytes, + nil, + nil, + msgChan, + nil, + appSender, + ) + if err != nil { panic(err) } - if err := vm.SetState(snow.NormalOp); err != nil { + + err = vm.SetState(context.Background(), snow.NormalOp) + if err != nil { panic(err) } // Create a subnet and store it in testSubnet1 - var err error + // Note: following Banff activation, block acceptance will move + // chain time ahead testSubnet1, err = vm.txBuilder.NewCreateSubnetTx( 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet // control keys are keys[0], keys[1], keys[2] []ids.ShortID{keys[0].PublicKey().Address(), keys[1].PublicKey().Address(), keys[2].PublicKey().Address()}, - []*crypto.PrivateKeySECP256K1R{keys[0]}, // pays tx fee - keys[0].PublicKey().Address(), // change addr + []*secp256k1.PrivateKey{keys[0]}, // pays tx fee + keys[0].PublicKey().Address(), // change addr ) if err != nil { panic(err) } else if err := vm.Builder.AddUnverifiedTx(testSubnet1); err != nil { panic(err) - } else if blk, err := vm.Builder.BuildBlock(); err != nil { + } else if blk, err := vm.Builder.BuildBlock(context.Background()); err != nil { panic(err) - } else if err := blk.Verify(); err != nil { + } else if err := blk.Verify(context.Background()); err != nil { panic(err) - } else if err := blk.Accept(); err != nil { + } else if err := blk.Accept(context.Background()); err != nil { panic(err) - } else if err := vm.SetPreference(vm.manager.LastAccepted()); err != nil { + } else if err := vm.SetPreference(context.Background(), vm.manager.LastAccepted()); err != nil { panic(err) } @@ -389,6 +410,7 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { } func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan common.Message, *VM, *atomic.Memory) { + require := require.New(t) var genesisBytes []byte if args != nil { @@ -397,20 +419,21 @@ func 
GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan c _, genesisBytes = BuildGenesisTest(t) } - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - TxFee: defaultTxFee, - MinValidatorStake: defaultMinValidatorStake, - MaxValidatorStake: defaultMaxValidatorStake, - MinDelegatorStake: defaultMinDelegatorStake, - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: mockable.MaxTime, - }, + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + TxFee: defaultTxFee, + MinValidatorStake: defaultMinValidatorStake, + MaxValidatorStake: defaultMaxValidatorStake, + MinDelegatorStake: defaultMinDelegatorStake, + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} baseDBManager := manager.NewMemDB(version.Semantic1_0_0) @@ -429,121 +452,111 @@ func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan c defer ctx.Lock.Unlock() appSender := &common.SenderTest{T: t} appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func([]byte) error { return nil } - if err := vm.Initialize(ctx, chainDBManager, genesisBytes, nil, nil, msgChan, nil, appSender); err != nil { - t.Fatal(err) - } - if err := vm.SetState(snow.NormalOp); err != nil { - panic(err) - } + appSender.SendAppGossipF = func(context.Context, []byte) error { + return nil + } + err := vm.Initialize( + context.Background(), + ctx, + chainDBManager, + genesisBytes, + nil, + nil, + msgChan, + nil, + appSender, + ) + require.NoError(err) + + err = 
vm.SetState(context.Background(), snow.NormalOp) + require.NoError(err) // Create a subnet and store it in testSubnet1 - var err error testSubnet1, err = vm.txBuilder.NewCreateSubnetTx( 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet // control keys are keys[0], keys[1], keys[2] []ids.ShortID{keys[0].PublicKey().Address(), keys[1].PublicKey().Address(), keys[2].PublicKey().Address()}, - []*crypto.PrivateKeySECP256K1R{keys[0]}, // pays tx fee - keys[0].PublicKey().Address(), // change addr + []*secp256k1.PrivateKey{keys[0]}, // pays tx fee + keys[0].PublicKey().Address(), // change addr ) - if err != nil { - panic(err) - } else if err := vm.Builder.AddUnverifiedTx(testSubnet1); err != nil { - panic(err) - } else if blk, err := vm.Builder.BuildBlock(); err != nil { - panic(err) - } else if err := blk.Verify(); err != nil { - panic(err) - } else if err := blk.Accept(); err != nil { - panic(err) - } + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(testSubnet1) + require.NoError(err) + + blk, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + + err = blk.Verify(context.Background()) + require.NoError(err) + + err = blk.Accept(context.Background()) + require.NoError(err) return genesisBytes, msgChan, vm, m } // Ensure genesis state is parsed from bytes and stored correctly func TestGenesis(t *testing.T) { + require := require.New(t) vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := vm.Shutdown(context.Background()) + require.NoError(err) vm.ctx.Lock.Unlock() }() // Ensure the genesis block has been accepted and stored - genesisBlockID, err := vm.LastAccepted() // lastAccepted should be ID of genesis block - if err != nil { - t.Fatal(err) - } - if genesisBlock, err := vm.manager.GetBlock(genesisBlockID); err != nil { - t.Fatalf("couldn't get genesis block: %v", err) - } else if genesisBlock.Status() != 
choices.Accepted { - t.Fatal("genesis block should be accepted") - } + genesisBlockID, err := vm.LastAccepted(context.Background()) // lastAccepted should be ID of genesis block + require.NoError(err) + + genesisBlock, err := vm.manager.GetBlock(genesisBlockID) + require.NoError(err) + require.Equal(choices.Accepted, genesisBlock.Status()) genesisState, _ := defaultGenesis() // Ensure all the genesis UTXOs are there for _, utxo := range genesisState.UTXOs { _, addrBytes, err := address.ParseBech32(utxo.Address) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + addr, err := ids.ToShortID(addrBytes) - if err != nil { - t.Fatal(err) - } - addrs := ids.ShortSet{} + require.NoError(err) + + addrs := set.Set[ids.ShortID]{} addrs.Add(addr) utxos, err := avax.GetAllUTXOs(vm.state, addrs) - if err != nil { - t.Fatal("couldn't find UTXO") - } else if len(utxos) != 1 { - t.Fatal("expected each address to have one UTXO") - } else if out, ok := utxos[0].Out.(*secp256k1fx.TransferOutput); !ok { - t.Fatal("expected utxo output to be type *secp256k1fx.TransferOutput") - } else if out.Amount() != uint64(utxo.Amount) { + require.NoError(err) + require.Len(utxos, 1) + + out := utxos[0].Out.(*secp256k1fx.TransferOutput) + if out.Amount() != uint64(utxo.Amount) { id := keys[0].PublicKey().Address() hrp := constants.NetworkIDToHRP[testNetworkID] addr, err := address.FormatBech32(hrp, id.Bytes()) - if err != nil { - t.Fatal(err) - } - if utxo.Address == addr { // Address that paid tx fee to create testSubnet1 has less tokens - if out.Amount() != uint64(utxo.Amount)-vm.TxFee { - t.Fatalf("expected UTXO to have value %d but has value %d", uint64(utxo.Amount)-vm.TxFee, out.Amount()) - } - } else { - t.Fatalf("expected UTXO to have value %d but has value %d", uint64(utxo.Amount), out.Amount()) - } + require.NoError(err) + + require.Equal(utxo.Address, addr) + require.Equal(uint64(utxo.Amount)-vm.TxFee, out.Amount()) } } // Ensure current validator set of primary network is 
correct - vdrSet, ok := vm.Validators.GetValidators(constants.PrimaryNetworkID) - if !ok { - t.Fatalf("Missing the primary network validator set") - } + vdrSet, ok := vm.Validators.Get(constants.PrimaryNetworkID) + require.True(ok) + currentValidators := vdrSet.List() - if len(currentValidators) != len(genesisState.Validators) { - t.Fatal("vm's current validator set is wrong") - } - for _, key := range keys { - if addr := key.PublicKey().Address(); !vdrSet.Contains(ids.NodeID(addr)) { - t.Fatalf("should have had validator with NodeID %s", addr) - } - } + require.Equal(len(currentValidators), len(genesisState.Validators)) - // Ensure genesis timestamp is correct - if timestamp := vm.state.GetTimestamp(); timestamp.Unix() != int64(genesisState.Time) { - t.Fatalf("vm's time is incorrect. Expected %v got %v", genesisState.Time, timestamp) + for _, key := range keys { + nodeID := ids.NodeID(key.PublicKey().Address()) + require.True(vdrSet.Contains(nodeID)) } // Ensure the new subnet we created exists - if _, _, err := vm.state.GetTx(testSubnet1.ID()); err != nil { - t.Fatalf("expected subnet %s to exist", testSubnet1.ID()) - } + _, _, err = vm.state.GetTx(testSubnet1.ID()) + require.NoError(err) } // accept proposal to add validator to primary network @@ -552,11 +565,11 @@ func TestAddValidatorCommit(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() - startTime := defaultGenesisTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() rewardAddress := ids.GenerateTestShortID() @@ -568,8 +581,8 @@ func TestAddValidatorCommit(t *testing.T) { uint64(endTime.Unix()), nodeID, rewardAddress, - 0, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + reward.PercentDenominator, + 
[]*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr ) require.NoError(err) @@ -577,23 +590,11 @@ func TestAddValidatorCommit(t *testing.T) { // trigger block creation require.NoError(vm.Builder.AddUnverifiedTx(tx)) - blk, err := vm.Builder.BuildBlock() - require.NoError(err) - - require.NoError(blk.Verify()) - - // Assert preferences are correct - block := blk.(smcon.OracleBlock) - options, err := block.Options() + blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(commit.Accept()) // commit the proposal + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) _, txStatus, err := vm.state.GetTx(tx.ID()) require.NoError(err) @@ -604,14 +605,14 @@ func TestAddValidatorCommit(t *testing.T) { require.NoError(err) } -// verify invalid proposal to add validator to primary network +// verify invalid attempt to add validator to primary network func TestInvalidAddValidatorCommit(t *testing.T) { + require := require.New(t) vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := vm.Shutdown(context.Background()) + require.NoError(err) vm.ctx.Lock.Unlock() }() @@ -627,55 +628,52 @@ func TestInvalidAddValidatorCommit(t *testing.T) { uint64(endTime.Unix()), nodeID, ids.ShortID(nodeID), - 0, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) preferred, err := vm.Builder.Preferred() - if err != nil { - t.Fatal(err) - } + require.NoError(err) + preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessBlk, err := blocks.NewApricotProposalBlock(preferredID, 
preferredHeight+1, tx) - if err != nil { - t.Fatal(err) - } + statelessBlk, err := blocks.NewBanffStandardBlock( + preferred.Timestamp(), + preferredID, + preferredHeight+1, + []*txs.Tx{tx}, + ) + require.NoError(err) + blk := vm.manager.NewBlock(statelessBlk) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + blkBytes := blk.Bytes() - parsedBlock, err := vm.ParseBlock(blkBytes) - if err != nil { - t.Fatal(err) - } + parsedBlock, err := vm.ParseBlock(context.Background(), blkBytes) + require.NoError(err) + + err = parsedBlock.Verify(context.Background()) + require.Error(err) - if err := parsedBlock.Verify(); err == nil { - t.Fatalf("Should have errored during verification") - } txID := statelessBlk.Txs()[0].ID() - if _, dropped := vm.Builder.GetDropReason(txID); !dropped { - t.Fatal("tx should be in dropped tx cache") - } + reason := vm.Builder.GetDropReason(txID) + require.Error(reason) } -// Reject proposal to add validator to primary network +// Reject attempt to add validator to primary network func TestAddValidatorReject(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() - startTime := defaultGenesisTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) nodeID := ids.GenerateTestNodeID() rewardAddress := ids.GenerateTestShortID() @@ -687,8 +685,8 @@ func TestAddValidatorReject(t *testing.T) { uint64(endTime.Unix()), nodeID, rewardAddress, - 0, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr ) require.NoError(err) @@ -696,32 +694,14 @@ func TestAddValidatorReject(t *testing.T) { // trigger block creation require.NoError(vm.Builder.AddUnverifiedTx(tx)) - blk, err := 
vm.Builder.BuildBlock() - require.NoError(err) - - require.NoError(blk.Verify()) - - // Assert preferences are correct - block := blk.(smcon.OracleBlock) - options, err := block.Options() + blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) - require.True(ok) - - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) - require.NoError(abort.Accept()) // reject the proposal + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Reject(context.Background())) - _, txStatus, err := vm.state.GetTx(tx.ID()) - require.NoError(err) - require.Equal(status.Aborted, txStatus) + _, _, err = vm.state.GetTx(tx.ID()) + require.Error(err, database.ErrNotFound) _, err = vm.state.GetPendingValidator(constants.PrimaryNetworkID, nodeID) require.ErrorIs(err, database.ErrNotFound) @@ -729,12 +709,12 @@ func TestAddValidatorReject(t *testing.T) { // Reject proposal to add validator to primary network func TestAddValidatorInvalidNotReissued(t *testing.T) { + require := require.New(t) vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := vm.Shutdown(context.Background()) + require.NoError(err) vm.ctx.Lock.Unlock() }() @@ -751,18 +731,15 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { uint64(endTime.Unix()), repeatNodeID, ids.ShortID(repeatNodeID), - 0, - []*crypto.PrivateKeySECP256K1R{keys[0]}, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // trigger block creation - if err := vm.Builder.AddUnverifiedTx(tx); err == nil { - t.Fatal("Expected BuildBlock to error due to adding a validator with a 
nodeID that is already in the validator set.") - } + err = vm.Builder.AddUnverifiedTx(tx) + require.Error(err, "should have erred due to adding a validator with a nodeID that is already in the validator set") } // Accept proposal to add validator to subnet @@ -771,11 +748,11 @@ func TestAddSubnetValidatorAccept(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() - startTime := defaultValidateStartTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) nodeID := ids.NodeID(keys[0].PublicKey().Address()) @@ -788,7 +765,7 @@ func TestAddSubnetValidatorAccept(t *testing.T) { uint64(endTime.Unix()), nodeID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) require.NoError(err) @@ -796,38 +773,11 @@ func TestAddSubnetValidatorAccept(t *testing.T) { // trigger block creation require.NoError(vm.Builder.AddUnverifiedTx(tx)) - blk, err := vm.Builder.BuildBlock() - require.NoError(err) - - require.NoError(blk.Verify()) - - // Assert preferences are correct - block := blk.(smcon.OracleBlock) - options, err := block.Options() + blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) - require.True(ok) - - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) - - { - onAccept, ok := vm.manager.GetState(abort.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(tx.ID()) 
- require.NoError(err) - require.Equal(status.Aborted, txStatus) - } - - require.NoError(commit.Accept()) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) _, txStatus, err := vm.state.GetTx(tx.ID()) require.NoError(err) @@ -844,11 +794,11 @@ func TestAddSubnetValidatorReject(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() - startTime := defaultValidateStartTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) nodeID := ids.NodeID(keys[0].PublicKey().Address()) @@ -861,7 +811,7 @@ func TestAddSubnetValidatorReject(t *testing.T) { uint64(endTime.Unix()), nodeID, testSubnet1.ID(), - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, ids.ShortEmpty, // change addr ) require.NoError(err) @@ -869,42 +819,14 @@ func TestAddSubnetValidatorReject(t *testing.T) { // trigger block creation require.NoError(vm.Builder.AddUnverifiedTx(tx)) - blk, err := vm.Builder.BuildBlock() - require.NoError(err) - - require.NoError(blk.Verify()) - - // Assert preferences are correct - block := blk.(smcon.OracleBlock) - options, err := block.Options() + blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) - require.True(ok) - - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - - { - onAccept, ok := vm.manager.GetState(commit.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(tx.ID()) - 
require.NoError(err) - require.Equal(status.Committed, txStatus) - } - - require.NoError(abort.Verify()) - require.NoError(abort.Accept()) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Reject(context.Background())) - _, txStatus, err := vm.state.GetTx(tx.ID()) - require.NoError(err) - require.Equal(status.Aborted, txStatus) + _, _, err = vm.state.GetTx(tx.ID()) + require.Error(err, database.ErrNotFound) // Verify that new validator NOT in pending validator set _, err = vm.state.GetPendingValidator(testSubnet1.ID(), nodeID) @@ -917,33 +839,33 @@ func TestRewardValidatorAccept(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) - blk, err := vm.Builder.BuildBlock() // should contain proposal to advance time + blk, err := vm.Builder.BuildBlock(context.Background()) // should advance time require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) // Assert preferences are correct block := blk.(smcon.OracleBlock) - options, err := block.Options() + options, err := block.Options(context.Background()) require.NoError(err) commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) + _, ok := commit.Block.(*blocks.BanffCommitBlock) require.True(ok) abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) + require.NoError(block.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(abort.Verify(context.Background())) txID := blk.(blocks.Block).Txs()[0].ID() { @@ -955,10 +877,10 @@ func 
TestRewardValidatorAccept(t *testing.T) { require.Equal(status.Aborted, txStatus) } - require.NoError(commit.Accept()) // advance the timestamp - lastAcceptedID, err := vm.LastAccepted() + require.NoError(commit.Accept(context.Background())) // advance the timestamp + lastAcceptedID, err := vm.LastAccepted(context.Background()) require.NoError(err) - require.NoError(vm.SetPreference(lastAcceptedID)) + require.NoError(vm.SetPreference(context.Background(), lastAcceptedID)) _, txStatus, err := vm.state.GetTx(txID) require.NoError(err) @@ -968,27 +890,27 @@ func TestRewardValidatorAccept(t *testing.T) { timestamp := vm.state.GetTimestamp() require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) - blk, err = vm.Builder.BuildBlock() // should contain proposal to reward genesis validator + blk, err = vm.Builder.BuildBlock(context.Background()) // should contain proposal to reward genesis validator require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) // Assert preferences are correct block = blk.(smcon.OracleBlock) - options, err = block.Options() + options, err = block.Options(context.Background()) require.NoError(err) commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.ApricotCommitBlock) + _, ok = commit.Block.(*blocks.BanffCommitBlock) require.True(ok) abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) + require.NoError(block.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(abort.Verify(context.Background())) txID = blk.(blocks.Block).Txs()[0].ID() { @@ -1000,7 +922,7 @@ func TestRewardValidatorAccept(t *testing.T) { require.Equal(status.Aborted, txStatus) } - require.NoError(commit.Accept()) // reward the genesis validator + 
require.NoError(commit.Accept(context.Background())) // reward the genesis validator _, txStatus, err = vm.state.GetTx(txID) require.NoError(err) @@ -1016,33 +938,33 @@ func TestRewardValidatorReject(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) - blk, err := vm.Builder.BuildBlock() // should contain proposal to advance time + blk, err := vm.Builder.BuildBlock(context.Background()) // should advance time require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) // Assert preferences are correct block := blk.(smcon.OracleBlock) - options, err := block.Options() + options, err := block.Options(context.Background()) require.NoError(err) commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) + _, ok := commit.Block.(*blocks.BanffCommitBlock) require.True(ok) abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) + require.NoError(block.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(abort.Verify(context.Background())) txID := blk.(blocks.Block).Txs()[0].ID() { @@ -1054,8 +976,8 @@ func TestRewardValidatorReject(t *testing.T) { require.Equal(status.Aborted, txStatus) } - require.NoError(commit.Accept()) // advance the timestamp - require.NoError(vm.SetPreference(vm.manager.LastAccepted())) + require.NoError(commit.Accept(context.Background())) // advance the timestamp + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) _, txStatus, err := vm.state.GetTx(txID) 
require.NoError(err) @@ -1064,25 +986,25 @@ func TestRewardValidatorReject(t *testing.T) { timestamp := vm.state.GetTimestamp() require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) - blk, err = vm.Builder.BuildBlock() // should contain proposal to reward genesis validator + blk, err = vm.Builder.BuildBlock(context.Background()) // should contain proposal to reward genesis validator require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) block = blk.(smcon.OracleBlock) - options, err = block.Options() + options, err = block.Options(context.Background()) require.NoError(err) commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.ApricotCommitBlock) + _, ok = commit.Block.(*blocks.BanffCommitBlock) require.True(ok) abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(blk.Accept()) - require.NoError(commit.Verify()) + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) txID = blk.(blocks.Block).Txs()[0].ID() { @@ -1094,8 +1016,8 @@ func TestRewardValidatorReject(t *testing.T) { require.Equal(status.Committed, txStatus) } - require.NoError(abort.Verify()) - require.NoError(abort.Accept()) // do not reward the genesis validator + require.NoError(abort.Verify(context.Background())) + require.NoError(abort.Accept(context.Background())) // do not reward the genesis validator _, txStatus, err = vm.state.GetTx(txID) require.NoError(err) @@ -1111,33 +1033,33 @@ func TestRewardValidatorPreferred(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) - blk, err := vm.Builder.BuildBlock() // should contain 
proposal to advance time + blk, err := vm.Builder.BuildBlock(context.Background()) // should advance time require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) // Assert preferences are correct block := blk.(smcon.OracleBlock) - options, err := block.Options() + options, err := block.Options(context.Background()) require.NoError(err) commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) + _, ok := commit.Block.(*blocks.BanffCommitBlock) require.True(ok) abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) + require.NoError(block.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(abort.Verify(context.Background())) txID := blk.(blocks.Block).Txs()[0].ID() { @@ -1149,8 +1071,8 @@ func TestRewardValidatorPreferred(t *testing.T) { require.Equal(status.Aborted, txStatus) } - require.NoError(commit.Accept()) // advance the timestamp - require.NoError(vm.SetPreference(vm.manager.LastAccepted())) + require.NoError(commit.Accept(context.Background())) // advance the timestamp + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) _, txStatus, err := vm.state.GetTx(txID) require.NoError(err) @@ -1160,25 +1082,25 @@ func TestRewardValidatorPreferred(t *testing.T) { require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) // should contain proposal to reward genesis validator - blk, err = vm.Builder.BuildBlock() + blk, err = vm.Builder.BuildBlock(context.Background()) require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) block = blk.(smcon.OracleBlock) - options, err = block.Options() + options, err = block.Options(context.Background()) 
require.NoError(err) commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.ApricotCommitBlock) + _, ok = commit.Block.(*blocks.BanffCommitBlock) require.True(ok) abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(blk.Accept()) - require.NoError(commit.Verify()) + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) txID = blk.(blocks.Block).Txs()[0].ID() { @@ -1190,8 +1112,8 @@ func TestRewardValidatorPreferred(t *testing.T) { require.Equal(status.Committed, txStatus) } - require.NoError(abort.Verify()) - require.NoError(abort.Accept()) // do not reward the genesis validator + require.NoError(abort.Verify(context.Background())) + require.NoError(abort.Accept(context.Background())) // do not reward the genesis validator _, txStatus, err = vm.state.GetTx(txID) require.NoError(err) @@ -1203,27 +1125,26 @@ func TestRewardValidatorPreferred(t *testing.T) { // Ensure BuildBlock errors when there is no block to build func TestUnneededBuildBlock(t *testing.T) { + require := require.New(t) vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := vm.Shutdown(context.Background()) + require.NoError(err) vm.ctx.Lock.Unlock() }() - if _, err := vm.Builder.BuildBlock(); err == nil { - t.Fatalf("Should have errored on BuildBlock") - } + _, err := vm.Builder.BuildBlock(context.Background()) + require.Error(err) } // test acceptance of proposal to create a new chain func TestCreateChain(t *testing.T) { + require := require.New(t) vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := vm.Shutdown(context.Background()) + require.NoError(err) vm.ctx.Lock.Unlock() }() @@ -1233,39 +1154,38 @@ func TestCreateChain(t *testing.T) { ids.ID{'t', 'e', 's', 't', 
'v', 'm'}, nil, "name", - []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } else if err := vm.Builder.AddUnverifiedTx(tx); err != nil { - t.Fatal(err) - } else if blk, err := vm.Builder.BuildBlock(); err != nil { // should contain proposal to create chain - t.Fatal(err) - } else if err := blk.Verify(); err != nil { - t.Fatal(err) - } else if err := blk.Accept(); err != nil { - t.Fatal(err) - } else if _, txStatus, err := vm.state.GetTx(tx.ID()); err != nil { - t.Fatal(err) - } else if txStatus != status.Committed { - t.Fatalf("status should be Committed but is %s", txStatus) - } + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(tx) + require.NoError(err) + + blk, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) // should contain proposal to create chain + + err = blk.Verify(context.Background()) + require.NoError(err) + + err = blk.Accept(context.Background()) + require.NoError(err) + + _, txStatus, err := vm.state.GetTx(tx.ID()) + require.NoError(err) + require.Equal(status.Committed, txStatus) // Verify chain was created chains, err := vm.state.GetChains(testSubnet1.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + foundNewChain := false for _, chain := range chains { if bytes.Equal(chain.Bytes(), tx.Bytes()) { foundNewChain = true } } - if !foundNewChain { - t.Fatal("should've created new chain but didn't") - } + require.True(foundNewChain) } // test where we: @@ -1278,7 +1198,7 @@ func TestCreateSubnet(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1290,20 +1210,20 @@ func TestCreateSubnet(t *testing.T) { keys[0].PublicKey().Address(), keys[1].PublicKey().Address(), }, - 
[]*crypto.PrivateKeySECP256K1R{keys[0]}, // payer - keys[0].PublicKey().Address(), // change addr + []*secp256k1.PrivateKey{keys[0]}, // payer + keys[0].PublicKey().Address(), // change addr ) require.NoError(err) require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) // should contain proposal to create subnet - blk, err := vm.Builder.BuildBlock() + blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - require.NoError(blk.Verify()) - require.NoError(blk.Accept()) - require.NoError(vm.SetPreference(vm.manager.LastAccepted())) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) _, txStatus, err := vm.state.GetTx(createSubnetTx.ID()) require.NoError(err) @@ -1322,7 +1242,7 @@ func TestCreateSubnet(t *testing.T) { require.True(found) // Now that we've created a new subnet, add a validator to that subnet - startTime := defaultValidateStartTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + startTime := vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) endTime := startTime.Add(defaultMinStakingDuration) // [startTime, endTime] is subset of time keys[0] validates default subnet so tx is valid addValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( @@ -1331,48 +1251,21 @@ func TestCreateSubnet(t *testing.T) { uint64(endTime.Unix()), nodeID, createSubnetTx.ID(), - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr ) require.NoError(err) require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) - blk, err = vm.Builder.BuildBlock() // should add validator to the new subnet - require.NoError(err) - - require.NoError(blk.Verify()) - - // Assert preferences are correct and accept the proposal/commit - block := blk.(smcon.OracleBlock) - options, err := block.Options() + blk, err = vm.Builder.BuildBlock(context.Background()) // should add 
validator to the new subnet require.NoError(err) - commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) - require.True(ok) - - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) // add the validator to pending validator set + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) txID := blk.(blocks.Block).Txs()[0].ID() - { - onAccept, ok := vm.manager.GetState(abort.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(txID) - require.NoError(err) - require.Equal(status.Aborted, txStatus) - } - - require.NoError(commit.Accept()) // add the validator to pending validator set - require.NoError(vm.SetPreference(vm.manager.LastAccepted())) - _, txStatus, err = vm.state.GetTx(txID) require.NoError(err) require.Equal(status.Committed, txStatus) @@ -1384,91 +1277,24 @@ func TestCreateSubnet(t *testing.T) { // Create a block with an advance time tx that moves validator // from pending to current validator set vm.clock.Set(startTime) - blk, err = vm.Builder.BuildBlock() // should be advance time tx + blk, err = vm.Builder.BuildBlock(context.Background()) // should be advance time tx require.NoError(err) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) // move validator addValidatorTx from pending to current + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) - require.NoError(blk.Verify()) + _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) + require.ErrorIs(err, database.ErrNotFound) - // Assert preferences are correct and accept the proposal/commit - block = blk.(smcon.OracleBlock) - options, err = block.Options() + _, err 
= vm.state.GetCurrentValidator(createSubnetTx.ID(), nodeID) require.NoError(err) - commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) - require.True(ok) - - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) - - txID = blk.(blocks.Block).Txs()[0].ID() - { - onAccept, ok := vm.manager.GetState(abort.ID()) - require.True(ok) - - _, txStatus, err := onAccept.GetTx(txID) - require.NoError(err) - require.Equal(status.Aborted, txStatus) - } - - require.NoError(commit.Accept()) // move validator addValidatorTx from pending to current - require.NoError(vm.SetPreference(vm.manager.LastAccepted())) - - _, txStatus, err = vm.state.GetTx(txID) - require.NoError(err) - require.Equal(status.Committed, txStatus) - - _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) - require.ErrorIs(err, database.ErrNotFound) - - _, err = vm.state.GetCurrentValidator(createSubnetTx.ID(), nodeID) - require.NoError(err) - - // fast forward clock to time validator should stop validating - vm.clock.Set(endTime) - blk, err = vm.Builder.BuildBlock() // should be advance time tx - require.NoError(err) - - require.NoError(blk.Verify()) - - // Assert preferences are correct - // and accept the proposal/commit - block = blk.(smcon.OracleBlock) - options, err = block.Options() - require.NoError(err) - - commit = options[0].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.ApricotCommitBlock) - require.True(ok) - - abort = options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) - require.True(ok) - - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) - - txID = blk.(blocks.Block).Txs()[0].ID() - { - onAccept, ok := vm.manager.GetState(abort.ID()) - require.True(ok) - - _, txStatus, err := 
onAccept.GetTx(txID) - require.NoError(err) - require.Equal(status.Aborted, txStatus) - } - - require.NoError(commit.Accept()) // remove validator from current validator set - - _, txStatus, err = vm.state.GetTx(txID) - require.NoError(err) - require.Equal(status.Committed, txStatus) + // fast forward clock to time validator should stop validating + vm.clock.Set(endTime) + blk, err = vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) // remove validator from current validator set _, err = vm.state.GetPendingValidator(createSubnetTx.ID(), nodeID) require.ErrorIs(err, database.ErrNotFound) @@ -1479,12 +1305,12 @@ func TestCreateSubnet(t *testing.T) { // test asset import func TestAtomicImport(t *testing.T) { + require := require.New(t) vm, baseDB, mutableSharedMemory := defaultVM() vm.ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := vm.Shutdown(context.Background()) + require.NoError(err) vm.ctx.Lock.Unlock() }() @@ -1500,14 +1326,13 @@ func TestAtomicImport(t *testing.T) { mutableSharedMemory.SharedMemory = m.NewSharedMemory(vm.ctx.ChainID) peerSharedMemory := m.NewSharedMemory(vm.ctx.XChainID) - if _, err := vm.txBuilder.NewImportTx( + _, err := vm.txBuilder.NewImportTx( vm.ctx.XChainID, recipientKey.PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{keys[0]}, + []*secp256k1.PrivateKey{keys[0]}, ids.ShortEmpty, // change addr - ); err == nil { - t.Fatalf("should have errored due to missing utxos") - } + ) + require.Error(err, "should have errored due to missing utxos") // Provide the avm UTXO @@ -1523,57 +1348,62 @@ func TestAtomicImport(t *testing.T) { }, } utxoBytes, err := txs.Codec.Marshal(txs.Version, utxo) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + inputID := utxo.InputID() - if err := peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: 
[]*atomic.Element{{ - Key: inputID[:], - Value: utxoBytes, - Traits: [][]byte{ - recipientKey.PublicKey().Address().Bytes(), + err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + vm.ctx.ChainID: { + PutRequests: []*atomic.Element{ + { + Key: inputID[:], + Value: utxoBytes, + Traits: [][]byte{ + recipientKey.PublicKey().Address().Bytes(), + }, + }, + }, }, - }}}}); err != nil { - t.Fatal(err) - } + }, + ) + require.NoError(err) tx, err := vm.txBuilder.NewImportTx( vm.ctx.XChainID, recipientKey.PublicKey().Address(), - []*crypto.PrivateKeySECP256K1R{recipientKey}, + []*secp256k1.PrivateKey{recipientKey}, ids.ShortEmpty, // change addr ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(tx) + require.NoError(err) + + blk, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + + err = blk.Verify(context.Background()) + require.NoError(err) + + err = blk.Accept(context.Background()) + require.NoError(err) + + _, txStatus, err := vm.state.GetTx(tx.ID()) + require.NoError(err) + require.Equal(status.Committed, txStatus) - if err := vm.Builder.AddUnverifiedTx(tx); err != nil { - t.Fatal(err) - } else if blk, err := vm.Builder.BuildBlock(); err != nil { - t.Fatal(err) - } else if err := blk.Verify(); err != nil { - t.Fatal(err) - } else if err := blk.Accept(); err != nil { - t.Fatal(err) - } else if _, txStatus, err := vm.state.GetTx(tx.ID()); err != nil { - t.Fatal(err) - } else if txStatus != status.Committed { - t.Fatalf("status should be Committed but is %s", txStatus) - } inputID = utxoID.InputID() - if _, err := vm.ctx.SharedMemory.Get(vm.ctx.XChainID, [][]byte{inputID[:]}); err == nil { - t.Fatalf("shouldn't have been able to read the utxo") - } + _, err = vm.ctx.SharedMemory.Get(vm.ctx.XChainID, [][]byte{inputID[:]}) + require.ErrorIs(err, database.ErrNotFound) } // test optimistic asset import func TestOptimisticAtomicImport(t *testing.T) { + require := require.New(t) vm, _, _ := 
defaultVM() vm.ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + err := vm.Shutdown(context.Background()) + require.NoError(err) vm.ctx.Lock.Unlock() }() @@ -1594,14 +1424,12 @@ func TestOptimisticAtomicImport(t *testing.T) { }, }}, }} - if err := tx.Sign(txs.Codec, [][]*crypto.PrivateKeySECP256K1R{{}}); err != nil { - t.Fatal(err) - } + err := tx.Initialize(txs.Codec) + require.NoError(err) preferred, err := vm.Builder.Preferred() - if err != nil { - t.Fatal(err) - } + require.NoError(err) + preferredID := preferred.ID() preferredHeight := preferred.Height() @@ -1610,708 +1438,619 @@ func TestOptimisticAtomicImport(t *testing.T) { preferredHeight+1, tx, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) + blk := vm.manager.NewBlock(statelessBlk) - if err := blk.Verify(); err == nil { - t.Fatalf("Block should have failed verification due to missing UTXOs") - } + err = blk.Verify(context.Background()) + require.Error(err, "should have erred due to missing UTXOs") - if err := vm.SetState(snow.Bootstrapping); err != nil { - t.Fatal(err) - } + err = vm.SetState(context.Background(), snow.Bootstrapping) + require.NoError(err) - if err := blk.Verify(); err != nil { - t.Fatal(err) - } + err = blk.Verify(context.Background()) + require.NoError(err) - if err := blk.Accept(); err != nil { - t.Fatal(err) - } + err = blk.Accept(context.Background()) + require.NoError(err) - if err := vm.SetState(snow.NormalOp); err != nil { - t.Fatal(err) - } + err = vm.SetState(context.Background(), snow.NormalOp) + require.NoError(err) _, txStatus, err := vm.state.GetTx(tx.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if txStatus != status.Committed { - t.Fatalf("Wrong status returned. 
Expected %s; Got %s", status.Committed, txStatus) - } + require.Equal(status.Committed, txStatus) } // test restarting the node -func TestRestartPartiallyAccepted(t *testing.T) { +func TestRestartFullyAccepted(t *testing.T) { + require := require.New(t) _, genesisBytes := defaultGenesis() db := manager.NewMemDB(version.Semantic1_0_0) firstDB := db.NewPrefixDBManager([]byte{}) - firstVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: mockable.MaxTime, - }, - }} - firstVM.clock.Set(defaultGenesisTime) - firstCtx := defaultContext() - firstCtx.Lock.Lock() - - firstMsgChan := make(chan common.Message, 1) - if err := firstVM.Initialize(firstCtx, firstDB, genesisBytes, nil, nil, firstMsgChan, nil, nil); err != nil { - t.Fatal(err) - } - - genesisID, err := firstVM.LastAccepted() - if err != nil { - t.Fatal(err) - } - - firstAdvanceTimeTx, err := firstVM.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(time.Second)) - if err != nil { - t.Fatal(err) - } - - preferred, err := firstVM.Builder.Preferred() - if err != nil { - t.Fatal(err) - } - preferredID := preferred.ID() - preferredHeight := preferred.Height() - - statelessBlk, err := blocks.NewApricotProposalBlock( - preferredID, - preferredHeight+1, - firstAdvanceTimeTx, - ) - if err != nil { - t.Fatal(err) - } - - firstAdvanceTimeBlk := firstVM.manager.NewBlock(statelessBlk) - - firstVM.clock.Set(defaultGenesisTime.Add(3 * time.Second)) - if err := firstAdvanceTimeBlk.Verify(); err != nil { - t.Fatal(err) - } - - options, err := firstAdvanceTimeBlk.(smcon.OracleBlock).Options() - if err != nil { - t.Fatal(err) - } - firstOption := options[0] - secondOption := options[1] - - if err := firstOption.Verify(); err != nil { - t.Fatal(err) - } else if err := 
secondOption.Verify(); err != nil { - t.Fatal(err) - } else if err := firstAdvanceTimeBlk.Accept(); err != nil { // time advances to defaultGenesisTime.Add(time.Second) - t.Fatal(err) - } - - // Byte representation of block that proposes advancing time to defaultGenesisTime + 2 seconds - secondAdvanceTimeBlkBytes := []byte{ - 0, 0, - 0, 0, 0, 0, - 6, 150, 225, 43, 97, 69, 215, 238, - 150, 164, 249, 184, 2, 197, 216, 49, - 6, 78, 81, 50, 190, 8, 44, 165, - 219, 127, 96, 39, 235, 155, 17, 108, - 0, 0, 0, 0, - 0, 0, 0, 1, - 0, 0, 0, 19, - 0, 0, 0, 0, 95, 34, 234, 149, - 0, 0, 0, 0, - } - if _, err := firstVM.ParseBlock(secondAdvanceTimeBlkBytes); err != nil { - t.Fatal(err) - } - - if err := firstVM.Shutdown(); err != nil { - t.Fatal(err) - } - firstCtx.Lock.Unlock() - - secondVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: mockable.MaxTime, - }, + firstVdrs := validators.NewManager() + firstPrimaryVdrs := validators.NewSet() + _ = firstVdrs.Add(constants.PrimaryNetworkID, firstPrimaryVdrs) + firstVM := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: firstVdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} - secondVM.clock.Set(defaultGenesisTime) - secondCtx := defaultContext() - secondCtx.Lock.Lock() - defer func() { - if err := secondVM.Shutdown(); err != nil { - t.Fatal(err) - } - secondCtx.Lock.Unlock() - }() - - secondDB := db.NewPrefixDBManager([]byte{}) - secondMsgChan := make(chan common.Message, 1) - if err := secondVM.Initialize(secondCtx, secondDB, genesisBytes, nil, nil, secondMsgChan, nil, 
nil); err != nil { - t.Fatal(err) - } + firstCtx := defaultContext() - lastAccepted, err := secondVM.LastAccepted() - if err != nil { - t.Fatal(err) - } - if genesisID != lastAccepted { - t.Fatalf("Shouldn't have changed the genesis") + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + m := atomic.NewMemory(atomicDB) + msm := &mutableSharedMemory{ + SharedMemory: m.NewSharedMemory(firstCtx.ChainID), } -} + firstCtx.SharedMemory = msm -// test restarting the node -func TestRestartFullyAccepted(t *testing.T) { - _, genesisBytes := defaultGenesis() - - db := manager.NewMemDB(version.Semantic1_0_0) - firstDB := db.NewPrefixDBManager([]byte{}) - firstVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: mockable.MaxTime, - }, - }} - - firstVM.clock.Set(defaultGenesisTime) - firstCtx := defaultContext() + initialClkTime := banffForkTime.Add(time.Second) + firstVM.clock.Set(initialClkTime) firstCtx.Lock.Lock() firstMsgChan := make(chan common.Message, 1) - if err := firstVM.Initialize(firstCtx, firstDB, genesisBytes, nil, nil, firstMsgChan, nil, nil); err != nil { - t.Fatal(err) - } + err := firstVM.Initialize( + context.Background(), + firstCtx, + firstDB, + genesisBytes, + nil, + nil, + firstMsgChan, + nil, + nil, + ) + require.NoError(err) - firstAdvanceTimeTx, err := firstVM.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(time.Second)) - if err != nil { - t.Fatal(err) - } + genesisID, err := firstVM.LastAccepted(context.Background()) + require.NoError(err) + nextChainTime := initialClkTime.Add(time.Second) + firstVM.clock.Set(initialClkTime) preferred, err := firstVM.Builder.Preferred() - if err != nil { - t.Fatal(err) - 
} + require.NoError(err) preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessBlk, err := blocks.NewApricotProposalBlock( + // include a tx to make the block be accepted + tx := &txs.Tx{Unsigned: &txs.ImportTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: firstVM.ctx.NetworkID, + BlockchainID: firstVM.ctx.ChainID, + }}, + SourceChain: firstVM.ctx.XChainID, + ImportedInputs: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(1), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: firstVM.ctx.AVAXAssetID}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + }, + }}, + }} + require.NoError(tx.Initialize(txs.Codec)) + + statelessBlk, err := blocks.NewBanffStandardBlock( + nextChainTime, preferredID, preferredHeight+1, - firstAdvanceTimeTx, + []*txs.Tx{tx}, ) - if err != nil { - t.Fatal(err) - } - firstAdvanceTimeBlk := firstVM.manager.NewBlock(statelessBlk) - firstVM.clock.Set(defaultGenesisTime.Add(3 * time.Second)) - if err := firstAdvanceTimeBlk.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(err) - options, err := firstAdvanceTimeBlk.(smcon.OracleBlock).Options() - if err != nil { - t.Fatal(err) - } else if err := options[0].Verify(); err != nil { - t.Fatal(err) - } else if err := options[1].Verify(); err != nil { - t.Fatal(err) - } else if err := firstAdvanceTimeBlk.Accept(); err != nil { - t.Fatal(err) - } else if err := options[0].Accept(); err != nil { - t.Fatal(err) - } else if err := options[1].Reject(); err != nil { - t.Fatal(err) - } + firstAdvanceTimeBlk := firstVM.manager.NewBlock(statelessBlk) - // Byte representation of block that proposes advancing time to defaultGenesisTime + 2 seconds - secondAdvanceTimeBlkBytes := []byte{ - 0, 0, - 0, 0, 0, 0, - 6, 150, 225, 43, 97, 69, 215, 238, - 150, 164, 249, 184, 2, 197, 216, 49, - 6, 78, 81, 50, 190, 8, 44, 165, - 219, 127, 96, 39, 235, 155, 17, 108, - 0, 0, 0, 0, - 0, 0, 0, 1, - 0, 0, 0, 19, - 0, 0, 0, 0, 95, 34, 234, 149, - 0, 0, 0, 0, - } - 
if _, err := firstVM.ParseBlock(secondAdvanceTimeBlkBytes); err != nil { - t.Fatal(err) - } + nextChainTime = nextChainTime.Add(2 * time.Second) + firstVM.clock.Set(nextChainTime) + require.NoError(firstAdvanceTimeBlk.Verify(context.Background())) + require.NoError(firstAdvanceTimeBlk.Accept(context.Background())) - if err := firstVM.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(firstVM.Shutdown(context.Background())) firstCtx.Lock.Unlock() - secondVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: mockable.MaxTime, - }, + secondVdrs := validators.NewManager() + secondPrimaryVdrs := validators.NewSet() + _ = secondVdrs.Add(constants.PrimaryNetworkID, secondPrimaryVdrs) + secondVM := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: secondVdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} - secondVM.clock.Set(defaultGenesisTime) secondCtx := defaultContext() + secondCtx.SharedMemory = msm + secondVM.clock.Set(initialClkTime) secondCtx.Lock.Lock() defer func() { - if err := secondVM.Shutdown(); err != nil { - t.Fatal(err) - } + err := secondVM.Shutdown(context.Background()) + require.NoError(err) secondCtx.Lock.Unlock() }() secondDB := db.NewPrefixDBManager([]byte{}) secondMsgChan := make(chan common.Message, 1) - if err := secondVM.Initialize(secondCtx, secondDB, genesisBytes, nil, nil, secondMsgChan, nil, nil); err != nil { - t.Fatal(err) - } - lastAccepted, err := secondVM.LastAccepted() - if err != nil { - t.Fatal(err) - } - if options[0].ID() != lastAccepted { - t.Fatalf("Should have 
changed the genesis") - } + err = secondVM.Initialize( + context.Background(), + secondCtx, + secondDB, + genesisBytes, + nil, + nil, + secondMsgChan, + nil, + nil, + ) + require.NoError(err) + + lastAccepted, err := secondVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(genesisID, lastAccepted) } // test bootstrapping the node func TestBootstrapPartiallyAccepted(t *testing.T) { require := require.New(t) - // TODO: add "useProto=true" once handler supports proto - for _, useProto := range []bool{false} { - t.Run(fmt.Sprintf("use proto buf message creator %v", useProto), func(tt *testing.T) { - _, genesisBytes := defaultGenesis() - - baseDBManager := manager.NewMemDB(version.Semantic1_0_0) - vmDBManager := baseDBManager.NewPrefixDBManager([]byte("vm")) - bootstrappingDB := prefixdb.New([]byte("bootstrapping"), baseDBManager.Current().Database) - - blocked, err := queue.NewWithMissing(bootstrappingDB, "", prometheus.NewRegistry()) - require.NoError(err) + _, genesisBytes := defaultGenesis() - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: mockable.MaxTime, - }, - }} + baseDBManager := manager.NewMemDB(version.Semantic1_0_0) + vmDBManager := baseDBManager.NewPrefixDBManager([]byte("vm")) + bootstrappingDB := prefixdb.New([]byte("bootstrapping"), baseDBManager.Current().Database) - vm.clock.Set(defaultGenesisTime) - ctx := defaultContext() - consensusCtx := snow.DefaultConsensusContextTest() - consensusCtx.Context = ctx - consensusCtx.SetState(snow.Initializing) - ctx.Lock.Lock() + blocked, err := queue.NewWithMissing(bootstrappingDB, "", prometheus.NewRegistry()) + require.NoError(err) - msgChan := make(chan common.Message, 1) - require.NoError(vm.Initialize(ctx, 
vmDBManager, genesisBytes, nil, nil, msgChan, nil, nil)) + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, + }} - preferred, err := vm.Builder.Preferred() - require.NoError(err) + initialClkTime := banffForkTime.Add(time.Second) + vm.clock.Set(initialClkTime) + ctx := defaultContext() - preferredID := preferred.ID() - preferredHeight := preferred.Height() + atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) + m := atomic.NewMemory(atomicDB) + msm := &mutableSharedMemory{ + SharedMemory: m.NewSharedMemory(ctx.ChainID), + } + ctx.SharedMemory = msm - advanceTimeTx, err := vm.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(time.Second)) - require.NoError(err) + consensusCtx := snow.DefaultConsensusContextTest() + consensusCtx.Context = ctx + ctx.Lock.Lock() - statelessBlk, err := blocks.NewApricotProposalBlock( - preferredID, - preferredHeight+1, - advanceTimeTx, - ) - require.NoError(err) + msgChan := make(chan common.Message, 1) + err = vm.Initialize( + context.Background(), + ctx, + vmDBManager, + genesisBytes, + nil, + nil, + msgChan, + nil, + nil, + ) + require.NoError(err) - advanceTimeBlk := vm.manager.NewBlock(statelessBlk) - require.NoError(err) + preferred, err := vm.Builder.Preferred() + require.NoError(err) - advanceTimeBlkID := advanceTimeBlk.ID() - advanceTimeBlkBytes := advanceTimeBlk.Bytes() - - peerID := ids.NodeID{1, 2, 3, 4, 5, 4, 3, 2, 1} - vdrs := validators.NewSet() - require.NoError(vdrs.AddWeight(peerID, 1)) - beacons := vdrs - - benchlist := benchlist.NewNoBenchlist() - timeoutManager, err := timeout.NewManager( - &timer.AdaptiveTimeoutConfig{ - 
InitialTimeout: time.Millisecond, - MinimumTimeout: time.Millisecond, - MaximumTimeout: 10 * time.Second, - TimeoutHalflife: 5 * time.Minute, - TimeoutCoefficient: 1.25, - }, - benchlist, - "", - prometheus.NewRegistry(), - ) - require.NoError(err) + // include a tx to make the block be accepted + tx := &txs.Tx{Unsigned: &txs.ImportTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: vm.ctx.NetworkID, + BlockchainID: vm.ctx.ChainID, + }}, + SourceChain: vm.ctx.XChainID, + ImportedInputs: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(1), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + }, + }}, + }} + require.NoError(tx.Initialize(txs.Codec)) - go timeoutManager.Dispatch() + nextChainTime := initialClkTime.Add(time.Second) + preferredID := preferred.ID() + preferredHeight := preferred.Height() + statelessBlk, err := blocks.NewBanffStandardBlock( + nextChainTime, + preferredID, + preferredHeight+1, + []*txs.Tx{tx}, + ) + require.NoError(err) - chainRouter := &router.ChainRouter{} + advanceTimeBlk := vm.manager.NewBlock(statelessBlk) + require.NoError(err) - metrics := prometheus.NewRegistry() - mc, err := message.NewCreator(metrics, "dummyNamespace", true, 10*time.Second) - require.NoError(err) - mcProto, err := message.NewCreatorWithProto(metrics, "dummyNamespace", true, 10*time.Second) - require.NoError(err) + advanceTimeBlkID := advanceTimeBlk.ID() + advanceTimeBlkBytes := advanceTimeBlk.Bytes() + + peerID := ids.NodeID{1, 2, 3, 4, 5, 4, 3, 2, 1} + beacons := validators.NewSet() + require.NoError(beacons.Add(peerID, nil, ids.Empty, 1)) + + benchlist := benchlist.NewNoBenchlist() + timeoutManager, err := timeout.NewManager( + &timer.AdaptiveTimeoutConfig{ + InitialTimeout: time.Millisecond, + MinimumTimeout: time.Millisecond, + MaximumTimeout: 10 * time.Second, + TimeoutHalflife: 5 * time.Minute, + TimeoutCoefficient: 1.25, + }, + benchlist, + "", + 
prometheus.NewRegistry(), + ) + require.NoError(err) - err = chainRouter.Initialize(ids.EmptyNodeID, logging.NoLog{}, mc, timeoutManager, time.Second, ids.Set{}, ids.Set{}, nil, router.HealthConfig{}, "", prometheus.NewRegistry()) - require.NoError(err) + go timeoutManager.Dispatch() - externalSender := &sender.ExternalSenderTest{TB: t} - externalSender.Default(true) - - // Passes messages from the consensus engine to the network - sender, err := sender.New( - consensusCtx, - mc, - mcProto, - time.Now().Add(time.Hour), - externalSender, - chainRouter, - timeoutManager, - sender.GossipConfig{ - AcceptedFrontierPeerSize: 1, - OnAcceptPeerSize: 1, - AppGossipValidatorSize: 1, - AppGossipNonValidatorSize: 1, - }, - ) - require.NoError(err) + chainRouter := &router.ChainRouter{} - var reqID uint32 - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs ids.NodeIDSet, _ ids.ID, _ bool) ids.NodeIDSet { - var inMsg message.InboundMessage - if !useProto { - inMsg, err = mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) - } else { - inMsg, err = mcProto.Parse(msg.Bytes(), ctx.NodeID, func() {}) - } - require.NoError(err) - require.Equal(message.GetAcceptedFrontier, inMsg.Op()) - - requestIDIntf, err := inMsg.Get(message.RequestID) - require.NoError(err) - requestID, ok := requestIDIntf.(uint32) - require.True(ok) + metrics := prometheus.NewRegistry() + mc, err := message.NewCreator(logging.NoLog{}, metrics, "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second) + require.NoError(err) - reqID = requestID - return nodeIDs - } + err = chainRouter.Initialize( + ids.EmptyNodeID, + logging.NoLog{}, + timeoutManager, + time.Second, + set.Set[ids.ID]{}, + true, + set.Set[ids.ID]{}, + nil, + router.HealthConfig{}, + "", + prometheus.NewRegistry(), + ) + require.NoError(err) - isBootstrapped := false - subnet := &common.SubnetTest{ - T: tt, - IsBootstrappedF: func() bool { return isBootstrapped }, - BootstrappedF: func(ids.ID) { isBootstrapped = true }, - } + 
externalSender := &sender.ExternalSenderTest{TB: t} + externalSender.Default(true) + + // Passes messages from the consensus engine to the network + gossipConfig := subnets.GossipConfig{ + AcceptedFrontierPeerSize: 1, + OnAcceptPeerSize: 1, + AppGossipValidatorSize: 1, + AppGossipNonValidatorSize: 1, + } + sender, err := sender.New( + consensusCtx, + mc, + externalSender, + chainRouter, + timeoutManager, + p2p.EngineType_ENGINE_TYPE_SNOWMAN, + subnets.New(consensusCtx.NodeID, subnets.Config{GossipConfig: gossipConfig}), + ) + require.NoError(err) - peers := tracker.NewPeers() - startup := tracker.NewStartup(peers, (beacons.Weight()+1)/2) - beacons.RegisterCallbackListener(startup) - - // The engine handles consensus - consensus := &smcon.Topological{} - commonCfg := common.Config{ - Ctx: consensusCtx, - Validators: vdrs, - Beacons: beacons, - SampleK: beacons.Len(), - StartupTracker: startup, - Alpha: (beacons.Weight() + 1) / 2, - Sender: sender, - Subnet: subnet, - AncestorsMaxContainersSent: 2000, - AncestorsMaxContainersReceived: 2000, - SharedCfg: &common.SharedConfig{}, - } + var reqID uint32 + externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + inMsg, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) + require.NoError(err) + require.Equal(message.GetAcceptedFrontierOp, inMsg.Op()) - snowGetHandler, err := snowgetter.New(vm, commonCfg) - require.NoError(err) + requestID, ok := message.GetRequestID(inMsg.Message()) + require.True(ok) - bootstrapConfig := bootstrap.Config{ - Config: commonCfg, - AllGetsServer: snowGetHandler, - Blocked: blocked, - VM: vm, - } + reqID = requestID + return nodeIDs + } - // Asynchronously passes messages from the network to the consensus engine - cpuTracker, err := timetracker.NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second) - require.NoError(err) + isBootstrapped := false + bootstrapTracker 
:= &common.BootstrapTrackerTest{ + T: t, + IsBootstrappedF: func() bool { + return isBootstrapped + }, + BootstrappedF: func(ids.ID) { + isBootstrapped = true + }, + } - handler, err := handler.New( - mc, - bootstrapConfig.Ctx, - vdrs, - msgChan, - nil, - time.Hour, - cpuTracker, - ) - require.NoError(err) + peers := tracker.NewPeers() + startup := tracker.NewStartup(peers, (beacons.Weight()+1)/2) + beacons.RegisterCallbackListener(startup) - engineConfig := smeng.Config{ - Ctx: bootstrapConfig.Ctx, - AllGetsServer: snowGetHandler, - VM: bootstrapConfig.VM, - Sender: bootstrapConfig.Sender, - Validators: vdrs, - Params: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 20, - BetaRogue: 20, - ConcurrentRepolls: 1, - OptimalProcessing: 1, - MaxOutstandingItems: 1, - MaxItemProcessingTime: 1, - }, - Consensus: consensus, - } - engine, err := smeng.New(engineConfig) - require.NoError(err) + // The engine handles consensus + consensus := &smcon.Topological{} + commonCfg := common.Config{ + Ctx: consensusCtx, + Beacons: beacons, + SampleK: beacons.Len(), + StartupTracker: startup, + Alpha: (beacons.Weight() + 1) / 2, + Sender: sender, + BootstrapTracker: bootstrapTracker, + AncestorsMaxContainersSent: 2000, + AncestorsMaxContainersReceived: 2000, + SharedCfg: &common.SharedConfig{}, + } - handler.SetConsensus(engine) + snowGetHandler, err := snowgetter.New(vm, commonCfg) + require.NoError(err) - bootstrapper, err := bootstrap.New( - bootstrapConfig, - engine.Start, - ) - require.NoError(err) + bootstrapConfig := bootstrap.Config{ + Config: commonCfg, + AllGetsServer: snowGetHandler, + Blocked: blocked, + VM: vm, + } - handler.SetBootstrapper(bootstrapper) + // Asynchronously passes messages from the network to the consensus engine + cpuTracker, err := timetracker.NewResourceTracker( + prometheus.NewRegistry(), + resource.NoUsage, + meter.ContinuousFactory{}, + time.Second, + ) + require.NoError(err) - // Allow incoming messages to be routed to the new chain - 
chainRouter.AddChain(handler) - ctx.Lock.Unlock() + h, err := handler.New( + bootstrapConfig.Ctx, + beacons, + msgChan, + time.Hour, + 2, + cpuTracker, + vm, + subnets.New(ctx.NodeID, subnets.Config{}), + ) + require.NoError(err) - handler.Start(false) + engineConfig := smeng.Config{ + Ctx: bootstrapConfig.Ctx, + AllGetsServer: snowGetHandler, + VM: bootstrapConfig.VM, + Sender: bootstrapConfig.Sender, + Validators: beacons, + Params: snowball.Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 20, + BetaRogue: 20, + ConcurrentRepolls: 1, + OptimalProcessing: 1, + MaxOutstandingItems: 1, + MaxItemProcessingTime: 1, + }, + Consensus: consensus, + } + engine, err := smeng.New(engineConfig) + require.NoError(err) - ctx.Lock.Lock() - if err := bootstrapper.Connected(peerID, version.CurrentApp); err != nil { - t.Fatal(err) - } + bootstrapper, err := bootstrap.New( + context.Background(), + bootstrapConfig, + engine.Start, + ) + require.NoError(err) - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs ids.NodeIDSet, _ ids.ID, _ bool) ids.NodeIDSet { - inMsg, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) - require.NoError(err) - require.Equal(message.GetAccepted, inMsg.Op()) + h.SetEngineManager(&handler.EngineManager{ + Avalanche: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + Snowman: &handler.Engine{ + StateSyncer: nil, + Bootstrapper: bootstrapper, + Consensus: engine, + }, + }) - requestIDIntf, err := inMsg.Get(message.RequestID) - require.NoError(err) + consensusCtx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) - requestID, ok := requestIDIntf.(uint32) - require.True(ok) + // Allow incoming messages to be routed to the new chain + chainRouter.AddChain(context.Background(), h) + ctx.Lock.Unlock() - reqID = requestID - return nodeIDs - } + h.Start(context.Background(), false) - frontier := []ids.ID{advanceTimeBlkID} - if err := 
bootstrapper.AcceptedFrontier(peerID, reqID, frontier); err != nil { - t.Fatal(err) - } + ctx.Lock.Lock() + err = bootstrapper.Connected(context.Background(), peerID, version.CurrentApp) + require.NoError(err) - externalSender.SendF = func(msg message.OutboundMessage, nodeIDs ids.NodeIDSet, _ ids.ID, _ bool) ids.NodeIDSet { - var inMsg message.InboundMessage - if !useProto { - inMsg, err = mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) - } else { - inMsg, err = mcProto.Parse(msg.Bytes(), ctx.NodeID, func() {}) - } - require.NoError(err) - require.Equal(message.GetAncestors, inMsg.Op()) - - requestIDIntf, err := inMsg.Get(message.RequestID) - require.NoError(err) - requestID, ok := requestIDIntf.(uint32) - require.True(ok) + externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) + require.NoError(err) + require.Equal(message.GetAcceptedOp, inMsgIntf.Op()) + inMsg := inMsgIntf.Message().(*p2p.GetAccepted) - reqID = requestID + reqID = inMsg.RequestId + return nodeIDs + } - containerIDIntf, err := inMsg.Get(message.ContainerID) - require.NoError(err) - containerIDBytes, ok := containerIDIntf.([]byte) - require.True(ok) - containerID, err := ids.ToID(containerIDBytes) - require.NoError(err) - if containerID != advanceTimeBlkID { - t.Fatalf("wrong block requested") - } + frontier := []ids.ID{advanceTimeBlkID} + err = bootstrapper.AcceptedFrontier(context.Background(), peerID, reqID, frontier) + require.NoError(err) - return nodeIDs - } + externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { + inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) + require.NoError(err) + require.Equal(message.GetAncestorsOp, inMsgIntf.Op()) + inMsg := inMsgIntf.Message().(*p2p.GetAncestors) - require.NoError(bootstrapper.Accepted(peerID, reqID, frontier)) 
+ reqID = inMsg.RequestId - externalSender.SendF = nil - externalSender.CantSend = false + containerID, err := ids.ToID(inMsg.ContainerId) + require.NoError(err) + require.Equal(advanceTimeBlkID, containerID) + return nodeIDs + } - require.NoError(bootstrapper.Ancestors(peerID, reqID, [][]byte{advanceTimeBlkBytes})) + require.NoError(bootstrapper.Accepted(context.Background(), peerID, reqID, frontier)) - preferred, err = vm.Builder.Preferred() - require.NoError(err) + externalSender.SendF = nil + externalSender.CantSend = false - options, err := advanceTimeBlk.(smcon.OracleBlock).Options() - require.NoError(err) + require.NoError(bootstrapper.Ancestors(context.Background(), peerID, reqID, [][]byte{advanceTimeBlkBytes})) - // Because the block needs to have been verified for it's preference to be - // set correctly, we manually select the correct preference here. - advanceTimePreference := options[0] + preferred, err = vm.Builder.Preferred() + require.NoError(err) - if preferred.ID() != advanceTimePreference.ID() { - tt.Fatalf("wrong preference reported after bootstrapping to proposal block\nPreferred: %s\nExpected: %s\nGenesis: %s", - preferred.ID(), - advanceTimePreference.ID(), - preferredID) - } - ctx.Lock.Unlock() + require.Equal(advanceTimeBlk.ID(), preferred.ID()) - chainRouter.Shutdown() - }) - } + ctx.Lock.Unlock() + chainRouter.Shutdown(context.Background()) } func TestUnverifiedParent(t *testing.T) { + require := require.New(t) _, genesisBytes := defaultGenesis() dbManager := manager.NewMemDB(version.Semantic1_0_0) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - MinStakeDuration: defaultMinStakingDuration, - MaxStakeDuration: defaultMaxStakingDuration, - RewardConfig: defaultRewardConfig, - BanffTime: mockable.MaxTime, - }, + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = 
vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + MinStakeDuration: defaultMinStakingDuration, + MaxStakeDuration: defaultMaxStakingDuration, + RewardConfig: defaultRewardConfig, + BanffTime: banffForkTime, }} - vm.clock.Set(defaultGenesisTime) + initialClkTime := banffForkTime.Add(time.Second) + vm.clock.Set(initialClkTime) ctx := defaultContext() ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() msgChan := make(chan common.Message, 1) - if err := vm.Initialize(ctx, dbManager, genesisBytes, nil, nil, msgChan, nil, nil); err != nil { - t.Fatal(err) - } + err := vm.Initialize( + context.Background(), + ctx, + dbManager, + genesisBytes, + nil, + nil, + msgChan, + nil, + nil, + ) + require.NoError(err) - firstAdvanceTimeTx, err := vm.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(time.Second)) - if err != nil { - t.Fatal(err) - } + // include a tx1 to make the block be accepted + tx1 := &txs.Tx{Unsigned: &txs.ImportTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: vm.ctx.NetworkID, + BlockchainID: vm.ctx.ChainID, + }}, + SourceChain: vm.ctx.XChainID, + ImportedInputs: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(1), + OutputIndex: 1, + }, + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + }, + }}, + }} + require.NoError(tx1.Initialize(txs.Codec)) preferred, err := vm.Builder.Preferred() - if err != nil { - t.Fatal(err) - } + require.NoError(err) + nextChainTime := initialClkTime.Add(time.Second) preferredID := preferred.ID() preferredHeight := preferred.Height() - statelessBlk, err := blocks.NewApricotProposalBlock( + statelessBlk, err := blocks.NewBanffStandardBlock( + nextChainTime, preferredID, preferredHeight+1, - 
firstAdvanceTimeTx, + []*txs.Tx{tx1}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) firstAdvanceTimeBlk := vm.manager.NewBlock(statelessBlk) + err = firstAdvanceTimeBlk.Verify(context.Background()) + require.NoError(err) - vm.clock.Set(defaultGenesisTime.Add(2 * time.Second)) - if err := firstAdvanceTimeBlk.Verify(); err != nil { - t.Fatal(err) - } - - options, err := firstAdvanceTimeBlk.(smcon.OracleBlock).Options() - if err != nil { - t.Fatal(err) - } - firstOption := options[0] - secondOption := options[1] - - secondAdvanceTimeTx, err := vm.txBuilder.NewAdvanceTimeTx(defaultGenesisTime.Add(2 * time.Second)) - if err != nil { - t.Fatal(err) - } - statelessSecondAdvanceTimeBlk, err := blocks.NewApricotProposalBlock( - firstOption.ID(), - firstOption.(*blockexecutor.Block).Height()+1, - secondAdvanceTimeTx, + // include a tx1 to make the block be accepted + tx2 := &txs.Tx{Unsigned: &txs.ImportTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: vm.ctx.NetworkID, + BlockchainID: vm.ctx.ChainID, + }}, + SourceChain: vm.ctx.XChainID, + ImportedInputs: []*avax.TransferableInput{{ + UTXOID: avax.UTXOID{ + TxID: ids.Empty.Prefix(2), + OutputIndex: 2, + }, + Asset: avax.Asset{ID: vm.ctx.AVAXAssetID}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + }, + }}, + }} + require.NoError(tx1.Initialize(txs.Codec)) + nextChainTime = nextChainTime.Add(time.Second) + vm.clock.Set(nextChainTime) + statelessSecondAdvanceTimeBlk, err := blocks.NewBanffStandardBlock( + nextChainTime, + firstAdvanceTimeBlk.ID(), + firstAdvanceTimeBlk.Height()+1, + []*txs.Tx{tx2}, ) - if err != nil { - t.Fatal(err) - } - + require.NoError(err) secondAdvanceTimeBlk := vm.manager.NewBlock(statelessSecondAdvanceTimeBlk) - if parentBlkID := secondAdvanceTimeBlk.Parent(); parentBlkID != firstOption.ID() { - t.Fatalf("Wrong parent block ID returned") - } else if err := firstOption.Verify(); err != nil { - t.Fatal(err) - } else if err := secondOption.Verify(); err != nil { - t.Fatal(err) - 
} else if err := secondAdvanceTimeBlk.Verify(); err != nil { - t.Fatal(err) - } + require.Equal(secondAdvanceTimeBlk.Parent(), firstAdvanceTimeBlk.ID()) + require.NoError(secondAdvanceTimeBlk.Verify(context.Background())) } func TestMaxStakeAmount(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } + require.NoError(t, vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -2363,87 +2102,112 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { db := manager.NewMemDB(version.Semantic1_0_0) firstDB := db.NewPrefixDBManager([]byte{}) - firstVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - UptimePercentage: .2, - RewardConfig: defaultRewardConfig, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: mockable.MaxTime, - }, + firstVdrs := validators.NewManager() + firstPrimaryVdrs := validators.NewSet() + _ = firstVdrs.Add(constants.PrimaryNetworkID, firstPrimaryVdrs) + firstVM := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimePercentage: .2, + RewardConfig: defaultRewardConfig, + Validators: firstVdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + BanffTime: banffForkTime, }} firstCtx := defaultContext() firstCtx.Lock.Lock() firstMsgChan := make(chan common.Message, 1) - require.NoError(firstVM.Initialize(firstCtx, firstDB, genesisBytes, nil, nil, firstMsgChan, nil, nil)) + err := firstVM.Initialize( + context.Background(), + firstCtx, + firstDB, + genesisBytes, + nil, + nil, + firstMsgChan, + nil, + nil, + ) + require.NoError(err) - firstVM.clock.Set(defaultGenesisTime) - firstVM.uptimeManager.(uptime.TestManager).SetTime(defaultGenesisTime) + initialClkTime := banffForkTime.Add(time.Second) + firstVM.clock.Set(initialClkTime) + firstVM.uptimeManager.(uptime.TestManager).SetTime(initialClkTime) - require.NoError(firstVM.SetState(snow.Bootstrapping)) - 
require.NoError(firstVM.SetState(snow.NormalOp)) + require.NoError(firstVM.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(firstVM.SetState(context.Background(), snow.NormalOp)) // Fast forward clock to time for genesis validators to leave firstVM.uptimeManager.(uptime.TestManager).SetTime(defaultValidateEndTime) - require.NoError(firstVM.Shutdown()) + require.NoError(firstVM.Shutdown(context.Background())) firstCtx.Lock.Unlock() secondDB := db.NewPrefixDBManager([]byte{}) - secondVM := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - UptimePercentage: .21, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: mockable.MaxTime, - }, + secondVdrs := validators.NewManager() + secondPrimaryVdrs := validators.NewSet() + _ = secondVdrs.Add(constants.PrimaryNetworkID, secondPrimaryVdrs) + secondVM := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimePercentage: .21, + Validators: secondVdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + BanffTime: banffForkTime, }} secondCtx := defaultContext() secondCtx.Lock.Lock() defer func() { - require.NoError(secondVM.Shutdown()) + require.NoError(secondVM.Shutdown(context.Background())) secondCtx.Lock.Unlock() }() secondMsgChan := make(chan common.Message, 1) - require.NoError(secondVM.Initialize(secondCtx, secondDB, genesisBytes, nil, nil, secondMsgChan, nil, nil)) + err = secondVM.Initialize( + context.Background(), + secondCtx, + secondDB, + genesisBytes, + nil, + nil, + secondMsgChan, + nil, + nil, + ) + require.NoError(err) secondVM.clock.Set(defaultValidateStartTime.Add(2 * defaultMinStakingDuration)) secondVM.uptimeManager.(uptime.TestManager).SetTime(defaultValidateStartTime.Add(2 * defaultMinStakingDuration)) - require.NoError(secondVM.SetState(snow.Bootstrapping)) - require.NoError(secondVM.SetState(snow.NormalOp)) + require.NoError(secondVM.SetState(context.Background(), 
snow.Bootstrapping)) + require.NoError(secondVM.SetState(context.Background(), snow.NormalOp)) secondVM.clock.Set(defaultValidateEndTime) secondVM.uptimeManager.(uptime.TestManager).SetTime(defaultValidateEndTime) - blk, err := secondVM.Builder.BuildBlock() // should contain proposal to advance time + blk, err := secondVM.Builder.BuildBlock(context.Background()) // should advance time require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) // Assert preferences are correct block := blk.(smcon.OracleBlock) - options, err := block.Options() + options, err := block.Options(context.Background()) require.NoError(err) commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) + _, ok := commit.Block.(*blocks.BanffCommitBlock) require.True(ok) abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) - require.NoError(secondVM.SetPreference(secondVM.manager.LastAccepted())) + require.NoError(block.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(abort.Verify(context.Background())) + require.NoError(secondVM.SetPreference(context.Background(), secondVM.manager.LastAccepted())) proposalTx := blk.(blocks.Block).Txs()[0] { @@ -2455,8 +2219,8 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { require.Equal(status.Aborted, txStatus) } - require.NoError(commit.Accept()) // advance the timestamp - require.NoError(secondVM.SetPreference(secondVM.manager.LastAccepted())) + require.NoError(commit.Accept(context.Background())) // advance the timestamp + require.NoError(secondVM.SetPreference(context.Background(), secondVM.manager.LastAccepted())) _, txStatus, err := secondVM.state.GetTx(proposalTx.ID()) require.NoError(err) @@ -2466,26 
+2230,26 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { timestamp := secondVM.state.GetTimestamp() require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) - blk, err = secondVM.Builder.BuildBlock() // should contain proposal to reward genesis validator + blk, err = secondVM.Builder.BuildBlock(context.Background()) // should contain proposal to reward genesis validator require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) block = blk.(smcon.OracleBlock) - options, err = block.Options() + options, err = block.Options(context.Background()) require.NoError(err) - commit = options[1].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.ApricotCommitBlock) + commit = options[0].(*blockexecutor.Block) + _, ok = commit.Block.(*blocks.BanffCommitBlock) require.True(ok) - abort = options[0].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + abort = options[1].(*blockexecutor.Block) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(blk.Accept()) - require.NoError(commit.Verify()) - require.NoError(secondVM.SetPreference(secondVM.manager.LastAccepted())) + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(secondVM.SetPreference(context.Background(), secondVM.manager.LastAccepted())) proposalTx = blk.(blocks.Block).Txs()[0] { @@ -2497,9 +2261,9 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { require.Equal(status.Committed, txStatus) } - require.NoError(abort.Verify()) - require.NoError(abort.Accept()) // do not reward the genesis validator - require.NoError(secondVM.SetPreference(secondVM.manager.LastAccepted())) + require.NoError(abort.Verify(context.Background())) + require.NoError(abort.Accept(context.Background())) // do not reward the genesis validator + require.NoError(secondVM.SetPreference(context.Background(), secondVM.manager.LastAccepted())) _, txStatus, err = 
secondVM.state.GetTx(proposalTx.ID()) require.NoError(err) @@ -2517,15 +2281,16 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { _, genesisBytes := defaultGenesis() db := manager.NewMemDB(version.Semantic1_0_0) - vm := &VM{Factory: Factory{ - Config: config.Config{ - Chains: chains.MockManager{}, - UptimePercentage: .2, - RewardConfig: defaultRewardConfig, - Validators: validators.NewManager(), - UptimeLockedCalculator: uptime.NewLockedCalculator(), - BanffTime: mockable.MaxTime, - }, + vdrs := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimePercentage: .2, + RewardConfig: defaultRewardConfig, + Validators: vdrs, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + BanffTime: banffForkTime, }} ctx := defaultContext() @@ -2533,73 +2298,86 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { msgChan := make(chan common.Message, 1) appSender := &common.SenderTest{T: t} - require.NoError(vm.Initialize(ctx, db, genesisBytes, nil, nil, msgChan, nil, appSender)) + err := vm.Initialize( + context.Background(), + ctx, + db, + genesisBytes, + nil, + nil, + msgChan, + nil, + appSender, + ) + require.NoError(err) + defer func() { - require.NoError(vm.Shutdown()) + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() - vm.clock.Set(defaultGenesisTime) - vm.uptimeManager.(uptime.TestManager).SetTime(defaultGenesisTime) + initialClkTime := banffForkTime.Add(time.Second) + vm.clock.Set(initialClkTime) + vm.uptimeManager.(uptime.TestManager).SetTime(initialClkTime) - require.NoError(vm.SetState(snow.Bootstrapping)) - require.NoError(vm.SetState(snow.NormalOp)) + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) // Fast forward clock to time for genesis validators to leave 
vm.clock.Set(defaultValidateEndTime) vm.uptimeManager.(uptime.TestManager).SetTime(defaultValidateEndTime) - blk, err := vm.Builder.BuildBlock() // should contain proposal to advance time + blk, err := vm.Builder.BuildBlock(context.Background()) // should advance time require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) // first the time will be advanced. block := blk.(smcon.OracleBlock) - options, err := block.Options() + options, err := block.Options(context.Background()) require.NoError(err) commit := options[0].(*blockexecutor.Block) - _, ok := commit.Block.(*blocks.ApricotCommitBlock) + _, ok := commit.Block.(*blocks.BanffCommitBlock) require.True(ok) abort := options[1].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(block.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) - require.NoError(commit.Accept()) // advance the timestamp - require.NoError(vm.SetPreference(vm.manager.LastAccepted())) + require.NoError(block.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(abort.Verify(context.Background())) + require.NoError(commit.Accept(context.Background())) // advance the timestamp + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) // Verify that chain's timestamp has advanced timestamp := vm.state.GetTimestamp() require.Equal(defaultValidateEndTime.Unix(), timestamp.Unix()) // should contain proposal to reward genesis validator - blk, err = vm.Builder.BuildBlock() + blk, err = vm.Builder.BuildBlock(context.Background()) require.NoError(err) - require.NoError(blk.Verify()) + require.NoError(blk.Verify(context.Background())) block = blk.(smcon.OracleBlock) - options, err = block.Options() + options, err = block.Options(context.Background()) require.NoError(err) - abort = 
options[0].(*blockexecutor.Block) - _, ok = abort.Block.(*blocks.ApricotAbortBlock) + commit = options[0].(*blockexecutor.Block) + _, ok = commit.Block.(*blocks.BanffCommitBlock) require.True(ok) - commit = options[1].(*blockexecutor.Block) - _, ok = commit.Block.(*blocks.ApricotCommitBlock) + abort = options[1].(*blockexecutor.Block) + _, ok = abort.Block.(*blocks.BanffAbortBlock) require.True(ok) - require.NoError(blk.Accept()) - require.NoError(commit.Verify()) - require.NoError(abort.Verify()) - require.NoError(abort.Accept()) // do not reward the genesis validator - require.NoError(vm.SetPreference(vm.manager.LastAccepted())) + require.NoError(blk.Accept(context.Background())) + require.NoError(commit.Verify(context.Background())) + require.NoError(abort.Verify(context.Background())) + require.NoError(abort.Accept(context.Background())) // do not reward the genesis validator + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) _, err = vm.state.GetCurrentValidator( constants.PrimaryNetworkID, @@ -2607,3 +2385,542 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { ) require.ErrorIs(err, database.ErrNotFound) } + +func TestVM_GetValidatorSet(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Setup VM + _, genesisBytes := defaultGenesis() + db := manager.NewMemDB(version.Semantic1_0_0) + + vdrManager := validators.NewManager() + primaryVdrs := validators.NewSet() + _ = vdrManager.Add(constants.PrimaryNetworkID, primaryVdrs) + + vm := &VM{Config: config.Config{ + Chains: chains.TestManager, + UptimePercentage: .2, + RewardConfig: defaultRewardConfig, + Validators: vdrManager, + UptimeLockedCalculator: uptime.NewLockedCalculator(), + BanffTime: mockable.MaxTime, + }} + + ctx := defaultContext() + ctx.Lock.Lock() + + msgChan := make(chan common.Message, 1) + appSender := &common.SenderTest{T: t} + err := vm.Initialize(context.Background(), ctx, db, genesisBytes, nil, nil, msgChan, nil, 
appSender) + require.NoError(t, err) + defer func() { + require.NoError(t, vm.Shutdown(context.Background())) + ctx.Lock.Unlock() + }() + + vm.clock.Set(defaultGenesisTime) + vm.uptimeManager.(uptime.TestManager).SetTime(defaultGenesisTime) + + require.NoError(t, vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(t, vm.SetState(context.Background(), snow.NormalOp)) + + var ( + oldVdrs = vm.Validators + oldState = vm.state + numVdrs = 4 + vdrBaseWeight = uint64(1_000) + vdrs []*validators.Validator + ) + // Populate the validator set to use below + for i := 0; i < numVdrs; i++ { + sk, err := bls.NewSecretKey() + require.NoError(t, err) + + vdrs = append(vdrs, &validators.Validator{ + NodeID: ids.GenerateTestNodeID(), + PublicKey: bls.PublicFromSecretKey(sk), + Weight: vdrBaseWeight + uint64(i), + }) + } + + type test struct { + name string + // Height we're getting the diff at + height uint64 + lastAcceptedHeight uint64 + subnetID ids.ID + // Validator sets at tip + currentPrimaryNetworkValidators []*validators.Validator + currentSubnetValidators []*validators.Validator + // Diff at tip, block before tip, etc. + // This must have [height] - [lastAcceptedHeight] elements + weightDiffs []map[ids.NodeID]*state.ValidatorWeightDiff + // Diff at tip, block before tip, etc. 
+ // This must have [height] - [lastAcceptedHeight] elements + pkDiffs []map[ids.NodeID]*bls.PublicKey + expectedVdrSet map[ids.NodeID]*validators.GetValidatorOutput + expectedErr error + } + + tests := []test{ + { + name: "after tip", + height: 1, + lastAcceptedHeight: 0, + expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{}, + expectedErr: database.ErrNotFound, + }, + { + name: "at tip", + height: 1, + lastAcceptedHeight: 1, + currentPrimaryNetworkValidators: []*validators.Validator{ + copyPrimaryValidator(vdrs[0]), + }, + currentSubnetValidators: []*validators.Validator{ + copySubnetValidator(vdrs[0]), + }, + expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ + vdrs[0].NodeID: { + NodeID: vdrs[0].NodeID, + PublicKey: vdrs[0].PublicKey, + Weight: vdrs[0].Weight, + }, + }, + expectedErr: nil, + }, + { + name: "1 before tip", + height: 2, + lastAcceptedHeight: 3, + currentPrimaryNetworkValidators: []*validators.Validator{ + copyPrimaryValidator(vdrs[0]), + copyPrimaryValidator(vdrs[1]), + }, + currentSubnetValidators: []*validators.Validator{ + // At tip we have these 2 validators + copySubnetValidator(vdrs[0]), + copySubnetValidator(vdrs[1]), + }, + weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ + { + // At the tip block vdrs[0] lost weight, vdrs[1] gained weight, + // and vdrs[2] left + vdrs[0].NodeID: { + Decrease: true, + Amount: 1, + }, + vdrs[1].NodeID: { + Decrease: false, + Amount: 1, + }, + vdrs[2].NodeID: { + Decrease: true, + Amount: vdrs[2].Weight, + }, + }, + }, + pkDiffs: []map[ids.NodeID]*bls.PublicKey{ + { + vdrs[2].NodeID: vdrs[2].PublicKey, + }, + }, + expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ + vdrs[0].NodeID: { + NodeID: vdrs[0].NodeID, + PublicKey: vdrs[0].PublicKey, + Weight: vdrs[0].Weight + 1, + }, + vdrs[1].NodeID: { + NodeID: vdrs[1].NodeID, + PublicKey: vdrs[1].PublicKey, + Weight: vdrs[1].Weight - 1, + }, + vdrs[2].NodeID: { + NodeID: vdrs[2].NodeID, + PublicKey: vdrs[2].PublicKey, 
+ Weight: vdrs[2].Weight, + }, + }, + expectedErr: nil, + }, + { + name: "2 before tip", + height: 3, + lastAcceptedHeight: 5, + currentPrimaryNetworkValidators: []*validators.Validator{ + copyPrimaryValidator(vdrs[0]), + copyPrimaryValidator(vdrs[1]), + }, + currentSubnetValidators: []*validators.Validator{ + // At tip we have these 2 validators + copySubnetValidator(vdrs[0]), + copySubnetValidator(vdrs[1]), + }, + weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ + { + // At the tip block vdrs[0] lost weight, vdrs[1] gained weight, + // and vdrs[2] left + vdrs[0].NodeID: { + Decrease: true, + Amount: 1, + }, + vdrs[1].NodeID: { + Decrease: false, + Amount: 1, + }, + vdrs[2].NodeID: { + Decrease: true, + Amount: vdrs[2].Weight, + }, + }, + { + // At the block before tip vdrs[0] lost weight, vdrs[1] gained weight, + // vdrs[2] joined + vdrs[0].NodeID: { + Decrease: true, + Amount: 1, + }, + vdrs[1].NodeID: { + Decrease: false, + Amount: 1, + }, + vdrs[2].NodeID: { + Decrease: false, + Amount: vdrs[2].Weight, + }, + }, + }, + pkDiffs: []map[ids.NodeID]*bls.PublicKey{ + { + vdrs[2].NodeID: vdrs[2].PublicKey, + }, + {}, + }, + expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ + vdrs[0].NodeID: { + NodeID: vdrs[0].NodeID, + PublicKey: vdrs[0].PublicKey, + Weight: vdrs[0].Weight + 2, + }, + vdrs[1].NodeID: { + NodeID: vdrs[1].NodeID, + PublicKey: vdrs[1].PublicKey, + Weight: vdrs[1].Weight - 2, + }, + }, + expectedErr: nil, + }, + { + name: "1 before tip; nil public key", + height: 4, + lastAcceptedHeight: 5, + currentPrimaryNetworkValidators: []*validators.Validator{ + copyPrimaryValidator(vdrs[0]), + copyPrimaryValidator(vdrs[1]), + }, + currentSubnetValidators: []*validators.Validator{ + // At tip we have these 2 validators + copySubnetValidator(vdrs[0]), + copySubnetValidator(vdrs[1]), + }, + weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ + { + // At the tip block vdrs[0] lost weight, vdrs[1] gained weight, + // and vdrs[2] left + 
vdrs[0].NodeID: { + Decrease: true, + Amount: 1, + }, + vdrs[1].NodeID: { + Decrease: false, + Amount: 1, + }, + vdrs[2].NodeID: { + Decrease: true, + Amount: vdrs[2].Weight, + }, + }, + }, + pkDiffs: []map[ids.NodeID]*bls.PublicKey{ + {}, + }, + expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ + vdrs[0].NodeID: { + NodeID: vdrs[0].NodeID, + PublicKey: vdrs[0].PublicKey, + Weight: vdrs[0].Weight + 1, + }, + vdrs[1].NodeID: { + NodeID: vdrs[1].NodeID, + PublicKey: vdrs[1].PublicKey, + Weight: vdrs[1].Weight - 1, + }, + vdrs[2].NodeID: { + NodeID: vdrs[2].NodeID, + Weight: vdrs[2].Weight, + }, + }, + expectedErr: nil, + }, + { + name: "1 before tip; subnet", + height: 5, + lastAcceptedHeight: 6, + subnetID: ids.GenerateTestID(), + currentPrimaryNetworkValidators: []*validators.Validator{ + copyPrimaryValidator(vdrs[0]), + copyPrimaryValidator(vdrs[1]), + copyPrimaryValidator(vdrs[3]), + }, + currentSubnetValidators: []*validators.Validator{ + // At tip we have these 2 validators + copySubnetValidator(vdrs[0]), + copySubnetValidator(vdrs[1]), + }, + weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ + { + // At the tip block vdrs[0] lost weight, vdrs[1] gained weight, + // and vdrs[2] left + vdrs[0].NodeID: { + Decrease: true, + Amount: 1, + }, + vdrs[1].NodeID: { + Decrease: false, + Amount: 1, + }, + vdrs[2].NodeID: { + Decrease: true, + Amount: vdrs[2].Weight, + }, + }, + }, + pkDiffs: []map[ids.NodeID]*bls.PublicKey{ + {}, + }, + expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ + vdrs[0].NodeID: { + NodeID: vdrs[0].NodeID, + PublicKey: vdrs[0].PublicKey, + Weight: vdrs[0].Weight + 1, + }, + vdrs[1].NodeID: { + NodeID: vdrs[1].NodeID, + PublicKey: vdrs[1].PublicKey, + Weight: vdrs[1].Weight - 1, + }, + vdrs[2].NodeID: { + NodeID: vdrs[2].NodeID, + Weight: vdrs[2].Weight, + }, + }, + expectedErr: nil, + }, + { + name: "unrelated primary network key removal on subnet lookup", + height: 4, + lastAcceptedHeight: 5, + subnetID: 
ids.GenerateTestID(), + currentPrimaryNetworkValidators: []*validators.Validator{ + copyPrimaryValidator(vdrs[0]), + }, + currentSubnetValidators: []*validators.Validator{ + copySubnetValidator(vdrs[0]), + }, + weightDiffs: []map[ids.NodeID]*state.ValidatorWeightDiff{ + {}, + }, + pkDiffs: []map[ids.NodeID]*bls.PublicKey{ + { + vdrs[1].NodeID: vdrs[1].PublicKey, + }, + }, + expectedVdrSet: map[ids.NodeID]*validators.GetValidatorOutput{ + vdrs[0].NodeID: { + NodeID: vdrs[0].NodeID, + PublicKey: vdrs[0].PublicKey, + Weight: vdrs[0].Weight, + }, + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + // Mock the VM's validators + vdrs := validators.NewMockManager(ctrl) + vm.Validators = vdrs + mockSubnetVdrSet := validators.NewMockSet(ctrl) + mockSubnetVdrSet.EXPECT().List().Return(tt.currentSubnetValidators).AnyTimes() + vdrs.EXPECT().Get(tt.subnetID).Return(mockSubnetVdrSet, true).AnyTimes() + + mockPrimaryVdrSet := mockSubnetVdrSet + if tt.subnetID != constants.PrimaryNetworkID { + mockPrimaryVdrSet = validators.NewMockSet(ctrl) + vdrs.EXPECT().Get(constants.PrimaryNetworkID).Return(mockPrimaryVdrSet, true).AnyTimes() + } + for _, vdr := range tt.currentPrimaryNetworkValidators { + mockPrimaryVdrSet.EXPECT().Get(vdr.NodeID).Return(vdr, true).AnyTimes() + } + + // Mock the block manager + mockManager := blockexecutor.NewMockManager(ctrl) + vm.manager = mockManager + + // Mock the VM's state + mockState := state.NewMockState(ctrl) + vm.state = mockState + + // Tell state what diffs to report + for _, weightDiff := range tt.weightDiffs { + mockState.EXPECT().GetValidatorWeightDiffs(gomock.Any(), gomock.Any()).Return(weightDiff, nil) + } + + for _, pkDiff := range tt.pkDiffs { + mockState.EXPECT().GetValidatorPublicKeyDiffs(gomock.Any()).Return(pkDiff, nil) + } + + // Tell state last accepted block to report + mockTip := smcon.NewMockBlock(ctrl) + 
mockTip.EXPECT().Height().Return(tt.lastAcceptedHeight) + mockTipID := ids.GenerateTestID() + mockState.EXPECT().GetLastAccepted().Return(mockTipID) + mockManager.EXPECT().GetBlock(mockTipID).Return(mockTip, nil) + + // Compute validator set at previous height + gotVdrSet, err := vm.GetValidatorSet(context.Background(), tt.height, tt.subnetID) + require.ErrorIs(err, tt.expectedErr) + if tt.expectedErr != nil { + return + } + require.Equal(len(tt.expectedVdrSet), len(gotVdrSet)) + for nodeID, vdr := range tt.expectedVdrSet { + otherVdr, ok := gotVdrSet[nodeID] + require.True(ok) + require.Equal(vdr, otherVdr) + } + }) + } + + // Put these back so we don't need to mock calls made on Shutdown + vm.Validators = oldVdrs + vm.state = oldState +} + +func copyPrimaryValidator(vdr *validators.Validator) *validators.Validator { + newVdr := *vdr + return &newVdr +} + +func copySubnetValidator(vdr *validators.Validator) *validators.Validator { + newVdr := *vdr + newVdr.PublicKey = nil + return &newVdr +} + +func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { + require := require.New(t) + + validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) + validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) + + vm, _, _ := defaultVM() + + vm.ctx.Lock.Lock() + defer func() { + err := vm.Shutdown(context.Background()) + require.NoError(err) + + vm.ctx.Lock.Unlock() + }() + + key, err := testKeyFactory.NewPrivateKey() + require.NoError(err) + + id := key.PublicKey().Address() + + addValidatorTx, err := vm.txBuilder.NewAddValidatorTx( + defaultMaxValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + ids.NodeID(id), + id, + reward.PercentDenominator, + []*secp256k1.PrivateKey{keys[0]}, + keys[0].Address(), + ) + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(addValidatorTx) + require.NoError(err) + + // trigger block creation for the validator tx + addValidatorBlock, err := 
vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(addValidatorBlock.Verify(context.Background())) + require.NoError(addValidatorBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( + 1, + []ids.ShortID{id}, + []*secp256k1.PrivateKey{keys[0]}, + keys[0].Address(), + ) + require.NoError(err) + + err = vm.Builder.AddUnverifiedTx(createSubnetTx) + require.NoError(err) + + // trigger block creation for the subnet tx + createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(createSubnetBlock.Verify(context.Background())) + require.NoError(createSubnetBlock.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( + defaultMaxValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + ids.NodeID(id), + createSubnetTx.ID(), + []*secp256k1.PrivateKey{key, keys[1]}, + keys[1].Address(), + ) + require.NoError(err) + + removeSubnetValidatorTx, err := vm.txBuilder.NewRemoveSubnetValidatorTx( + ids.NodeID(id), + createSubnetTx.ID(), + []*secp256k1.PrivateKey{key, keys[2]}, + keys[2].Address(), + ) + require.NoError(err) + + statelessBlock, err := blocks.NewBanffStandardBlock( + vm.state.GetTimestamp(), + createSubnetBlock.ID(), + createSubnetBlock.Height()+1, + []*txs.Tx{ + addSubnetValidatorTx, + removeSubnetValidatorTx, + }, + ) + require.NoError(err) + + blockBytes := statelessBlock.Bytes() + block, err := vm.ParseBlock(context.Background(), blockBytes) + require.NoError(err) + require.NoError(block.Verify(context.Background())) + require.NoError(block.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + + _, err = 
vm.state.GetPendingValidator(createSubnetTx.ID(), ids.NodeID(id)) + require.ErrorIs(err, database.ErrNotFound) +} diff --git a/avalanchego/vms/platformvm/warp/codec.go b/avalanchego/vms/platformvm/warp/codec.go new file mode 100644 index 00000000..0213a670 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/codec.go @@ -0,0 +1,31 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "math" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +const codecVersion = 0 + +// Codec does serialization and deserialization for Warp messages. +var c codec.Manager + +func init() { + c = codec.NewManager(math.MaxInt) + lc := linearcodec.NewCustomMaxLength(math.MaxInt32) + + errs := wrappers.Errs{} + errs.Add( + lc.RegisterType(&BitSetSignature{}), + c.RegisterCodec(codecVersion, lc), + ) + if errs.Errored() { + panic(errs.Err) + } +} diff --git a/avalanchego/vms/platformvm/warp/constants.go b/avalanchego/vms/platformvm/warp/constants.go new file mode 100644 index 00000000..a91f5f39 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/constants.go @@ -0,0 +1,15 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import "github.com/ava-labs/avalanchego/ids" + +// AnycastID is a special DestinationChainID that is used to indicate that the +// message is intended to be able to be received by any chain. 
+var AnycastID = ids.ID{ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, +} diff --git a/avalanchego/vms/platformvm/warp/gwarp/client.go b/avalanchego/vms/platformvm/warp/gwarp/client.go new file mode 100644 index 00000000..96446fc8 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/gwarp/client.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gwarp + +import ( + "context" + + "github.com/ava-labs/avalanchego/vms/platformvm/warp" + + pb "github.com/ava-labs/avalanchego/proto/pb/warp" +) + +var _ warp.Signer = (*Client)(nil) + +type Client struct { + client pb.SignerClient +} + +func NewClient(client pb.SignerClient) *Client { + return &Client{client: client} +} + +func (c *Client) Sign(unsignedMsg *warp.UnsignedMessage) ([]byte, error) { + resp, err := c.client.Sign(context.Background(), &pb.SignRequest{ + SourceChainId: unsignedMsg.SourceChainID[:], + DestinationChainId: unsignedMsg.DestinationChainID[:], + Payload: unsignedMsg.Payload, + }) + if err != nil { + return nil, err + } + return resp.Signature, nil +} diff --git a/avalanchego/vms/platformvm/warp/gwarp/server.go b/avalanchego/vms/platformvm/warp/gwarp/server.go new file mode 100644 index 00000000..f1ac8964 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/gwarp/server.go @@ -0,0 +1,50 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package gwarp + +import ( + "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" + + pb "github.com/ava-labs/avalanchego/proto/pb/warp" +) + +var _ pb.SignerServer = (*Server)(nil) + +type Server struct { + pb.UnsafeSignerServer + signer warp.Signer +} + +func NewServer(signer warp.Signer) *Server { + return &Server{signer: signer} +} + +func (s *Server) Sign(_ context.Context, unsignedMsg *pb.SignRequest) (*pb.SignResponse, error) { + sourceChainID, err := ids.ToID(unsignedMsg.SourceChainId) + if err != nil { + return nil, err + } + + destinationChainID, err := ids.ToID(unsignedMsg.DestinationChainId) + if err != nil { + return nil, err + } + + msg, err := warp.NewUnsignedMessage( + sourceChainID, + destinationChainID, + unsignedMsg.Payload, + ) + if err != nil { + return nil, err + } + + sig, err := s.signer.Sign(msg) + return &pb.SignResponse{ + Signature: sig, + }, err +} diff --git a/avalanchego/vms/platformvm/warp/gwarp/signer_test.go b/avalanchego/vms/platformvm/warp/gwarp/signer_test.go new file mode 100644 index 00000000..ec443415 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/gwarp/signer_test.go @@ -0,0 +1,71 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package gwarp + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/vms/platformvm/warp" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" + + pb "github.com/ava-labs/avalanchego/proto/pb/warp" +) + +type testSigner struct { + client *Client + server warp.Signer + sk *bls.SecretKey + chainID ids.ID + closeFn func() +} + +func setupSigner(t testing.TB) *testSigner { + require := require.New(t) + + sk, err := bls.NewSecretKey() + require.NoError(err) + + chainID := ids.GenerateTestID() + + s := &testSigner{ + server: warp.NewSigner(sk, chainID), + sk: sk, + chainID: chainID, + } + + listener, err := grpcutils.NewListener() + if err != nil { + t.Fatalf("Failed to create listener: %s", err) + } + serverCloser := grpcutils.ServerCloser{} + + server := grpcutils.NewServer() + pb.RegisterSignerServer(server, NewServer(s.server)) + serverCloser.Add(server) + + go grpcutils.Serve(listener, server) + + conn, err := grpcutils.Dial(listener.Addr().String()) + require.NoError(err) + + s.client = NewClient(pb.NewSignerClient(conn)) + s.closeFn = func() { + serverCloser.Stop() + _ = conn.Close() + _ = listener.Close() + } + return s +} + +func TestInterface(t *testing.T) { + for _, test := range warp.SignerTests { + s := setupSigner(t) + test(t, s.client, s.sk, s.chainID) + s.closeFn() + } +} diff --git a/avalanchego/vms/platformvm/warp/message.go b/avalanchego/vms/platformvm/warp/message.go new file mode 100644 index 00000000..34850aed --- /dev/null +++ b/avalanchego/vms/platformvm/warp/message.go @@ -0,0 +1,51 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +// Message defines the standard format for a Warp message. 
+type Message struct { + UnsignedMessage `serialize:"true"` + Signature Signature `serialize:"true"` + + bytes []byte +} + +// NewMessage creates a new *Message and initializes it. +func NewMessage( + unsignedMsg *UnsignedMessage, + signature Signature, +) (*Message, error) { + msg := &Message{ + UnsignedMessage: *unsignedMsg, + Signature: signature, + } + return msg, msg.Initialize() +} + +// ParseMessage converts a slice of bytes into an initialized *Message. +func ParseMessage(b []byte) (*Message, error) { + msg := &Message{ + bytes: b, + } + _, err := c.Unmarshal(b, msg) + if err != nil { + return nil, err + } + return msg, msg.UnsignedMessage.Initialize() +} + +// Initialize recalculates the result of Bytes(). It does not call Initialize() +// on the UnsignedMessage. +func (m *Message) Initialize() error { + bytes, err := c.Marshal(codecVersion, m) + m.bytes = bytes + return err +} + +// Bytes returns the binary representation of this message. It assumes that the +// message is initialized from either New, Parse, or an explicit call to +// Initialize. +func (m *Message) Bytes() []byte { + return m.bytes +} diff --git a/avalanchego/vms/platformvm/warp/message_test.go b/avalanchego/vms/platformvm/warp/message_test.go new file mode 100644 index 00000000..94375530 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/message_test.go @@ -0,0 +1,44 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" +) + +func TestMessage(t *testing.T) { + require := require.New(t) + + unsignedMsg, err := NewUnsignedMessage( + ids.GenerateTestID(), + ids.GenerateTestID(), + []byte("payload"), + ) + require.NoError(err) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: []byte{1, 2, 3}, + Signature: [bls.SignatureLen]byte{4, 5, 6}, + }, + ) + require.NoError(err) + + msgBytes := msg.Bytes() + msg2, err := ParseMessage(msgBytes) + require.NoError(err) + require.Equal(msg, msg2) +} + +func TestParseMessageJunk(t *testing.T) { + _, err := ParseMessage(utils.RandomBytes(1024)) + require.Error(t, err) +} diff --git a/avalanchego/vms/platformvm/warp/signature.go b/avalanchego/vms/platformvm/warp/signature.go new file mode 100644 index 00000000..df3973cc --- /dev/null +++ b/avalanchego/vms/platformvm/warp/signature.go @@ -0,0 +1,160 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "context" + "errors" + "fmt" + "math/big" + + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" +) + +var ( + _ Signature = (*BitSetSignature)(nil) + + ErrInvalidBitSet = errors.New("bitset is invalid") + ErrInsufficientWeight = errors.New("signature weight is insufficient") + ErrInvalidSignature = errors.New("signature is invalid") + ErrParseSignature = errors.New("failed to parse signature") +) + +type Signature interface { + // NumSigners is the number of [bls.PublicKeys] that participated in the + // [Signature]. This is exposed because users of these signatures typically + // impose a verification fee that is a function of the number of + // signers. 
+ NumSigners() (int, error) + + // Verify that this signature was signed by at least [quorumNum]/[quorumDen] + // of the validators of [msg.SourceChainID] at [pChainHeight]. + // + // Invariant: [msg] is correctly initialized. + Verify( + ctx context.Context, + msg *UnsignedMessage, + pChainState validators.State, + pChainHeight uint64, + quorumNum uint64, + quorumDen uint64, + ) error +} + +type BitSetSignature struct { + // Signers is a big-endian byte slice encoding which validators signed this + // message. + Signers []byte `serialize:"true"` + Signature [bls.SignatureLen]byte `serialize:"true"` +} + +func (s *BitSetSignature) NumSigners() (int, error) { + // Parse signer bit vector + // + // We assert that the length of [signerIndices.Bytes()] is equal + // to [len(s.Signers)] to ensure that [s.Signers] does not have + // any unnecessary zero-padding to represent the [set.Bits]. + signerIndices := set.BitsFromBytes(s.Signers) + if len(signerIndices.Bytes()) != len(s.Signers) { + return 0, ErrInvalidBitSet + } + return signerIndices.Len(), nil +} + +func (s *BitSetSignature) Verify( + ctx context.Context, + msg *UnsignedMessage, + pChainState validators.State, + pChainHeight uint64, + quorumNum uint64, + quorumDen uint64, +) error { + subnetID, err := pChainState.GetSubnetID(ctx, msg.SourceChainID) + if err != nil { + return err + } + + vdrs, totalWeight, err := GetCanonicalValidatorSet(ctx, pChainState, pChainHeight, subnetID) + if err != nil { + return err + } + + // Parse signer bit vector + // + // We assert that the length of [signerIndices.Bytes()] is equal + // to [len(s.Signers)] to ensure that [s.Signers] does not have + // any unnecessary zero-padding to represent the [set.Bits]. + signerIndices := set.BitsFromBytes(s.Signers) + if len(signerIndices.Bytes()) != len(s.Signers) { + return ErrInvalidBitSet + } + + // Get the validators that (allegedly) signed the message. 
+ signers, err := FilterValidators(signerIndices, vdrs) + if err != nil { + return err + } + + // Because [signers] is a subset of [vdrs], this can never error. + sigWeight, _ := SumWeight(signers) + + // Make sure the signature's weight is sufficient. + err = VerifyWeight( + sigWeight, + totalWeight, + quorumNum, + quorumDen, + ) + if err != nil { + return err + } + + // Parse the aggregate signature + aggSig, err := bls.SignatureFromBytes(s.Signature[:]) + if err != nil { + return fmt.Errorf("%w: %v", ErrParseSignature, err) + } + + // Create the aggregate public key + aggPubKey, err := AggregatePublicKeys(signers) + if err != nil { + return err + } + + // Verify the signature + unsignedBytes := msg.Bytes() + if !bls.Verify(aggPubKey, aggSig, unsignedBytes) { + return ErrInvalidSignature + } + return nil +} + +// VerifyWeight returns [nil] if [sigWeight] is at least [quorumNum]/[quorumDen] +// of [totalWeight]. +// If [sigWeight >= totalWeight * quorumNum / quorumDen] then return [nil] +func VerifyWeight( + sigWeight uint64, + totalWeight uint64, + quorumNum uint64, + quorumDen uint64, +) error { + // Verifies that quorumNum * totalWeight <= quorumDen * sigWeight + scaledTotalWeight := new(big.Int).SetUint64(totalWeight) + scaledTotalWeight.Mul(scaledTotalWeight, new(big.Int).SetUint64(quorumNum)) + scaledSigWeight := new(big.Int).SetUint64(sigWeight) + scaledSigWeight.Mul(scaledSigWeight, new(big.Int).SetUint64(quorumDen)) + if scaledTotalWeight.Cmp(scaledSigWeight) == 1 { + return fmt.Errorf( + "%w: %d*%d > %d*%d", + ErrInsufficientWeight, + quorumNum, + totalWeight, + quorumDen, + sigWeight, + ) + } + return nil +} diff --git a/avalanchego/vms/platformvm/warp/signature_test.go b/avalanchego/vms/platformvm/warp/signature_test.go new file mode 100644 index 00000000..a24fb176 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/signature_test.go @@ -0,0 +1,785 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package warp + +import ( + "context" + "errors" + "math" + "testing" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" +) + +const pChainHeight uint64 = 1337 + +var ( + _ utils.Sortable[*testValidator] = (*testValidator)(nil) + + errTest = errors.New("non-nil error") + sourceChainID = ids.GenerateTestID() + subnetID = ids.GenerateTestID() + + testVdrs []*testValidator +) + +type testValidator struct { + nodeID ids.NodeID + sk *bls.SecretKey + vdr *Validator +} + +func (v *testValidator) Less(o *testValidator) bool { + return v.vdr.Less(o.vdr) +} + +func newTestValidator() *testValidator { + sk, err := bls.NewSecretKey() + if err != nil { + panic(err) + } + + nodeID := ids.GenerateTestNodeID() + pk := bls.PublicFromSecretKey(sk) + return &testValidator{ + nodeID: nodeID, + sk: sk, + vdr: &Validator{ + PublicKey: pk, + PublicKeyBytes: bls.PublicKeyToBytes(pk), + Weight: 3, + NodeIDs: []ids.NodeID{nodeID}, + }, + } +} + +func init() { + testVdrs = []*testValidator{ + newTestValidator(), + newTestValidator(), + newTestValidator(), + } + utils.Sort(testVdrs) +} + +func TestNumSigners(t *testing.T) { + tests := map[string]struct { + generateSignature func() *BitSetSignature + count int + err error + }{ + "empty signers": { + generateSignature: func() *BitSetSignature { + return &BitSetSignature{} + }, + }, + "invalid signers": { + generateSignature: func() *BitSetSignature { + return &BitSetSignature{ + Signers: make([]byte, 1), + } + }, + err: ErrInvalidBitSet, + }, + "no signers": { + generateSignature: func() *BitSetSignature { + signers := set.NewBits() + return &BitSetSignature{ + Signers: signers.Bytes(), + } + }, + }, + "1 signer": { + generateSignature: 
func() *BitSetSignature { + signers := set.NewBits() + signers.Add(2) + return &BitSetSignature{ + Signers: signers.Bytes(), + } + }, + count: 1, + }, + "multiple signers": { + generateSignature: func() *BitSetSignature { + signers := set.NewBits() + signers.Add(2) + signers.Add(11) + signers.Add(55) + signers.Add(93) + return &BitSetSignature{ + Signers: signers.Bytes(), + } + }, + count: 4, + }, + } + + for name, tt := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) + sig := tt.generateSignature() + count, err := sig.NumSigners() + require.Equal(tt.count, count) + require.ErrorIs(err, tt.err) + }) + } +} + +func TestSignatureVerification(t *testing.T) { + vdrs := map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: testVdrs[0].vdr.PublicKey, + Weight: testVdrs[0].vdr.Weight, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[1].vdr.PublicKey, + Weight: testVdrs[1].vdr.Weight, + }, + testVdrs[2].nodeID: { + NodeID: testVdrs[2].nodeID, + PublicKey: testVdrs[2].vdr.PublicKey, + Weight: testVdrs[2].vdr.Weight, + }, + } + + tests := []struct { + name string + stateF func(*gomock.Controller) validators.State + quorumNum uint64 + quorumDen uint64 + msgF func(*require.Assertions) *Message + err error + }{ + { + name: "can't get subnetID", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, errTest) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + nil, + ) + require.NoError(err) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{}, + ) + require.NoError(err) + return msg + }, + err: errTest, + }, + { + name: "can't get validator set", + stateF: func(ctrl *gomock.Controller) validators.State { + 
state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(nil, errTest) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + nil, + ) + require.NoError(err) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{}, + ) + require.NoError(err) + return msg + }, + err: errTest, + }, + { + name: "weight overflow", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: testVdrs[0].vdr.PublicKey, + Weight: math.MaxUint64, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[1].vdr.PublicKey, + Weight: math.MaxUint64, + }, + }, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(*require.Assertions) *Message { + return &Message{ + UnsignedMessage: UnsignedMessage{ + SourceChainID: sourceChainID, + }, + Signature: &BitSetSignature{ + Signers: make([]byte, 8), + }, + } + }, + err: ErrWeightOverflow, + }, + { + name: "invalid bit set index", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + msg, err := NewMessage( 
+ unsignedMsg, + &BitSetSignature{ + Signers: make([]byte, 1), + Signature: [bls.SignatureLen]byte{}, + }, + ) + require.NoError(err) + return msg + }, + err: ErrInvalidBitSet, + }, + { + name: "unknown index", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + signers := set.NewBits() + signers.Add(3) // vdr oob + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: [bls.SignatureLen]byte{}, + }, + ) + require.NoError(err) + return msg + }, + err: ErrUnknownValidator, + }, + { + name: "insufficient weight", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 1, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + // [signers] has weight from [vdr[0], vdr[1]], + // which is 6, which is less than 9 + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr1Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + 
msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: ErrInsufficientWeight, + }, + { + name: "can't parse sig", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: [bls.SignatureLen]byte{}, + }, + ) + require.NoError(err) + return msg + }, + err: ErrParseSignature, + }, + { + name: "no validators", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(nil, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(vdr0Sig)) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: nil, + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: bls.ErrNoPublicKeys, + }, + { + name: "invalid signature (substitute)", + stateF: func(ctrl *gomock.Controller) validators.State { + 
state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(vdrs, nil) + return state + }, + quorumNum: 3, + quorumDen: 5, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + // Give sig from vdr[2] even though the bit vector says it + // should be from vdr[1] + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: ErrInvalidSignature, + }, + { + name: "invalid signature (missing one)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(vdrs, nil) + return state + }, + quorumNum: 3, + quorumDen: 5, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + // Don't give the sig from vdr[1] + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(vdr0Sig)) + + msg, err := NewMessage( + unsignedMsg, + 
&BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: ErrInvalidSignature, + }, + { + name: "invalid signature (extra one)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(vdrs, nil) + return state + }, + quorumNum: 3, + quorumDen: 5, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + signers := set.NewBits() + signers.Add(0) + signers.Add(1) + + unsignedBytes := unsignedMsg.Bytes() + vdr0Sig := bls.Sign(testVdrs[0].sk, unsignedBytes) + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + // Give sig from vdr[2] even though the bit vector doesn't have + // it + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr0Sig, vdr1Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: ErrInvalidSignature, + }, + { + name: "valid signature", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(vdrs, nil) + return state + }, + quorumNum: 1, + quorumDen: 2, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + // [signers] has weight from [vdr[1], 
vdr[2]], + // which is 6, which is greater than 4.5 + signers := set.NewBits() + signers.Add(1) + signers.Add(2) + + unsignedBytes := unsignedMsg.Bytes() + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: nil, + }, + { + name: "valid signature (boundary)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(vdrs, nil) + return state + }, + quorumNum: 2, + quorumDen: 3, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + // [signers] has weight from [vdr[1], vdr[2]], + // which is 6, which meets the minimum 6 + signers := set.NewBits() + signers.Add(1) + signers.Add(2) + + unsignedBytes := unsignedMsg.Bytes() + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: nil, + }, + { + name: "valid signature (missing key)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) 
+ state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: nil, + Weight: testVdrs[0].vdr.Weight, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[1].vdr.PublicKey, + Weight: testVdrs[1].vdr.Weight, + }, + testVdrs[2].nodeID: { + NodeID: testVdrs[2].nodeID, + PublicKey: testVdrs[2].vdr.PublicKey, + Weight: testVdrs[2].vdr.Weight, + }, + }, nil) + return state + }, + quorumNum: 1, + quorumDen: 3, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + // [signers] has weight from [vdr2, vdr3], + // which is 6, which is greater than 3 + signers := set.NewBits() + // Note: the bits are shifted because vdr[0]'s key was zeroed + signers.Add(0) // vdr[1] + signers.Add(1) // vdr[2] + + unsignedBytes := unsignedMsg.Bytes() + vdr1Sig := bls.Sign(testVdrs[1].sk, unsignedBytes) + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSig, err := bls.AggregateSignatures([]*bls.Signature{vdr1Sig, vdr2Sig}) + require.NoError(err) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(aggSig)) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: nil, + }, + { + name: "valid signature (duplicate key)", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetSubnetID(gomock.Any(), sourceChainID).Return(subnetID, nil) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: nil, + 
Weight: testVdrs[0].vdr.Weight, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[2].vdr.PublicKey, + Weight: testVdrs[1].vdr.Weight, + }, + testVdrs[2].nodeID: { + NodeID: testVdrs[2].nodeID, + PublicKey: testVdrs[2].vdr.PublicKey, + Weight: testVdrs[2].vdr.Weight, + }, + }, nil) + return state + }, + quorumNum: 2, + quorumDen: 3, + msgF: func(require *require.Assertions) *Message { + unsignedMsg, err := NewUnsignedMessage( + sourceChainID, + ids.Empty, + []byte{1, 2, 3}, + ) + require.NoError(err) + + // [signers] has weight from [vdr2, vdr3], + // which is 6, which meets the minimum 6 + signers := set.NewBits() + // Note: the bits are shifted because vdr[0]'s key was zeroed + // Note: vdr[1] and vdr[2] were combined because of a shared pk + signers.Add(0) // vdr[1] + vdr[2] + + unsignedBytes := unsignedMsg.Bytes() + // Because vdr[1] and vdr[2] share a key, only one of them sign. + vdr2Sig := bls.Sign(testVdrs[2].sk, unsignedBytes) + aggSigBytes := [bls.SignatureLen]byte{} + copy(aggSigBytes[:], bls.SignatureToBytes(vdr2Sig)) + + msg, err := NewMessage( + unsignedMsg, + &BitSetSignature{ + Signers: signers.Bytes(), + Signature: aggSigBytes, + }, + ) + require.NoError(err) + return msg + }, + err: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + msg := tt.msgF(require) + pChainState := tt.stateF(ctrl) + + err := msg.Signature.Verify( + context.Background(), + &msg.UnsignedMessage, + pChainState, + pChainHeight, + tt.quorumNum, + tt.quorumDen, + ) + require.ErrorIs(err, tt.err) + }) + } +} diff --git a/avalanchego/vms/platformvm/warp/signer.go b/avalanchego/vms/platformvm/warp/signer.go new file mode 100644 index 00000000..dddb0816 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/signer.go @@ -0,0 +1,48 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package warp + +import ( + "errors" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" +) + +var ( + _ Signer = (*signer)(nil) + + errWrongSourceChainID = errors.New("wrong SourceChainID") +) + +type Signer interface { + // Returns this node's BLS signature over an unsigned message. If the caller + // does not have the authority to sign the message, an error will be + // returned. + // + // Assumes the unsigned message is correctly initialized. + Sign(msg *UnsignedMessage) ([]byte, error) +} + +func NewSigner(sk *bls.SecretKey, chainID ids.ID) Signer { + return &signer{ + sk: sk, + chainID: chainID, + } +} + +type signer struct { + sk *bls.SecretKey + chainID ids.ID +} + +func (s *signer) Sign(msg *UnsignedMessage) ([]byte, error) { + if msg.SourceChainID != s.chainID { + return nil, errWrongSourceChainID + } + + msgBytes := msg.Bytes() + sig := bls.Sign(s.sk, msgBytes) + return bls.SignatureToBytes(sig), nil +} diff --git a/avalanchego/vms/platformvm/warp/signer_test.go b/avalanchego/vms/platformvm/warp/signer_test.go new file mode 100644 index 00000000..46ee6eeb --- /dev/null +++ b/avalanchego/vms/platformvm/warp/signer_test.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" +) + +func TestSigner(t *testing.T) { + for _, test := range SignerTests { + sk, err := bls.NewSecretKey() + require.NoError(t, err) + + chainID := ids.GenerateTestID() + s := NewSigner(sk, chainID) + + test(t, s, sk, chainID) + } +} diff --git a/avalanchego/vms/platformvm/warp/test_signer.go b/avalanchego/vms/platformvm/warp/test_signer.go new file mode 100644 index 00000000..f030447f --- /dev/null +++ b/avalanchego/vms/platformvm/warp/test_signer.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/bls" +) + +// SignerTests is a list of all signer tests +var SignerTests = []func(t *testing.T, s Signer, sk *bls.SecretKey, chainID ids.ID){ + TestSignerWrongChainID, + TestSignerVerifies, +} + +// Test that using a random SourceChainID results in an error +func TestSignerWrongChainID(t *testing.T, s Signer, _ *bls.SecretKey, _ ids.ID) { + require := require.New(t) + + msg, err := NewUnsignedMessage( + ids.GenerateTestID(), + ids.GenerateTestID(), + []byte("payload"), + ) + require.NoError(err) + + _, err = s.Sign(msg) + require.Error(err) +} + +// Test that a signature generated with the signer verifies correctly +func TestSignerVerifies(t *testing.T, s Signer, sk *bls.SecretKey, chainID ids.ID) { + require := require.New(t) + + msg, err := NewUnsignedMessage( + chainID, + ids.GenerateTestID(), + []byte("payload"), + ) + require.NoError(err) + + sigBytes, err := s.Sign(msg) + require.NoError(err) + + sig, err := bls.SignatureFromBytes(sigBytes) + require.NoError(err) + + pk := bls.PublicFromSecretKey(sk) + msgBytes := msg.Bytes() + valid := 
bls.Verify(pk, sig, msgBytes) + require.True(valid) +} diff --git a/avalanchego/vms/platformvm/warp/unsigned_message.go b/avalanchego/vms/platformvm/warp/unsigned_message.go new file mode 100644 index 00000000..be266834 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/unsigned_message.go @@ -0,0 +1,71 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "fmt" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +// UnsignedMessage defines the standard format for an unsigned Warp message. +type UnsignedMessage struct { + SourceChainID ids.ID `serialize:"true"` + DestinationChainID ids.ID `serialize:"true"` + Payload []byte `serialize:"true"` + + bytes []byte + id ids.ID +} + +// NewUnsignedMessage creates a new *UnsignedMessage and initializes it. +func NewUnsignedMessage( + sourceChainID ids.ID, + destinationChainID ids.ID, + payload []byte, +) (*UnsignedMessage, error) { + msg := &UnsignedMessage{ + SourceChainID: sourceChainID, + DestinationChainID: destinationChainID, + Payload: payload, + } + return msg, msg.Initialize() +} + +// ParseUnsignedMessage converts a slice of bytes into an initialized +// *UnsignedMessage. +func ParseUnsignedMessage(b []byte) (*UnsignedMessage, error) { + msg := &UnsignedMessage{ + bytes: b, + id: hashing.ComputeHash256Array(b), + } + _, err := c.Unmarshal(b, msg) + return msg, err +} + +// Initialize recalculates the result of Bytes(). +func (m *UnsignedMessage) Initialize() error { + bytes, err := c.Marshal(codecVersion, m) + if err != nil { + return fmt.Errorf("couldn't marshal warp unsigned message: %w", err) + } + m.bytes = bytes + m.id = hashing.ComputeHash256Array(m.bytes) + return nil +} + +// Bytes returns the binary representation of this message. It assumes that the +// message is initialized from either New, Parse, or an explicit call to +// Initialize. 
+func (m *UnsignedMessage) Bytes() []byte { + return m.bytes +} + +// ID returns an identifier for this message. It assumes that the +// message is initialized from either New, Parse, or an explicit call to +// Initialize. +func (m *UnsignedMessage) ID() ids.ID { + return m.id +} diff --git a/avalanchego/vms/platformvm/warp/unsigned_message_test.go b/avalanchego/vms/platformvm/warp/unsigned_message_test.go new file mode 100644 index 00000000..cf8073d8 --- /dev/null +++ b/avalanchego/vms/platformvm/warp/unsigned_message_test.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" +) + +func TestUnsignedMessage(t *testing.T) { + require := require.New(t) + + msg, err := NewUnsignedMessage( + ids.GenerateTestID(), + ids.GenerateTestID(), + []byte("payload"), + ) + require.NoError(err) + + msgBytes := msg.Bytes() + msg2, err := ParseUnsignedMessage(msgBytes) + require.NoError(err) + require.Equal(msg, msg2) +} + +func TestParseUnsignedMessageJunk(t *testing.T) { + _, err := ParseUnsignedMessage(utils.RandomBytes(1024)) + require.Error(t, err) +} diff --git a/avalanchego/vms/platformvm/warp/validator.go b/avalanchego/vms/platformvm/warp/validator.go new file mode 100644 index 00000000..357a442b --- /dev/null +++ b/avalanchego/vms/platformvm/warp/validator.go @@ -0,0 +1,142 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package warp + +import ( + "bytes" + "context" + "errors" + "fmt" + + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" +) + +var ( + _ utils.Sortable[*Validator] = (*Validator)(nil) + + ErrUnknownValidator = errors.New("unknown validator") + ErrWeightOverflow = errors.New("weight overflowed") +) + +type Validator struct { + PublicKey *bls.PublicKey + PublicKeyBytes []byte + Weight uint64 + NodeIDs []ids.NodeID +} + +func (v *Validator) Less(o *Validator) bool { + return bytes.Compare(v.PublicKeyBytes, o.PublicKeyBytes) < 0 +} + +// GetCanonicalValidatorSet returns the validator set of [subnetID] at +// [pChainHeight] in a canonical ordering. Also returns the total weight on +// [subnetID]. +func GetCanonicalValidatorSet( + ctx context.Context, + pChainState validators.State, + pChainHeight uint64, + subnetID ids.ID, +) ([]*Validator, uint64, error) { + // Get the validator set at the given height. 
+ vdrSet, err := pChainState.GetValidatorSet(ctx, pChainHeight, subnetID) + if err != nil { + return nil, 0, fmt.Errorf("failed to fetch validator set (P-Chain Height: %d, SubnetID: %s): %w", pChainHeight, subnetID, err) + } + + var ( + vdrs = make(map[string]*Validator, len(vdrSet)) + totalWeight uint64 + ) + for _, vdr := range vdrSet { + totalWeight, err = math.Add64(totalWeight, vdr.Weight) + if err != nil { + return nil, 0, fmt.Errorf("%w: %v", ErrWeightOverflow, err) + } + + if vdr.PublicKey == nil { + continue + } + + pkBytes := bls.PublicKeyToBytes(vdr.PublicKey) + uniqueVdr, ok := vdrs[string(pkBytes)] + if !ok { + uniqueVdr = &Validator{ + PublicKey: vdr.PublicKey, + PublicKeyBytes: pkBytes, + } + vdrs[string(pkBytes)] = uniqueVdr + } + + uniqueVdr.Weight += vdr.Weight // Impossible to overflow here + uniqueVdr.NodeIDs = append(uniqueVdr.NodeIDs, vdr.NodeID) + } + + // Sort validators by public key + vdrList := maps.Values(vdrs) + utils.Sort(vdrList) + return vdrList, totalWeight, nil +} + +// FilterValidators returns the validators in [vdrs] whose bit is set to 1 in +// [indices]. +// +// Returns an error if [indices] references an unknown validator. +func FilterValidators( + indices set.Bits, + vdrs []*Validator, +) ([]*Validator, error) { + // Verify that all alleged signers exist + if indices.BitLen() > len(vdrs) { + return nil, fmt.Errorf( + "%w: NumIndices (%d) >= NumFilteredValidators (%d)", + ErrUnknownValidator, + indices.BitLen()-1, // -1 to convert from length to index + len(vdrs), + ) + } + + filteredVdrs := make([]*Validator, 0, len(vdrs)) + for i, vdr := range vdrs { + if !indices.Contains(i) { + continue + } + + filteredVdrs = append(filteredVdrs, vdr) + } + return filteredVdrs, nil +} + +// SumWeight returns the total weight of the provided validators. 
+func SumWeight(vdrs []*Validator) (uint64, error) { + var ( + weight uint64 + err error + ) + for _, vdr := range vdrs { + weight, err = math.Add64(weight, vdr.Weight) + if err != nil { + return 0, fmt.Errorf("%w: %v", ErrWeightOverflow, err) + } + } + return weight, nil +} + +// AggregatePublicKeys returns the public key of the provided validators. +// +// Invariant: All of the public keys in [vdrs] are valid. +func AggregatePublicKeys(vdrs []*Validator) (*bls.PublicKey, error) { + pks := make([]*bls.PublicKey, len(vdrs)) + for i, vdr := range vdrs { + pks[i] = vdr.PublicKey + } + return bls.AggregatePublicKeys(pks) +} diff --git a/avalanchego/vms/platformvm/warp/validator_test.go b/avalanchego/vms/platformvm/warp/validator_test.go new file mode 100644 index 00000000..ef8998eb --- /dev/null +++ b/avalanchego/vms/platformvm/warp/validator_test.go @@ -0,0 +1,306 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package warp + +import ( + "context" + "math" + "testing" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" + "github.com/ava-labs/avalanchego/utils/set" +) + +func TestGetCanonicalValidatorSet(t *testing.T) { + type test struct { + name string + stateF func(*gomock.Controller) validators.State + expectedVdrs []*Validator + expectedWeight uint64 + expectedErr error + } + + tests := []test{ + { + name: "can't get validator set", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return(nil, errTest) + return state + }, + expectedErr: errTest, + }, + { + name: "all validators have public keys; no duplicate pub keys", + stateF: func(ctrl *gomock.Controller) validators.State { + state := 
validators.NewMockState(ctrl) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return( + map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: testVdrs[0].vdr.PublicKey, + Weight: testVdrs[0].vdr.Weight, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[1].vdr.PublicKey, + Weight: testVdrs[1].vdr.Weight, + }, + }, + nil, + ) + return state + }, + expectedVdrs: []*Validator{testVdrs[0].vdr, testVdrs[1].vdr}, + expectedWeight: 6, + expectedErr: nil, + }, + { + name: "all validators have public keys; duplicate pub keys", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return( + map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: testVdrs[0].vdr.PublicKey, + Weight: testVdrs[0].vdr.Weight, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[1].vdr.PublicKey, + Weight: testVdrs[1].vdr.Weight, + }, + testVdrs[2].nodeID: { + NodeID: testVdrs[2].nodeID, + PublicKey: testVdrs[0].vdr.PublicKey, + Weight: testVdrs[0].vdr.Weight, + }, + }, + nil, + ) + return state + }, + expectedVdrs: []*Validator{ + { + PublicKey: testVdrs[0].vdr.PublicKey, + PublicKeyBytes: testVdrs[0].vdr.PublicKeyBytes, + Weight: testVdrs[0].vdr.Weight * 2, + NodeIDs: []ids.NodeID{ + testVdrs[0].nodeID, + testVdrs[2].nodeID, + }, + }, + testVdrs[1].vdr, + }, + expectedWeight: 9, + expectedErr: nil, + }, + { + name: "validator without public key; no duplicate pub keys", + stateF: func(ctrl *gomock.Controller) validators.State { + state := validators.NewMockState(ctrl) + state.EXPECT().GetValidatorSet(gomock.Any(), pChainHeight, subnetID).Return( + map[ids.NodeID]*validators.GetValidatorOutput{ + testVdrs[0].nodeID: { + NodeID: testVdrs[0].nodeID, + PublicKey: nil, + Weight: 
testVdrs[0].vdr.Weight, + }, + testVdrs[1].nodeID: { + NodeID: testVdrs[1].nodeID, + PublicKey: testVdrs[1].vdr.PublicKey, + Weight: testVdrs[1].vdr.Weight, + }, + }, + nil, + ) + return state + }, + expectedVdrs: []*Validator{testVdrs[1].vdr}, + expectedWeight: 6, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + state := tt.stateF(ctrl) + + vdrs, weight, err := GetCanonicalValidatorSet(context.Background(), state, pChainHeight, subnetID) + require.ErrorIs(err, tt.expectedErr) + if err != nil { + return + } + require.Equal(tt.expectedWeight, weight) + + // These are pointers so have to test equality like this + require.Equal(len(tt.expectedVdrs), len(vdrs)) + for i, expectedVdr := range tt.expectedVdrs { + gotVdr := vdrs[i] + expectedPKBytes := bls.PublicKeyToBytes(expectedVdr.PublicKey) + gotPKBytes := bls.PublicKeyToBytes(gotVdr.PublicKey) + require.Equal(expectedPKBytes, gotPKBytes) + require.Equal(expectedVdr.PublicKeyBytes, gotVdr.PublicKeyBytes) + require.Equal(expectedVdr.Weight, gotVdr.Weight) + require.ElementsMatch(expectedVdr.NodeIDs, gotVdr.NodeIDs) + } + }) + } +} + +func TestFilterValidators(t *testing.T) { + sk0, err := bls.NewSecretKey() + require.NoError(t, err) + pk0 := bls.PublicFromSecretKey(sk0) + vdr0 := &Validator{ + PublicKey: pk0, + PublicKeyBytes: bls.PublicKeyToBytes(pk0), + Weight: 1, + } + + sk1, err := bls.NewSecretKey() + require.NoError(t, err) + pk1 := bls.PublicFromSecretKey(sk1) + vdr1 := &Validator{ + PublicKey: pk1, + PublicKeyBytes: bls.PublicKeyToBytes(pk1), + Weight: 2, + } + + type test struct { + name string + indices set.Bits + vdrs []*Validator + expectedVdrs []*Validator + expectedErr error + } + + tests := []test{ + { + name: "empty", + indices: set.NewBits(), + vdrs: []*Validator{}, + expectedVdrs: []*Validator{}, + expectedErr: nil, + }, + { + name: "unknown validator", + indices: 
set.NewBits(2), + vdrs: []*Validator{vdr0, vdr1}, + expectedErr: ErrUnknownValidator, + }, + { + name: "two filtered out", + indices: set.NewBits(), + vdrs: []*Validator{ + vdr0, + vdr1, + }, + expectedVdrs: []*Validator{}, + expectedErr: nil, + }, + { + name: "one filtered out", + indices: set.NewBits(1), + vdrs: []*Validator{ + vdr0, + vdr1, + }, + expectedVdrs: []*Validator{ + vdr1, + }, + expectedErr: nil, + }, + { + name: "none filtered out", + indices: set.NewBits(0, 1), + vdrs: []*Validator{ + vdr0, + vdr1, + }, + expectedVdrs: []*Validator{ + vdr0, + vdr1, + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + vdrs, err := FilterValidators(tt.indices, tt.vdrs) + require.ErrorIs(err, tt.expectedErr) + if err == nil { + require.Equal(tt.expectedVdrs, vdrs) + } + }) + } +} + +func TestSumWeight(t *testing.T) { + vdr0 := &Validator{ + Weight: 1, + } + vdr1 := &Validator{ + Weight: 2, + } + vdr2 := &Validator{ + Weight: math.MaxUint64, + } + + type test struct { + name string + vdrs []*Validator + expectedSum uint64 + expectedErr error + } + + tests := []test{ + { + name: "empty", + vdrs: []*Validator{}, + expectedSum: 0, + }, + { + name: "one", + vdrs: []*Validator{vdr0}, + expectedSum: 1, + }, + { + name: "two", + vdrs: []*Validator{vdr0, vdr1}, + expectedSum: 3, + }, + { + name: "overflow", + vdrs: []*Validator{vdr0, vdr2}, + expectedErr: ErrWeightOverflow, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + sum, err := SumWeight(tt.vdrs) + require.ErrorIs(err, tt.expectedErr) + if err == nil { + require.Equal(tt.expectedSum, sum) + } + }) + } +} diff --git a/avalanchego/vms/propertyfx/burn_operation.go b/avalanchego/vms/propertyfx/burn_operation.go index bb111812..4217420b 100644 --- a/avalanchego/vms/propertyfx/burn_operation.go +++ b/avalanchego/vms/propertyfx/burn_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 
2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx @@ -13,6 +13,8 @@ type BurnOperation struct { secp256k1fx.Input `serialize:"true"` } -func (op *BurnOperation) InitCtx(ctx *snow.Context) {} +func (*BurnOperation) InitCtx(*snow.Context) {} -func (op *BurnOperation) Outs() []verify.State { return nil } +func (*BurnOperation) Outs() []verify.State { + return nil +} diff --git a/avalanchego/vms/propertyfx/burn_operation_test.go b/avalanchego/vms/propertyfx/burn_operation_test.go index 65748c83..0b5715ea 100644 --- a/avalanchego/vms/propertyfx/burn_operation_test.go +++ b/avalanchego/vms/propertyfx/burn_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/avalanchego/vms/propertyfx/credential.go b/avalanchego/vms/propertyfx/credential.go index 564bcd73..a2622cf7 100644 --- a/avalanchego/vms/propertyfx/credential.go +++ b/avalanchego/vms/propertyfx/credential.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/avalanchego/vms/propertyfx/credential_test.go b/avalanchego/vms/propertyfx/credential_test.go index f469049f..d03d5b2b 100644 --- a/avalanchego/vms/propertyfx/credential_test.go +++ b/avalanchego/vms/propertyfx/credential_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package propertyfx diff --git a/avalanchego/vms/propertyfx/factory.go b/avalanchego/vms/propertyfx/factory.go index 8aeecba2..21c69c97 100644 --- a/avalanchego/vms/propertyfx/factory.go +++ b/avalanchego/vms/propertyfx/factory.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" ) var ( - _ vms.Factory = &Factory{} + _ vms.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'p', 'r', 'o', 'p', 'e', 'r', 't', 'y', 'f', 'x'} @@ -18,4 +18,6 @@ var ( type Factory struct{} -func (f *Factory) New(*snow.Context) (interface{}, error) { return &Fx{}, nil } +func (*Factory) New(logging.Logger) (interface{}, error) { + return &Fx{}, nil +} diff --git a/avalanchego/vms/propertyfx/factory_test.go b/avalanchego/vms/propertyfx/factory_test.go index 788e7124..25dc8935 100644 --- a/avalanchego/vms/propertyfx/factory_test.go +++ b/avalanchego/vms/propertyfx/factory_test.go @@ -1,15 +1,17 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx import ( "testing" + + "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { factory := Factory{} - if fx, err := factory.New(nil); err != nil { + if fx, err := factory.New(logging.NoLog{}); err != nil { t.Fatal(err) } else if fx == nil { t.Fatalf("Factory.New returned nil") diff --git a/avalanchego/vms/propertyfx/fx.go b/avalanchego/vms/propertyfx/fx.go index 78e4abec..2719c37e 100644 --- a/avalanchego/vms/propertyfx/fx.go +++ b/avalanchego/vms/propertyfx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. 
All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx @@ -98,4 +98,6 @@ func (fx *Fx) VerifyTransferOperation(tx secp256k1fx.UnsignedTx, op *BurnOperati return fx.VerifyCredentials(tx, &op.Input, &cred.Credential, &out.OutputOwners) } -func (fx *Fx) VerifyTransfer(_, _, _, _ interface{}) error { return errCantTransfer } +func (*Fx) VerifyTransfer(_, _, _, _ interface{}) error { + return errCantTransfer +} diff --git a/avalanchego/vms/propertyfx/fx_test.go b/avalanchego/vms/propertyfx/fx_test.go index e7645873..f46602ab 100644 --- a/avalanchego/vms/propertyfx/fx_test.go +++ b/avalanchego/vms/propertyfx/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -17,7 +17,7 @@ import ( var ( txBytes = []byte{0, 1, 2, 3, 4, 5} - sigBytes = [crypto.SECP256K1RSigLen]byte{ + sigBytes = [secp256k1.SignatureLen]byte{ 0x0e, 0x33, 0x4e, 0xbc, 0x67, 0xa7, 0x3f, 0xe8, 0x24, 0x33, 0xac, 0xa3, 0x47, 0x88, 0xa6, 0x3d, 0x58, 0xe5, 0x8e, 0xf0, 0x3a, 0xd5, 0x84, 0xf1, @@ -61,7 +61,7 @@ func TestFxVerifyMintOperation(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -71,7 +71,7 @@ func TestFxVerifyMintOperation(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: 
[][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -105,14 +105,14 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { t.Fatal(err) } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -140,7 +140,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -150,7 +150,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -172,7 +172,7 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -205,7 +205,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -215,7 +215,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -237,7 +237,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, 
time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -247,7 +247,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -276,7 +276,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -286,7 +286,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -314,7 +314,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -324,7 +324,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -350,7 +350,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -360,7 +360,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -380,7 +380,7 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { 
Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -390,7 +390,7 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -416,7 +416,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { @@ -426,7 +426,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { UnsignedBytes: txBytes, } cred := &Credential{Credential: secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, }} @@ -449,7 +449,7 @@ func TestFxVerifyTransfer(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} if err := fx.Initialize(&vm); err != nil { diff --git a/avalanchego/vms/propertyfx/mint_operation.go b/avalanchego/vms/propertyfx/mint_operation.go index 088cc7d1..535ea135 100644 --- a/avalanchego/vms/propertyfx/mint_operation.go +++ b/avalanchego/vms/propertyfx/mint_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/avalanchego/vms/propertyfx/mint_operation_test.go b/avalanchego/vms/propertyfx/mint_operation_test.go index b616660d..80e5cc24 100644 --- a/avalanchego/vms/propertyfx/mint_operation_test.go +++ b/avalanchego/vms/propertyfx/mint_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/avalanchego/vms/propertyfx/mint_output.go b/avalanchego/vms/propertyfx/mint_output.go index 7f84d3df..9284d807 100644 --- a/avalanchego/vms/propertyfx/mint_output.go +++ b/avalanchego/vms/propertyfx/mint_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/avalanchego/vms/propertyfx/mint_output_test.go b/avalanchego/vms/propertyfx/mint_output_test.go index f17ba6fb..9e79f6a2 100644 --- a/avalanchego/vms/propertyfx/mint_output_test.go +++ b/avalanchego/vms/propertyfx/mint_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/avalanchego/vms/propertyfx/owned_output.go b/avalanchego/vms/propertyfx/owned_output.go index dba2f26c..295b6feb 100644 --- a/avalanchego/vms/propertyfx/owned_output.go +++ b/avalanchego/vms/propertyfx/owned_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package propertyfx diff --git a/avalanchego/vms/propertyfx/owned_output_test.go b/avalanchego/vms/propertyfx/owned_output_test.go index 36e59981..c08c382f 100644 --- a/avalanchego/vms/propertyfx/owned_output_test.go +++ b/avalanchego/vms/propertyfx/owned_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package propertyfx diff --git a/avalanchego/vms/proposervm/batched_vm.go b/avalanchego/vms/proposervm/batched_vm.go index 932a9871..a9c7f7b6 100644 --- a/avalanchego/vms/proposervm/batched_vm.go +++ b/avalanchego/vms/proposervm/batched_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "context" "time" "github.com/ava-labs/avalanchego/database" @@ -16,15 +17,16 @@ import ( statelessblock "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -var _ block.BatchedChainVM = &VM{} +var _ block.BatchedChainVM = (*VM)(nil) func (vm *VM) GetAncestors( + ctx context.Context, blkID ids.ID, maxBlocksNum int, maxBlocksSize int, maxBlocksRetrivalTime time.Duration, ) ([][]byte, error) { - if vm.bVM == nil { + if vm.batchedVM == nil { return nil, block.ErrRemoteVMNotImplemented } @@ -65,7 +67,13 @@ func (vm *VM) GetAncestors( preMaxBlocksNum := maxBlocksNum - len(res) preMaxBlocksSize := maxBlocksSize - currentByteLength preMaxBlocksRetrivalTime := maxBlocksRetrivalTime - time.Since(startTime) - innerBytes, err := vm.bVM.GetAncestors(blkID, preMaxBlocksNum, preMaxBlocksSize, preMaxBlocksRetrivalTime) + innerBytes, err := vm.batchedVM.GetAncestors( + ctx, + blkID, + preMaxBlocksNum, + preMaxBlocksSize, + preMaxBlocksRetrivalTime, + ) if err != nil { if len(res) == 0 { return nil, err @@ -76,8 +84,8 @@ func (vm *VM) GetAncestors( return res, nil } -func (vm *VM) BatchedParseBlock(blks [][]byte) ([]snowman.Block, error) { - if vm.bVM == nil { +func (vm *VM) BatchedParseBlock(ctx context.Context, blks [][]byte) ([]snowman.Block, error) { + if vm.batchedVM == nil { return nil, block.ErrRemoteVMNotImplemented } @@ -92,19 +100,14 @@ func (vm *VM) BatchedParseBlock(blks [][]byte) ([]snowman.Block, error) { innerBlocksIndex int statelessBlockDescs = make([]partialData, 0, len(blks)) innerBlockBytes = 
make([][]byte, 0, len(blks)) - banffActivated = vm.Clock.Time().After(vm.activationTimeBanff) ) for ; blocksIndex < len(blks); blocksIndex++ { blkBytes := blks[blocksIndex] - statelessBlock, requireBanff, err := statelessblock.Parse(blkBytes) + statelessBlock, err := statelessblock.Parse(blkBytes) if err != nil { break } - if requireBanff && !banffActivated { - break - } - blkID := statelessBlock.ID() block, exists := vm.verifiedBlocks[blkID] if exists { @@ -121,7 +124,7 @@ func (vm *VM) BatchedParseBlock(blks [][]byte) ([]snowman.Block, error) { innerBlockBytes = append(innerBlockBytes, blks[blocksIndex:]...) // parse all inner blocks at once - innerBlks, err := vm.bVM.BatchedParseBlock(innerBlockBytes) + innerBlks, err := vm.batchedVM.BatchedParseBlock(ctx, innerBlockBytes) if err != nil { return nil, err } diff --git a/avalanchego/vms/proposervm/batched_vm_test.go b/avalanchego/vms/proposervm/batched_vm_test.go index 5aa0a5d6..b61b9062 100644 --- a/avalanchego/vms/proposervm/batched_vm_test.go +++ b/avalanchego/vms/proposervm/batched_vm_test.go @@ -1,12 +1,12 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm import ( "bytes" + "context" "crypto" - "errors" "testing" "time" @@ -34,11 +34,17 @@ func TestCoreVMNotRemote(t *testing.T) { maxBlocksNum := 1000 // an high value to get all built blocks maxBlocksSize := 1000000 // an high value to get all built blocks maxBlocksRetrivalTime := time.Duration(1000000) // an high value to get all built blocks - _, errAncestors := proVM.GetAncestors(blkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + _, errAncestors := proVM.GetAncestors( + context.Background(), + blkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) require.Error(errAncestors) var blks [][]byte - _, errBatchedParse := proVM.BatchedParseBlock(blks) + _, errBatchedParse := proVM.BatchedParseBlock(context.Background(), blks) require.Error(errBatchedParse) } @@ -57,13 +63,15 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk1, nil + } + builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build preFork block") // prepare build of next block - require.NoError(proRemoteVM.SetPreference(builtBlk1.ID())) - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk1.ID(): return coreBlk1, nil @@ -82,13 +90,15 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { HeightV: coreBlk1.Height() + 1, TimestampV: coreBlk1.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { 
+ return coreBlk2, nil + } + builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") // prepare build of next block - require.NoError(proRemoteVM.SetPreference(builtBlk2.ID())) - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk2.ID(): return coreBlk2, nil @@ -107,19 +117,16 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { HeightV: coreBlk2.Height() + 1, TimestampV: coreBlk2.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk3, nil + } + builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") // ...Call GetAncestors on them ... // Note: we assumed that if blkID is not known, that's NOT an error. // Simply return an empty result - coreVM.GetAncestorsF = func( - blkID ids.ID, - maxBlocksNum, - maxBlocksSize int, - maxBlocksRetrivalTime time.Duration, - ) ([][]byte, error) { + coreVM.GetAncestorsF = func(_ context.Context, blkID ids.ID, _, _ int, _ time.Duration) ([][]byte, error) { res := make([][]byte, 0, 3) switch blkID { case coreBlk3.ID(): @@ -143,7 +150,13 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { maxBlocksNum := 1000 // an high value to get all built blocks maxBlocksSize := 1000000 // an high value to get all built blocks maxBlocksRetrivalTime := time.Duration(1000000) // an high value to get all built blocks - res, err := proRemoteVM.GetAncestors(reqBlkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + res, err := proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) // ... 
and check returned values are as expected require.NoError(err, "Error calling GetAncestors: %v", err) @@ -154,14 +167,26 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { // another good call reqBlkID = builtBlk1.ID() - res, err = proRemoteVM.GetAncestors(reqBlkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + res, err = proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) require.NoError(err, "Error calling GetAncestors: %v", err) require.Len(res, 1, "GetAncestor returned %v entries instead of %v", len(res), 1) require.EqualValues(res[0], builtBlk1.Bytes()) // a faulty call reqBlkID = ids.Empty - res, err = proRemoteVM.GetAncestors(reqBlkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + res, err = proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) require.NoError(err, "Error calling GetAncestors: %v", err) require.Empty(res, "GetAncestor returned %v entries instead of %v", len(res), 0) } @@ -181,13 +206,15 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk1, nil + } + builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build preFork block") // prepare build of next block - require.NoError(builtBlk1.Verify()) - require.NoError(proRemoteVM.SetPreference(builtBlk1.ID())) + require.NoError(builtBlk1.Verify(context.Background())) + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) coreBlk2 := &snowman.TestBlock{ @@ -200,13 +227,15 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { HeightV: 
coreBlk1.Height() + 1, TimestampV: coreBlk1.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk2, nil + } + builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") // prepare build of next block - require.NoError(builtBlk2.Verify()) - require.NoError(proRemoteVM.SetPreference(builtBlk2.ID())) + require.NoError(builtBlk2.Verify(context.Background())) + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) coreBlk3 := &snowman.TestBlock{ @@ -219,22 +248,19 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { HeightV: coreBlk2.Height() + 1, TimestampV: coreBlk2.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk3, nil + } + builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") - require.NoError(builtBlk3.Verify()) - require.NoError(proRemoteVM.SetPreference(builtBlk3.ID())) + require.NoError(builtBlk3.Verify(context.Background())) + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) // ...Call GetAncestors on them ... // Note: we assumed that if blkID is not known, that's NOT an error. 
// Simply return an empty result - coreVM.GetAncestorsF = func( - blkID ids.ID, - maxBlocksNum, - maxBlocksSize int, - maxBlocksRetrivalTime time.Duration, - ) ([][]byte, error) { + coreVM.GetAncestorsF = func(_ context.Context, blkID ids.ID, _, _ int, _ time.Duration) ([][]byte, error) { res := make([][]byte, 0, 3) switch blkID { case coreBlk3.ID(): @@ -254,7 +280,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -273,7 +299,13 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { maxBlocksNum := 1000 // an high value to get all built blocks maxBlocksSize := 1000000 // an high value to get all built blocks maxBlocksRetrivalTime := time.Duration(1000000) // an high value to get all built blocks - res, err := proRemoteVM.GetAncestors(reqBlkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + res, err := proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) // ... 
and check returned values are as expected require.NoError(err, "Error calling GetAncestors: %v", err) @@ -284,14 +316,26 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { // another good call reqBlkID = builtBlk1.ID() - res, err = proRemoteVM.GetAncestors(reqBlkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + res, err = proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) require.NoError(err, "Error calling GetAncestors: %v", err) require.Len(res, 1, "GetAncestor returned %v entries instead of %v", len(res), 1) require.EqualValues(res[0], builtBlk1.Bytes()) // a faulty call reqBlkID = ids.Empty - res, err = proRemoteVM.GetAncestors(reqBlkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + res, err = proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) require.NoError(err, "Error calling GetAncestors: %v", err) require.Empty(res, "GetAncestor returned %v entries instead of %v", len(res), 0) } @@ -317,15 +361,17 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: preForkTime, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk1, nil + } + builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build preFork block") _, ok := builtBlk1.(*preForkBlock) require.True(ok, "Block should be a pre-fork one") // prepare build of next block - require.NoError(proRemoteVM.SetPreference(builtBlk1.ID())) - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk1.ID(): return 
coreBlk1, nil @@ -344,15 +390,17 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { HeightV: coreBlk1.Height() + 1, TimestampV: postForkTime, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk2, nil + } + builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") _, ok = builtBlk2.(*preForkBlock) require.True(ok, "Block should be a pre-fork one") // prepare build of next block - require.NoError(proRemoteVM.SetPreference(builtBlk2.ID())) - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk2.ID(): return coreBlk2, nil @@ -373,15 +421,17 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { HeightV: coreBlk2.Height() + 1, TimestampV: postForkTime.Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk3, nil + } + builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") _, ok = builtBlk3.(*postForkBlock) require.True(ok, "Block should be a post-fork one") // prepare build of next block - require.NoError(builtBlk3.Verify()) - require.NoError(proRemoteVM.SetPreference(builtBlk3.ID())) + require.NoError(builtBlk3.Verify(context.Background())) + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) coreBlk4 := &snowman.TestBlock{ @@ -394,22 +444,19 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { 
HeightV: coreBlk3.Height() + 1, TimestampV: postForkTime, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk4, nil } - builtBlk4, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk4, nil + } + builtBlk4, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") _, ok = builtBlk4.(*postForkBlock) require.True(ok, "Block should be a post-fork one") - require.NoError(builtBlk4.Verify()) + require.NoError(builtBlk4.Verify(context.Background())) // ...Call GetAncestors on them ... // Note: we assumed that if blkID is not known, that's NOT an error. // Simply return an empty result - coreVM.GetAncestorsF = func( - blkID ids.ID, - maxBlocksNum, - maxBlocksSize int, - maxBlocksRetrivalTime time.Duration, - ) ([][]byte, error) { + coreVM.GetAncestorsF = func(_ context.Context, blkID ids.ID, _, _ int, _ time.Duration) ([][]byte, error) { res := make([][]byte, 0, 3) switch blkID { case coreBlk4.ID(): @@ -439,7 +486,13 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { maxBlocksNum := 1000 // an high value to get all built blocks maxBlocksSize := 1000000 // an high value to get all built blocks maxBlocksRetrivalTime := 10 * time.Minute // an high value to get all built blocks - res, err := proRemoteVM.GetAncestors(reqBlkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + res, err := proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) // ... 
and check returned values are as expected require.NoError(err, "Error calling GetAncestors: %v", err) @@ -451,14 +504,26 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { // another good call reqBlkID = builtBlk1.ID() - res, err = proRemoteVM.GetAncestors(reqBlkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + res, err = proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) require.NoError(err, "Error calling GetAncestors: %v", err) require.Len(res, 1, "GetAncestor returned %v entries instead of %v", len(res), 1) require.EqualValues(res[0], builtBlk1.Bytes()) // a faulty call reqBlkID = ids.Empty - res, err = proRemoteVM.GetAncestors(reqBlkID, maxBlocksNum, maxBlocksSize, maxBlocksRetrivalTime) + res, err = proRemoteVM.GetAncestors( + context.Background(), + reqBlkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) require.NoError(err, "Error calling GetAncestors: %v", err) require.Len(res, 0, "GetAncestor returned %v entries instead of %v", len(res), 0) } @@ -478,13 +543,15 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk1, nil + } + builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build preFork block") // prepare build of next block - require.NoError(proRemoteVM.SetPreference(builtBlk1.ID())) - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk1.ID(): return coreBlk1, nil @@ -503,13 +570,15 @@ func 
TestBatchedParseBlockPreForkOnly(t *testing.T) { HeightV: coreBlk1.Height() + 1, TimestampV: coreBlk1.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk2, nil + } + builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") // prepare build of next block - require.NoError(proRemoteVM.SetPreference(builtBlk2.ID())) - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk2.ID(): return coreBlk2, nil @@ -528,11 +597,13 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { HeightV: coreBlk2.Height() + 1, TimestampV: coreBlk2.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk3, nil + } + builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreBlk1.Bytes()): return coreBlk1, nil @@ -545,7 +616,7 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { } } - coreVM.BatchedParseBlockF = func(blks [][]byte) ([]snowman.Block, error) { + coreVM.BatchedParseBlockF = func(_ context.Context, blks [][]byte) ([]snowman.Block, error) { res := make([]snowman.Block, 0, len(blks)) for _, blkBytes := range blks { switch { @@ -556,7 +627,7 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { case bytes.Equal(blkBytes, coreBlk3.Bytes()): res = 
append(res, coreBlk3) default: - return nil, errors.New("Unexpected call to parse unknown block") + return nil, errUnknownBlock } } return res, nil @@ -567,7 +638,7 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { builtBlk2.Bytes(), builtBlk3.Bytes(), } - res, err := proRemoteVM.BatchedParseBlock(bytesToParse) + res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) require.NoError(err, "Error calling BatchedParseBlock: %v", err) require.Len(res, 3, "BatchedParseBlock returned %v entries instead of %v", len(res), 3) require.Equal(res[0].ID(), builtBlk1.ID()) @@ -590,13 +661,15 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk1, nil + } + builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build preFork block") // prepare build of next block - require.NoError(builtBlk1.Verify()) - require.NoError(proRemoteVM.SetPreference(builtBlk1.ID())) + require.NoError(builtBlk1.Verify(context.Background())) + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) coreBlk2 := &snowman.TestBlock{ @@ -609,13 +682,15 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { HeightV: coreBlk1.Height() + 1, TimestampV: coreBlk1.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk2, nil + } + builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") // prepare build of next block - 
require.NoError(builtBlk2.Verify()) - require.NoError(proRemoteVM.SetPreference(builtBlk2.ID())) + require.NoError(builtBlk2.Verify(context.Background())) + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) coreBlk3 := &snowman.TestBlock{ @@ -628,11 +703,13 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { HeightV: coreBlk2.Height() + 1, TimestampV: coreBlk2.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk3, nil + } + builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreBlk1.Bytes()): return coreBlk1, nil @@ -645,7 +722,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { } } - coreVM.BatchedParseBlockF = func(blks [][]byte) ([]snowman.Block, error) { + coreVM.BatchedParseBlockF = func(_ context.Context, blks [][]byte) ([]snowman.Block, error) { res := make([]snowman.Block, 0, len(blks)) for _, blkBytes := range blks { switch { @@ -656,7 +733,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { case bytes.Equal(blkBytes, coreBlk3.Bytes()): res = append(res, coreBlk3) default: - return nil, errors.New("Unexpected call to parse unknown block") + return nil, errUnknownBlock } } return res, nil @@ -667,7 +744,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { builtBlk2.Bytes(), builtBlk3.Bytes(), } - res, err := proRemoteVM.BatchedParseBlock(bytesToParse) + res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) require.NoError(err, "Error calling BatchedParseBlock: %v", err) require.Len(res, 3, "BatchedParseBlock 
returned %v entries instead of %v", len(res), 3) require.Equal(res[0].ID(), builtBlk1.ID()) @@ -696,15 +773,17 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: preForkTime, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk1, nil + } + builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build preFork block") _, ok := builtBlk1.(*preForkBlock) require.True(ok, "Block should be a pre-fork one") // prepare build of next block - require.NoError(proRemoteVM.SetPreference(builtBlk1.ID())) - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk1.ID(): return coreBlk1, nil @@ -723,15 +802,17 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { HeightV: coreBlk1.Height() + 1, TimestampV: postForkTime, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk2, nil + } + builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") _, ok = builtBlk2.(*preForkBlock) require.True(ok, "Block should be a pre-fork one") // prepare build of next block - require.NoError(proRemoteVM.SetPreference(builtBlk2.ID())) - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk2.ID(): return coreBlk2, nil 
@@ -752,15 +833,17 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { HeightV: coreBlk2.Height() + 1, TimestampV: postForkTime.Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk3, nil + } + builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") _, ok = builtBlk3.(*postForkBlock) require.True(ok, "Block should be a post-fork one") // prepare build of next block - require.NoError(builtBlk3.Verify()) - require.NoError(proRemoteVM.SetPreference(builtBlk3.ID())) + require.NoError(builtBlk3.Verify(context.Background())) + require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) proRemoteVM.Set(proRemoteVM.Time().Add(proposer.MaxDelay)) coreBlk4 := &snowman.TestBlock{ @@ -773,14 +856,16 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { HeightV: coreBlk3.Height() + 1, TimestampV: postForkTime, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk4, nil } - builtBlk4, err := proRemoteVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk4, nil + } + builtBlk4, err := proRemoteVM.BuildBlock(context.Background()) require.NoError(err, "Could not build proposer block") _, ok = builtBlk4.(*postForkBlock) require.True(ok, "Block should be a post-fork one") - require.NoError(builtBlk4.Verify()) + require.NoError(builtBlk4.Verify(context.Background())) - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreBlk1.Bytes()): return coreBlk1, nil @@ -795,7 +880,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { } } - coreVM.BatchedParseBlockF = func(blks [][]byte) ([]snowman.Block, 
error) { + coreVM.BatchedParseBlockF = func(_ context.Context, blks [][]byte) ([]snowman.Block, error) { res := make([]snowman.Block, 0, len(blks)) for _, blkBytes := range blks { switch { @@ -808,7 +893,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { case bytes.Equal(blkBytes, coreBlk4.Bytes()): res = append(res, coreBlk4) default: - return nil, errors.New("Unexpected call to parse unknown block") + return nil, errUnknownBlock } } return res, nil @@ -821,7 +906,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { builtBlk1.Bytes(), } - res, err := proRemoteVM.BatchedParseBlock(bytesToParse) + res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) require.NoError(err, "Error calling BatchedParseBlock: %v", err) require.Len(res, 4, "BatchedParseBlock returned %v entries instead of %v", len(res), 4) require.Equal(res[0].ID(), builtBlk4.ID()) @@ -862,6 +947,7 @@ func initTestRemoteProposerVM( coreVM.TestBatchedVM.T = t coreVM.InitializeF = func( + context.Context, *snow.Context, manager.Manager, []byte, @@ -873,8 +959,10 @@ func initTestRemoteProposerVM( ) error { return nil } - coreVM.LastAcceptedF = func() (ids.ID, error) { return coreGenBlk.ID(), nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { + return coreGenBlk.ID(), nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreGenBlk.ID(): return coreGenBlk, nil @@ -882,7 +970,7 @@ func initTestRemoteProposerVM( return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -891,43 +979,75 @@ func initTestRemoteProposerVM( } } - proVM := New(coreVM, proBlkStartTime, 0, time.Time{}) + proVM := New( + coreVM, + proBlkStartTime, 
+ 0, + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, + ) valState := &validators.TestState{ T: t, } - valState.GetMinimumHeightF = func() (uint64, error) { return coreGenBlk.Height(), nil } - valState.GetCurrentHeightF = func() (uint64, error) { return defaultPChainHeight, nil } - valState.GetValidatorSetF = func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - res := make(map[ids.NodeID]uint64) - res[proVM.ctx.NodeID] = uint64(10) - res[ids.NodeID{1}] = uint64(5) - res[ids.NodeID{2}] = uint64(6) - res[ids.NodeID{3}] = uint64(7) - return res, nil + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return coreGenBlk.Height(), nil + } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return defaultPChainHeight, nil + } + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + proVM.ctx.NodeID: { + NodeID: proVM.ctx.NodeID, + Weight: 10, + }, + {1}: { + NodeID: ids.NodeID{1}, + Weight: 5, + }, + {2}: { + NodeID: ids.NodeID{2}, + Weight: 6, + }, + {3}: { + NodeID: ids.NodeID{3}, + Weight: 7, + }, + }, nil } ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - ctx.StakingCertLeaf = pTestCert.Leaf - ctx.StakingLeafSigner = pTestCert.PrivateKey.(crypto.Signer) ctx.ValidatorState = valState dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) // make sure that DBs are compressed correctly dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - if err := proVM.Initialize(ctx, dummyDBManager, initialState, nil, nil, nil, nil, nil); err != nil { + err := proVM.Initialize( + context.Background(), + ctx, + dummyDBManager, + initialState, + nil, + nil, + nil, + nil, + nil, + ) + if err != nil { t.Fatalf("failed to initialize proposerVM with %s", err) } // Initialize shouldn't be called again coreVM.InitializeF = nil - if 
err := proVM.SetState(snow.NormalOp); err != nil { + if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(coreGenBlk.IDV); err != nil { + if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { t.Fatal(err) } diff --git a/avalanchego/vms/proposervm/block.go b/avalanchego/vms/proposervm/block.go index 754081cd..a448dcb3 100644 --- a/avalanchego/vms/proposervm/block.go +++ b/avalanchego/vms/proposervm/block.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "context" "errors" "fmt" "time" @@ -16,6 +17,8 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/vms/proposervm/block" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" + + smblock "github.com/ava-labs/avalanchego/snow/engine/snowman/block" ) const ( @@ -46,15 +49,15 @@ type Block interface { // acceptOuterBlk and acceptInnerBlk allow controlling acceptance of outer // and inner blocks. 
acceptOuterBlk() error - acceptInnerBlk() error + acceptInnerBlk(context.Context) error - verifyPreForkChild(child *preForkBlock) error - verifyPostForkChild(child *postForkBlock) error - verifyPostForkOption(child *postForkOption) error + verifyPreForkChild(ctx context.Context, child *preForkBlock) error + verifyPostForkChild(ctx context.Context, child *postForkBlock) error + verifyPostForkOption(ctx context.Context, child *postForkOption) error - buildChild() (Block, error) + buildChild(context.Context) (Block, error) - pChainHeight() (uint64, error) + pChainHeight(context.Context) (uint64, error) } type PostForkBlock interface { @@ -87,8 +90,13 @@ func (p *postForkCommonComponents) Height() uint64 { // 7) [child]'s timestamp is within its proposer's window // 8) [child] has a valid signature from its proposer // 9) [child]'s inner block is valid -func (p *postForkCommonComponents) Verify(parentTimestamp time.Time, parentPChainHeight uint64, child *postForkBlock) error { - if err := verifyIsNotOracleBlock(p.innerBlk); err != nil { +func (p *postForkCommonComponents) Verify( + ctx context.Context, + parentTimestamp time.Time, + parentPChainHeight uint64, + child *postForkBlock, +) error { + if err := verifyIsNotOracleBlock(ctx, p.innerBlk); err != nil { return err } @@ -117,7 +125,7 @@ func (p *postForkCommonComponents) Verify(parentTimestamp time.Time, parentPChai // been synced up to this point yet. 
if p.vm.consensusState == snow.NormalOp { childID := child.ID() - currentPChainHeight, err := p.vm.ctx.ValidatorState.GetCurrentHeight() + currentPChainHeight, err := p.vm.ctx.ValidatorState.GetCurrentHeight(ctx) if err != nil { p.vm.ctx.Log.Error("block verification failed", zap.String("reason", "failed to get current P-Chain height"), @@ -132,7 +140,7 @@ func (p *postForkCommonComponents) Verify(parentTimestamp time.Time, parentPChai childHeight := child.Height() proposerID := child.Proposer() - minDelay, err := p.vm.Windower.Delay(childHeight, parentPChainHeight, proposerID) + minDelay, err := p.vm.Windower.Delay(ctx, childHeight, parentPChainHeight, proposerID) if err != nil { return err } @@ -156,11 +164,18 @@ func (p *postForkCommonComponents) Verify(parentTimestamp time.Time, parentPChai ) } - return p.vm.verifyAndRecordInnerBlk(child) + return p.vm.verifyAndRecordInnerBlk( + ctx, + &smblock.Context{ + PChainHeight: parentPChainHeight, + }, + child, + ) } // Return the child (a *postForkBlock) of this block func (p *postForkCommonComponents) buildChild( + ctx context.Context, parentID ids.ID, parentTimestamp time.Time, parentPChainHeight uint64, @@ -173,7 +188,7 @@ func (p *postForkCommonComponents) buildChild( // The child's P-Chain height is proposed as the optimal P-Chain height that // is at least the parent's P-Chain height - pChainHeight, err := p.vm.optimalPChainHeight(parentPChainHeight) + pChainHeight, err := p.vm.optimalPChainHeight(ctx, parentPChainHeight) if err != nil { return nil, err } @@ -182,7 +197,7 @@ func (p *postForkCommonComponents) buildChild( if delay < proposer.MaxDelay { parentHeight := p.innerBlk.Height() proposerID := p.vm.ctx.NodeID - minDelay, err := p.vm.Windower.Delay(parentHeight+1, parentPChainHeight, proposerID) + minDelay, err := p.vm.Windower.Delay(ctx, parentHeight+1, parentPChainHeight, proposerID) if err != nil { return nil, err } @@ -206,59 +221,40 @@ func (p *postForkCommonComponents) buildChild( } } - innerBlock, 
err := p.vm.ChainVM.BuildBlock() + var innerBlock snowman.Block + if p.vm.blockBuilderVM != nil { + innerBlock, err = p.vm.blockBuilderVM.BuildBlockWithContext(ctx, &smblock.Context{ + PChainHeight: parentPChainHeight, + }) + } else { + innerBlock, err = p.vm.ChainVM.BuildBlock(ctx) + } if err != nil { return nil, err } - banffActivated := newTimestamp.After(p.vm.activationTimeBanff) - // Build the child var statelessChild block.SignedBlock if delay >= proposer.MaxDelay { - if banffActivated { - statelessChild, err = block.BuildUnsignedBanff( - parentID, - newTimestamp, - pChainHeight, - innerBlock.Bytes(), - ) - } else { - statelessChild, err = block.BuildUnsignedApricot( - parentID, - newTimestamp, - pChainHeight, - innerBlock.Bytes(), - ) - } - if err != nil { - return nil, err - } + statelessChild, err = block.BuildUnsigned( + parentID, + newTimestamp, + pChainHeight, + innerBlock.Bytes(), + ) } else { - if banffActivated { - statelessChild, err = block.BuildBanff( - parentID, - newTimestamp, - pChainHeight, - p.vm.ctx.StakingCertLeaf, - innerBlock.Bytes(), - p.vm.ctx.ChainID, - p.vm.ctx.StakingLeafSigner, - ) - } else { - statelessChild, err = block.BuildApricot( - parentID, - newTimestamp, - pChainHeight, - p.vm.ctx.StakingCertLeaf, - innerBlock.Bytes(), - p.vm.ctx.ChainID, - p.vm.ctx.StakingLeafSigner, - ) - } - if err != nil { - return nil, err - } + statelessChild, err = block.Build( + parentID, + newTimestamp, + pChainHeight, + p.vm.stakingCertLeaf, + innerBlock.Bytes(), + p.vm.ctx.ChainID, + p.vm.stakingLeafSigner, + ) + } + if err != nil { + return nil, err } child := &postForkBlock{ @@ -288,7 +284,7 @@ func (p *postForkCommonComponents) setInnerBlk(innerBlk snowman.Block) { p.innerBlk = innerBlk } -func verifyIsOracleBlock(b snowman.Block) error { +func verifyIsOracleBlock(ctx context.Context, b snowman.Block) error { oracle, ok := b.(snowman.OracleBlock) if !ok { return fmt.Errorf( @@ -296,16 +292,16 @@ func verifyIsOracleBlock(b snowman.Block) error 
{ errUnexpectedBlockType, b.ID(), b, ) } - _, err := oracle.Options() + _, err := oracle.Options(ctx) return err } -func verifyIsNotOracleBlock(b snowman.Block) error { +func verifyIsNotOracleBlock(ctx context.Context, b snowman.Block) error { oracle, ok := b.(snowman.OracleBlock) if !ok { return nil } - _, err := oracle.Options() + _, err := oracle.Options(ctx) switch err { case nil: return fmt.Errorf( diff --git a/avalanchego/vms/proposervm/block/block.go b/avalanchego/vms/proposervm/block/block.go index 1967abc3..efdba801 100644 --- a/avalanchego/vms/proposervm/block/block.go +++ b/avalanchego/vms/proposervm/block/block.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -15,7 +15,7 @@ import ( ) var ( - _ SignedBlock = &statelessBlock{} + _ SignedBlock = (*statelessBlock)(nil) errUnexpectedProposer = errors.New("expected no proposer but one was provided") errMissingProposer = errors.New("expected proposer but none was provided") @@ -59,10 +59,21 @@ type statelessBlock struct { bytes []byte } -func (b *statelessBlock) ID() ids.ID { return b.id } -func (b *statelessBlock) ParentID() ids.ID { return b.StatelessBlock.ParentID } -func (b *statelessBlock) Block() []byte { return b.StatelessBlock.Block } -func (b *statelessBlock) Bytes() []byte { return b.bytes } +func (b *statelessBlock) ID() ids.ID { + return b.id +} + +func (b *statelessBlock) ParentID() ids.ID { + return b.StatelessBlock.ParentID +} + +func (b *statelessBlock) Block() []byte { + return b.StatelessBlock.Block +} + +func (b *statelessBlock) Bytes() []byte { + return b.bytes +} func (b *statelessBlock) initialize(bytes []byte) error { b.bytes = bytes @@ -93,9 +104,17 @@ func (b *statelessBlock) initialize(bytes []byte) error { return nil } -func (b *statelessBlock) PChainHeight() uint64 { return b.StatelessBlock.PChainHeight } -func (b 
*statelessBlock) Timestamp() time.Time { return b.timestamp } -func (b *statelessBlock) Proposer() ids.NodeID { return b.proposer } +func (b *statelessBlock) PChainHeight() uint64 { + return b.StatelessBlock.PChainHeight +} + +func (b *statelessBlock) Timestamp() time.Time { + return b.timestamp +} + +func (b *statelessBlock) Proposer() ids.NodeID { + return b.proposer +} func (b *statelessBlock) Verify(shouldHaveProposer bool, chainID ids.ID) error { if !shouldHaveProposer { diff --git a/avalanchego/vms/proposervm/block/block_test.go b/avalanchego/vms/proposervm/block/block_test.go index afa68b6f..7b6b6de5 100644 --- a/avalanchego/vms/proposervm/block/block_test.go +++ b/avalanchego/vms/proposervm/block/block_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/codec/reflectcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/units" ) @@ -35,7 +34,7 @@ func TestVerifyNoCertWithSignature(t *testing.T) { require := require.New(t) - builtBlockIntf, err := BuildUnsignedApricot(parentID, timestamp, pChainHeight, innerBlockBytes) + builtBlockIntf, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) require.NoError(err) builtBlock := builtBlockIntf.(*statelessBlock) @@ -48,7 +47,7 @@ func TestVerifyNoCertWithSignature(t *testing.T) { require.Error(err) } -func TestBlockSizeLimitExceeded(t *testing.T) { +func TestBlockSizeLimit(t *testing.T) { require := require.New(t) parentID := ids.ID{1} @@ -56,11 +55,7 @@ func TestBlockSizeLimitExceeded(t *testing.T) { pChainHeight := uint64(2) innerBlockBytes := bytes.Repeat([]byte{0}, 270*units.KiB) - // linear codec should fail to marshal, due to exceeded maximum length - _, err := BuildUnsignedApricot(parentID, timestamp, 
pChainHeight, innerBlockBytes) - require.ErrorIs(err, reflectcodec.ErrMaxMarshalSliceLimitExceeded) - - // with the new max limit, it should be able to build blocks - _, err = BuildUnsignedBanff(parentID, timestamp, pChainHeight, innerBlockBytes) + // with the large limit, it should be able to build large blocks + _, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) require.NoError(err) } diff --git a/avalanchego/vms/proposervm/block/build.go b/avalanchego/vms/proposervm/block/build.go index 1da65904..ccfa4da9 100644 --- a/avalanchego/vms/proposervm/block/build.go +++ b/avalanchego/vms/proposervm/block/build.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -9,32 +9,12 @@ import ( "crypto/x509" "time" - "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/wrappers" ) -func BuildUnsignedApricot( - parentID ids.ID, - timestamp time.Time, - pChainHeight uint64, - blockBytes []byte, -) (SignedBlock, error) { - return buildUnsigned(apricotCodec, parentID, timestamp, pChainHeight, blockBytes) -} - -func BuildUnsignedBanff( - parentID ids.ID, - timestamp time.Time, - pChainHeight uint64, - blockBytes []byte, -) (SignedBlock, error) { - return buildUnsigned(banffCodec, parentID, timestamp, pChainHeight, blockBytes) -} - -func buildUnsigned( - cm codec.Manager, +func BuildUnsigned( parentID ids.ID, timestamp time.Time, pChainHeight uint64, @@ -51,39 +31,14 @@ func buildUnsigned( timestamp: timestamp, } - bytes, err := cm.Marshal(codecVersion, &block) + bytes, err := c.Marshal(codecVersion, &block) if err != nil { return nil, err } return block, block.initialize(bytes) } -func BuildApricot( - parentID ids.ID, - timestamp time.Time, - pChainHeight uint64, - cert *x509.Certificate, - 
blockBytes []byte, - chainID ids.ID, - key crypto.Signer, -) (SignedBlock, error) { - return build(apricotCodec, parentID, timestamp, pChainHeight, cert, blockBytes, chainID, key) -} - -func BuildBanff( - parentID ids.ID, - timestamp time.Time, - pChainHeight uint64, - cert *x509.Certificate, - blockBytes []byte, - chainID ids.ID, - key crypto.Signer, -) (SignedBlock, error) { - return build(banffCodec, parentID, timestamp, pChainHeight, cert, blockBytes, chainID, key) -} - -func build( - cm codec.Manager, +func Build( parentID ids.ID, timestamp time.Time, pChainHeight uint64, @@ -106,7 +61,7 @@ func build( } var blockIntf SignedBlock = block - unsignedBytesWithEmptySignature, err := cm.Marshal(codecVersion, &blockIntf) + unsignedBytesWithEmptySignature, err := c.Marshal(codecVersion, &blockIntf) if err != nil { return nil, err } @@ -130,7 +85,7 @@ func build( return nil, err } - block.bytes, err = cm.Marshal(codecVersion, &blockIntf) + block.bytes, err = c.Marshal(codecVersion, &blockIntf) return block, err } @@ -145,7 +100,7 @@ func BuildHeader( Body: bodyID, } - bytes, err := banffCodec.Marshal(codecVersion, &header) + bytes, err := c.Marshal(codecVersion, &header) header.bytes = bytes return &header, err } @@ -162,7 +117,7 @@ func BuildOption( InnerBytes: innerBytes, } - bytes, err := banffCodec.Marshal(codecVersion, &block) + bytes, err := c.Marshal(codecVersion, &block) if err != nil { return nil, err } diff --git a/avalanchego/vms/proposervm/block/build_test.go b/avalanchego/vms/proposervm/block/build_test.go index 3b8e10eb..c2d89cff 100644 --- a/avalanchego/vms/proposervm/block/build_test.go +++ b/avalanchego/vms/proposervm/block/build_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block @@ -29,7 +29,7 @@ func TestBuild(t *testing.T) { cert := tlsCert.Leaf key := tlsCert.PrivateKey.(crypto.Signer) - builtBlock, err := BuildApricot( + builtBlock, err := Build( parentID, timestamp, pChainHeight, @@ -60,7 +60,7 @@ func TestBuildUnsigned(t *testing.T) { require := require.New(t) - builtBlock, err := BuildUnsignedApricot(parentID, timestamp, pChainHeight, innerBlockBytes) + builtBlock, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) require.NoError(err) require.Equal(parentID, builtBlock.ParentID()) diff --git a/avalanchego/vms/proposervm/block/codec.go b/avalanchego/vms/proposervm/block/codec.go index 09b8a915..bf8089db 100644 --- a/avalanchego/vms/proposervm/block/codec.go +++ b/avalanchego/vms/proposervm/block/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block @@ -13,39 +13,22 @@ import ( const codecVersion = 0 -var ( - // TODO: After Banff is activated, this codec can be deleted. - // - // Before the Banff upgrade, enforce a 256 KiB maximum block size. - apricotCodec codec.Manager - - // After the Banff upgrade, the maximum block size is enforced by the - // p2p message size limit. See: [constants.DefaultMaxMessageSize] - // - // Invariant: This codec must never be used to unmarshal a slice unless it - // is a `[]byte`. Otherwise a malicious payload could cause an - // OOM. - banffCodec codec.Manager -) +// The maximum block size is enforced by the p2p message size limit. +// See: [constants.DefaultMaxMessageSize] +// +// Invariant: This codec must never be used to unmarshal a slice unless it is a +// `[]byte`. Otherwise a malicious payload could cause an OOM. 
+var c codec.Manager func init() { - // codec.defaultMaxSize of 256 KiB - apricotLinearCodec := linearcodec.NewDefault() - apricotCodec = codec.NewDefaultManager() - - // maximum allowable size - banffLinearCodec := linearcodec.NewCustomMaxLength(math.MaxUint32) - banffCodec = codec.NewManager(math.MaxInt) + linearCodec := linearcodec.NewCustomMaxLength(math.MaxUint32) + c = codec.NewManager(math.MaxInt) errs := wrappers.Errs{} errs.Add( - apricotLinearCodec.RegisterType(&statelessBlock{}), - apricotLinearCodec.RegisterType(&option{}), - apricotCodec.RegisterCodec(codecVersion, apricotLinearCodec), - - banffLinearCodec.RegisterType(&statelessBlock{}), - banffLinearCodec.RegisterType(&option{}), - banffCodec.RegisterCodec(codecVersion, banffLinearCodec), + linearCodec.RegisterType(&statelessBlock{}), + linearCodec.RegisterType(&option{}), + c.RegisterCodec(codecVersion, linearCodec), ) if errs.Errored() { panic(errs.Err) diff --git a/avalanchego/vms/proposervm/block/header.go b/avalanchego/vms/proposervm/block/header.go index 591c8509..47ed36b9 100644 --- a/avalanchego/vms/proposervm/block/header.go +++ b/avalanchego/vms/proposervm/block/header.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block @@ -22,7 +22,18 @@ type statelessHeader struct { bytes []byte } -func (h *statelessHeader) ChainID() ids.ID { return h.Chain } -func (h *statelessHeader) ParentID() ids.ID { return h.Parent } -func (h *statelessHeader) BodyID() ids.ID { return h.Body } -func (h *statelessHeader) Bytes() []byte { return h.bytes } +func (h *statelessHeader) ChainID() ids.ID { + return h.Chain +} + +func (h *statelessHeader) ParentID() ids.ID { + return h.Parent +} + +func (h *statelessHeader) BodyID() ids.ID { + return h.Body +} + +func (h *statelessHeader) Bytes() []byte { + return h.bytes +} diff --git a/avalanchego/vms/proposervm/block/header_test.go b/avalanchego/vms/proposervm/block/header_test.go index 3e89327a..bdbfaf3b 100644 --- a/avalanchego/vms/proposervm/block/header_test.go +++ b/avalanchego/vms/proposervm/block/header_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/avalanchego/vms/proposervm/block/option.go b/avalanchego/vms/proposervm/block/option.go index 79eee19f..180b90e3 100644 --- a/avalanchego/vms/proposervm/block/option.go +++ b/avalanchego/vms/proposervm/block/option.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block @@ -16,10 +16,21 @@ type option struct { bytes []byte } -func (b *option) ID() ids.ID { return b.id } -func (b *option) ParentID() ids.ID { return b.PrntID } -func (b *option) Block() []byte { return b.InnerBytes } -func (b *option) Bytes() []byte { return b.bytes } +func (b *option) ID() ids.ID { + return b.id +} + +func (b *option) ParentID() ids.ID { + return b.PrntID +} + +func (b *option) Block() []byte { + return b.InnerBytes +} + +func (b *option) Bytes() []byte { + return b.bytes +} func (b *option) initialize(bytes []byte) error { b.id = hashing.ComputeHash256Array(bytes) diff --git a/avalanchego/vms/proposervm/block/option_test.go b/avalanchego/vms/proposervm/block/option_test.go index c721439c..f6d4f409 100644 --- a/avalanchego/vms/proposervm/block/option_test.go +++ b/avalanchego/vms/proposervm/block/option_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package block diff --git a/avalanchego/vms/proposervm/block/parse.go b/avalanchego/vms/proposervm/block/parse.go index 01a61a9a..00ca2e80 100644 --- a/avalanchego/vms/proposervm/block/parse.go +++ b/avalanchego/vms/proposervm/block/parse.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block @@ -7,28 +7,21 @@ import ( "fmt" ) -func Parse(bytes []byte) (Block, bool, error) { - var ( - block Block - requireBanff bool - ) - parsedVersion, err := apricotCodec.Unmarshal(bytes, &block) +func Parse(bytes []byte) (Block, error) { + var block Block + parsedVersion, err := c.Unmarshal(bytes, &block) if err != nil { - parsedVersion, err = banffCodec.Unmarshal(bytes, &block) - requireBanff = true - } - if err != nil { - return nil, false, err + return nil, err } if parsedVersion != codecVersion { - return nil, false, fmt.Errorf("expected codec version %d but got %d", codecVersion, parsedVersion) + return nil, fmt.Errorf("expected codec version %d but got %d", codecVersion, parsedVersion) } - return block, requireBanff, block.initialize(bytes) + return block, block.initialize(bytes) } func ParseHeader(bytes []byte) (Header, error) { header := statelessHeader{} - parsedVersion, err := banffCodec.Unmarshal(bytes, &header) + parsedVersion, err := c.Unmarshal(bytes, &header) if err != nil { return nil, err } diff --git a/avalanchego/vms/proposervm/block/parse_test.go b/avalanchego/vms/proposervm/block/parse_test.go index 40e4e72c..f7499601 100644 --- a/avalanchego/vms/proposervm/block/parse_test.go +++ b/avalanchego/vms/proposervm/block/parse_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package block @@ -30,7 +30,7 @@ func TestParse(t *testing.T) { cert := tlsCert.Leaf key := tlsCert.PrivateKey.(crypto.Signer) - builtBlock, err := BuildApricot( + builtBlock, err := Build( parentID, timestamp, pChainHeight, @@ -43,9 +43,8 @@ func TestParse(t *testing.T) { builtBlockBytes := builtBlock.Bytes() - parsedBlockIntf, requireBanff, err := Parse(builtBlockBytes) + parsedBlockIntf, err := Parse(builtBlockBytes) require.NoError(err) - require.False(requireBanff) parsedBlock, ok := parsedBlockIntf.(SignedBlock) require.True(ok) @@ -60,7 +59,7 @@ func TestParseDuplicateExtension(t *testing.T) { blockBytes, err := hex.DecodeString(blockHex) require.NoError(err) - _, _, err = Parse(blockBytes) + _, err = Parse(blockBytes) require.Error(err) // Do not check for errDuplicateExtension to support g1.19 } @@ -97,9 +96,8 @@ func TestParseOption(t *testing.T) { builtOptionBytes := builtOption.Bytes() - parsedOption, requireBanff, err := Parse(builtOptionBytes) + parsedOption, err := Parse(builtOptionBytes) require.NoError(err) - require.False(requireBanff) equalOption(require, builtOption, parsedOption) } @@ -112,14 +110,13 @@ func TestParseUnsigned(t *testing.T) { pChainHeight := uint64(2) innerBlockBytes := []byte{3} - builtBlock, err := BuildUnsignedApricot(parentID, timestamp, pChainHeight, innerBlockBytes) + builtBlock, err := BuildUnsigned(parentID, timestamp, pChainHeight, innerBlockBytes) require.NoError(err) builtBlockBytes := builtBlock.Bytes() - parsedBlockIntf, requireBanff, err := Parse(builtBlockBytes) + parsedBlockIntf, err := Parse(builtBlockBytes) require.NoError(err) - require.False(requireBanff) parsedBlock, ok := parsedBlockIntf.(SignedBlock) require.True(ok) @@ -132,6 +129,6 @@ func TestParseGibberish(t *testing.T) { bytes := []byte{0, 1, 2, 3, 4, 5} - _, _, err := Parse(bytes) + _, err := Parse(bytes) require.Error(err) } diff --git a/avalanchego/vms/proposervm/block_server.go b/avalanchego/vms/proposervm/block_server.go index d45676d6..e9e2e192 
100644 --- a/avalanchego/vms/proposervm/block_server.go +++ b/avalanchego/vms/proposervm/block_server.go @@ -1,23 +1,25 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/vms/proposervm/indexer" ) -var _ indexer.BlockServer = &VM{} +var _ indexer.BlockServer = (*VM)(nil) // Note: this is a contention heavy call that should be avoided // for frequent/repeated indexer ops -func (vm *VM) GetFullPostForkBlock(blkID ids.ID) (snowman.Block, error) { +func (vm *VM) GetFullPostForkBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - return vm.getPostForkBlock(blkID) + return vm.getPostForkBlock(ctx, blkID) } func (vm *VM) Commit() error { diff --git a/avalanchego/vms/proposervm/block_test.go b/avalanchego/vms/proposervm/block_test.go new file mode 100644 index 00000000..13da3c18 --- /dev/null +++ b/avalanchego/vms/proposervm/block_test.go @@ -0,0 +1,87 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package proposervm + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "testing" + "time" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/proposervm/proposer" +) + +// Assert that when the underlying VM implements ChainVMWithBuildBlockContext +// and the proposervm is activated, we call the VM's BuildBlockWithContext +// method to build a block rather than BuildBlockWithContext. If the proposervm +// isn't activated, we should call BuildBlock rather than BuildBlockWithContext. +func TestPostForkCommonComponents_buildChild(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + pChainHeight := uint64(1337) + parentID := ids.GenerateTestID() + parentTimestamp := time.Now() + blkID := ids.GenerateTestID() + innerBlk := snowman.NewMockBlock(ctrl) + innerBlk.EXPECT().ID().Return(blkID).AnyTimes() + innerBlk.EXPECT().Height().Return(pChainHeight - 1).AnyTimes() + builtBlk := snowman.NewMockBlock(ctrl) + builtBlk.EXPECT().Bytes().Return([]byte{1, 2, 3}).AnyTimes() + builtBlk.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() + builtBlk.EXPECT().Height().Return(pChainHeight).AnyTimes() + innerVM := mocks.NewMockChainVM(ctrl) + innerBlockBuilderVM := mocks.NewMockBuildBlockWithContextChainVM(ctrl) + innerBlockBuilderVM.EXPECT().BuildBlockWithContext(gomock.Any(), &block.Context{ + PChainHeight: pChainHeight - 1, + }).Return(builtBlk, nil).AnyTimes() + vdrState := validators.NewMockState(ctrl) + 
vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() + windower := proposer.NewMockWindower(ctrl) + windower.EXPECT().Delay(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(time.Duration(0), nil).AnyTimes() + + pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + require.NoError(err) + vm := &VM{ + ChainVM: innerVM, + blockBuilderVM: innerBlockBuilderVM, + ctx: &snow.Context{ + ValidatorState: vdrState, + Log: logging.NoLog{}, + }, + Windower: windower, + stakingCertLeaf: &x509.Certificate{}, + stakingLeafSigner: pk, + } + + blk := &postForkCommonComponents{ + innerBlk: innerBlk, + vm: vm, + } + + // Should call BuildBlockWithContext since proposervm is activated + gotChild, err := blk.buildChild( + context.Background(), + parentID, + parentTimestamp, + pChainHeight-1, + ) + require.NoError(err) + require.Equal(builtBlk, gotChild.(*postForkBlock).innerBlk) +} diff --git a/avalanchego/vms/proposervm/height_indexed_vm.go b/avalanchego/vms/proposervm/height_indexed_vm.go index f92c31a9..ff12456d 100644 --- a/avalanchego/vms/proposervm/height_indexed_vm.go +++ b/avalanchego/vms/proposervm/height_indexed_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "context" "fmt" "go.uber.org/zap" @@ -17,7 +18,7 @@ import ( // checkpoint if repairing is needed. 
// // vm.ctx.Lock should be held -func (vm *VM) shouldHeightIndexBeRepaired() (bool, error) { +func (vm *VM) shouldHeightIndexBeRepaired(ctx context.Context) (bool, error) { _, err := vm.State.GetCheckpoint() if err != database.ErrNotFound { return true, err @@ -33,7 +34,7 @@ func (vm *VM) shouldHeightIndexBeRepaired() (bool, error) { return false, err } - lastAcceptedBlk, err := vm.getPostForkBlock(latestProBlkID) + lastAcceptedBlk, err := vm.getPostForkBlock(ctx, latestProBlkID) if err != nil { // Could not retrieve last accepted block. return false, err @@ -51,7 +52,7 @@ func (vm *VM) shouldHeightIndexBeRepaired() (bool, error) { } // vm.ctx.Lock should be held -func (vm *VM) VerifyHeightIndex() error { +func (vm *VM) VerifyHeightIndex(context.Context) error { if vm.hVM == nil { return block.ErrHeightIndexedVMNotImplemented } @@ -63,7 +64,7 @@ func (vm *VM) VerifyHeightIndex() error { } // vm.ctx.Lock should be held -func (vm *VM) GetBlockIDAtHeight(height uint64) (ids.ID, error) { +func (vm *VM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { if !vm.hIndexer.IsRepaired() { return ids.Empty, block.ErrIndexIncomplete } @@ -73,13 +74,13 @@ func (vm *VM) GetBlockIDAtHeight(height uint64) (ids.ID, error) { switch forkHeight, err := vm.State.GetForkHeight(); err { case nil: if height < forkHeight { - return vm.hVM.GetBlockIDAtHeight(height) + return vm.hVM.GetBlockIDAtHeight(ctx, height) } return vm.State.GetBlockIDAtHeight(height) case database.ErrNotFound: // fork not reached yet. Block must be pre-fork - return vm.hVM.GetBlockIDAtHeight(height) + return vm.hVM.GetBlockIDAtHeight(ctx, height) default: return ids.Empty, err @@ -89,10 +90,6 @@ func (vm *VM) GetBlockIDAtHeight(height uint64) (ids.ID, error) { // As postFork blocks/options are accepted, height index is updated even if its // repairing is ongoing. 
vm.ctx.Lock should be held func (vm *VM) updateHeightIndex(height uint64, blkID ids.ID) error { - if vm.resetHeightIndexOngoing.GetValue() { - return nil - } - _, err := vm.State.GetCheckpoint() switch err { case nil: diff --git a/avalanchego/vms/proposervm/indexer/block_server.go b/avalanchego/vms/proposervm/indexer/block_server.go index 7ae79923..e817b9ba 100644 --- a/avalanchego/vms/proposervm/indexer/block_server.go +++ b/avalanchego/vms/proposervm/indexer/block_server.go @@ -1,9 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer import ( + "context" + "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" @@ -16,5 +18,5 @@ type BlockServer interface { // Note: this is a contention heavy call that should be avoided // for frequent/repeated indexer ops - GetFullPostForkBlock(blkID ids.ID) (snowman.Block, error) + GetFullPostForkBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) } diff --git a/avalanchego/vms/proposervm/indexer/block_server_test.go b/avalanchego/vms/proposervm/indexer/block_server_test.go index caabac05..5bf74258 100644 --- a/avalanchego/vms/proposervm/indexer/block_server_test.go +++ b/avalanchego/vms/proposervm/indexer/block_server_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer import ( + "context" "errors" "testing" @@ -15,7 +16,7 @@ var ( errGetWrappingBlk = errors.New("unexpectedly called GetWrappingBlk") errCommit = errors.New("unexpectedly called Commit") - _ BlockServer = &TestBlockServer{} + _ BlockServer = (*TestBlockServer)(nil) ) // TestBatchedVM is a BatchedVM that is useful for testing. 
@@ -25,13 +26,13 @@ type TestBlockServer struct { CantGetFullPostForkBlock bool CantCommit bool - GetFullPostForkBlockF func(blkID ids.ID) (snowman.Block, error) + GetFullPostForkBlockF func(ctx context.Context, blkID ids.ID) (snowman.Block, error) CommitF func() error } -func (tsb *TestBlockServer) GetFullPostForkBlock(blkID ids.ID) (snowman.Block, error) { +func (tsb *TestBlockServer) GetFullPostForkBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { if tsb.GetFullPostForkBlockF != nil { - return tsb.GetFullPostForkBlockF(blkID) + return tsb.GetFullPostForkBlockF(ctx, blkID) } if tsb.CantGetFullPostForkBlock && tsb.T != nil { tsb.T.Fatal(errGetWrappingBlk) diff --git a/avalanchego/vms/proposervm/indexer/height_indexer.go b/avalanchego/vms/proposervm/indexer/height_indexer.go index 27f4b587..833798c0 100644 --- a/avalanchego/vms/proposervm/indexer/height_indexer.go +++ b/avalanchego/vms/proposervm/indexer/height_indexer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -25,7 +25,7 @@ const ( sleepDurationMultiplier = 5 ) -var _ HeightIndexer = &heightIndexer{} +var _ HeightIndexer = (*heightIndexer)(nil) type HeightIndexer interface { // Returns whether the height index is fully repaired. @@ -63,18 +63,18 @@ type heightIndexer struct { server BlockServer log logging.Logger - jobDone utils.AtomicBool + jobDone utils.Atomic[bool] state state.State commitFrequency int } func (hi *heightIndexer) IsRepaired() bool { - return hi.jobDone.GetValue() + return hi.jobDone.Get() } func (hi *heightIndexer) MarkRepaired(repaired bool) { - hi.jobDone.SetValue(repaired) + hi.jobDone.Set(repaired) } // RepairHeightIndex ensures the height -> proBlkID height block index is well formed. @@ -95,7 +95,7 @@ func (hi *heightIndexer) RepairHeightIndex(ctx context.Context) error { // retrieve checkpoint height. 
We explicitly track block height // in doRepair to avoid heavier DB reads. - startBlk, err := hi.server.GetFullPostForkBlock(startBlkID) + startBlk, err := hi.server.GetFullPostForkBlock(ctx, startBlkID) if err != nil { return err } diff --git a/avalanchego/vms/proposervm/indexer/height_indexer_test.go b/avalanchego/vms/proposervm/indexer/height_indexer_test.go index ae144de0..3c1e671f 100644 --- a/avalanchego/vms/proposervm/indexer/height_indexer_test.go +++ b/avalanchego/vms/proposervm/indexer/height_indexer_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package indexer @@ -42,7 +42,7 @@ func TestHeightBlockIndexPostFork(t *testing.T) { dummyPCH := uint64(2022) // store postForkStatelessBlk in State ... - postForkStatelessBlk, err := block.BuildUnsignedApricot( + postForkStatelessBlk, err := block.BuildUnsigned( lastBlkID, dummyTS, dummyPCH, @@ -68,14 +68,16 @@ func TestHeightBlockIndexPostFork(t *testing.T) { CantGetFullPostForkBlock: true, CantCommit: true, - GetFullPostForkBlockF: func(blkID ids.ID) (snowman.Block, error) { + GetFullPostForkBlockF: func(_ context.Context, blkID ids.ID) (snowman.Block, error) { blk, found := proBlks[blkID] if !found { return nil, database.ErrNotFound } return blk, nil }, - CommitF: func() error { return nil }, + CommitF: func() error { + return nil + }, } hIndex := newHeightIndexer(blkSrv, @@ -120,7 +122,7 @@ func TestHeightBlockIndexAcrossFork(t *testing.T) { dummyPCH := uint64(2022) // store postForkStatelessBlk in State ... 
- postForkStatelessBlk, err := block.BuildUnsignedApricot( + postForkStatelessBlk, err := block.BuildUnsigned( lastBlkID, dummyTS, dummyPCH, @@ -146,14 +148,16 @@ func TestHeightBlockIndexAcrossFork(t *testing.T) { CantGetFullPostForkBlock: true, CantCommit: true, - GetFullPostForkBlockF: func(blkID ids.ID) (snowman.Block, error) { + GetFullPostForkBlockF: func(_ context.Context, blkID ids.ID) (snowman.Block, error) { blk, found := proBlks[blkID] if !found { return nil, database.ErrNotFound } return blk, nil }, - CommitF: func() error { return nil }, + CommitF: func() error { + return nil + }, } hIndex := newHeightIndexer(blkSrv, @@ -202,7 +206,7 @@ func TestHeightBlockIndexResumeFromCheckPoint(t *testing.T) { dummyPCH := uint64(2022) // store postForkStatelessBlk in State ... - postForkStatelessBlk, err := block.BuildUnsignedApricot( + postForkStatelessBlk, err := block.BuildUnsigned( lastBlkID, dummyTS, dummyPCH, @@ -228,14 +232,16 @@ func TestHeightBlockIndexResumeFromCheckPoint(t *testing.T) { CantGetFullPostForkBlock: true, CantCommit: true, - GetFullPostForkBlockF: func(blkID ids.ID) (snowman.Block, error) { + GetFullPostForkBlockF: func(_ context.Context, blkID ids.ID) (snowman.Block, error) { blk, found := proBlks[blkID] if !found { return nil, database.ErrNotFound } return blk, nil }, - CommitF: func() error { return nil }, + CommitF: func() error { + return nil + }, } hIndex := newHeightIndexer(blkSrv, diff --git a/avalanchego/vms/proposervm/mock_post_fork_block.go b/avalanchego/vms/proposervm/mock_post_fork_block.go new file mode 100644 index 00000000..6dfa398c --- /dev/null +++ b/avalanchego/vms/proposervm/mock_post_fork_block.go @@ -0,0 +1,321 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/proposervm (interfaces: PostForkBlock) + +// Package proposervm is a generated GoMock package. 
+package proposervm + +import ( + context "context" + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + choices "github.com/ava-labs/avalanchego/snow/choices" + snowman "github.com/ava-labs/avalanchego/snow/consensus/snowman" + block "github.com/ava-labs/avalanchego/vms/proposervm/block" + gomock "github.com/golang/mock/gomock" +) + +// MockPostForkBlock is a mock of PostForkBlock interface. +type MockPostForkBlock struct { + ctrl *gomock.Controller + recorder *MockPostForkBlockMockRecorder +} + +// MockPostForkBlockMockRecorder is the mock recorder for MockPostForkBlock. +type MockPostForkBlockMockRecorder struct { + mock *MockPostForkBlock +} + +// NewMockPostForkBlock creates a new mock instance. +func NewMockPostForkBlock(ctrl *gomock.Controller) *MockPostForkBlock { + mock := &MockPostForkBlock{ctrl: ctrl} + mock.recorder = &MockPostForkBlockMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPostForkBlock) EXPECT() *MockPostForkBlockMockRecorder { + return m.recorder +} + +// Accept mocks base method. +func (m *MockPostForkBlock) Accept(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Accept", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Accept indicates an expected call of Accept. +func (mr *MockPostForkBlockMockRecorder) Accept(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Accept", reflect.TypeOf((*MockPostForkBlock)(nil).Accept), arg0) +} + +// Bytes mocks base method. +func (m *MockPostForkBlock) Bytes() []byte { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Bytes") + ret0, _ := ret[0].([]byte) + return ret0 +} + +// Bytes indicates an expected call of Bytes. 
+func (mr *MockPostForkBlockMockRecorder) Bytes() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Bytes", reflect.TypeOf((*MockPostForkBlock)(nil).Bytes)) +} + +// Height mocks base method. +func (m *MockPostForkBlock) Height() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Height") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Height indicates an expected call of Height. +func (mr *MockPostForkBlockMockRecorder) Height() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Height", reflect.TypeOf((*MockPostForkBlock)(nil).Height)) +} + +// ID mocks base method. +func (m *MockPostForkBlock) ID() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ID") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// ID indicates an expected call of ID. +func (mr *MockPostForkBlockMockRecorder) ID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ID", reflect.TypeOf((*MockPostForkBlock)(nil).ID)) +} + +// Parent mocks base method. +func (m *MockPostForkBlock) Parent() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Parent") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// Parent indicates an expected call of Parent. +func (mr *MockPostForkBlockMockRecorder) Parent() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Parent", reflect.TypeOf((*MockPostForkBlock)(nil).Parent)) +} + +// Reject mocks base method. +func (m *MockPostForkBlock) Reject(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reject", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Reject indicates an expected call of Reject. 
+func (mr *MockPostForkBlockMockRecorder) Reject(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reject", reflect.TypeOf((*MockPostForkBlock)(nil).Reject), arg0) +} + +// Status mocks base method. +func (m *MockPostForkBlock) Status() choices.Status { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Status") + ret0, _ := ret[0].(choices.Status) + return ret0 +} + +// Status indicates an expected call of Status. +func (mr *MockPostForkBlockMockRecorder) Status() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Status", reflect.TypeOf((*MockPostForkBlock)(nil).Status)) +} + +// Timestamp mocks base method. +func (m *MockPostForkBlock) Timestamp() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Timestamp") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// Timestamp indicates an expected call of Timestamp. +func (mr *MockPostForkBlockMockRecorder) Timestamp() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Timestamp", reflect.TypeOf((*MockPostForkBlock)(nil).Timestamp)) +} + +// Verify mocks base method. +func (m *MockPostForkBlock) Verify(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Verify", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Verify indicates an expected call of Verify. +func (mr *MockPostForkBlockMockRecorder) Verify(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Verify", reflect.TypeOf((*MockPostForkBlock)(nil).Verify), arg0) +} + +// acceptInnerBlk mocks base method. +func (m *MockPostForkBlock) acceptInnerBlk(arg0 context.Context) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "acceptInnerBlk", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// acceptInnerBlk indicates an expected call of acceptInnerBlk. 
+func (mr *MockPostForkBlockMockRecorder) acceptInnerBlk(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "acceptInnerBlk", reflect.TypeOf((*MockPostForkBlock)(nil).acceptInnerBlk), arg0) +} + +// acceptOuterBlk mocks base method. +func (m *MockPostForkBlock) acceptOuterBlk() error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "acceptOuterBlk") + ret0, _ := ret[0].(error) + return ret0 +} + +// acceptOuterBlk indicates an expected call of acceptOuterBlk. +func (mr *MockPostForkBlockMockRecorder) acceptOuterBlk() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "acceptOuterBlk", reflect.TypeOf((*MockPostForkBlock)(nil).acceptOuterBlk)) +} + +// buildChild mocks base method. +func (m *MockPostForkBlock) buildChild(arg0 context.Context) (Block, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "buildChild", arg0) + ret0, _ := ret[0].(Block) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// buildChild indicates an expected call of buildChild. +func (mr *MockPostForkBlockMockRecorder) buildChild(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "buildChild", reflect.TypeOf((*MockPostForkBlock)(nil).buildChild), arg0) +} + +// getInnerBlk mocks base method. +func (m *MockPostForkBlock) getInnerBlk() snowman.Block { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getInnerBlk") + ret0, _ := ret[0].(snowman.Block) + return ret0 +} + +// getInnerBlk indicates an expected call of getInnerBlk. +func (mr *MockPostForkBlockMockRecorder) getInnerBlk() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getInnerBlk", reflect.TypeOf((*MockPostForkBlock)(nil).getInnerBlk)) +} + +// getStatelessBlk mocks base method. 
+func (m *MockPostForkBlock) getStatelessBlk() block.Block { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "getStatelessBlk") + ret0, _ := ret[0].(block.Block) + return ret0 +} + +// getStatelessBlk indicates an expected call of getStatelessBlk. +func (mr *MockPostForkBlockMockRecorder) getStatelessBlk() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "getStatelessBlk", reflect.TypeOf((*MockPostForkBlock)(nil).getStatelessBlk)) +} + +// pChainHeight mocks base method. +func (m *MockPostForkBlock) pChainHeight(arg0 context.Context) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "pChainHeight", arg0) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// pChainHeight indicates an expected call of pChainHeight. +func (mr *MockPostForkBlockMockRecorder) pChainHeight(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "pChainHeight", reflect.TypeOf((*MockPostForkBlock)(nil).pChainHeight), arg0) +} + +// setInnerBlk mocks base method. +func (m *MockPostForkBlock) setInnerBlk(arg0 snowman.Block) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "setInnerBlk", arg0) +} + +// setInnerBlk indicates an expected call of setInnerBlk. +func (mr *MockPostForkBlockMockRecorder) setInnerBlk(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setInnerBlk", reflect.TypeOf((*MockPostForkBlock)(nil).setInnerBlk), arg0) +} + +// setStatus mocks base method. +func (m *MockPostForkBlock) setStatus(arg0 choices.Status) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "setStatus", arg0) +} + +// setStatus indicates an expected call of setStatus. 
+func (mr *MockPostForkBlockMockRecorder) setStatus(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "setStatus", reflect.TypeOf((*MockPostForkBlock)(nil).setStatus), arg0) +} + +// verifyPostForkChild mocks base method. +func (m *MockPostForkBlock) verifyPostForkChild(arg0 context.Context, arg1 *postForkBlock) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "verifyPostForkChild", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// verifyPostForkChild indicates an expected call of verifyPostForkChild. +func (mr *MockPostForkBlockMockRecorder) verifyPostForkChild(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "verifyPostForkChild", reflect.TypeOf((*MockPostForkBlock)(nil).verifyPostForkChild), arg0, arg1) +} + +// verifyPostForkOption mocks base method. +func (m *MockPostForkBlock) verifyPostForkOption(arg0 context.Context, arg1 *postForkOption) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "verifyPostForkOption", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// verifyPostForkOption indicates an expected call of verifyPostForkOption. +func (mr *MockPostForkBlockMockRecorder) verifyPostForkOption(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "verifyPostForkOption", reflect.TypeOf((*MockPostForkBlock)(nil).verifyPostForkOption), arg0, arg1) +} + +// verifyPreForkChild mocks base method. +func (m *MockPostForkBlock) verifyPreForkChild(arg0 context.Context, arg1 *preForkBlock) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "verifyPreForkChild", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// verifyPreForkChild indicates an expected call of verifyPreForkChild. 
+func (mr *MockPostForkBlockMockRecorder) verifyPreForkChild(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "verifyPreForkChild", reflect.TypeOf((*MockPostForkBlock)(nil).verifyPreForkChild), arg0, arg1) +} diff --git a/avalanchego/vms/proposervm/post_fork_block.go b/avalanchego/vms/proposervm/post_fork_block.go index 2705fc7d..69e59aae 100644 --- a/avalanchego/vms/proposervm/post_fork_block.go +++ b/avalanchego/vms/proposervm/post_fork_block.go @@ -1,16 +1,18 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "context" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -var _ PostForkBlock = &postForkBlock{} +var _ PostForkBlock = (*postForkBlock)(nil) type postForkBlock struct { block.SignedBlock @@ -21,11 +23,11 @@ type postForkBlock struct { // 1) Sets this blocks status to Accepted. // 2) Persists this block in storage // 3) Calls Reject() on siblings of this block and their descendants. 
-func (b *postForkBlock) Accept() error { +func (b *postForkBlock) Accept(ctx context.Context) error { if err := b.acceptOuterBlk(); err != nil { return err } - return b.acceptInnerBlk() + return b.acceptInnerBlk(ctx) } func (b *postForkBlock) acceptOuterBlk() error { @@ -44,13 +46,13 @@ func (b *postForkBlock) acceptOuterBlk() error { return b.vm.storePostForkBlock(b) } -func (b *postForkBlock) acceptInnerBlk() error { +func (b *postForkBlock) acceptInnerBlk(ctx context.Context) error { // mark the inner block as accepted and all conflicting inner blocks as // rejected - return b.vm.Tree.Accept(b.innerBlk) + return b.vm.Tree.Accept(ctx, b.innerBlk) } -func (b *postForkBlock) Reject() error { +func (b *postForkBlock) Reject(context.Context) error { // We do not reject the inner block here because it may be accepted later delete(b.vm.verifiedBlocks, b.ID()) b.status = choices.Rejected @@ -72,16 +74,16 @@ func (b *postForkBlock) Parent() ids.ID { // If Verify() returns nil, Accept() or Reject() will eventually be called on // [b] and [b.innerBlk] -func (b *postForkBlock) Verify() error { - parent, err := b.vm.getBlock(b.ParentID()) +func (b *postForkBlock) Verify(ctx context.Context) error { + parent, err := b.vm.getBlock(ctx, b.ParentID()) if err != nil { return err } - return parent.verifyPostForkChild(b) + return parent.verifyPostForkChild(ctx, b) } // Return the two options for the block that follows [b] -func (b *postForkBlock) Options() ([2]snowman.Block, error) { +func (b *postForkBlock) Options(ctx context.Context) ([2]snowman.Block, error) { innerOracleBlk, ok := b.innerBlk.(snowman.OracleBlock) if !ok { // [b]'s innerBlk isn't an oracle block @@ -89,7 +91,7 @@ func (b *postForkBlock) Options() ([2]snowman.Block, error) { } // The inner block's child options - innerOptions, err := innerOracleBlk.Options() + innerOptions, err := innerOracleBlk.Options(ctx) if err != nil { return [2]snowman.Block{}, err } @@ -119,22 +121,23 @@ func (b *postForkBlock) Options() 
([2]snowman.Block, error) { } // A post-fork block can never have a pre-fork child -func (b *postForkBlock) verifyPreForkChild(child *preForkBlock) error { +func (*postForkBlock) verifyPreForkChild(context.Context, *preForkBlock) error { return errUnsignedChild } -func (b *postForkBlock) verifyPostForkChild(child *postForkBlock) error { +func (b *postForkBlock) verifyPostForkChild(ctx context.Context, child *postForkBlock) error { parentTimestamp := b.Timestamp() parentPChainHeight := b.PChainHeight() return b.postForkCommonComponents.Verify( + ctx, parentTimestamp, parentPChainHeight, child, ) } -func (b *postForkBlock) verifyPostForkOption(child *postForkOption) error { - if err := verifyIsOracleBlock(b.innerBlk); err != nil { +func (b *postForkBlock) verifyPostForkOption(ctx context.Context, child *postForkOption) error { + if err := verifyIsOracleBlock(ctx, b.innerBlk); err != nil { return err } @@ -145,19 +148,20 @@ func (b *postForkBlock) verifyPostForkOption(child *postForkOption) error { return errInnerParentMismatch } - return child.vm.verifyAndRecordInnerBlk(child) + return child.vm.verifyAndRecordInnerBlk(ctx, nil, child) } // Return the child (a *postForkBlock) of this block -func (b *postForkBlock) buildChild() (Block, error) { +func (b *postForkBlock) buildChild(ctx context.Context) (Block, error) { return b.postForkCommonComponents.buildChild( + ctx, b.ID(), b.Timestamp(), b.PChainHeight(), ) } -func (b *postForkBlock) pChainHeight() (uint64, error) { +func (b *postForkBlock) pChainHeight(context.Context) (uint64, error) { return b.PChainHeight(), nil } diff --git a/avalanchego/vms/proposervm/post_fork_block_test.go b/avalanchego/vms/proposervm/post_fork_block_test.go index 1091803b..f4912172 100644 --- a/avalanchego/vms/proposervm/post_fork_block_test.go +++ b/avalanchego/vms/proposervm/post_fork_block_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( "bytes" + "context" "errors" "testing" "time" @@ -17,6 +18,8 @@ import ( "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) +var errDuplicateVerify = errors.New("duplicate verify") + // ProposerBlock Option interface tests section func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { // setup @@ -27,7 +30,7 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { } // test - _, err := proBlk.Options() + _, err := proBlk.Options(context.Background()) if err != snowman.ErrNotOracle { t.Fatal("Proposer block should signal that it wraps a block not implementing Options interface with ErrNotOracleBlock error") } @@ -57,14 +60,14 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { }, } - slb, err := block.BuildApricot( + slb, err := block.Build( ids.Empty, // refer unknown parent time.Time{}, 0, // pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, innerOracleBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") @@ -79,7 +82,7 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { } // test - _, err = proBlk.Options() + _, err = proBlk.Options(context.Background()) if err != nil { t.Fatal("Proposer block should forward wrapped block options if this implements Option interface") } @@ -89,7 +92,9 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(100) - valState.GetCurrentHeightF = func() (uint64, error) { return pChainHeight, nil } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return pChainHeight, nil + } // create parent block ... 
prntCoreBlk := &snowman.TestBlock{ @@ -101,8 +106,10 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { ParentV: coreGenBlk.ID(), TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return prntCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return prntCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -112,7 +119,7 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -124,15 +131,15 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { } proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - prntProBlk, err := proVM.BuildBlock() + prntProBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("Could not build proposer block: %s", err) } - if err := prntProBlk.Verify(); err != nil { + if err := prntProBlk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(prntProBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), prntProBlk.ID()); err != nil { t.Fatal(err) } @@ -142,14 +149,14 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { BytesV: []byte{2}, TimestampV: prntCoreBlk.Timestamp(), } - childSlb, err := block.BuildApricot( + childSlb, err := block.Build( ids.Empty, // refer unknown parent childCoreBlk.Timestamp(), pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build 
stateless block") @@ -164,13 +171,13 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { } // child block referring unknown parent does not verify - err = childProBlk.Verify() + err = childProBlk.Verify(context.Background()) if err == nil { t.Fatal("Block with unknown parent should not verify") } // child block referring known parent does verify - childSlb, err = block.BuildUnsignedApricot( + childSlb, err = block.BuildUnsigned( prntProBlk.ID(), // refer known parent prntProBlk.Timestamp().Add(proposer.MaxDelay), pChainHeight, @@ -185,7 +192,7 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { } proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatalf("Block with known parent should verify: %s", err) } } @@ -193,7 +200,9 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(100) - valState.GetCurrentHeightF = func() (uint64, error) { return pChainHeight, nil } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return pChainHeight, nil + } // create parent block ... 
prntCoreBlk := &snowman.TestBlock{ @@ -205,8 +214,10 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { ParentV: coreGenBlk.ID(), TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return prntCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return prntCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -216,7 +227,7 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -227,15 +238,15 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { } } - prntProBlk, err := proVM.BuildBlock() + prntProBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("Could not build proposer block") } - if err := prntProBlk.Verify(); err != nil { + if err := prntProBlk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(prntProBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), prntProBlk.ID()); err != nil { t.Fatal(err) } @@ -253,14 +264,14 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { // child block timestamp cannot be lower than parent timestamp childCoreBlk.TimestampV = prntTimestamp.Add(-1 * time.Second) proVM.Clock.Set(childCoreBlk.TimestampV) - childSlb, err := block.BuildApricot( + childSlb, err := block.Build( prntProBlk.ID(), childCoreBlk.Timestamp(), pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + 
proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") @@ -274,83 +285,83 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { }, } - err = childProBlk.Verify() + err = childProBlk.Verify(context.Background()) if err == nil { t.Fatal("Proposer block timestamp too old should not verify") } // block cannot arrive before its creator window starts - blkWinDelay, err := proVM.Delay(childCoreBlk.Height(), pChainHeight, proVM.ctx.NodeID) + blkWinDelay, err := proVM.Delay(context.Background(), childCoreBlk.Height(), pChainHeight, proVM.ctx.NodeID) if err != nil { t.Fatal("Could not calculate submission window") } beforeWinStart := prntTimestamp.Add(blkWinDelay).Add(-1 * time.Second) proVM.Clock.Set(beforeWinStart) - childSlb, err = block.BuildApricot( + childSlb, err = block.Build( prntProBlk.ID(), beforeWinStart, pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err == nil { + if err := childProBlk.Verify(context.Background()); err == nil { t.Fatal("Proposer block timestamp before submission window should not verify") } // block can arrive at its creator window starts atWindowStart := prntTimestamp.Add(blkWinDelay) proVM.Clock.Set(atWindowStart) - childSlb, err = block.BuildApricot( + childSlb, err = block.Build( prntProBlk.ID(), atWindowStart, pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatalf("Proposer block timestamp at submission window start 
should verify") } // block can arrive after its creator window starts afterWindowStart := prntTimestamp.Add(blkWinDelay).Add(5 * time.Second) proVM.Clock.Set(afterWindowStart) - childSlb, err = block.BuildApricot( + childSlb, err = block.Build( prntProBlk.ID(), afterWindowStart, pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatal("Proposer block timestamp after submission window start should verify") } // block can arrive within submission window - AtSubWindowEnd := proVM.Time().Add(proposer.MaxDelay) - proVM.Clock.Set(AtSubWindowEnd) - childSlb, err = block.BuildUnsignedApricot( + atSubWindowEnd := proVM.Time().Add(proposer.MaxDelay) + proVM.Clock.Set(atSubWindowEnd) + childSlb, err = block.BuildUnsigned( prntProBlk.ID(), - AtSubWindowEnd, + atSubWindowEnd, pChainHeight, childCoreBlk.Bytes(), ) @@ -358,26 +369,26 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatal("Proposer block timestamp within submission window should verify") } // block timestamp cannot be too much in the future afterSubWinEnd := proVM.Time().Add(maxSkew).Add(time.Second) - childSlb, err = block.BuildApricot( + childSlb, err = block.Build( prntProBlk.ID(), afterSubWinEnd, pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := 
childProBlk.Verify(); err == nil { + if err := childProBlk.Verify(context.Background()); err == nil { t.Fatal("Proposer block timestamp after submission window should not verify") } else if err == nil { t.Fatal("Proposer block timestamp after submission window should have different error") @@ -387,7 +398,9 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(100) - valState.GetCurrentHeightF = func() (uint64, error) { return pChainHeight, nil } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return pChainHeight, nil + } // create parent block ... prntCoreBlk := &snowman.TestBlock{ @@ -399,8 +412,10 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { ParentV: coreGenBlk.ID(), TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return prntCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return prntCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -410,7 +425,7 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -421,15 +436,15 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { } } - prntProBlk, err := proVM.BuildBlock() + prntProBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("Could not build proposer block") } - if err 
:= prntProBlk.Verify(); err != nil { + if err := prntProBlk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(prntProBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), prntProBlk.ID()); err != nil { t.Fatal(err) } @@ -446,14 +461,14 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { } // child P-Chain height must not precede parent P-Chain height - childSlb, err := block.BuildApricot( + childSlb, err := block.Build( prntProBlk.ID(), childCoreBlk.Timestamp(), prntBlkPChainHeight-1, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") @@ -467,14 +482,14 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { }, } - if err := childProBlk.Verify(); err == nil { + if err := childProBlk.Verify(context.Background()); err == nil { t.Fatal("ProBlock's P-Chain-Height cannot be lower than parent ProBlock's one") } else if err == nil { t.Fatal("Proposer block has wrong height should have different error") } // child P-Chain height can be equal to parent P-Chain height - childSlb, err = block.BuildUnsignedApricot( + childSlb, err = block.BuildUnsigned( prntProBlk.ID(), childCoreBlk.Timestamp(), prntBlkPChainHeight, @@ -486,13 +501,13 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { childProBlk.SignedBlock = childSlb proVM.Set(childCoreBlk.Timestamp()) - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatalf("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one: %s", err) } // child P-Chain height may follow parent P-Chain height pChainHeight = prntBlkPChainHeight * 2 // move ahead pChainHeight - childSlb, err = block.BuildUnsignedApricot( + childSlb, err = block.BuildUnsigned( prntProBlk.ID(), 
childCoreBlk.Timestamp(), prntBlkPChainHeight+1, @@ -502,13 +517,13 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatal("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one") } // block P-Chain height can be equal to current P-Chain height - currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight() - childSlb, err = block.BuildUnsignedApricot( + currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) + childSlb, err = block.BuildUnsigned( prntProBlk.ID(), childCoreBlk.Timestamp(), currPChainHeight, @@ -518,12 +533,12 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatal("ProBlock's P-Chain-Height can be equal to current p chain height") } // block P-Chain height cannot be at higher than current P-Chain height - childSlb, err = block.BuildUnsignedApricot( + childSlb, err = block.BuildUnsigned( prntProBlk.ID(), childCoreBlk.Timestamp(), currPChainHeight*2, @@ -533,7 +548,7 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err != errPChainHeightNotReached { + if err := childProBlk.Verify(context.Background()); err != errPChainHeightNotReached { t.Fatal("ProBlock's P-Chain-Height cannot be larger than current p chain height") } } @@ -541,7 +556,9 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) { coreVM, valState, proVM, coreGenBlk, 
_ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(100) - valState.GetCurrentHeightF = func() (uint64, error) { return pChainHeight, nil } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return pChainHeight, nil + } // proVM.SetStartTime(timer.MaxTime) // switch off scheduler for current test // create post fork oracle block ... @@ -577,8 +594,10 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return oracleCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return oracleCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -592,7 +611,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -607,15 +626,15 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) } } - oracleBlk, err := proVM.BuildBlock() + oracleBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } - if err := oracleBlk.Verify(); err != nil { + if err := oracleBlk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(oracleBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), oracleBlk.ID()); err != nil { t.Fatal(err) } @@ -624,16 +643,16 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) if !ok { t.Fatal("expected post fork block") } - opts, err := postForkOracleBlk.Options() + 
opts, err := postForkOracleBlk.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from post fork oracle block") } parentBlk := opts[0] - if err := parentBlk.Verify(); err != nil { + if err := parentBlk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(parentBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { t.Fatal(err) } @@ -650,14 +669,14 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) } // child P-Chain height must not precede parent P-Chain height - childSlb, err := block.BuildApricot( + childSlb, err := block.Build( parentBlk.ID(), childCoreBlk.Timestamp(), prntBlkPChainHeight-1, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") @@ -671,12 +690,12 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) }, } - if err := childProBlk.Verify(); err == nil { + if err := childProBlk.Verify(context.Background()); err == nil { t.Fatal("ProBlock's P-Chain-Height cannot be lower than parent ProBlock's one") } // child P-Chain height can be equal to parent P-Chain height - childSlb, err = block.BuildUnsignedApricot( + childSlb, err = block.BuildUnsigned( parentBlk.ID(), childCoreBlk.Timestamp(), prntBlkPChainHeight, @@ -688,13 +707,13 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) childProBlk.SignedBlock = childSlb proVM.Set(childCoreBlk.Timestamp()) - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatalf("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one: %s", err) } // child P-Chain height may follow parent P-Chain height pChainHeight = prntBlkPChainHeight * 2 // move ahead pChainHeight - 
childSlb, err = block.BuildUnsignedApricot( + childSlb, err = block.BuildUnsigned( parentBlk.ID(), childCoreBlk.Timestamp(), prntBlkPChainHeight+1, @@ -704,13 +723,13 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatal("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one") } // block P-Chain height can be equal to current P-Chain height - currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight() - childSlb, err = block.BuildUnsignedApricot( + currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) + childSlb, err = block.BuildUnsigned( parentBlk.ID(), childCoreBlk.Timestamp(), currPChainHeight, @@ -720,12 +739,12 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatal("ProBlock's P-Chain-Height can be equal to current p chain height") } // block P-Chain height cannot be at higher than current P-Chain height - childSlb, err = block.BuildUnsignedApricot( + childSlb, err = block.BuildUnsigned( parentBlk.ID(), childCoreBlk.Timestamp(), currPChainHeight*2, @@ -735,7 +754,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) t.Fatal("could not build stateless block") } childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(); err != errPChainHeightNotReached { + if err := childProBlk.Verify(context.Background()); err != errPChainHeightNotReached { t.Fatal("ProBlock's P-Chain-Height cannot be larger than current p chain height") } } @@ -745,7 +764,9 @@ func 
TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { // Show that other verify call would not call coreBlk.Verify() coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(2000) - valState.GetCurrentHeightF = func() (uint64, error) { return pChainHeight, nil } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return pChainHeight, nil + } coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -756,8 +777,10 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { ParentV: coreGenBlk.ID(), TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -767,7 +790,7 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -778,24 +801,24 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { } } - builtBlk, err := proVM.BuildBlock() + builtBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build block") } - if err := builtBlk.Verify(); err != nil { + if err := builtBlk.Verify(context.Background()); err != nil { t.Fatal(err) } // set error on coreBlock.Verify and recall Verify() - coreBlk.VerifyV = errors.New("core block verify should only be called once") - if err := builtBlk.Verify(); err != nil { + 
coreBlk.VerifyV = errDuplicateVerify + if err := builtBlk.Verify(context.Background()); err != nil { t.Fatal(err) } // rebuild a block with the same core block pChainHeight++ - if _, err := proVM.BuildBlock(); err != nil { + if _, err := proVM.BuildBlock(context.Background()); err != nil { t.Fatal("could not build block with same core block") } } @@ -805,7 +828,9 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { // setup coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(2000) - valState.GetCurrentHeightF = func() (uint64, error) { return pChainHeight, nil } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return pChainHeight, nil + } coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -816,8 +841,10 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { ParentV: coreGenBlk.ID(), TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -827,7 +854,7 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -838,23 +865,23 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { } } - builtBlk, err := proVM.BuildBlock() + builtBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("proposerVM could not build block") } 
// test - if err := builtBlk.Accept(); err != nil { + if err := builtBlk.Accept(context.Background()); err != nil { t.Fatal("could not accept block") } - coreVM.LastAcceptedF = func() (ids.ID, error) { + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if coreBlk.Status() == choices.Accepted { return coreBlk.ID(), nil } return coreGenBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(); err != nil { + if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { t.Fatal("could not retrieve last accepted block") } else if acceptedID != builtBlk.ID() { t.Fatal("unexpected last accepted ID") @@ -864,7 +891,9 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t *testing.T) { coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks var minimumHeight uint64 - valState.GetMinimumHeightF = func() (uint64, error) { return minimumHeight, nil } + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return minimumHeight, nil + } // generate two blocks with the same core block and store them coreBlk := &snowman.TestBlock{ @@ -877,17 +906,19 @@ func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } minimumHeight = coreGenBlk.Height() - proBlk1, err := proVM.BuildBlock() + proBlk1, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build proBlk1") } minimumHeight++ - proBlk2, err := proVM.BuildBlock() + proBlk2, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build proBlk2") } @@ -896,14 +927,14 @@ func 
TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t } // set proBlk1 as preferred - if err := proBlk1.Accept(); err != nil { + if err := proBlk1.Accept(context.Background()); err != nil { t.Fatal("could not accept proBlk1") } if coreBlk.Status() != choices.Accepted { t.Fatal("coreBlk should have been accepted") } - if acceptedID, err := proVM.LastAccepted(); err != nil { + if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { t.Fatal("could not retrieve last accepted block") } else if acceptedID != proBlk1.ID() { t.Fatal("unexpected last accepted ID") @@ -923,9 +954,11 @@ func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } - sb, err := proVM.BuildBlock() + sb, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build block") } @@ -934,7 +967,7 @@ func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { t.Fatal("built block has not expected type") } - if err := proBlk.Reject(); err != nil { + if err := proBlk.Reject(context.Background()); err != nil { t.Fatal("could not reject block") } @@ -986,8 +1019,10 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { coreOpt1, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return oracleCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return oracleCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -1001,7 +1036,7 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { return nil, 
database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -1016,15 +1051,15 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock() + parentBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } - if err := parentBlk.Verify(); err != nil { + if err := parentBlk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(parentBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { t.Fatal(err) } @@ -1033,7 +1068,7 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { if !ok { t.Fatal("expected post fork block") } - opts, err := postForkOracleBlk.Options() + opts, err := postForkOracleBlk.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from post fork oracle block") } @@ -1042,34 +1077,34 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { } // ... 
and verify them the first time - if err := opts[0].Verify(); err != nil { + if err := opts[0].Verify(context.Background()); err != nil { t.Fatal("option 0 should verify") } - if err := opts[1].Verify(); err != nil { + if err := opts[1].Verify(context.Background()); err != nil { t.Fatal("option 1 should verify") } // Build the child - statelessChild, err := block.BuildApricot( + statelessChild, err := block.Build( postForkOracleBlk.ID(), postForkOracleBlk.Timestamp().Add(proposer.WindowDuration), postForkOracleBlk.PChainHeight(), - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, oracleCoreBlk.opts[0].Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("failed to build new child block") } - invalidChild, err := proVM.ParseBlock(statelessChild.Bytes()) + invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) if err != nil { // A failure to parse is okay here return } - err = invalidChild.Verify() + err = invalidChild.Verify(context.Background()) if err == nil { t.Fatal("Should have failed to verify a child that was signed when it should be an oracle block") } @@ -1089,7 +1124,7 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { TimestampV: coreGenBlk.Timestamp(), } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -1099,7 +1134,7 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -1110,7 +1145,7 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { } } - statelessChild, err := block.BuildUnsignedApricot( + 
statelessChild, err := block.BuildUnsigned( coreGenBlk.ID(), coreGenBlk.Timestamp(), 4, @@ -1120,13 +1155,13 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { t.Fatal("failed to build new child block") } - invalidChild, err := proVM.ParseBlock(statelessChild.Bytes()) + invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) if err != nil { // A failure to parse is okay here return } - err = invalidChild.Verify() + err = invalidChild.Verify(context.Background()) if err == nil { t.Fatal("Should have failed to verify a child that was signed when it should be an oracle block") } diff --git a/avalanchego/vms/proposervm/post_fork_option.go b/avalanchego/vms/proposervm/post_fork_option.go index 8243afb8..e5745e04 100644 --- a/avalanchego/vms/proposervm/post_fork_option.go +++ b/avalanchego/vms/proposervm/post_fork_option.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "context" "time" "github.com/ava-labs/avalanchego/ids" @@ -11,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -var _ PostForkBlock = &postForkOption{} +var _ PostForkBlock = (*postForkOption)(nil) // The parent of a *postForkOption must be a *postForkBlock. 
type postForkOption struct { @@ -28,11 +29,11 @@ func (b *postForkOption) Timestamp() time.Time { return b.timestamp } -func (b *postForkOption) Accept() error { +func (b *postForkOption) Accept(ctx context.Context) error { if err := b.acceptOuterBlk(); err != nil { return err } - return b.acceptInnerBlk() + return b.acceptInnerBlk(ctx) } func (b *postForkOption) acceptOuterBlk() error { @@ -50,13 +51,13 @@ func (b *postForkOption) acceptOuterBlk() error { return b.vm.storePostForkBlock(b) } -func (b *postForkOption) acceptInnerBlk() error { +func (b *postForkOption) acceptInnerBlk(ctx context.Context) error { // mark the inner block as accepted and all conflicting inner blocks as // rejected - return b.vm.Tree.Accept(b.innerBlk) + return b.vm.Tree.Accept(ctx, b.innerBlk) } -func (b *postForkOption) Reject() error { +func (b *postForkOption) Reject(context.Context) error { // we do not reject the inner block here because that block may be contained // in the proposer block that causing this block to be rejected. @@ -78,44 +79,46 @@ func (b *postForkOption) Parent() ids.ID { // If Verify returns nil, Accept or Reject is eventually called on [b] and // [b.innerBlk]. 
-func (b *postForkOption) Verify() error { - parent, err := b.vm.getBlock(b.ParentID()) +func (b *postForkOption) Verify(ctx context.Context) error { + parent, err := b.vm.getBlock(ctx, b.ParentID()) if err != nil { return err } b.timestamp = parent.Timestamp() - return parent.verifyPostForkOption(b) + return parent.verifyPostForkOption(ctx, b) } -func (b *postForkOption) verifyPreForkChild(child *preForkBlock) error { +func (*postForkOption) verifyPreForkChild(context.Context, *preForkBlock) error { // A *preForkBlock's parent must be a *preForkBlock return errUnsignedChild } -func (b *postForkOption) verifyPostForkChild(child *postForkBlock) error { +func (b *postForkOption) verifyPostForkChild(ctx context.Context, child *postForkBlock) error { parentTimestamp := b.Timestamp() - parentPChainHeight, err := b.pChainHeight() + parentPChainHeight, err := b.pChainHeight(ctx) if err != nil { return err } return b.postForkCommonComponents.Verify( + ctx, parentTimestamp, parentPChainHeight, child, ) } -func (b *postForkOption) verifyPostForkOption(child *postForkOption) error { +func (*postForkOption) verifyPostForkOption(context.Context, *postForkOption) error { // A *postForkOption's parent can't be a *postForkOption return errUnexpectedBlockType } -func (b *postForkOption) buildChild() (Block, error) { - parentPChainHeight, err := b.pChainHeight() +func (b *postForkOption) buildChild(ctx context.Context) (Block, error) { + parentPChainHeight, err := b.pChainHeight(ctx) if err != nil { return nil, err } return b.postForkCommonComponents.buildChild( + ctx, b.ID(), b.Timestamp(), parentPChainHeight, @@ -123,12 +126,12 @@ func (b *postForkOption) buildChild() (Block, error) { } // This block's P-Chain height is its parent's P-Chain height -func (b *postForkOption) pChainHeight() (uint64, error) { - parent, err := b.vm.getBlock(b.ParentID()) +func (b *postForkOption) pChainHeight(ctx context.Context) (uint64, error) { + parent, err := b.vm.getBlock(ctx, b.ParentID()) if 
err != nil { return 0, err } - return parent.pChainHeight() + return parent.pChainHeight(ctx) } func (b *postForkOption) setStatus(status choices.Status) { diff --git a/avalanchego/vms/proposervm/post_fork_option_test.go b/avalanchego/vms/proposervm/post_fork_option_test.go index a54f896b..3f6c0ac6 100644 --- a/avalanchego/vms/proposervm/post_fork_option_test.go +++ b/avalanchego/vms/proposervm/post_fork_option_test.go @@ -1,11 +1,12 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( "bytes" - "errors" + "context" + "crypto" "testing" "time" @@ -20,13 +21,15 @@ import ( "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) +var _ snowman.OracleBlock = (*TestOptionsBlock)(nil) + type TestOptionsBlock struct { snowman.TestBlock opts [2]snowman.Block optsErr error } -func (tob TestOptionsBlock) Options() ([2]snowman.Block, error) { +func (tob TestOptionsBlock) Options(context.Context) ([2]snowman.Block, error) { return tob.opts, tob.optsErr } @@ -68,8 +71,10 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return oracleCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return oracleCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -83,7 +88,7 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -98,15 +103,15 @@ func 
TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock() + parentBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } - if err := parentBlk.Verify(); err != nil { + if err := parentBlk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(parentBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { t.Fatal(err) } @@ -115,7 +120,7 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { if !ok { t.Fatal("expected post fork block") } - opts, err := postForkOracleBlk.Options() + opts, err := postForkOracleBlk.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from post fork oracle block") } @@ -124,15 +129,15 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { } // ... and verify them - if err := opts[0].Verify(); err != nil { + if err := opts[0].Verify(context.Background()); err != nil { t.Fatal("option 0 should verify") } - if err := opts[1].Verify(); err != nil { + if err := opts[1].Verify(context.Background()); err != nil { t.Fatal("option 1 should verify") } // show we can build on options - if err := proVM.SetPreference(opts[0].ID()); err != nil { + if err := proVM.SetPreference(context.Background(), opts[0].ID()); err != nil { t.Fatal("could not set preference") } @@ -145,17 +150,19 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { BytesV: []byte{4}, TimestampV: oracleCoreBlk.opts[0].Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return childCoreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return childCoreBlk, nil + } proVM.Set(childCoreBlk.Timestamp()) - proChild, err := proVM.BuildBlock() + proChild, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build on top of option") 
} if _, ok := proChild.(*postForkBlock); !ok { t.Fatal("unexpected block type") } - if err := proChild.Verify(); err != nil { + if err := proChild.Verify(context.Background()); err != nil { t.Fatal("block built on option does not verify") } } @@ -201,8 +208,10 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { coreOpt1, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return oracleCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return oracleCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -216,7 +225,7 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -231,15 +240,15 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock() + parentBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } - if err := parentBlk.Verify(); err != nil { + if err := parentBlk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(parentBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { t.Fatal(err) } @@ -248,7 +257,7 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { if !ok { t.Fatal("expected post fork block") } - opts, err := postForkOracleBlk.Options() + opts, err := postForkOracleBlk.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from post fork oracle block") } 
@@ -257,22 +266,22 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { } // ... and verify them the first time - if err := opts[0].Verify(); err != nil { + if err := opts[0].Verify(context.Background()); err != nil { t.Fatal("option 0 should verify") } - if err := opts[1].Verify(); err != nil { + if err := opts[1].Verify(context.Background()); err != nil { t.Fatal("option 1 should verify") } // set error on coreBlock.Verify and recall Verify() - coreOpt0.VerifyV = errors.New("core block verify should only be called once") - coreOpt1.VerifyV = errors.New("core block verify should only be called once") + coreOpt0.VerifyV = errDuplicateVerify + coreOpt1.VerifyV = errDuplicateVerify // ... and verify them again. They verify without call to innerBlk - if err := opts[0].Verify(); err != nil { + if err := opts[0].Verify(context.Background()); err != nil { t.Fatal("option 0 should verify") } - if err := opts[1].Verify(); err != nil { + if err := opts[1].Verify(context.Background()); err != nil { t.Fatal("option 1 should verify") } } @@ -315,8 +324,10 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return oracleCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return oracleCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -330,7 +341,7 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -345,23 +356,23 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t 
*testing.T) { } } - parentBlk, err := proVM.BuildBlock() + parentBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } // accept oracle block - if err := parentBlk.Accept(); err != nil { + if err := parentBlk.Accept(context.Background()); err != nil { t.Fatal("could not accept block") } - coreVM.LastAcceptedF = func() (ids.ID, error) { + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if oracleCoreBlk.Status() == choices.Accepted { return oracleCoreBlk.ID(), nil } return coreGenBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(); err != nil { + if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { t.Fatal("could not retrieve last accepted block") } else if acceptedID != parentBlk.ID() { t.Fatal("unexpected last accepted ID") @@ -372,22 +383,22 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { if !ok { t.Fatal("expected post fork block") } - opts, err := postForkOracleBlk.Options() + opts, err := postForkOracleBlk.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from post fork oracle block") } - if err := opts[0].Accept(); err != nil { + if err := opts[0].Accept(context.Background()); err != nil { t.Fatal("could not accept option") } - coreVM.LastAcceptedF = func() (ids.ID, error) { + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if oracleCoreBlk.opts[0].Status() == choices.Accepted { return oracleCoreBlk.opts[0].ID(), nil } return oracleCoreBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(); err != nil { + if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { t.Fatal("could not retrieve last accepted block") } else if acceptedID != opts[0].ID() { t.Fatal("unexpected last accepted ID") @@ -433,8 +444,10 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return oracleCoreBlk, 
nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return oracleCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -448,7 +461,7 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -463,13 +476,13 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { } } - builtBlk, err := proVM.BuildBlock() + builtBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } // reject oracle block - if err := builtBlk.Reject(); err != nil { + if err := builtBlk.Reject(context.Background()); err != nil { t.Fatal("could not reject block") } proBlk, ok := builtBlk.(*postForkBlock) @@ -490,12 +503,12 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { if !ok { t.Fatal("expected post fork block") } - opts, err := postForkOracleBlk.Options() + opts, err := postForkOracleBlk.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from post fork oracle block") } - if err := opts[0].Reject(); err != nil { + if err := opts[0].Reject(context.Background()); err != nil { t.Fatal("could not accept option") } proOpt, ok := opts[0].(*postForkOption) @@ -541,8 +554,10 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { TimestampV: coreBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } + 
coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -554,7 +569,7 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -567,7 +582,7 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock() + parentBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } @@ -576,7 +591,7 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { if !ok { t.Fatal("expected post fork block") } - _, err = postForkBlk.Options() + _, err = postForkBlk.Options(context.Background()) if err != snowman.ErrNotOracle { t.Fatal("should have reported that the block isn't an oracle block") } @@ -590,13 +605,13 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { t.Fatal("failed to build new child block") } - invalidChild, err := proVM.ParseBlock(statelessChild.Bytes()) + invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) if err != nil { // A failure to parse is okay here return } - err = invalidChild.Verify() + err = invalidChild.Verify(context.Background()) if err == nil { t.Fatal("Should have failed to verify a child that should have been signed") } @@ -638,7 +653,7 @@ func TestOptionTimestampValidity(t *testing.T) { }, }, } - statelessBlock, err := block.BuildUnsignedApricot( + statelessBlock, err := block.BuildUnsigned( coreGenBlk.ID(), coreGenBlk.Timestamp(), 0, @@ -648,7 +663,7 @@ func TestOptionTimestampValidity(t *testing.T) { t.Fatal(err) } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) 
{ + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -662,7 +677,7 @@ func TestOptionTimestampValidity(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -677,12 +692,12 @@ func TestOptionTimestampValidity(t *testing.T) { } } - statefulBlock, err := proVM.ParseBlock(statelessBlock.Bytes()) + statefulBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) if err != nil { t.Fatal(err) } - if err := statefulBlock.Verify(); err != nil { + if err := statefulBlock.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -691,25 +706,25 @@ func TestOptionTimestampValidity(t *testing.T) { t.Fatal("should have reported as an oracle block") } - options, err := statefulOracleBlock.Options() + options, err := statefulOracleBlock.Options(context.Background()) if err != nil { t.Fatal(err) } option := options[0] - if err := option.Verify(); err != nil { + if err := option.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := statefulBlock.Accept(); err != nil { + if err := statefulBlock.Accept(context.Background()); err != nil { t.Fatal(err) } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { t.Fatal("called GetBlock when unable to handle the error") return nil, nil } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { t.Fatal("called ParseBlock when unable to handle the error") return nil, nil } @@ -719,16 +734,24 @@ func TestOptionTimestampValidity(t *testing.T) { t.Fatalf("wrong time returned expected %s got %s", expectedTime, optionTime) } - if err := 
option.Accept(); err != nil { + if err := option.Accept(context.Background()); err != nil { t.Fatal(err) } // Restart the node. ctx := proVM.ctx - proVM = New(coreVM, time.Time{}, 0, time.Time{}) + proVM = New( + coreVM, + time.Time{}, + 0, + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, + ) coreVM.InitializeF = func( + context.Context, *snow.Context, manager.Manager, []byte, @@ -740,9 +763,11 @@ func TestOptionTimestampValidity(t *testing.T) { ) error { return nil } - coreVM.LastAcceptedF = func() (ids.ID, error) { return coreOracleBlk.opts[0].ID(), nil } + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { + return coreOracleBlk.opts[0].ID(), nil + } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -756,7 +781,7 @@ func TestOptionTimestampValidity(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -771,11 +796,22 @@ func TestOptionTimestampValidity(t *testing.T) { } } - if err := proVM.Initialize(ctx, db, nil, nil, nil, nil, nil, nil); err != nil { + err = proVM.Initialize( + context.Background(), + ctx, + db, + nil, + nil, + nil, + nil, + nil, + nil, + ) + if err != nil { t.Fatalf("failed to initialize proposerVM with %s", err) } - statefulOptionBlock, err := proVM.ParseBlock(option.Bytes()) + statefulOptionBlock, err := proVM.ParseBlock(context.Background(), option.Bytes()) if err != nil { t.Fatal(err) } @@ -784,11 +820,11 @@ func TestOptionTimestampValidity(t *testing.T) { t.Fatalf("wrong status returned expected %s got %s", choices.Accepted, status) } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = 
func(context.Context, ids.ID) (snowman.Block, error) { t.Fatal("called GetBlock when unable to handle the error") return nil, nil } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { t.Fatal("called ParseBlock when unable to handle the error") return nil, nil } diff --git a/avalanchego/vms/proposervm/pre_fork_block.go b/avalanchego/vms/proposervm/pre_fork_block.go index 3fb5fdcf..7fcb8d70 100644 --- a/avalanchego/vms/proposervm/pre_fork_block.go +++ b/avalanchego/vms/proposervm/pre_fork_block.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "context" "time" "go.uber.org/zap" @@ -14,43 +15,43 @@ import ( "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -var _ Block = &preForkBlock{} +var _ Block = (*preForkBlock)(nil) type preForkBlock struct { snowman.Block vm *VM } -func (b *preForkBlock) Accept() error { +func (b *preForkBlock) Accept(ctx context.Context) error { if err := b.acceptOuterBlk(); err != nil { return err } - return b.acceptInnerBlk() + return b.acceptInnerBlk(ctx) } -func (b *preForkBlock) acceptOuterBlk() error { +func (*preForkBlock) acceptOuterBlk() error { return nil } -func (b *preForkBlock) acceptInnerBlk() error { - return b.Block.Accept() +func (b *preForkBlock) acceptInnerBlk(ctx context.Context) error { + return b.Block.Accept(ctx) } -func (b *preForkBlock) Verify() error { - parent, err := b.vm.getPreForkBlock(b.Block.Parent()) +func (b *preForkBlock) Verify(ctx context.Context) error { + parent, err := b.vm.getPreForkBlock(ctx, b.Block.Parent()) if err != nil { return err } - return parent.verifyPreForkChild(b) + return parent.verifyPreForkChild(ctx, b) } -func (b *preForkBlock) Options() ([2]snowman.Block, error) { +func (b *preForkBlock) Options(ctx context.Context) 
([2]snowman.Block, error) { oracleBlk, ok := b.Block.(snowman.OracleBlock) if !ok { return [2]snowman.Block{}, snowman.ErrNotOracle } - options, err := oracleBlk.Options() + options, err := oracleBlk.Options(ctx) if err != nil { return [2]snowman.Block{}, err } @@ -71,10 +72,10 @@ func (b *preForkBlock) getInnerBlk() snowman.Block { return b.Block } -func (b *preForkBlock) verifyPreForkChild(child *preForkBlock) error { +func (b *preForkBlock) verifyPreForkChild(ctx context.Context, child *preForkBlock) error { parentTimestamp := b.Timestamp() if !parentTimestamp.Before(b.vm.activationTime) { - if err := verifyIsOracleBlock(b.Block); err != nil { + if err := verifyIsOracleBlock(ctx, b.Block); err != nil { return err } @@ -88,12 +89,12 @@ func (b *preForkBlock) verifyPreForkChild(child *preForkBlock) error { ) } - return child.Block.Verify() + return child.Block.Verify(ctx) } // This method only returns nil once (during the transition) -func (b *preForkBlock) verifyPostForkChild(child *postForkBlock) error { - if err := verifyIsNotOracleBlock(b.Block); err != nil { +func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkBlock) error { + if err := verifyIsNotOracleBlock(ctx, b.Block); err != nil { return err } @@ -103,7 +104,7 @@ func (b *preForkBlock) verifyPostForkChild(child *postForkBlock) error { childID := child.ID() childPChainHeight := child.PChainHeight() - currentPChainHeight, err := b.vm.ctx.ValidatorState.GetCurrentHeight() + currentPChainHeight, err := b.vm.ctx.ValidatorState.GetCurrentHeight(ctx) if err != nil { b.vm.ctx.Log.Error("block verification failed", zap.String("reason", "failed to get current P-Chain height"), @@ -152,18 +153,18 @@ func (b *preForkBlock) verifyPostForkChild(child *postForkBlock) error { } // Verify the inner block and track it as verified - return b.vm.verifyAndRecordInnerBlk(child) + return b.vm.verifyAndRecordInnerBlk(ctx, nil, child) } -func (b *preForkBlock) verifyPostForkOption(child 
*postForkOption) error { +func (*preForkBlock) verifyPostForkOption(context.Context, *postForkOption) error { return errUnexpectedBlockType } -func (b *preForkBlock) buildChild() (Block, error) { +func (b *preForkBlock) buildChild(ctx context.Context) (Block, error) { parentTimestamp := b.Timestamp() if parentTimestamp.Before(b.vm.activationTime) { // The chain hasn't forked yet - innerBlock, err := b.vm.ChainVM.BuildBlock() + innerBlock, err := b.vm.ChainVM.BuildBlock(ctx) if err != nil { return nil, err } @@ -190,17 +191,17 @@ func (b *preForkBlock) buildChild() (Block, error) { // The child's P-Chain height is proposed as the optimal P-Chain height that // is at least the minimum height - pChainHeight, err := b.vm.optimalPChainHeight(b.vm.minimumPChainHeight) + pChainHeight, err := b.vm.optimalPChainHeight(ctx, b.vm.minimumPChainHeight) if err != nil { return nil, err } - innerBlock, err := b.vm.ChainVM.BuildBlock() + innerBlock, err := b.vm.ChainVM.BuildBlock(ctx) if err != nil { return nil, err } - statelessBlock, err := block.BuildUnsignedApricot( + statelessBlock, err := block.BuildUnsigned( parentID, newTimestamp, pChainHeight, @@ -228,7 +229,7 @@ func (b *preForkBlock) buildChild() (Block, error) { return blk, nil } -func (b *preForkBlock) pChainHeight() (uint64, error) { +func (*preForkBlock) pChainHeight(context.Context) (uint64, error) { return 0, nil } diff --git a/avalanchego/vms/proposervm/pre_fork_block_test.go b/avalanchego/vms/proposervm/pre_fork_block_test.go index f0610d4b..08df36b0 100644 --- a/avalanchego/vms/proposervm/pre_fork_block_test.go +++ b/avalanchego/vms/proposervm/pre_fork_block_test.go @@ -1,17 +1,26 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm import ( "bytes" + "context" "testing" "time" + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/proposervm/block" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" @@ -24,7 +33,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { } // test - _, err := proBlk.Options() + _, err := proBlk.Options(context.Background()) if err != snowman.ErrNotOracle { t.Fatal("Proposer block should signal that it wraps a block not implementing Options interface with ErrNotOracleBlock error") } @@ -35,7 +44,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { } // test - _, err = proBlk.Options() + _, err = proBlk.Options(context.Background()) if err != nil { t.Fatal("Proposer block should forward wrapped block options if this implements Option interface") } @@ -74,8 +83,10 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return oracleCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return oracleCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -90,7 +101,7 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock() + parentBlk, err := proVM.BuildBlock(context.Background()) if err != 
nil { t.Fatal("could not build pre fork oracle block") } @@ -100,16 +111,16 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { if !ok { t.Fatal("expected pre fork block") } - opts, err := preForkOracleBlk.Options() + opts, err := preForkOracleBlk.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from pre fork oracle block") } - if err := opts[0].Verify(); err != nil { + if err := opts[0].Verify(context.Background()); err != nil { t.Fatal("option should verify") } // ... show a block can be built on top of an option - if err := proVM.SetPreference(opts[0].ID()); err != nil { + if err := proVM.SetPreference(context.Background(), opts[0].ID()); err != nil { t.Fatal("could not set preference") } @@ -123,9 +134,11 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { ParentV: oracleCoreBlk.opts[0].ID(), }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return lastCoreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return lastCoreBlk, nil + } - preForkChild, err := proVM.BuildBlock() + preForkChild, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build pre fork block on pre fork option block") } @@ -173,8 +186,10 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return oracleCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return oracleCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -189,7 +204,7 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock() + parentBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build pre fork oracle block") } @@ 
-199,16 +214,16 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { if !ok { t.Fatal("expected pre fork block") } - opts, err := preForkOracleBlk.Options() + opts, err := preForkOracleBlk.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from pre fork oracle block") } - if err := opts[0].Verify(); err != nil { + if err := opts[0].Verify(context.Background()); err != nil { t.Fatal("option should verify") } // ... show a block can be built on top of an option - if err := proVM.SetPreference(opts[0].ID()); err != nil { + if err := proVM.SetPreference(context.Background(), opts[0].ID()); err != nil { t.Fatal("could not set preference") } @@ -222,9 +237,11 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { ParentV: oracleCoreBlk.opts[0].ID(), }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return lastCoreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return lastCoreBlk, nil + } - postForkChild, err := proVM.BuildBlock() + postForkChild, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build pre fork block on pre fork option block") } @@ -251,8 +268,10 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { ParentV: coreGenBlk.ID(), TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return prntCoreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return prntCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -262,7 +281,7 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { 
case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -274,7 +293,7 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { } proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - prntProBlk, err := proVM.BuildBlock() + prntProBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("Could not build proposer block") } @@ -295,14 +314,14 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { // child block referring unknown parent does not verify childCoreBlk.ParentV = ids.Empty - err = childProBlk.Verify() + err = childProBlk.Verify(context.Background()) if err == nil { t.Fatal("Block with unknown parent should not verify") } // child block referring known parent does verify childCoreBlk.ParentV = prntProBlk.ID() - if err := childProBlk.Verify(); err != nil { + if err := childProBlk.Verify(context.Background()); err != nil { t.Fatal("Block with known parent should verify") } } @@ -326,29 +345,31 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { TimestampV: preActivationTime, VerifyV: nil, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } // preFork block verifies if parent is before fork activation time - preForkChild, err := proVM.BuildBlock() + preForkChild, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("unexpectedly could not build block due to %s", err) } else if _, ok := preForkChild.(*preForkBlock); !ok { t.Fatal("expected preForkBlock") } - if err := preForkChild.Verify(); err != nil { + if err := preForkChild.Verify(context.Background()); err != nil { t.Fatal("pre Fork blocks should verify before fork") } // postFork block does NOT verify if parent is before fork activation time - postForkStatelessChild, err := block.BuildApricot( + postForkStatelessChild, err := block.Build( coreGenBlk.ID(), coreBlk.Timestamp(), 0, // pChainHeight - 
proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, coreBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatalf("unexpectedly could not build block due to %s", err) @@ -365,7 +386,7 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { if !postForkChild.Timestamp().Before(activationTime) { t.Fatal("This test requires postForkChild to be before fork activation time") } - if err := postForkChild.Verify(); err == nil { + if err := postForkChild.Verify(context.Background()); err == nil { t.Fatal("post Fork blocks should NOT verify before fork") } @@ -373,8 +394,10 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { postActivationTime := activationTime.Add(time.Second) proVM.Set(postActivationTime) - coreVM.SetPreferenceF = func(id ids.ID) error { return nil } - if err := proVM.SetPreference(preForkChild.ID()); err != nil { + coreVM.SetPreferenceF = func(_ context.Context, id ids.ID) error { + return nil + } + if err := proVM.SetPreference(context.Background(), preForkChild.ID()); err != nil { t.Fatal("could not set preference") } @@ -387,8 +410,10 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { TimestampV: postActivationTime, VerifyV: nil, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return secondCoreBlk, nil } - coreVM.GetBlockF = func(id ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return secondCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { case coreGenBlk.ID(): return coreGenBlk, nil @@ -400,18 +425,18 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { } } - lastPreForkBlk, err := proVM.BuildBlock() + lastPreForkBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("unexpectedly could not build block due to %s", err) } else if _, ok := lastPreForkBlk.(*preForkBlock); !ok { 
t.Fatal("expected preForkBlock") } - if err := lastPreForkBlk.Verify(); err != nil { + if err := lastPreForkBlk.Verify(context.Background()); err != nil { t.Fatal("pre Fork blocks should verify before fork") } - if err := proVM.SetPreference(lastPreForkBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), lastPreForkBlk.ID()); err != nil { t.Fatal("could not set preference") } thirdCoreBlk := &snowman.TestBlock{ @@ -423,8 +448,10 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { TimestampV: postActivationTime, VerifyV: nil, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return thirdCoreBlk, nil } - coreVM.GetBlockF = func(id ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return thirdCoreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { case coreGenBlk.ID(): return coreGenBlk, nil @@ -438,14 +465,14 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { } } - firstPostForkBlk, err := proVM.BuildBlock() + firstPostForkBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("unexpectedly could not build block due to %s", err) } else if _, ok := firstPostForkBlk.(*postForkBlock); !ok { t.Fatal("expected preForkBlock") } - if err := firstPostForkBlk.Verify(); err != nil { + if err := firstPostForkBlk.Verify(context.Background()); err != nil { t.Fatal("pre Fork blocks should verify before fork") } } @@ -466,17 +493,19 @@ func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { TimestampV: coreGenBlk.Timestamp(), VerifyV: nil, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlock, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlock, nil + } // postFork block verifies if parent is after fork activation time - postForkChild, err := proVM.BuildBlock() + postForkChild, err := 
proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("unexpectedly could not build block due to %s", err) } else if _, ok := postForkChild.(*postForkBlock); !ok { t.Fatal("expected postForkBlock") } - if err := postForkChild.Verify(); err != nil { + if err := postForkChild.Verify(context.Background()); err != nil { t.Fatal("post Fork blocks should verify after fork") } @@ -485,7 +514,7 @@ func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { Block: coreBlock, vm: proVM, } - if err := preForkChild.Verify(); err == nil { + if err := preForkChild.Verify(context.Background()); err == nil { t.Fatal("pre Fork blocks should NOT verify after fork") } } @@ -502,8 +531,10 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { BytesV: []byte{1}, ParentV: coreGenBlk.ID(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -513,7 +544,7 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -524,23 +555,23 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { } } - builtBlk, err := proVM.BuildBlock() + builtBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("proposerVM could not build block") } // test - if err := builtBlk.Accept(); err != nil { + if err := builtBlk.Accept(context.Background()); err != nil { t.Fatal("could not accept block") } - coreVM.LastAcceptedF = func() (ids.ID, error) { + 
coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if coreBlk.Status() == choices.Accepted { return coreBlk.ID(), nil } return coreGenBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(); err != nil { + if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { t.Fatal("could not retrieve last accepted block") } else if acceptedID != builtBlk.ID() { t.Fatal("unexpected last accepted ID") @@ -559,9 +590,11 @@ func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { ParentV: coreGenBlk.ID(), HeightV: coreGenBlk.Height() + 1, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } - sb, err := proVM.BuildBlock() + sb, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build block") } @@ -570,7 +603,7 @@ func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { t.Fatal("built block has not expected type") } - if err := proBlk.Reject(); err != nil { + if err := proBlk.Reject(context.Background()); err != nil { t.Fatal("could not reject block") } @@ -625,7 +658,7 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { }, } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -639,7 +672,7 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -654,12 +687,12 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { } } - firstBlock, err := proVM.ParseBlock(coreBlk.Bytes()) + firstBlock, err := proVM.ParseBlock(context.Background(), 
coreBlk.Bytes()) if err != nil { t.Fatal(err) } - if err := firstBlock.Verify(); err != nil { + if err := firstBlock.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -668,16 +701,16 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { t.Fatal("should have returned an oracle block") } - options, err := oracleBlock.Options() + options, err := oracleBlock.Options(context.Background()) if err != nil { t.Fatal(err) } - if err := options[0].Verify(); err != nil { + if err := options[0].Verify(context.Background()); err != nil { t.Fatal(err) } - if err := options[1].Verify(); err != nil { + if err := options[1].Verify(context.Background()); err != nil { t.Fatal(err) } } @@ -724,7 +757,7 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { }, } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -738,7 +771,7 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -753,36 +786,85 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { } } - firstBlock, err := proVM.ParseBlock(coreBlk.Bytes()) + firstBlock, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) if err != nil { t.Fatal(err) } - if err := firstBlock.Verify(); err != nil { + if err := firstBlock.Verify(context.Background()); err != nil { t.Fatal(err) } - slb, err := block.BuildApricot( + slb, err := block.Build( firstBlock.ID(), // refer unknown parent firstBlock.Timestamp(), 0, // pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, coreBlk.opts[0].Bytes(), 
proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") } - invalidChild, err := proVM.ParseBlock(slb.Bytes()) + invalidChild, err := proVM.ParseBlock(context.Background(), slb.Bytes()) if err != nil { // A failure to parse is okay here return } - err = invalidChild.Verify() + err = invalidChild.Verify(context.Background()) if err == nil { t.Fatal("Should have failed to verify a child that was signed when it should be a pre fork block") } } + +// Assert that when the underlying VM implements ChainVMWithBuildBlockContext +// and the proposervm is activated, we only call the VM's BuildBlockWithContext +// when a P-chain height can be correctly provided from the parent block. +func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + pChainHeight := uint64(1337) + blkID := ids.GenerateTestID() + innerBlk := snowman.NewMockBlock(ctrl) + innerBlk.EXPECT().ID().Return(blkID).AnyTimes() + innerBlk.EXPECT().Timestamp().Return(mockable.MaxTime) + builtBlk := snowman.NewMockBlock(ctrl) + builtBlk.EXPECT().Bytes().Return([]byte{1, 2, 3}).AnyTimes() + builtBlk.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() + builtBlk.EXPECT().Height().Return(pChainHeight).AnyTimes() + innerVM := mocks.NewMockChainVM(ctrl) + innerVM.EXPECT().BuildBlock(gomock.Any()).Return(builtBlk, nil).AnyTimes() + vdrState := validators.NewMockState(ctrl) + vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() + + vm := &VM{ + ChainVM: innerVM, + ctx: &snow.Context{ + ValidatorState: vdrState, + Log: logging.NoLog{}, + }, + } + + blk := &preForkBlock{ + Block: innerBlk, + vm: vm, + } + + // Should call BuildBlock since proposervm won't have a P-chain height + gotChild, err := blk.buildChild(context.Background()) + require.NoError(err) + require.Equal(builtBlk, 
gotChild.(*postForkBlock).innerBlk) + + // Should call BuildBlock since proposervm is not activated + innerBlk.EXPECT().Timestamp().Return(time.Time{}) + vm.activationTime = mockable.MaxTime + + gotChild, err = blk.buildChild(context.Background()) + require.NoError(err) + require.Equal(builtBlk, gotChild.(*preForkBlock).Block) +} diff --git a/avalanchego/vms/proposervm/proposer/mock_windower.go b/avalanchego/vms/proposervm/proposer/mock_windower.go new file mode 100644 index 00000000..0d6c9b45 --- /dev/null +++ b/avalanchego/vms/proposervm/proposer/mock_windower.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/proposervm/proposer (interfaces: Windower) + +// Package proposer is a generated GoMock package. +package proposer + +import ( + context "context" + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + gomock "github.com/golang/mock/gomock" +) + +// MockWindower is a mock of Windower interface. +type MockWindower struct { + ctrl *gomock.Controller + recorder *MockWindowerMockRecorder +} + +// MockWindowerMockRecorder is the mock recorder for MockWindower. +type MockWindowerMockRecorder struct { + mock *MockWindower +} + +// NewMockWindower creates a new mock instance. +func NewMockWindower(ctrl *gomock.Controller) *MockWindower { + mock := &MockWindower{ctrl: ctrl} + mock.recorder = &MockWindowerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockWindower) EXPECT() *MockWindowerMockRecorder { + return m.recorder +} + +// Delay mocks base method. 
+func (m *MockWindower) Delay(arg0 context.Context, arg1, arg2 uint64, arg3 ids.NodeID) (time.Duration, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Delay", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(time.Duration) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Delay indicates an expected call of Delay. +func (mr *MockWindowerMockRecorder) Delay(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delay", reflect.TypeOf((*MockWindower)(nil).Delay), arg0, arg1, arg2, arg3) +} + +// Proposers mocks base method. +func (m *MockWindower) Proposers(arg0 context.Context, arg1, arg2 uint64) ([]ids.NodeID, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Proposers", arg0, arg1, arg2) + ret0, _ := ret[0].([]ids.NodeID) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Proposers indicates an expected call of Proposers. +func (mr *MockWindowerMockRecorder) Proposers(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Proposers", reflect.TypeOf((*MockWindower)(nil).Proposers), arg0, arg1, arg2) +} diff --git a/avalanchego/vms/proposervm/proposer/validators.go b/avalanchego/vms/proposervm/proposer/validators.go index e5b29234..ba60a088 100644 --- a/avalanchego/vms/proposervm/proposer/validators.go +++ b/avalanchego/vms/proposervm/proposer/validators.go @@ -1,26 +1,20 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposer import ( - "bytes" - "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" ) +var _ utils.Sortable[validatorData] = validatorData{} + type validatorData struct { id ids.NodeID weight uint64 } -type validatorsSlice []validatorData - -func (d validatorsSlice) Len() int { return len(d) } -func (d validatorsSlice) Swap(i, j int) { d[i], d[j] = d[j], d[i] } - -func (d validatorsSlice) Less(i, j int) bool { - iID := d[i].id - jID := d[j].id - return bytes.Compare(iID[:], jID[:]) == -1 +func (d validatorData) Less(other validatorData) bool { + return d.id.Less(other.id) } diff --git a/avalanchego/vms/proposervm/proposer/validators_test.go b/avalanchego/vms/proposervm/proposer/validators_test.go new file mode 100644 index 00000000..a0703d49 --- /dev/null +++ b/avalanchego/vms/proposervm/proposer/validators_test.go @@ -0,0 +1,26 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package proposer + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +func TestValidatorDataLess(t *testing.T) { + require := require.New(t) + + var v1, v2 validatorData + require.False(v1.Less(v2)) + require.False(v2.Less(v1)) + + v1 = validatorData{ + id: ids.NodeID{1}, + } + require.False(v1.Less(v2)) + require.True(v2.Less(v1)) +} diff --git a/avalanchego/vms/proposervm/proposer/windower.go b/avalanchego/vms/proposervm/proposer/windower.go index 36255d21..4f67b279 100644 --- a/avalanchego/vms/proposervm/proposer/windower.go +++ b/avalanchego/vms/proposervm/proposer/windower.go @@ -1,14 +1,15 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposer import ( - "sort" + "context" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/wrappers" @@ -21,10 +22,23 @@ const ( MaxDelay = MaxWindows * WindowDuration ) -var _ Windower = &windower{} +var _ Windower = (*windower)(nil) type Windower interface { + // Proposers returns the proposer list for building a block at [chainHeight] + // when the validator set is defined at [pChainHeight]. The list is returned + // in order. The minimum delay of a validator is the index they appear times + // [WindowDuration]. + Proposers( + ctx context.Context, + chainHeight, + pChainHeight uint64, + ) ([]ids.NodeID, error) + // Delay returns the amount of time that [validatorID] must wait before + // building a block at [chainHeight] when the validator set is defined at + // [pChainHeight]. 
Delay( + ctx context.Context, chainHeight, pChainHeight uint64, validatorID ids.NodeID, @@ -50,28 +64,24 @@ func New(state validators.State, subnetID, chainID ids.ID) Windower { } } -func (w *windower) Delay(chainHeight, pChainHeight uint64, validatorID ids.NodeID) (time.Duration, error) { - if validatorID == ids.EmptyNodeID { - return MaxDelay, nil - } - +func (w *windower) Proposers(ctx context.Context, chainHeight, pChainHeight uint64) ([]ids.NodeID, error) { // get the validator set by the p-chain height - validatorsMap, err := w.state.GetValidatorSet(pChainHeight, w.subnetID) + validatorsMap, err := w.state.GetValidatorSet(ctx, pChainHeight, w.subnetID) if err != nil { - return 0, err + return nil, err } // convert the map of validators to a slice - validators := make(validatorsSlice, 0, len(validatorsMap)) + validators := make([]validatorData, 0, len(validatorsMap)) weight := uint64(0) for k, v := range validatorsMap { validators = append(validators, validatorData{ id: k, - weight: v, + weight: v.Weight, }) - newWeight, err := math.Add64(weight, v) + newWeight, err := math.Add64(weight, v.Weight) if err != nil { - return 0, err + return nil, err } weight = newWeight } @@ -79,7 +89,7 @@ func (w *windower) Delay(chainHeight, pChainHeight uint64, validatorID ids.NodeI // canonically sort validators // Note: validators are sorted by ID, sorting by weight would not create a // canonically sorted list - sort.Sort(validators) + utils.Sort(validators) // convert the slice of validators to a slice of weights validatorWeights := make([]uint64, len(validators)) @@ -88,7 +98,7 @@ func (w *windower) Delay(chainHeight, pChainHeight uint64, validatorID ids.NodeI } if err := w.sampler.Initialize(validatorWeights); err != nil { - return 0, err + return nil, err } numToSample := MaxWindows @@ -100,13 +110,29 @@ func (w *windower) Delay(chainHeight, pChainHeight uint64, validatorID ids.NodeI w.sampler.Seed(int64(seed)) indices, err := w.sampler.Sample(numToSample) + if err != 
nil { + return nil, err + } + + nodeIDs := make([]ids.NodeID, numToSample) + for i, index := range indices { + nodeIDs[i] = validators[index].id + } + return nodeIDs, nil +} + +func (w *windower) Delay(ctx context.Context, chainHeight, pChainHeight uint64, validatorID ids.NodeID) (time.Duration, error) { + if validatorID == ids.EmptyNodeID { + return MaxDelay, nil + } + + proposers, err := w.Proposers(ctx, chainHeight, pChainHeight) if err != nil { return 0, err } delay := time.Duration(0) - for _, index := range indices { - nodeID := validators[index].id + for _, nodeID := range proposers { if nodeID == validatorID { return delay, nil } diff --git a/avalanchego/vms/proposervm/proposer/windower_test.go b/avalanchego/vms/proposervm/proposer/windower_test.go index fd5cd3a5..ef7cc706 100644 --- a/avalanchego/vms/proposervm/proposer/windower_test.go +++ b/avalanchego/vms/proposervm/proposer/windower_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposer import ( + "context" "math/rand" "testing" "time" @@ -22,14 +23,14 @@ func TestWindowerNoValidators(t *testing.T) { nodeID := ids.GenerateTestNodeID() vdrState := &validators.TestState{ T: t, - GetValidatorSetF: func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { return nil, nil }, } w := New(vdrState, subnetID, chainID) - delay, err := w.Delay(1, 0, nodeID) + delay, err := w.Delay(context.Background(), 1, 0, nodeID) require.NoError(err) require.EqualValues(0, delay) } @@ -43,20 +44,23 @@ func TestWindowerRepeatedValidator(t *testing.T) { nonValidatorID := ids.GenerateTestNodeID() vdrState := &validators.TestState{ T: t, - GetValidatorSetF: func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - return map[ids.NodeID]uint64{ - validatorID: 10, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + validatorID: { + NodeID: validatorID, + Weight: 10, + }, }, nil }, } w := New(vdrState, subnetID, chainID) - validatorDelay, err := w.Delay(1, 0, validatorID) + validatorDelay, err := w.Delay(context.Background(), 1, 0, validatorID) require.NoError(err) require.EqualValues(0, validatorDelay) - nonValidatorDelay, err := w.Delay(1, 0, nonValidatorID) + nonValidatorDelay, err := w.Delay(context.Background(), 1, 0, nonValidatorID) require.NoError(err) require.EqualValues(MaxDelay, nonValidatorDelay) } @@ -72,12 +76,15 @@ func TestWindowerChangeByHeight(t *testing.T) { } vdrState := &validators.TestState{ T: t, - GetValidatorSetF: func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - validators := make(map[ids.NodeID]uint64, MaxWindows) + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + vdrs := 
make(map[ids.NodeID]*validators.GetValidatorOutput, MaxWindows) for _, id := range validatorIDs { - validators[id] = 1 + vdrs[id] = &validators.GetValidatorOutput{ + NodeID: id, + Weight: 1, + } } - return validators, nil + return vdrs, nil }, } @@ -93,7 +100,7 @@ func TestWindowerChangeByHeight(t *testing.T) { } for i, expectedDelay := range expectedDelays1 { vdrID := validatorIDs[i] - validatorDelay, err := w.Delay(1, 0, vdrID) + validatorDelay, err := w.Delay(context.Background(), 1, 0, vdrID) require.NoError(err) require.EqualValues(expectedDelay, validatorDelay) } @@ -108,7 +115,7 @@ func TestWindowerChangeByHeight(t *testing.T) { } for i, expectedDelay := range expectedDelays2 { vdrID := validatorIDs[i] - validatorDelay, err := w.Delay(2, 0, vdrID) + validatorDelay, err := w.Delay(context.Background(), 2, 0, vdrID) require.NoError(err) require.EqualValues(expectedDelay, validatorDelay) } @@ -131,12 +138,15 @@ func TestWindowerChangeByChain(t *testing.T) { } vdrState := &validators.TestState{ T: t, - GetValidatorSetF: func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - validators := make(map[ids.NodeID]uint64, MaxWindows) + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + vdrs := make(map[ids.NodeID]*validators.GetValidatorOutput, MaxWindows) for _, id := range validatorIDs { - validators[id] = 1 + vdrs[id] = &validators.GetValidatorOutput{ + NodeID: id, + Weight: 1, + } } - return validators, nil + return vdrs, nil }, } @@ -153,7 +163,7 @@ func TestWindowerChangeByChain(t *testing.T) { } for i, expectedDelay := range expectedDelays0 { vdrID := validatorIDs[i] - validatorDelay, err := w0.Delay(1, 0, vdrID) + validatorDelay, err := w0.Delay(context.Background(), 1, 0, vdrID) require.NoError(err) require.EqualValues(expectedDelay, validatorDelay) } @@ -168,7 +178,7 @@ func TestWindowerChangeByChain(t *testing.T) { } for i, expectedDelay := range expectedDelays1 { vdrID := 
validatorIDs[i] - validatorDelay, err := w1.Delay(1, 0, vdrID) + validatorDelay, err := w1.Delay(context.Background(), 1, 0, vdrID) require.NoError(err) require.EqualValues(expectedDelay, validatorDelay) } diff --git a/avalanchego/vms/proposervm/scheduler/scheduler.go b/avalanchego/vms/proposervm/scheduler/scheduler.go index 9a709d52..e0062019 100644 --- a/avalanchego/vms/proposervm/scheduler/scheduler.go +++ b/avalanchego/vms/proposervm/scheduler/scheduler.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package scheduler diff --git a/avalanchego/vms/proposervm/scheduler/scheduler_test.go b/avalanchego/vms/proposervm/scheduler/scheduler_test.go index aeaeca1b..74693657 100644 --- a/avalanchego/vms/proposervm/scheduler/scheduler_test.go +++ b/avalanchego/vms/proposervm/scheduler/scheduler_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package scheduler @@ -46,7 +46,7 @@ func TestDelayFromSetTime(t *testing.T) { } } -func TestReceipt(t *testing.T) { +func TestReceipt(*testing.T) { toEngine := make(chan common.Message, 10) now := time.Now() startTime := now.Add(50 * time.Millisecond) diff --git a/avalanchego/vms/proposervm/state/block_height_index.go b/avalanchego/vms/proposervm/state/block_height_index.go index f94d56aa..1a6f0297 100644 --- a/avalanchego/vms/proposervm/state/block_height_index.go +++ b/avalanchego/vms/proposervm/state/block_height_index.go @@ -1,41 +1,26 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state import ( - "time" - - "go.uber.org/zap" - "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" ) -const ( - cacheSize = 8192 // max cache entries - - deleteBatchSize = 8192 - - // Sleep [sleepDurationMultiplier]x (5x) the amount of time we spend processing the block - // to ensure the async indexing does not bottleneck the node. - sleepDurationMultiplier = 5 -) +const cacheSize = 8192 // max cache entries var ( - _ HeightIndex = &heightIndex{} + _ HeightIndex = (*heightIndex)(nil) heightPrefix = []byte("height") metadataPrefix = []byte("metadata") - forkKey = []byte("fork") - checkpointKey = []byte("checkpoint") - resetOccurredKey = []byte("resetOccurred") + forkKey = []byte("fork") + checkpointKey = []byte("checkpoint") ) type HeightIndexGetter interface { @@ -44,14 +29,11 @@ type HeightIndexGetter interface { // Fork height is stored when the first post-fork block/option is accepted. // Before that, fork height won't be found. GetForkHeight() (uint64, error) - IsIndexEmpty() (bool, error) - HasIndexReset() (bool, error) } type HeightIndexWriter interface { SetBlockIDAtHeight(height uint64, blkID ids.ID) error SetForkHeight(height uint64) error - SetIndexHasReset() error } // A checkpoint is the blockID of the next block to be considered @@ -71,16 +53,13 @@ type HeightIndex interface { HeightIndexWriter HeightIndexGetter HeightIndexBatchSupport - - // ResetHeightIndex deletes all index DB entries - ResetHeightIndex(logging.Logger, versiondb.Commitable) error } type heightIndex struct { versiondb.Commitable // Caches block height -> proposerVMBlockID. 
- heightsCache cache.Cacher + heightsCache cache.Cacher[uint64, ids.ID] heightDB database.Database metadataDB database.Database @@ -90,112 +69,15 @@ func NewHeightIndex(db database.Database, commitable versiondb.Commitable) Heigh return &heightIndex{ Commitable: commitable, - heightsCache: &cache.LRU{Size: cacheSize}, + heightsCache: &cache.LRU[uint64, ids.ID]{Size: cacheSize}, heightDB: prefixdb.New(heightPrefix, db), metadataDB: prefixdb.New(metadataPrefix, db), } } -func (hi *heightIndex) IsIndexEmpty() (bool, error) { - heightsIsEmpty, err := database.IsEmpty(hi.heightDB) - if err != nil { - return false, err - } - if !heightsIsEmpty { - return false, nil - } - return database.IsEmpty(hi.metadataDB) -} - -func (hi *heightIndex) HasIndexReset() (bool, error) { - return hi.metadataDB.Has(resetOccurredKey) -} - -func (hi *heightIndex) SetIndexHasReset() error { - return hi.metadataDB.Put(resetOccurredKey, nil) -} - -func (hi *heightIndex) ResetHeightIndex(log logging.Logger, baseDB versiondb.Commitable) error { - var ( - itHeight = hi.heightDB.NewIterator() - itMetadata = hi.metadataDB.NewIterator() - ) - defer func() { - itHeight.Release() - itMetadata.Release() - }() - - // clear height cache - hi.heightsCache.Flush() - - // clear heightDB - deleteCount := 0 - processingStart := time.Now() - for itHeight.Next() { - if err := hi.heightDB.Delete(itHeight.Key()); err != nil { - return err - } - - deleteCount++ - if deleteCount%deleteBatchSize == 0 { - if err := hi.Commit(); err != nil { - return err - } - if err := baseDB.Commit(); err != nil { - return err - } - - log.Info("deleted height index entries", - zap.Int("numDeleted", deleteCount), - ) - - // every deleteBatchSize ops, sleep to avoid clogging the node on this - processingDuration := time.Since(processingStart) - // Sleep [sleepDurationMultiplier]x (5x) the amount of time we spend processing the block - // to ensure the indexing does not bottleneck the node. 
- time.Sleep(processingDuration * sleepDurationMultiplier) - processingStart = time.Now() - - if err := itHeight.Error(); err != nil { - return err - } - - // release iterator so underlying db does not hold on to the previous state - itHeight.Release() - itHeight = hi.heightDB.NewIterator() - } - } - - // clear metadataDB - for itMetadata.Next() { - if err := hi.metadataDB.Delete(itMetadata.Key()); err != nil { - return err - } - } - - errs := wrappers.Errs{} - errs.Add( - itHeight.Error(), - itMetadata.Error(), - ) - if errs.Errored() { - return errs.Err - } - - if err := hi.SetIndexHasReset(); err != nil { - return err - } - - if err := hi.Commit(); err != nil { - return err - } - return baseDB.Commit() -} - func (hi *heightIndex) GetBlockIDAtHeight(height uint64) (ids.ID, error) { - if blkIDIntf, found := hi.heightsCache.Get(height); found { - res, _ := blkIDIntf.(ids.ID) - return res, nil + if blkID, found := hi.heightsCache.Get(height); found { + return blkID, nil } key := database.PackUInt64(height) diff --git a/avalanchego/vms/proposervm/state/block_height_index_test.go b/avalanchego/vms/proposervm/state/block_height_index_test.go deleted file mode 100644 index d7bbc37f..00000000 --- a/avalanchego/vms/proposervm/state/block_height_index_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package state - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/database/memdb" - "github.com/ava-labs/avalanchego/database/versiondb" - "github.com/ava-labs/avalanchego/utils/logging" -) - -func TestHasIndexReset(t *testing.T) { - a := require.New(t) - - db := memdb.New() - vdb := versiondb.New(db) - s := New(vdb) - wasReset, err := s.HasIndexReset() - a.NoError(err) - a.False(wasReset) - err = s.ResetHeightIndex(logging.NoLog{}, vdb) - a.NoError(err) - wasReset, err = s.HasIndexReset() - a.NoError(err) - a.True(wasReset) -} diff --git a/avalanchego/vms/proposervm/state/block_state.go b/avalanchego/vms/proposervm/state/block_state.go index 7cbde441..6d426d2e 100644 --- a/avalanchego/vms/proposervm/state/block_state.go +++ b/avalanchego/vms/proposervm/state/block_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -22,7 +22,7 @@ const blockCacheSize = 8192 var ( errBlockWrongVersion = errors.New("wrong version") - _ BlockState = &blockState{} + _ BlockState = (*blockState)(nil) ) type BlockState interface { @@ -33,7 +33,7 @@ type BlockState interface { type blockState struct { // Caches BlockID -> Block. If the Block is nil, that means the block is not // in storage. 
- blkCache cache.Cacher + blkCache cache.Cacher[ids.ID, *blockWrapper] db database.Database } @@ -47,16 +47,16 @@ type blockWrapper struct { func NewBlockState(db database.Database) BlockState { return &blockState{ - blkCache: &cache.LRU{Size: blockCacheSize}, + blkCache: &cache.LRU[ids.ID, *blockWrapper]{Size: blockCacheSize}, db: db, } } func NewMeteredBlockState(db database.Database, namespace string, metrics prometheus.Registerer) (BlockState, error) { - blkCache, err := metercacher.New( + blkCache, err := metercacher.New[ids.ID, *blockWrapper]( fmt.Sprintf("%s_block_cache", namespace), metrics, - &cache.LRU{Size: blockCacheSize}, + &cache.LRU[ids.ID, *blockWrapper]{Size: blockCacheSize}, ) return &blockState{ @@ -66,12 +66,8 @@ func NewMeteredBlockState(db database.Database, namespace string, metrics promet } func (s *blockState) GetBlock(blkID ids.ID) (block.Block, choices.Status, error) { - if blkIntf, found := s.blkCache.Get(blkID); found { - if blkIntf == nil { - return nil, choices.Unknown, database.ErrNotFound - } - blk, ok := blkIntf.(*blockWrapper) - if !ok { + if blk, found := s.blkCache.Get(blkID); found { + if blk == nil { return nil, choices.Unknown, database.ErrNotFound } return blk.block, blk.Status, nil @@ -96,7 +92,7 @@ func (s *blockState) GetBlock(blkID ids.ID) (block.Block, choices.Status, error) } // The key was in the database - blk, _, err := block.Parse(blkWrapper.Block) + blk, err := block.Parse(blkWrapper.Block) if err != nil { return nil, choices.Unknown, err } diff --git a/avalanchego/vms/proposervm/state/block_state_test.go b/avalanchego/vms/proposervm/state/block_state_test.go index 44c6a3c3..22b4d87b 100644 --- a/avalanchego/vms/proposervm/state/block_state_test.go +++ b/avalanchego/vms/proposervm/state/block_state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state @@ -33,7 +33,7 @@ func testBlockState(a *require.Assertions, bs BlockState) { cert := tlsCert.Leaf key := tlsCert.PrivateKey.(crypto.Signer) - b, err := block.BuildApricot( + b, err := block.Build( parentID, timestamp, pChainHeight, diff --git a/avalanchego/vms/proposervm/state/chain_state.go b/avalanchego/vms/proposervm/state/chain_state.go index c5311250..0f1a1bfb 100644 --- a/avalanchego/vms/proposervm/state/chain_state.go +++ b/avalanchego/vms/proposervm/state/chain_state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state @@ -15,7 +15,7 @@ const ( var ( lastAcceptedKey = []byte{lastAcceptedByte} - _ ChainState = &chainState{} + _ ChainState = (*chainState)(nil) ) type ChainState interface { diff --git a/avalanchego/vms/proposervm/state/chain_state_test.go b/avalanchego/vms/proposervm/state/chain_state_test.go index b1ae6192..ab14f422 100644 --- a/avalanchego/vms/proposervm/state/chain_state_test.go +++ b/avalanchego/vms/proposervm/state/chain_state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/proposervm/state/codec.go b/avalanchego/vms/proposervm/state/codec.go index c3b2692e..f7352380 100644 --- a/avalanchego/vms/proposervm/state/codec.go +++ b/avalanchego/vms/proposervm/state/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package state diff --git a/avalanchego/vms/proposervm/state/mock_state.go b/avalanchego/vms/proposervm/state/mock_state.go index cbf99bbc..da70f134 100644 --- a/avalanchego/vms/proposervm/state/mock_state.go +++ b/avalanchego/vms/proposervm/state/mock_state.go @@ -1,3 +1,6 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + // Code generated by MockGen. DO NOT EDIT. // Source: github.com/ava-labs/avalanchego/vms/proposervm/state (interfaces: State) @@ -7,10 +10,8 @@ package state import ( reflect "reflect" - versiondb "github.com/ava-labs/avalanchego/database/versiondb" ids "github.com/ava-labs/avalanchego/ids" choices "github.com/ava-labs/avalanchego/snow/choices" - logging "github.com/ava-labs/avalanchego/utils/logging" block "github.com/ava-labs/avalanchego/vms/proposervm/block" gomock "github.com/golang/mock/gomock" ) @@ -156,36 +157,6 @@ func (mr *MockStateMockRecorder) GetLastAccepted() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLastAccepted", reflect.TypeOf((*MockState)(nil).GetLastAccepted)) } -// HasIndexReset mocks base method. -func (m *MockState) HasIndexReset() (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasIndexReset") - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HasIndexReset indicates an expected call of HasIndexReset. -func (mr *MockStateMockRecorder) HasIndexReset() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasIndexReset", reflect.TypeOf((*MockState)(nil).HasIndexReset)) -} - -// IsIndexEmpty mocks base method. -func (m *MockState) IsIndexEmpty() (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsIndexEmpty") - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// IsIndexEmpty indicates an expected call of IsIndexEmpty. 
-func (mr *MockStateMockRecorder) IsIndexEmpty() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsIndexEmpty", reflect.TypeOf((*MockState)(nil).IsIndexEmpty)) -} - // PutBlock mocks base method. func (m *MockState) PutBlock(arg0 block.Block, arg1 choices.Status) error { m.ctrl.T.Helper() @@ -200,20 +171,6 @@ func (mr *MockStateMockRecorder) PutBlock(arg0, arg1 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PutBlock", reflect.TypeOf((*MockState)(nil).PutBlock), arg0, arg1) } -// ResetHeightIndex mocks base method. -func (m *MockState) ResetHeightIndex(arg0 logging.Logger, arg1 versiondb.Commitable) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ResetHeightIndex", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// ResetHeightIndex indicates an expected call of ResetHeightIndex. -func (mr *MockStateMockRecorder) ResetHeightIndex(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ResetHeightIndex", reflect.TypeOf((*MockState)(nil).ResetHeightIndex), arg0, arg1) -} - // SetBlockIDAtHeight mocks base method. func (m *MockState) SetBlockIDAtHeight(arg0 uint64, arg1 ids.ID) error { m.ctrl.T.Helper() @@ -256,20 +213,6 @@ func (mr *MockStateMockRecorder) SetForkHeight(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetForkHeight", reflect.TypeOf((*MockState)(nil).SetForkHeight), arg0) } -// SetIndexHasReset mocks base method. -func (m *MockState) SetIndexHasReset() error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "SetIndexHasReset") - ret0, _ := ret[0].(error) - return ret0 -} - -// SetIndexHasReset indicates an expected call of SetIndexHasReset. 
-func (mr *MockStateMockRecorder) SetIndexHasReset() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetIndexHasReset", reflect.TypeOf((*MockState)(nil).SetIndexHasReset)) -} - // SetLastAccepted mocks base method. func (m *MockState) SetLastAccepted(arg0 ids.ID) error { m.ctrl.T.Helper() diff --git a/avalanchego/vms/proposervm/state/state.go b/avalanchego/vms/proposervm/state/state.go index 95d301f1..c8b80b94 100644 --- a/avalanchego/vms/proposervm/state/state.go +++ b/avalanchego/vms/proposervm/state/state.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/proposervm/state/state_test.go b/avalanchego/vms/proposervm/state/state_test.go index 7ee8815d..97980fc3 100644 --- a/avalanchego/vms/proposervm/state/state_test.go +++ b/avalanchego/vms/proposervm/state/state_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package state diff --git a/avalanchego/vms/proposervm/state_summary.go b/avalanchego/vms/proposervm/state_summary.go index 5e50dad5..629d2c64 100644 --- a/avalanchego/vms/proposervm/state_summary.go +++ b/avalanchego/vms/proposervm/state_summary.go @@ -1,25 +1,28 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "context" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/vms/proposervm/summary" ) -var _ block.StateSummary = &stateSummary{} +var _ block.StateSummary = (*stateSummary)(nil) // stateSummary implements block.StateSummary by layering three objects: -// 1. 
[statelessSummary] carries all summary marshallable content along with -// data immediately retrievable from it. -// 2. [innerSummary] reports the height of the summary as well as notifying the -// inner vm of the summary's acceptance. -// 3. [block] is used to update the proposervm's last accepted block upon -// Accept. +// +// 1. [statelessSummary] carries all summary marshallable content along with +// data immediately retrievable from it. +// 2. [innerSummary] reports the height of the summary as well as notifying the +// inner vm of the summary's acceptance. +// 3. [block] is used to update the proposervm's last accepted block upon +// Accept. // // Note: summary.StatelessSummary contains the data to build both [innerSummary] -// and [block]. +// and [block]. type stateSummary struct { summary.StateSummary @@ -36,28 +39,28 @@ func (s *stateSummary) Height() uint64 { return s.innerSummary.Height() } -func (s *stateSummary) Accept() (bool, error) { +func (s *stateSummary) Accept(ctx context.Context) (block.StateSyncMode, error) { // If we have already synced up to or past this state summary, we do not // want to sync to it. if s.vm.lastAcceptedHeight >= s.Height() { - return false, nil + return block.StateSyncSkipped, nil } // set fork height first, before accepting proposerVM full block // which updates height index (among other indices) if err := s.vm.State.SetForkHeight(s.StateSummary.ForkHeight()); err != nil { - return false, err + return block.StateSyncSkipped, err } // We store the full proposerVM block associated with the summary // and update height index with it, so that state sync could resume // after a shutdown. if err := s.block.acceptOuterBlk(); err != nil { - return false, err + return block.StateSyncSkipped, err } // innerSummary.Accept may fail with the proposerVM block and index already // updated. The error would be treated as fatal and the chain would then be // repaired upon the VM restart. 
- return s.innerSummary.Accept() + return s.innerSummary.Accept(ctx) } diff --git a/avalanchego/vms/proposervm/state_syncable_vm.go b/avalanchego/vms/proposervm/state_syncable_vm.go index e28cb798..da86d8c3 100644 --- a/avalanchego/vms/proposervm/state_syncable_vm.go +++ b/avalanchego/vms/proposervm/state_syncable_vm.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( + "context" "fmt" "go.uber.org/zap" @@ -13,50 +14,50 @@ import ( "github.com/ava-labs/avalanchego/vms/proposervm/summary" ) -func (vm *VM) StateSyncEnabled() (bool, error) { +func (vm *VM) StateSyncEnabled(ctx context.Context) (bool, error) { if vm.ssVM == nil { return false, nil } // if vm implements Snowman++, a block height index must be available // to support state sync - if vm.VerifyHeightIndex() != nil { + if vm.VerifyHeightIndex(ctx) != nil { return false, nil } - return vm.ssVM.StateSyncEnabled() + return vm.ssVM.StateSyncEnabled(ctx) } -func (vm *VM) GetOngoingSyncStateSummary() (block.StateSummary, error) { +func (vm *VM) GetOngoingSyncStateSummary(ctx context.Context) (block.StateSummary, error) { if vm.ssVM == nil { return nil, block.ErrStateSyncableVMNotImplemented } - innerSummary, err := vm.ssVM.GetOngoingSyncStateSummary() + innerSummary, err := vm.ssVM.GetOngoingSyncStateSummary(ctx) if err != nil { return nil, err // includes database.ErrNotFound case } - return vm.buildStateSummary(innerSummary) + return vm.buildStateSummary(ctx, innerSummary) } -func (vm *VM) GetLastStateSummary() (block.StateSummary, error) { +func (vm *VM) GetLastStateSummary(ctx context.Context) (block.StateSummary, error) { if vm.ssVM == nil { return nil, block.ErrStateSyncableVMNotImplemented } // Extract inner vm's last state summary - innerSummary, err := vm.ssVM.GetLastStateSummary() + innerSummary, err := vm.ssVM.GetLastStateSummary(ctx) 
if err != nil { return nil, err // including database.ErrNotFound case } - return vm.buildStateSummary(innerSummary) + return vm.buildStateSummary(ctx, innerSummary) } // Note: it's important that ParseStateSummary do not use any index or state // to allow summaries being parsed also by freshly started node with no previous state. -func (vm *VM) ParseStateSummary(summaryBytes []byte) (block.StateSummary, error) { +func (vm *VM) ParseStateSummary(ctx context.Context, summaryBytes []byte) (block.StateSummary, error) { if vm.ssVM == nil { return nil, block.ErrStateSyncableVMNotImplemented } @@ -64,14 +65,14 @@ func (vm *VM) ParseStateSummary(summaryBytes []byte) (block.StateSummary, error) statelessSummary, err := summary.Parse(summaryBytes) if err != nil { // it may be a preFork summary - return vm.ssVM.ParseStateSummary(summaryBytes) + return vm.ssVM.ParseStateSummary(ctx, summaryBytes) } - innerSummary, err := vm.ssVM.ParseStateSummary(statelessSummary.InnerSummaryBytes()) + innerSummary, err := vm.ssVM.ParseStateSummary(ctx, statelessSummary.InnerSummaryBytes()) if err != nil { return nil, fmt.Errorf("could not parse inner summary due to: %w", err) } - block, err := vm.parsePostForkBlock(statelessSummary.BlockBytes()) + block, err := vm.parsePostForkBlock(ctx, statelessSummary.BlockBytes()) if err != nil { return nil, fmt.Errorf("could not parse proposervm block bytes from summary due to: %w", err) } @@ -84,24 +85,24 @@ func (vm *VM) ParseStateSummary(summaryBytes []byte) (block.StateSummary, error) }, nil } -func (vm *VM) GetStateSummary(height uint64) (block.StateSummary, error) { +func (vm *VM) GetStateSummary(ctx context.Context, height uint64) (block.StateSummary, error) { if vm.ssVM == nil { return nil, block.ErrStateSyncableVMNotImplemented } - innerSummary, err := vm.ssVM.GetStateSummary(height) + innerSummary, err := vm.ssVM.GetStateSummary(ctx, height) if err != nil { return nil, err // including database.ErrNotFound case } - return 
vm.buildStateSummary(innerSummary) + return vm.buildStateSummary(ctx, innerSummary) } // Note: building state summary requires a well formed height index. -func (vm *VM) buildStateSummary(innerSummary block.StateSummary) (block.StateSummary, error) { +func (vm *VM) buildStateSummary(ctx context.Context, innerSummary block.StateSummary) (block.StateSummary, error) { // if vm implements Snowman++, a block height index must be available // to support state sync - if err := vm.VerifyHeightIndex(); err != nil { + if err := vm.VerifyHeightIndex(ctx); err != nil { return nil, fmt.Errorf("could not build state summary: %w", err) } @@ -124,7 +125,7 @@ func (vm *VM) buildStateSummary(innerSummary block.StateSummary) (block.StateSum } height := innerSummary.Height() - blkID, err := vm.GetBlockIDAtHeight(height) + blkID, err := vm.GetBlockIDAtHeight(ctx, height) if err != nil { vm.ctx.Log.Debug("failed to fetch proposervm block ID", zap.Uint64("height", height), @@ -132,7 +133,7 @@ func (vm *VM) buildStateSummary(innerSummary block.StateSummary) (block.StateSum ) return nil, err } - block, err := vm.getPostForkBlock(blkID) + block, err := vm.getPostForkBlock(ctx, blkID) if err != nil { vm.ctx.Log.Warn("failed to fetch proposervm block", zap.Stringer("blkID", blkID), diff --git a/avalanchego/vms/proposervm/state_syncable_vm_test.go b/avalanchego/vms/proposervm/state_syncable_vm_test.go index 1ad05bc2..a7e40131 100644 --- a/avalanchego/vms/proposervm/state_syncable_vm_test.go +++ b/avalanchego/vms/proposervm/state_syncable_vm_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm import ( "bytes" + "context" "crypto" "errors" "testing" @@ -14,8 +15,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/manager" - "github.com/ava-labs/avalanchego/database/prefixdb" - "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" @@ -23,29 +22,11 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/version" - "github.com/ava-labs/avalanchego/vms/proposervm/state" statelessblock "github.com/ava-labs/avalanchego/vms/proposervm/block" ) -func stopHeightReindexing(t *testing.T, coreVM *fullVM, dbMan manager.Manager) { - rawDB := dbMan.Current().Database - prefixDB := prefixdb.New(dbPrefix, rawDB) - db := versiondb.New(prefixDB) - vmState := state.New(db) - - if err := vmState.SetIndexHasReset(); err != nil { - t.Fatal("could not preload key to vm state") - } - if err := vmState.Commit(); err != nil { - t.Fatal("could not commit preloaded key") - } - if err := db.Commit(); err != nil { - t.Fatal("could not commit preloaded key") - } - - coreVM.VerifyHeightIndexF = func() error { return nil } -} +var errUnknownSummary = errors.New("unknown summary") func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { innerVM := &fullVM{ @@ -62,10 +43,10 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { }, } - // Preload DB with key showing height index has been purged of rejected blocks - dbManager := manager.NewMemDB(version.Semantic1_0_0) - dbManager = dbManager.NewPrefixDBManager([]byte{}) - stopHeightReindexing(t, innerVM, dbManager) + // signal height index is complete + innerVM.VerifyHeightIndexF = func(context.Context) error { + return nil + } // load innerVM expectations innerGenesisBlk := &snowman.TestBlock{ @@ -75,25 +56,50 @@ func 
helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { HeightV: 0, BytesV: []byte("genesis state"), } - innerVM.InitializeF = func(*snow.Context, manager.Manager, + innerVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { return nil } - innerVM.VerifyHeightIndexF = func() error { return nil } - innerVM.LastAcceptedF = func() (ids.ID, error) { return innerGenesisBlk.ID(), nil } - innerVM.GetBlockF = func(i ids.ID) (snowman.Block, error) { return innerGenesisBlk, nil } + innerVM.VerifyHeightIndexF = func(context.Context) error { + return nil + } + innerVM.LastAcceptedF = func(context.Context) (ids.ID, error) { + return innerGenesisBlk.ID(), nil + } + innerVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { + return innerGenesisBlk, nil + } // createVM - vm := New(innerVM, time.Time{}, uint64(0), time.Time{}) + dbManager := manager.NewMemDB(version.Semantic1_0_0) + dbManager = dbManager.NewPrefixDBManager([]byte{}) + + vm := New( + innerVM, + time.Time{}, + 0, + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, + ) ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - ctx.StakingCertLeaf = pTestCert.Leaf - ctx.StakingLeafSigner = pTestCert.PrivateKey.(crypto.Signer) - if err := vm.Initialize(ctx, dbManager, innerGenesisBlk.Bytes(), nil, nil, nil, nil, nil); err != nil { + err := vm.Initialize( + context.Background(), + ctx, + dbManager, + innerGenesisBlk.Bytes(), + nil, + nil, + nil, + nil, + nil, + ) + if err != nil { t.Fatalf("failed to initialize proposerVM with %s", err) } @@ -107,14 +113,18 @@ func TestStateSyncEnabled(t *testing.T) { // ProposerVM State Sync disabled if innerVM State sync is disabled vm.hIndexer.MarkRepaired(true) - innerVM.StateSyncEnabledF = func() (bool, error) { return false, nil } - enabled, err := vm.StateSyncEnabled() + innerVM.StateSyncEnabledF = 
func(context.Context) (bool, error) { + return false, nil + } + enabled, err := vm.StateSyncEnabled(context.Background()) require.NoError(err) require.False(enabled) // ProposerVM State Sync enabled if innerVM State sync is enabled - innerVM.StateSyncEnabledF = func() (bool, error) { return true, nil } - enabled, err = vm.StateSyncEnabled() + innerVM.StateSyncEnabledF = func(context.Context) (bool, error) { + return true, nil + } + enabled, err = vm.StateSyncEnabled(context.Background()) require.NoError(err) require.True(enabled) } @@ -131,31 +141,31 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { } // No ongoing state summary case - innerVM.GetOngoingSyncStateSummaryF = func() (block.StateSummary, error) { + innerVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return nil, database.ErrNotFound } - summary, err := vm.GetOngoingSyncStateSummary() + summary, err := vm.GetOngoingSyncStateSummary(context.Background()) require.True(err == database.ErrNotFound) require.True(summary == nil) // Pre fork summary case, fork height not reached hence not set yet - innerVM.GetOngoingSyncStateSummaryF = func() (block.StateSummary, error) { + innerVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return innerSummary, nil } _, err = vm.GetForkHeight() require.Equal(err, database.ErrNotFound) - summary, err = vm.GetOngoingSyncStateSummary() + summary, err = vm.GetOngoingSyncStateSummary(context.Background()) require.NoError(err) require.True(summary.ID() == innerSummary.ID()) require.True(summary.Height() == innerSummary.Height()) require.True(bytes.Equal(summary.Bytes(), innerSummary.Bytes())) // Pre fork summary case, fork height already reached - innerVM.GetOngoingSyncStateSummaryF = func() (block.StateSummary, error) { + innerVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return innerSummary, nil } require.NoError(vm.SetForkHeight(innerSummary.Height() + 
1)) - summary, err = vm.GetOngoingSyncStateSummary() + summary, err = vm.GetOngoingSyncStateSummary(context.Background()) require.NoError(err) require.True(summary.ID() == innerSummary.ID()) require.True(summary.Height() == innerSummary.Height()) @@ -171,19 +181,19 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { TimestampV: vm.Time(), HeightV: innerSummary.Height(), } - innerVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.True(bytes.Equal(b, innerBlk.Bytes())) return innerBlk, nil } - slb, err := statelessblock.BuildApricot( + slb, err := statelessblock.Build( vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.StakingCertLeaf, + vm.stakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.ctx.StakingLeafSigner, + vm.stakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -196,7 +206,7 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { } require.NoError(vm.storePostForkBlock(proBlk)) - summary, err = vm.GetOngoingSyncStateSummary() + summary, err = vm.GetOngoingSyncStateSummary(context.Background()) require.NoError(err) require.True(summary.Height() == innerSummary.Height()) } @@ -213,31 +223,31 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { } // No last state summary case - innerVM.GetLastStateSummaryF = func() (block.StateSummary, error) { + innerVM.GetLastStateSummaryF = func(context.Context) (block.StateSummary, error) { return nil, database.ErrNotFound } - summary, err := vm.GetLastStateSummary() + summary, err := vm.GetLastStateSummary(context.Background()) require.True(err == database.ErrNotFound) require.True(summary == nil) // Pre fork summary case, fork height not reached hence not set yet - innerVM.GetLastStateSummaryF = func() (block.StateSummary, error) { + innerVM.GetLastStateSummaryF = func(context.Context) (block.StateSummary, error) { return innerSummary, nil } _, err = 
vm.GetForkHeight() require.Equal(err, database.ErrNotFound) - summary, err = vm.GetLastStateSummary() + summary, err = vm.GetLastStateSummary(context.Background()) require.NoError(err) require.True(summary.ID() == innerSummary.ID()) require.True(summary.Height() == innerSummary.Height()) require.True(bytes.Equal(summary.Bytes(), innerSummary.Bytes())) // Pre fork summary case, fork height already reached - innerVM.GetLastStateSummaryF = func() (block.StateSummary, error) { + innerVM.GetLastStateSummaryF = func(context.Context) (block.StateSummary, error) { return innerSummary, nil } require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) - summary, err = vm.GetLastStateSummary() + summary, err = vm.GetLastStateSummary(context.Background()) require.NoError(err) require.True(summary.ID() == innerSummary.ID()) require.True(summary.Height() == innerSummary.Height()) @@ -253,19 +263,19 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { TimestampV: vm.Time(), HeightV: innerSummary.Height(), } - innerVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.True(bytes.Equal(b, innerBlk.Bytes())) return innerBlk, nil } - slb, err := statelessblock.BuildApricot( + slb, err := statelessblock.Build( vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.StakingCertLeaf, + vm.stakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.ctx.StakingLeafSigner, + vm.stakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -278,7 +288,7 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { } require.NoError(vm.storePostForkBlock(proBlk)) - summary, err = vm.GetLastStateSummary() + summary, err = vm.GetLastStateSummary(context.Background()) require.NoError(err) require.True(summary.Height() == innerSummary.Height()) } @@ -296,33 +306,33 @@ func TestStateSyncGetStateSummary(t *testing.T) { } // No state summary case - innerVM.GetStateSummaryF = func(h uint64) 
(block.StateSummary, error) { + innerVM.GetStateSummaryF = func(context.Context, uint64) (block.StateSummary, error) { return nil, database.ErrNotFound } - summary, err := vm.GetStateSummary(reqHeight) + summary, err := vm.GetStateSummary(context.Background(), reqHeight) require.True(err == database.ErrNotFound) require.True(summary == nil) // Pre fork summary case, fork height not reached hence not set yet - innerVM.GetStateSummaryF = func(h uint64) (block.StateSummary, error) { + innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { require.True(h == reqHeight) return innerSummary, nil } _, err = vm.GetForkHeight() require.Equal(err, database.ErrNotFound) - summary, err = vm.GetStateSummary(reqHeight) + summary, err = vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) require.True(summary.ID() == innerSummary.ID()) require.True(summary.Height() == innerSummary.Height()) require.True(bytes.Equal(summary.Bytes(), innerSummary.Bytes())) // Pre fork summary case, fork height already reached - innerVM.GetStateSummaryF = func(h uint64) (block.StateSummary, error) { + innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { require.True(h == reqHeight) return innerSummary, nil } require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) - summary, err = vm.GetStateSummary(reqHeight) + summary, err = vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) require.True(summary.ID() == innerSummary.ID()) require.True(summary.Height() == innerSummary.Height()) @@ -338,19 +348,19 @@ func TestStateSyncGetStateSummary(t *testing.T) { TimestampV: vm.Time(), HeightV: innerSummary.Height(), } - innerVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.True(bytes.Equal(b, innerBlk.Bytes())) return innerBlk, nil } - slb, err := statelessblock.BuildApricot( + slb, err := 
statelessblock.Build( vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.StakingCertLeaf, + vm.stakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.ctx.StakingLeafSigner, + vm.stakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -363,7 +373,7 @@ func TestStateSyncGetStateSummary(t *testing.T) { } require.NoError(vm.storePostForkBlock(proBlk)) - summary, err = vm.GetStateSummary(reqHeight) + summary, err = vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) require.True(summary.Height() == innerSummary.Height()) } @@ -378,21 +388,21 @@ func TestParseStateSummary(t *testing.T) { HeightV: reqHeight, BytesV: []byte{'i', 'n', 'n', 'e', 'r'}, } - innerVM.ParseStateSummaryF = func(summaryBytes []byte) (block.StateSummary, error) { - require.True(bytes.Equal(summaryBytes, innerSummary.Bytes())) + innerVM.ParseStateSummaryF = func(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { + require.Equal(summaryBytes, innerSummary.Bytes()) return innerSummary, nil } - innerVM.GetStateSummaryF = func(h uint64) (block.StateSummary, error) { + innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { require.True(h == reqHeight) return innerSummary, nil } // Get a pre fork block than parse it require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) - summary, err := vm.GetStateSummary(reqHeight) + summary, err := vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) - parsedSummary, err := vm.ParseStateSummary(summary.Bytes()) + parsedSummary, err := vm.ParseStateSummary(context.Background(), summary.Bytes()) require.NoError(err) require.True(summary.ID() == parsedSummary.ID()) require.True(summary.Height() == parsedSummary.Height()) @@ -408,19 +418,19 @@ func TestParseStateSummary(t *testing.T) { TimestampV: vm.Time(), HeightV: innerSummary.Height(), } - innerVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + innerVM.ParseBlockF = func(_ 
context.Context, b []byte) (snowman.Block, error) { require.True(bytes.Equal(b, innerBlk.Bytes())) return innerBlk, nil } - slb, err := statelessblock.BuildApricot( + slb, err := statelessblock.Build( vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.StakingCertLeaf, + vm.stakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.ctx.StakingLeafSigner, + vm.stakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -433,10 +443,10 @@ func TestParseStateSummary(t *testing.T) { } require.NoError(vm.storePostForkBlock(proBlk)) require.NoError(vm.SetForkHeight(innerSummary.Height() - 1)) - summary, err = vm.GetStateSummary(reqHeight) + summary, err = vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) - parsedSummary, err = vm.ParseStateSummary(summary.Bytes()) + parsedSummary, err = vm.ParseStateSummary(context.Background(), summary.Bytes()) require.NoError(err) require.True(summary.ID() == parsedSummary.ID()) require.True(summary.Height() == parsedSummary.Height()) @@ -464,23 +474,23 @@ func TestStateSummaryAccept(t *testing.T) { TimestampV: vm.Time(), HeightV: innerSummary.Height(), } - innerVM.GetStateSummaryF = func(h uint64) (block.StateSummary, error) { + innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { require.True(h == reqHeight) return innerSummary, nil } - innerVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.True(bytes.Equal(b, innerBlk.Bytes())) return innerBlk, nil } - slb, err := statelessblock.BuildApricot( + slb, err := statelessblock.Build( vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.StakingCertLeaf, + vm.stakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.ctx.StakingLeafSigner, + vm.stakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -493,20 +503,24 @@ func TestStateSummaryAccept(t *testing.T) { } 
require.NoError(vm.storePostForkBlock(proBlk)) - summary, err := vm.GetStateSummary(reqHeight) + summary, err := vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) // test Accept accepted - innerSummary.AcceptF = func() (bool, error) { return true, nil } - accepted, err := summary.Accept() + innerSummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { + return block.StateSyncStatic, nil + } + status, err := summary.Accept(context.Background()) require.NoError(err) - require.True(accepted) + require.Equal(block.StateSyncStatic, status) // test Accept skipped - innerSummary.AcceptF = func() (bool, error) { return false, nil } - accepted, err = summary.Accept() + innerSummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { + return block.StateSyncSkipped, nil + } + status, err = summary.Accept(context.Background()) require.NoError(err) - require.False(accepted) + require.Equal(block.StateSyncSkipped, status) } func TestStateSummaryAcceptOlderBlock(t *testing.T) { @@ -534,23 +548,23 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { TimestampV: vm.Time(), HeightV: innerSummary.Height(), } - innerVM.GetStateSummaryF = func(h uint64) (block.StateSummary, error) { + innerVM.GetStateSummaryF = func(_ context.Context, h uint64) (block.StateSummary, error) { require.True(h == reqHeight) return innerSummary, nil } - innerVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + innerVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.True(bytes.Equal(b, innerBlk.Bytes())) return innerBlk, nil } - slb, err := statelessblock.BuildApricot( + slb, err := statelessblock.Build( vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.ctx.StakingCertLeaf, + vm.stakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.ctx.StakingLeafSigner, + vm.stakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -563,14 +577,16 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { } 
require.NoError(vm.storePostForkBlock(proBlk)) - summary, err := vm.GetStateSummary(reqHeight) + summary, err := vm.GetStateSummary(context.Background(), reqHeight) require.NoError(err) // test Accept skipped - innerSummary.AcceptF = func() (bool, error) { return true, nil } - accepted, err := summary.Accept() + innerSummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { + return block.StateSyncStatic, nil + } + status, err := summary.Accept(context.Background()) require.NoError(err) - require.False(accepted) + require.Equal(block.StateSyncSkipped, status) } func TestNoStateSummariesServedWhileRepairingHeightIndex(t *testing.T) { @@ -578,7 +594,7 @@ func TestNoStateSummariesServedWhileRepairingHeightIndex(t *testing.T) { // Note: by default proVM is built such that heightIndex will be considered complete coreVM, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks - require.NoError(proVM.VerifyHeightIndex()) + require.NoError(proVM.VerifyHeightIndex(context.Background())) // let coreVM be always ready to serve summaries summaryHeight := uint64(2022) @@ -588,31 +604,31 @@ func TestNoStateSummariesServedWhileRepairingHeightIndex(t *testing.T) { HeightV: summaryHeight, BytesV: []byte{'c', 'o', 'r', 'e', 'S', 'u', 'm', 'm', 'a', 'r', 'y'}, } - coreVM.GetLastStateSummaryF = func() (block.StateSummary, error) { + coreVM.GetLastStateSummaryF = func(context.Context) (block.StateSummary, error) { return coreStateSummary, nil } - coreVM.GetStateSummaryF = func(height uint64) (block.StateSummary, error) { + coreVM.GetStateSummaryF = func(_ context.Context, height uint64) (block.StateSummary, error) { if height != summaryHeight { - return nil, errors.New("requested unexpected summary") + return nil, errUnknownSummary } return coreStateSummary, nil } // set height index to reindexing proVM.hIndexer.MarkRepaired(false) - require.ErrorIs(proVM.VerifyHeightIndex(), block.ErrIndexIncomplete) + 
require.ErrorIs(proVM.VerifyHeightIndex(context.Background()), block.ErrIndexIncomplete) - _, err := proVM.GetLastStateSummary() + _, err := proVM.GetLastStateSummary(context.Background()) require.ErrorIs(err, block.ErrIndexIncomplete) - _, err = proVM.GetStateSummary(summaryHeight) + _, err = proVM.GetStateSummary(context.Background(), summaryHeight) require.ErrorIs(err, block.ErrIndexIncomplete) // declare height index complete proVM.hIndexer.MarkRepaired(true) - require.NoError(proVM.VerifyHeightIndex()) + require.NoError(proVM.VerifyHeightIndex(context.Background())) - summary, err := proVM.GetLastStateSummary() + summary, err := proVM.GetLastStateSummary(context.Background()) require.NoError(err) require.True(summary.Height() == summaryHeight) } diff --git a/avalanchego/vms/proposervm/summary/build.go b/avalanchego/vms/proposervm/summary/build.go index 484dd8cb..35e2e179 100644 --- a/avalanchego/vms/proposervm/summary/build.go +++ b/avalanchego/vms/proposervm/summary/build.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary diff --git a/avalanchego/vms/proposervm/summary/build_test.go b/avalanchego/vms/proposervm/summary/build_test.go index 361ec364..0e15ac3c 100644 --- a/avalanchego/vms/proposervm/summary/build_test.go +++ b/avalanchego/vms/proposervm/summary/build_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary diff --git a/avalanchego/vms/proposervm/summary/codec.go b/avalanchego/vms/proposervm/summary/codec.go index 2ad2508e..a71350f3 100644 --- a/avalanchego/vms/proposervm/summary/codec.go +++ b/avalanchego/vms/proposervm/summary/codec.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary diff --git a/avalanchego/vms/proposervm/summary/parse.go b/avalanchego/vms/proposervm/summary/parse.go index 34538ac6..3d929544 100644 --- a/avalanchego/vms/proposervm/summary/parse.go +++ b/avalanchego/vms/proposervm/summary/parse.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary diff --git a/avalanchego/vms/proposervm/summary/parse_test.go b/avalanchego/vms/proposervm/summary/parse_test.go index 14f586cf..3d527e27 100644 --- a/avalanchego/vms/proposervm/summary/parse_test.go +++ b/avalanchego/vms/proposervm/summary/parse_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package summary diff --git a/avalanchego/vms/proposervm/summary/state_summary.go b/avalanchego/vms/proposervm/summary/state_summary.go index badbddea..25c29b6e 100644 --- a/avalanchego/vms/proposervm/summary/state_summary.go +++ b/avalanchego/vms/proposervm/summary/state_summary.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package summary @@ -7,7 +7,7 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -var _ StateSummary = &stateSummary{} +var _ StateSummary = (*stateSummary)(nil) type StateSummary interface { ID() ids.ID @@ -30,8 +30,22 @@ type stateSummary struct { bytes []byte } -func (s *stateSummary) ID() ids.ID { return s.id } -func (s *stateSummary) ForkHeight() uint64 { return s.Height } -func (s *stateSummary) BlockBytes() []byte { return s.Block } -func (s *stateSummary) InnerSummaryBytes() []byte { return s.InnerSummary } -func (s *stateSummary) Bytes() []byte { return s.bytes } +func (s *stateSummary) ID() ids.ID { + return s.id +} + +func (s *stateSummary) ForkHeight() uint64 { + return s.Height +} + +func (s *stateSummary) BlockBytes() []byte { + return s.Block +} + +func (s *stateSummary) InnerSummaryBytes() []byte { + return s.InnerSummary +} + +func (s *stateSummary) Bytes() []byte { + return s.bytes +} diff --git a/avalanchego/vms/proposervm/tree/tree.go b/avalanchego/vms/proposervm/tree/tree.go index 92ed031a..63a36dd6 100644 --- a/avalanchego/vms/proposervm/tree/tree.go +++ b/avalanchego/vms/proposervm/tree/tree.go @@ -1,9 +1,13 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package tree import ( + "context" + + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" ) @@ -18,7 +22,7 @@ type Tree interface { // Accept marks the provided block as accepted and rejects every conflicting // block. 
- Accept(snowman.Block) error + Accept(context.Context, snowman.Block) error } type tree struct { @@ -51,9 +55,9 @@ func (t *tree) Get(blk snowman.Block) (snowman.Block, bool) { return originalBlk, exists } -func (t *tree) Accept(blk snowman.Block) error { +func (t *tree) Accept(ctx context.Context, blk snowman.Block) error { // accept the provided block - if err := blk.Accept(); err != nil { + if err := blk.Accept(ctx); err != nil { return err } @@ -65,10 +69,7 @@ func (t *tree) Accept(blk snowman.Block) error { delete(t.nodes, parentID) // mark the siblings of the accepted block as rejectable - childrenToReject := make([]snowman.Block, 0, len(children)) - for _, child := range children { - childrenToReject = append(childrenToReject, child) - } + childrenToReject := maps.Values(children) // reject all the rejectable blocks for len(childrenToReject) > 0 { @@ -77,16 +78,14 @@ func (t *tree) Accept(blk snowman.Block) error { childrenToReject = childrenToReject[:i] // reject the block - if err := child.Reject(); err != nil { + if err := child.Reject(ctx); err != nil { return err } // mark the progeny of this block as being rejectable blkID := child.ID() children := t.nodes[blkID] - for _, child := range children { - childrenToReject = append(childrenToReject, child) - } + childrenToReject = append(childrenToReject, maps.Values(children)...) delete(t.nodes, blkID) } return nil diff --git a/avalanchego/vms/proposervm/tree/tree_test.go b/avalanchego/vms/proposervm/tree/tree_test.go index 40f25127..979943b8 100644 --- a/avalanchego/vms/proposervm/tree/tree_test.go +++ b/avalanchego/vms/proposervm/tree/tree_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package tree import ( + "context" "testing" "github.com/stretchr/testify/require" @@ -42,7 +43,7 @@ func TestAcceptSingleBlock(t *testing.T) { _, contains = tr.Get(block) require.True(contains) - err := tr.Accept(block) + err := tr.Accept(context.Background(), block) require.NoError(err) require.Equal(choices.Accepted, block.Status()) } @@ -77,7 +78,7 @@ func TestAcceptBlockConflict(t *testing.T) { _, contains = tr.Get(blockToReject) require.True(contains) - err := tr.Accept(blockToAccept) + err := tr.Accept(context.Background(), blockToAccept) require.NoError(err) require.Equal(choices.Accepted, blockToAccept.Status()) require.Equal(choices.Rejected, blockToReject.Status()) @@ -125,7 +126,7 @@ func TestAcceptChainConflict(t *testing.T) { _, contains = tr.Get(blockToRejectChild) require.True(contains) - err := tr.Accept(blockToAccept) + err := tr.Accept(context.Background(), blockToAccept) require.NoError(err) require.Equal(choices.Accepted, blockToAccept.Status()) require.Equal(choices.Rejected, blockToReject.Status()) diff --git a/avalanchego/vms/proposervm/vm.go b/avalanchego/vms/proposervm/vm.go index ffa6acde..443f1e11 100644 --- a/avalanchego/vms/proposervm/vm.go +++ b/avalanchego/vms/proposervm/vm.go @@ -1,11 +1,12 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( "context" - "errors" + "crypto" + "crypto/x509" "fmt" "time" @@ -39,38 +40,40 @@ import ( ) const ( - // minBlockDelay should be kept as whole seconds because block timestamps - // are only specific to the second. - minBlockDelay = time.Second + // DefaultMinBlockDelay should be kept as whole seconds because block + // timestamps are only specific to the second. 
+ DefaultMinBlockDelay = time.Second + checkIndexedFrequency = 10 * time.Second innerBlkCacheSize = 512 ) var ( - _ block.ChainVM = &VM{} - _ block.BatchedChainVM = &VM{} - _ block.HeightIndexedChainVM = &VM{} - _ block.StateSyncableVM = &VM{} + _ block.ChainVM = (*VM)(nil) + _ block.BatchedChainVM = (*VM)(nil) + _ block.HeightIndexedChainVM = (*VM)(nil) + _ block.StateSyncableVM = (*VM)(nil) dbPrefix = []byte("proposervm") - - errBanffBlockBeforeBanff = errors.New("block requiring Banff issued before Banff activated") ) type VM struct { block.ChainVM - bVM block.BatchedChainVM - hVM block.HeightIndexedChainVM - ssVM block.StateSyncableVM + blockBuilderVM block.BuildBlockWithContextChainVM + batchedVM block.BatchedChainVM + hVM block.HeightIndexedChainVM + ssVM block.StateSyncableVM activationTime time.Time minimumPChainHeight uint64 - - banffActivationTime time.Time + minBlkDelay time.Duration + // block signer + stakingLeafSigner crypto.Signer + // block certificate + stakingCertLeaf *x509.Certificate state.State - hIndexer indexer.HeightIndexer - resetHeightIndexOngoing utils.AtomicBool + hIndexer indexer.HeightIndexer proposer.Windower tree.Tree @@ -89,7 +92,7 @@ type VM struct { // Only contains post-fork blocks near the tip so that the cache doesn't get // filled with random blocks every time this node parses blocks while // processing a GetAncestors message from a bootstrapping node. - innerBlkCache cache.Cacher + innerBlkCache cache.Cacher[ids.ID, snowman.Block] preferred ids.ID consensusState snow.State context context.Context @@ -102,34 +105,40 @@ type VM struct { // lastAcceptedHeight is set to the last accepted PostForkBlock's height. lastAcceptedHeight uint64 - - activationTimeBanff time.Time } +// New performs best when [minBlkDelay] is whole seconds. This is because block +// timestamps are only specific to the second. 
func New( vm block.ChainVM, activationTime time.Time, minimumPChainHeight uint64, - banffActivationTime time.Time, + minBlkDelay time.Duration, + stakingLeafSigner crypto.Signer, + stakingCertLeaf *x509.Certificate, ) *VM { - bVM, _ := vm.(block.BatchedChainVM) + blockBuilderVM, _ := vm.(block.BuildBlockWithContextChainVM) + batchedVM, _ := vm.(block.BatchedChainVM) hVM, _ := vm.(block.HeightIndexedChainVM) ssVM, _ := vm.(block.StateSyncableVM) return &VM{ - ChainVM: vm, - bVM: bVM, - hVM: hVM, - ssVM: ssVM, + ChainVM: vm, + blockBuilderVM: blockBuilderVM, + batchedVM: batchedVM, + hVM: hVM, + ssVM: ssVM, activationTime: activationTime, minimumPChainHeight: minimumPChainHeight, - - banffActivationTime: banffActivationTime, + minBlkDelay: minBlkDelay, + stakingLeafSigner: stakingLeafSigner, + stakingCertLeaf: stakingCertLeaf, } } func (vm *VM) Initialize( - ctx *snow.Context, + ctx context.Context, + chainCtx *snow.Context, dbManager manager.Manager, genesisBytes []byte, upgradeBytes []byte, @@ -150,22 +159,22 @@ func (vm *VM) Initialize( if err := multiGatherer.Register("", optionalGatherer); err != nil { return err } - if err := ctx.Metrics.Register(multiGatherer); err != nil { + if err := chainCtx.Metrics.Register(multiGatherer); err != nil { return err } - ctx.Metrics = optionalGatherer + chainCtx.Metrics = optionalGatherer - vm.ctx = ctx + vm.ctx = chainCtx rawDB := dbManager.Current().Database prefixDB := prefixdb.New(dbPrefix, rawDB) vm.db = versiondb.New(prefixDB) vm.State = state.New(vm.db) - vm.Windower = proposer.New(ctx.ValidatorState, ctx.SubnetID, ctx.ChainID) + vm.Windower = proposer.New(chainCtx.ValidatorState, chainCtx.SubnetID, chainCtx.ChainID) vm.Tree = tree.New() - innerBlkCache, err := metercacher.New( + innerBlkCache, err := metercacher.New[ids.ID, snowman.Block]( "inner_block_cache", registerer, - &cache.LRU{Size: innerBlkCacheSize}, + &cache.LRU[ids.ID, snowman.Block]{Size: innerBlkCacheSize}, ) if err != nil { return err @@ -181,17 +190,19 
@@ func (vm *VM) Initialize( vm.Scheduler = scheduler vm.toScheduler = vmToEngine - go ctx.Log.RecoverAndPanic(func() { + go chainCtx.Log.RecoverAndPanic(func() { scheduler.Dispatch(time.Now()) }) vm.verifiedBlocks = make(map[ids.ID]PostForkBlock) - context, cancel := context.WithCancel(context.Background()) + detachedCtx := utils.Detach(ctx) + context, cancel := context.WithCancel(detachedCtx) vm.context = context vm.onShutdown = cancel err = vm.ChainVM.Initialize( ctx, + chainCtx, dbManager, genesisBytes, upgradeBytes, @@ -204,25 +215,25 @@ func (vm *VM) Initialize( return err } - if err := vm.repair(indexerState); err != nil { + if err := vm.repair(detachedCtx); err != nil { return err } - return vm.setLastAcceptedMetadata() + return vm.setLastAcceptedMetadata(ctx) } // shutdown ops then propagate shutdown to innerVM -func (vm *VM) Shutdown() error { +func (vm *VM) Shutdown(ctx context.Context) error { vm.onShutdown() if err := vm.db.Commit(); err != nil { return err } - return vm.ChainVM.Shutdown() + return vm.ChainVM.Shutdown(ctx) } -func (vm *VM) SetState(newState snow.State) error { - if err := vm.ChainVM.SetState(newState); err != nil { +func (vm *VM) SetState(ctx context.Context, newState snow.State) error { + if err := vm.ChainVM.SetState(ctx, newState); err != nil { return err } @@ -236,54 +247,54 @@ func (vm *VM) SetState(newState snow.State) error { // repairAcceptedChainByHeight rolls back the chain to the previously last // accepted block. If state sync has completed successfully, this call is a // no-op. 
- if err := vm.repairAcceptedChainByHeight(); err != nil { + if err := vm.repairAcceptedChainByHeight(ctx); err != nil { return err } - return vm.setLastAcceptedMetadata() + return vm.setLastAcceptedMetadata(ctx) } -func (vm *VM) BuildBlock() (snowman.Block, error) { - preferredBlock, err := vm.getBlock(vm.preferred) +func (vm *VM) BuildBlock(ctx context.Context) (snowman.Block, error) { + preferredBlock, err := vm.getBlock(ctx, vm.preferred) if err != nil { return nil, err } - return preferredBlock.buildChild() + return preferredBlock.buildChild(ctx) } -func (vm *VM) ParseBlock(b []byte) (snowman.Block, error) { - if blk, err := vm.parsePostForkBlock(b); err == nil { +func (vm *VM) ParseBlock(ctx context.Context, b []byte) (snowman.Block, error) { + if blk, err := vm.parsePostForkBlock(ctx, b); err == nil { return blk, nil } - return vm.parsePreForkBlock(b) + return vm.parsePreForkBlock(ctx, b) } -func (vm *VM) GetBlock(id ids.ID) (snowman.Block, error) { - return vm.getBlock(id) +func (vm *VM) GetBlock(ctx context.Context, id ids.ID) (snowman.Block, error) { + return vm.getBlock(ctx, id) } -func (vm *VM) SetPreference(preferred ids.ID) error { +func (vm *VM) SetPreference(ctx context.Context, preferred ids.ID) error { if vm.preferred == preferred { return nil } vm.preferred = preferred - blk, err := vm.getPostForkBlock(preferred) + blk, err := vm.getPostForkBlock(ctx, preferred) if err != nil { - return vm.ChainVM.SetPreference(preferred) + return vm.ChainVM.SetPreference(ctx, preferred) } - if err := vm.ChainVM.SetPreference(blk.getInnerBlk().ID()); err != nil { + if err := vm.ChainVM.SetPreference(ctx, blk.getInnerBlk().ID()); err != nil { return err } - pChainHeight, err := blk.pChainHeight() + pChainHeight, err := blk.pChainHeight(ctx) if err != nil { return err } // reset scheduler - minDelay, err := vm.Windower.Delay(blk.Height()+1, pChainHeight, vm.ctx.NodeID) + minDelay, err := vm.Windower.Delay(ctx, blk.Height()+1, pChainHeight, vm.ctx.NodeID) if err != 
nil { vm.ctx.Log.Debug("failed to fetch the expected delay", zap.Error(err), @@ -294,8 +305,15 @@ func (vm *VM) SetPreference(preferred ids.ID) error { // until the P-chain's height has advanced. return nil } - if minDelay < minBlockDelay { - minDelay = minBlockDelay + + // Note: The P-chain does not currently try to target any block time. It + // notifies the consensus engine as soon as a new block may be built. To + // avoid fast runs of blocks there is an additional minimum delay that + // validators can specify. This delay may be an issue for high performance, + // custom VMs. Until the P-chain is modified to target a specific block + // time, ProposerMinBlockDelay can be configured in the subnet config. + if minDelay < vm.minBlkDelay { + minDelay = vm.minBlkDelay } preferredTime := blk.Timestamp() @@ -310,83 +328,47 @@ func (vm *VM) SetPreference(preferred ids.ID) error { return nil } -func (vm *VM) LastAccepted() (ids.ID, error) { +func (vm *VM) LastAccepted(ctx context.Context) (ids.ID, error) { lastAccepted, err := vm.State.GetLastAccepted() if err == database.ErrNotFound { - return vm.ChainVM.LastAccepted() + return vm.ChainVM.LastAccepted(ctx) } return lastAccepted, err } -func (vm *VM) repair(indexerState state.State) error { - // check and possibly rebuild height index +// repair makes sure that vm and innerVM chains are in sync. +// Moreover it fixes vm's height index if defined. +func (vm *VM) repair(ctx context.Context) error { if vm.hVM == nil { - return vm.repairAcceptedChainByIteration() + // height index not defined. Just sync vms and innerVM chains. 
+ return vm.repairAcceptedChainByIteration(ctx) } - indexIsEmpty, err := vm.State.IsIndexEmpty() - if err != nil { - return err - } - if indexIsEmpty { - if err := vm.State.SetIndexHasReset(); err != nil { - return err - } - if err := vm.State.Commit(); err != nil { - return err - } - } else { - indexWasReset, err := vm.State.HasIndexReset() + switch vm.hVM.VerifyHeightIndex(ctx) { + case nil: + // InnerVM height index is complete. We can immediately verify + // and repair this VM height index. + shouldRepair, err := vm.shouldHeightIndexBeRepaired(ctx) if err != nil { - return fmt.Errorf("retrieving value of required index reset failed with: %w", err) - } - - if !indexWasReset { - vm.resetHeightIndexOngoing.SetValue(true) - } - } - - if !vm.resetHeightIndexOngoing.GetValue() { - // We are not going to wipe the height index - switch vm.hVM.VerifyHeightIndex() { - case nil: - // We are not going to wait for the height index to be repaired. - shouldRepair, err := vm.shouldHeightIndexBeRepaired() - if err != nil { - return err - } - if !shouldRepair { - vm.ctx.Log.Info("block height index was successfully verified") - vm.hIndexer.MarkRepaired(true) - return vm.repairAcceptedChainByHeight() - } - case block.ErrIndexIncomplete: - default: return err } + if !shouldRepair { + vm.ctx.Log.Info("block height index was successfully verified") + vm.hIndexer.MarkRepaired(true) + return vm.repairAcceptedChainByHeight(ctx) + } + case block.ErrIndexIncomplete: + default: + return nil } - if err := vm.repairAcceptedChainByIteration(); err != nil { + // innerVM height index is incomplete. Sync vm and innerVM chains first. 
+ if err := vm.repairAcceptedChainByIteration(ctx); err != nil { return err } // asynchronously rebuild height index, if needed go func() { - // If index reset has been requested, carry it out first - if vm.resetHeightIndexOngoing.GetValue() { - vm.ctx.Log.Info("block height indexing reset started") - - if err := indexerState.ResetHeightIndex(vm.ctx.Log, vm); err != nil { - vm.ctx.Log.Error("block height indexing reset failed", - zap.Error(err), - ) - return - } - - vm.ctx.Log.Info("block height indexing reset finished") - vm.resetHeightIndexOngoing.SetValue(false) - } - // Poll until the underlying chain's index is complete or shutdown is // called. ticker := time.NewTicker(checkIndexedFrequency) @@ -394,11 +376,11 @@ func (vm *VM) repair(indexerState state.State) error { for { // The underlying VM expects the lock to be held here. vm.ctx.Lock.Lock() - err := vm.hVM.VerifyHeightIndex() + err := vm.hVM.VerifyHeightIndex(ctx) vm.ctx.Lock.Unlock() if err == nil { - // innerVM indexing complete. Let re-index this machine + // innerVM indexing complete. Let's re-index this VM break } if err != block.ErrIndexIncomplete { @@ -417,7 +399,7 @@ func (vm *VM) repair(indexerState state.State) error { } vm.ctx.Lock.Lock() - shouldRepair, err := vm.shouldHeightIndexBeRepaired() + shouldRepair, err := vm.shouldHeightIndexBeRepaired(ctx) vm.ctx.Lock.Unlock() if err != nil { @@ -452,7 +434,7 @@ func (vm *VM) repair(indexerState state.State) error { return nil } -func (vm *VM) repairAcceptedChainByIteration() error { +func (vm *VM) repairAcceptedChainByIteration(ctx context.Context) error { lastAcceptedID, err := vm.GetLastAccepted() if err == database.ErrNotFound { // If the last accepted block isn't indexed yet, then the underlying @@ -465,7 +447,7 @@ func (vm *VM) repairAcceptedChainByIteration() error { // Revert accepted blocks that weren't committed to the database. 
for { - lastAccepted, err := vm.getPostForkBlock(lastAcceptedID) + lastAccepted, err := vm.getPostForkBlock(ctx, lastAcceptedID) if err == database.ErrNotFound { // If the post fork block can't be found, it's because we're // reverting past the fork boundary. If this is the case, then there @@ -522,12 +504,12 @@ func (vm *VM) repairAcceptedChainByIteration() error { } } -func (vm *VM) repairAcceptedChainByHeight() error { - innerLastAcceptedID, err := vm.ChainVM.LastAccepted() +func (vm *VM) repairAcceptedChainByHeight(ctx context.Context) error { + innerLastAcceptedID, err := vm.ChainVM.LastAccepted(ctx) if err != nil { return err } - innerLastAccepted, err := vm.ChainVM.GetBlock(innerLastAcceptedID) + innerLastAccepted, err := vm.ChainVM.GetBlock(ctx, innerLastAcceptedID) if err != nil { return err } @@ -540,8 +522,7 @@ func (vm *VM) repairAcceptedChainByHeight() error { if err != nil { return err } - - proLastAccepted, err := vm.getPostForkBlock(proLastAcceptedID) + proLastAccepted, err := vm.getPostForkBlock(ctx, proLastAcceptedID) if err != nil { return err } @@ -564,7 +545,6 @@ func (vm *VM) repairAcceptedChainByHeight() error { if forkHeight > innerLastAcceptedHeight { // We are rolling back past the fork, so we should just forget about all of our proposervm indices. 
- if err := vm.State.DeleteLastAccepted(); err != nil { return err } @@ -582,7 +562,7 @@ func (vm *VM) repairAcceptedChainByHeight() error { return vm.db.Commit() } -func (vm *VM) setLastAcceptedMetadata() error { +func (vm *VM) setLastAcceptedMetadata(ctx context.Context) error { lastAcceptedID, err := vm.GetLastAccepted() if err == database.ErrNotFound { // If the last accepted block wasn't a PostFork block, then we don't @@ -595,7 +575,7 @@ func (vm *VM) setLastAcceptedMetadata() error { return err } - lastAccepted, err := vm.getPostForkBlock(lastAcceptedID) + lastAccepted, err := vm.getPostForkBlock(ctx, lastAcceptedID) if err != nil { return err } @@ -609,7 +589,7 @@ func (vm *VM) setLastAcceptedMetadata() error { return nil } - acceptedParent, err := vm.getPostForkBlock(lastAccepted.Parent()) + acceptedParent, err := vm.getPostForkBlock(ctx, lastAccepted.Parent()) if err != nil { return err } @@ -617,22 +597,15 @@ func (vm *VM) setLastAcceptedMetadata() error { return nil } -func (vm *VM) parsePostForkBlock(b []byte) (PostForkBlock, error) { - statelessBlock, requireBanff, err := statelessblock.Parse(b) +func (vm *VM) parsePostForkBlock(ctx context.Context, b []byte) (PostForkBlock, error) { + statelessBlock, err := statelessblock.Parse(b) if err != nil { return nil, err } - if requireBanff { - banffActivated := vm.Clock.Time().After(vm.activationTimeBanff) - if !banffActivated { - return nil, errBanffBlockBeforeBanff - } - } - // if the block already exists, then make sure the status is set correctly blkID := statelessBlock.ID() - blk, err := vm.getPostForkBlock(blkID) + blk, err := vm.getPostForkBlock(ctx, blkID) if err == nil { return blk, nil } @@ -641,7 +614,7 @@ func (vm *VM) parsePostForkBlock(b []byte) (PostForkBlock, error) { } innerBlkBytes := statelessBlock.Block() - innerBlk, err := vm.parseInnerBlock(blkID, innerBlkBytes) + innerBlk, err := vm.parseInnerBlock(ctx, blkID, innerBlkBytes) if err != nil { return nil, err } @@ -668,22 +641,22 @@ func 
(vm *VM) parsePostForkBlock(b []byte) (PostForkBlock, error) { return blk, nil } -func (vm *VM) parsePreForkBlock(b []byte) (*preForkBlock, error) { - blk, err := vm.ChainVM.ParseBlock(b) +func (vm *VM) parsePreForkBlock(ctx context.Context, b []byte) (*preForkBlock, error) { + blk, err := vm.ChainVM.ParseBlock(ctx, b) return &preForkBlock{ Block: blk, vm: vm, }, err } -func (vm *VM) getBlock(id ids.ID) (Block, error) { - if blk, err := vm.getPostForkBlock(id); err == nil { +func (vm *VM) getBlock(ctx context.Context, id ids.ID) (Block, error) { + if blk, err := vm.getPostForkBlock(ctx, id); err == nil { return blk, nil } - return vm.getPreForkBlock(id) + return vm.getPreForkBlock(ctx, id) } -func (vm *VM) getPostForkBlock(blkID ids.ID) (PostForkBlock, error) { +func (vm *VM) getPostForkBlock(ctx context.Context, blkID ids.ID) (PostForkBlock, error) { block, exists := vm.verifiedBlocks[blkID] if exists { return block, nil @@ -695,7 +668,7 @@ func (vm *VM) getPostForkBlock(blkID ids.ID) (PostForkBlock, error) { } innerBlkBytes := statelessBlock.Block() - innerBlk, err := vm.parseInnerBlock(blkID, innerBlkBytes) + innerBlk, err := vm.parseInnerBlock(ctx, blkID, innerBlkBytes) if err != nil { return nil, err } @@ -720,8 +693,8 @@ func (vm *VM) getPostForkBlock(blkID ids.ID) (PostForkBlock, error) { }, nil } -func (vm *VM) getPreForkBlock(blkID ids.ID) (*preForkBlock, error) { - blk, err := vm.ChainVM.GetBlock(blkID) +func (vm *VM) getPreForkBlock(ctx context.Context, blkID ids.ID) (*preForkBlock, error) { + blk, err := vm.ChainVM.GetBlock(ctx, blkID) return &preForkBlock{ Block: blk, vm: vm, @@ -740,23 +713,56 @@ func (vm *VM) storePostForkBlock(blk PostForkBlock) error { return vm.db.Commit() } -func (vm *VM) verifyAndRecordInnerBlk(postFork PostForkBlock) error { - // If inner block's Verify returned true, don't call it again. - // - // Note that if [innerBlk.Verify] returns nil, this method returns nil. 
This - // must always remain the case to maintain the inner block's invariant that - // if it's Verify() returns nil, it is eventually accepted or rejected. - currentInnerBlk := postFork.getInnerBlk() - if originalInnerBlk, contains := vm.Tree.Get(currentInnerBlk); !contains { - if err := currentInnerBlk.Verify(); err != nil { - return err +func (vm *VM) verifyAndRecordInnerBlk(ctx context.Context, blockCtx *block.Context, postFork PostForkBlock) error { + innerBlk := postFork.getInnerBlk() + postForkID := postFork.ID() + originalInnerBlock, previouslyVerified := vm.Tree.Get(innerBlk) + if previouslyVerified { + innerBlk = originalInnerBlock + // We must update all of the mappings from postFork -> innerBlock to + // now point to originalInnerBlock. + postFork.setInnerBlk(originalInnerBlock) + vm.innerBlkCache.Put(postForkID, originalInnerBlock) + } + + var ( + shouldVerifyWithCtx = blockCtx != nil + blkWithCtx block.WithVerifyContext + err error + ) + if shouldVerifyWithCtx { + blkWithCtx, shouldVerifyWithCtx = innerBlk.(block.WithVerifyContext) + if shouldVerifyWithCtx { + shouldVerifyWithCtx, err = blkWithCtx.ShouldVerifyWithContext(ctx) + if err != nil { + return err + } } - vm.Tree.Add(currentInnerBlk) - } else { - postFork.setInnerBlk(originalInnerBlk) } - vm.verifiedBlocks[postFork.ID()] = postFork + // Invariant: If either [Verify] or [VerifyWithContext] returns nil, this + // function must return nil. This maintains the inner block's + // invariant that successful verification will eventually result + // in accepted or rejected being called. + if shouldVerifyWithCtx { + // This block needs to know the P-Chain height during verification. + // Note that [VerifyWithContext] with context may be called multiple + // times with multiple contexts. + err = blkWithCtx.VerifyWithContext(ctx, blockCtx) + } else if !previouslyVerified { + // This isn't a [block.WithVerifyContext] so we only call [Verify] once. 
+ err = innerBlk.Verify(ctx) + } + if err != nil { + return err + } + + // Since verification passed, we should ensure the inner block tree is + // populated. + if !previouslyVerified { + vm.Tree.Add(innerBlk) + } + vm.verifiedBlocks[postForkID] = postFork return nil } @@ -770,24 +776,24 @@ func (vm *VM) notifyInnerBlockReady() { } } -func (vm *VM) optimalPChainHeight(minPChainHeight uint64) (uint64, error) { - minimumHeight, err := vm.ctx.ValidatorState.GetMinimumHeight() +func (vm *VM) optimalPChainHeight(ctx context.Context, minPChainHeight uint64) (uint64, error) { + minimumHeight, err := vm.ctx.ValidatorState.GetMinimumHeight(ctx) if err != nil { return 0, err } - return math.Max64(minimumHeight, minPChainHeight), nil + return math.Max(minimumHeight, minPChainHeight), nil } // parseInnerBlock attempts to parse the provided bytes as an inner block. If // the inner block happens to be cached, then the inner block will not be // parsed. -func (vm *VM) parseInnerBlock(outerBlkID ids.ID, innerBlkBytes []byte) (snowman.Block, error) { - if innerBlkIntf, ok := vm.innerBlkCache.Get(outerBlkID); ok { - return innerBlkIntf.(snowman.Block), nil +func (vm *VM) parseInnerBlock(ctx context.Context, outerBlkID ids.ID, innerBlkBytes []byte) (snowman.Block, error) { + if innerBlk, ok := vm.innerBlkCache.Get(outerBlkID); ok { + return innerBlk, nil } - innerBlk, err := vm.ChainVM.ParseBlock(innerBlkBytes) + innerBlk, err := vm.ChainVM.ParseBlock(ctx, innerBlkBytes) if err != nil { return nil, err } @@ -798,7 +804,7 @@ func (vm *VM) parseInnerBlock(outerBlkID ids.ID, innerBlkBytes []byte) (snowman. // Caches proposervm block ID --> inner block if the inner block's height // is within [innerBlkCacheSize] of the last accepted block's height. 
func (vm *VM) cacheInnerBlock(outerBlkID ids.ID, innerBlk snowman.Block) { - diff := math.Diff64(innerBlk.Height(), vm.lastAcceptedHeight) + diff := math.AbsDiff(innerBlk.Height(), vm.lastAcceptedHeight) if diff < innerBlkCacheSize { vm.innerBlkCache.Put(outerBlkID, innerBlk) } diff --git a/avalanchego/vms/proposervm/vm_byzantine_test.go b/avalanchego/vms/proposervm/vm_byzantine_test.go index 0dc346d8..d471fd70 100644 --- a/avalanchego/vms/proposervm/vm_byzantine_test.go +++ b/avalanchego/vms/proposervm/vm_byzantine_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package proposervm import ( "bytes" + "context" "encoding/hex" "errors" "testing" @@ -14,6 +15,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/vms/proposervm/block" "github.com/ava-labs/avalanchego/vms/proposervm/proposer" ) @@ -21,11 +23,12 @@ import ( // Ensure that a byzantine node issuing an invalid PreForkBlock (Y) when the // parent block (X) is issued into a PostForkBlock (A) will be marked as invalid // correctly. 
-// G -// / | -// A - X -// | -// Y +// +// G +// / | +// A - X +// | +// Y func TestInvalidByzantineProposerParent(t *testing.T) { forkTime := time.Unix(0, 0) // enable ProBlks coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -40,20 +43,22 @@ func TestInvalidByzantineProposerParent(t *testing.T) { HeightV: gBlock.Height() + 1, TimestampV: gBlock.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } - aBlock, err := proVM.BuildBlock() + aBlock, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("proposerVM could not build block due to %s", err) } coreVM.BuildBlockF = nil - if err := aBlock.Verify(); err != nil { + if err := aBlock.Verify(context.Background()); err != nil { t.Fatalf("could not verify valid block due to %s", err) } - if err := aBlock.Accept(); err != nil { + if err := aBlock.Accept(context.Background()); err != nil { t.Fatalf("could not accept valid block due to %s", err) } @@ -69,21 +74,21 @@ func TestInvalidByzantineProposerParent(t *testing.T) { TimestampV: xBlock.Timestamp().Add(proposer.MaxDelay), } - coreVM.ParseBlockF = func(blockBytes []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, blockBytes []byte) (snowman.Block, error) { if !bytes.Equal(blockBytes, yBlockBytes) { return nil, errUnknownBlock } return yBlock, nil } - parsedBlock, err := proVM.ParseBlock(yBlockBytes) + parsedBlock, err := proVM.ParseBlock(context.Background(), yBlockBytes) if err != nil { // If there was an error parsing, then this is fine. 
return } // If there wasn't an error parsing - verify must return an error - if err := parsedBlock.Verify(); err == nil { + if err := parsedBlock.Verify(context.Background()); err == nil { t.Fatal("should have marked the parsed block as invalid") } } @@ -92,11 +97,11 @@ func TestInvalidByzantineProposerParent(t *testing.T) { // the parent block (X) is issued into a PostForkBlock (A) will be marked as // invalid correctly. // -// G -// / | -// A - X -// / \ -// Y Z +// G +// / | +// A - X +// / \ +// Y Z func TestInvalidByzantineProposerOracleParent(t *testing.T) { coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) proVM.Set(coreGenBlk.Timestamp()) @@ -134,8 +139,10 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -149,7 +156,7 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -164,7 +171,7 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { } } - aBlockIntf, err := proVM.BuildBlock() + aBlockIntf, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } @@ -174,35 +181,35 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { t.Fatal("expected post fork block") } - opts, err := aBlock.Options() + opts, err := aBlock.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from 
post fork oracle block") } - if err := aBlock.Verify(); err != nil { + if err := aBlock.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := opts[0].Verify(); err != nil { + if err := opts[0].Verify(context.Background()); err != nil { t.Fatal(err) } - if err := opts[1].Verify(); err != nil { + if err := opts[1].Verify(context.Background()); err != nil { t.Fatal(err) } - yBlock, err := proVM.ParseBlock(xBlock.opts[0].Bytes()) + yBlock, err := proVM.ParseBlock(context.Background(), xBlock.opts[0].Bytes()) if err != nil { // It's okay for this block not to be parsed return } - if err := yBlock.Verify(); err == nil { + if err := yBlock.Verify(context.Background()); err == nil { t.Fatal("unexpectedly passed block verification") } - if err := aBlock.Accept(); err != nil { + if err := aBlock.Accept(context.Background()); err != nil { t.Fatal(err) } - if err := yBlock.Verify(); err == nil { + if err := yBlock.Verify(context.Background()); err == nil { t.Fatal("unexpectedly passed block verification") } } @@ -211,11 +218,11 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { // parent block (X) is issued into a PostForkBlock (A) will be marked as invalid // correctly. 
// -// G -// / | -// A - X -// / | -// B - Y +// G +// / | +// A - X +// / | +// B - Y func TestInvalidByzantineProposerPreForkParent(t *testing.T) { forkTime := time.Unix(0, 0) // enable ProBlks coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -230,9 +237,11 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { HeightV: gBlock.Height() + 1, TimestampV: gBlock.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } - aBlock, err := proVM.BuildBlock() + aBlock, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("proposerVM could not build block due to %s", err) } @@ -251,7 +260,7 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { TimestampV: xBlock.Timestamp().Add(proposer.MaxDelay), } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case gBlock.ID(): return gBlock, nil @@ -263,7 +272,7 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(blockBytes []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, blockBytes []byte) (snowman.Block, error) { switch { case bytes.Equal(blockBytes, gBlock.Bytes()): return gBlock, nil @@ -276,7 +285,7 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { } } - bStatelessBlock, err := block.BuildUnsignedApricot( + bStatelessBlock, err := block.BuildUnsigned( xBlock.ID(), yBlock.Timestamp(), 0, @@ -286,27 +295,27 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { t.Fatal(err) } - bBlock, err := proVM.ParseBlock(bStatelessBlock.Bytes()) + bBlock, err := proVM.ParseBlock(context.Background(), bStatelessBlock.Bytes()) if err != nil { // If there was an error parsing, then this is fine. 
return } - if err := aBlock.Verify(); err != nil { + if err := aBlock.Verify(context.Background()); err != nil { t.Fatalf("could not verify valid block due to %s", err) } // If there wasn't an error parsing - verify must return an error - if err := bBlock.Verify(); err == nil { + if err := bBlock.Verify(context.Background()); err == nil { t.Fatal("should have marked the parsed block as invalid") } - if err := aBlock.Accept(); err != nil { + if err := aBlock.Accept(context.Background()); err != nil { t.Fatalf("could not accept valid block due to %s", err) } // If there wasn't an error parsing - verify must return an error - if err := bBlock.Verify(); err == nil { + if err := bBlock.Verify(context.Background()); err == nil { t.Fatal("should have marked the parsed block as invalid") } } @@ -315,11 +324,11 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { // contains core block (Y) whose parent (G) doesn't match (B)'s parent (A)'s // inner block (X) will be marked as invalid correctly. 
// -// G -// / | \ -// A - X | -// | / -// B - Y +// G +// / | \ +// A - X | +// | / +// B - Y func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) proVM.Set(coreGenBlk.Timestamp()) @@ -356,8 +365,10 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -371,7 +382,7 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -386,7 +397,7 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { } } - aBlockIntf, err := proVM.BuildBlock() + aBlockIntf, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } @@ -395,29 +406,29 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { if !ok { t.Fatal("expected post fork block") } - opts, err := aBlock.Options() + opts, err := aBlock.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from post fork oracle block") } - if err := aBlock.Verify(); err != nil { + if err := aBlock.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := opts[0].Verify(); err == nil { + if err := opts[0].Verify(context.Background()); err == nil { t.Fatal("option 0 has invalid parent, should not verify") } - if err := opts[1].Verify(); err == nil { + if err := 
opts[1].Verify(context.Background()); err == nil { t.Fatal("option 1 has invalid parent, should not verify") } } -// ,--G ----. -// / \ \ -// A(X) B(Y) C(Z) -// | \_ /_____/ -// |\ / | -// | \/ | -// O2 O1 O3 +// ,--G ----. +// / \ \ +// A(X) B(Y) C(Z) +// | \_ /_____/ +// |\ / | +// | \/ | +// O2 O1 O3 // // O1.parent = B (non-Oracle), O1.inner = first option of X (invalid) // O2.parent = A (original), O2.inner = first option of X (valid) @@ -460,7 +471,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - xInnerOptions, err := xBlock.Options() + xInnerOptions, err := xBlock.Options(context.Background()) if err != nil { t.Fatal(err) } @@ -478,7 +489,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { TimestampV: coreGenBlk.Timestamp(), } - ySlb, err := block.BuildUnsignedApricot( + ySlb, err := block.BuildUnsigned( coreGenBlk.ID(), coreGenBlk.Timestamp(), uint64(2000), @@ -498,7 +509,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err = bBlock.Verify(); err != nil { + if err := bBlock.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -520,18 +531,20 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := outerOption.Verify(); !errors.Is(err, errUnexpectedBlockType) { + if err := outerOption.Verify(context.Background()); !errors.Is(err, errUnexpectedBlockType) { t.Fatal(err) } // generate A from X and O2 - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } - aBlock, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } + aBlock, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } coreVM.BuildBlockF = nil - if err := aBlock.Verify(); err != nil { + if err := aBlock.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -552,7 +565,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := outerOption.Verify(); err != nil { + if err := 
outerOption.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -591,13 +604,15 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return zBlock, nil } - cBlock, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return zBlock, nil + } + cBlock, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } coreVM.BuildBlockF = nil - if err := cBlock.Verify(); err != nil { + if err := cBlock.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -619,7 +634,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := outerOption.Verify(); err != errInnerParentMismatch { + if err := outerOption.Verify(context.Background()); err != errInnerParentMismatch { t.Fatal(err) } } @@ -628,10 +643,13 @@ func TestGetBlock_MutatedSignature(t *testing.T) { coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // Make sure that we will be sampled to perform the proposals. 
- valState.GetValidatorSetF = func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - res := make(map[ids.NodeID]uint64) - res[proVM.ctx.NodeID] = uint64(10) - return res, nil + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + proVM.ctx.NodeID: { + NodeID: proVM.ctx.NodeID, + Weight: 10, + }, + }, nil } proVM.Set(coreGenBlk.Timestamp()) @@ -659,7 +677,7 @@ func TestGetBlock_MutatedSignature(t *testing.T) { TimestampV: coreGenBlk.Timestamp(), } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -671,7 +689,7 @@ func TestGetBlock_MutatedSignature(t *testing.T) { return nil, database.ErrNotFound } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -685,18 +703,20 @@ func TestGetBlock_MutatedSignature(t *testing.T) { } // Build the first proposal block - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk0, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk0, nil + } - builtBlk0, err := proVM.BuildBlock() + builtBlk0, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("could not build post fork block %s", err) } - if err := builtBlk0.Verify(); err != nil { + if err := builtBlk0.Verify(context.Background()); err != nil { t.Fatalf("failed to verify newly created block %s", err) } - if err := proVM.SetPreference(builtBlk0.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), builtBlk0.ID()); err != nil { t.Fatal(err) } @@ -713,13 +733,13 @@ func TestGetBlock_MutatedSignature(t *testing.T) { t.Fatal(err) 
} - invalidBlk, err := proVM.ParseBlock(invalidBlkBytes) + invalidBlk, err := proVM.ParseBlock(context.Background(), invalidBlkBytes) if err != nil { // Not being able to parse an invalid block is fine. t.Skip(err) } - if err := invalidBlk.Verify(); err == nil { + if err := invalidBlk.Verify(context.Background()); err == nil { t.Fatalf("verified block without valid signature") } @@ -737,14 +757,14 @@ func TestGetBlock_MutatedSignature(t *testing.T) { // GetBlock shouldn't really be able to succeed, as we don't have a valid // representation of [blkID] proVM.innerBlkCache.Flush() // So we don't get from the cache - fetchedBlk, err := proVM.GetBlock(blkID) + fetchedBlk, err := proVM.GetBlock(context.Background(), blkID) if err != nil { t.Skip(err) } // GetBlock returned, so it must have somehow gotten a valid representation // of [blkID]. - if err := fetchedBlk.Verify(); err != nil { + if err := fetchedBlk.Verify(context.Background()); err != nil { t.Fatalf("GetBlock returned an invalid block when the ID represented a potentially valid block: %s", err) } } diff --git a/avalanchego/vms/proposervm/vm_test.go b/avalanchego/vms/proposervm/vm_test.go index dd3eb1e3..0aecbe12 100644 --- a/avalanchego/vms/proposervm/vm_test.go +++ b/avalanchego/vms/proposervm/vm_test.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package proposervm import ( "bytes" + "context" "crypto" "crypto/tls" "errors" @@ -34,9 +35,9 @@ import ( ) var ( - _ block.ChainVM = &fullVM{} - _ block.HeightIndexedChainVM = &fullVM{} - _ block.StateSyncableVM = &fullVM{} + _ block.ChainVM = (*fullVM)(nil) + _ block.HeightIndexedChainVM = (*fullVM)(nil) + _ block.StateSyncableVM = (*fullVM)(nil) ) type fullVM struct { @@ -56,6 +57,7 @@ var ( errUnknownBlock = errors.New("unknown block") errUnverifiedBlock = errors.New("unverified block") errMarshallingFailed = errors.New("marshalling failed") + errTooHigh = errors.New("too high") ) func init() { @@ -102,14 +104,16 @@ func initTestProposerVM( }, } - coreVM.InitializeF = func(*snow.Context, manager.Manager, + coreVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { return nil } - coreVM.LastAcceptedF = func() (ids.ID, error) { return coreGenBlk.ID(), nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { + return coreGenBlk.ID(), nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreGenBlk.ID(): return coreGenBlk, nil @@ -117,7 +121,7 @@ func initTestProposerVM( return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -126,46 +130,80 @@ func initTestProposerVM( } } - proVM := New(coreVM, proBlkStartTime, minPChainHeight, time.Time{}) + proVM := New( + coreVM, + proBlkStartTime, + minPChainHeight, + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, + ) valState := &validators.TestState{ T: t, } - valState.GetMinimumHeightF = func() (uint64, error) { return coreGenBlk.HeightV, nil } - 
valState.GetCurrentHeightF = func() (uint64, error) { return defaultPChainHeight, nil } - valState.GetValidatorSetF = func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - res := make(map[ids.NodeID]uint64) - res[proVM.ctx.NodeID] = uint64(10) - res[ids.NodeID{1}] = uint64(5) - res[ids.NodeID{2}] = uint64(6) - res[ids.NodeID{3}] = uint64(7) - return res, nil + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return coreGenBlk.HeightV, nil + } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return defaultPChainHeight, nil + } + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + proVM.ctx.NodeID: { + NodeID: proVM.ctx.NodeID, + Weight: 10, + }, + {1}: { + NodeID: ids.NodeID{1}, + Weight: 5, + }, + {2}: { + NodeID: ids.NodeID{2}, + Weight: 6, + }, + {3}: { + NodeID: ids.NodeID{3}, + Weight: 7, + }, + }, nil } ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - ctx.StakingCertLeaf = pTestCert.Leaf - ctx.StakingLeafSigner = pTestCert.PrivateKey.(crypto.Signer) ctx.ValidatorState = valState dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - // pre-insert resetOccurred key to make VM not spinning height reindexing - stopHeightReindexing(t, coreVM, dummyDBManager) + // signal height index is complete + coreVM.VerifyHeightIndexF = func(context.Context) error { + return nil + } - if err := proVM.Initialize(ctx, dummyDBManager, initialState, nil, nil, nil, nil, nil); err != nil { + err := proVM.Initialize( + context.Background(), + ctx, + dummyDBManager, + initialState, + nil, + nil, + nil, + nil, + nil, + ) + if err != nil { t.Fatalf("failed to initialize proposerVM with %s", err) } // Initialize shouldn't be called again coreVM.InitializeF = nil - if err := 
proVM.SetState(snow.NormalOp); err != nil { + if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(coreGenBlk.IDV); err != nil { + if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { t.Fatal(err) } @@ -190,10 +228,12 @@ func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } // test - builtBlk, err := proVM.BuildBlock() + builtBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("proposerVM could not build block") } @@ -217,15 +257,17 @@ func TestBuildBlockIsIdempotent(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } // test - builtBlk1, err := proVM.BuildBlock() + builtBlk1, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("proposerVM could not build block") } - builtBlk2, err := proVM.BuildBlock() + builtBlk2, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("proposerVM could not build block") } @@ -249,10 +291,12 @@ func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } // test - snowBlock, err := proVM.BuildBlock() + snowBlock, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("Could not build block") } @@ 
-283,8 +327,10 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk1, nil } - proBlk1, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk1, nil + } + proBlk1, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("Could not build proBlk1 due to %s", err) } @@ -299,8 +345,10 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk2, nil } - proBlk2, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk2, nil + } + proBlk2, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("Could not build proBlk2") } @@ -308,13 +356,13 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { t.Fatal("proBlk1 and proBlk2 should be different for this test") } - if err := proBlk2.Verify(); err != nil { + if err := proBlk2.Verify(context.Background()); err != nil { t.Fatal(err) } // ...and set one as preferred var prefcoreBlk *snowman.TestBlock - coreVM.SetPreferenceF = func(prefID ids.ID) error { + coreVM.SetPreferenceF = func(_ context.Context, prefID ids.ID) error { switch prefID { case coreBlk1.ID(): prefcoreBlk = coreBlk1 @@ -327,7 +375,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { return nil } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreBlk1.Bytes()): return coreBlk1, nil @@ -339,7 +387,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { } } - if err := proVM.SetPreference(proBlk2.ID()); err != nil { + if err := 
proVM.SetPreference(context.Background(), proBlk2.ID()); err != nil { t.Fatal("Could not set preference") } @@ -354,10 +402,12 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { HeightV: prefcoreBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk3, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk3, nil + } proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - builtBlk, err := proVM.BuildBlock() + builtBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("unexpectedly could not build block due to %s", err) } @@ -381,8 +431,10 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk1, nil } - proBlk1, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk1, nil + } + proBlk1, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("Could not build proBlk1") } @@ -397,8 +449,10 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk2, nil } - proBlk2, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk2, nil + } + proBlk2, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("Could not build proBlk2") } @@ -406,13 +460,13 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { t.Fatal("proBlk1 and proBlk2 should be different for this test") } - if err := proBlk2.Verify(); err != nil { + if err := proBlk2.Verify(context.Background()); err != nil { t.Fatal(err) } // ...and set one as preferred var wronglyPreferredcoreBlk *snowman.TestBlock - 
coreVM.SetPreferenceF = func(prefID ids.ID) error { + coreVM.SetPreferenceF = func(_ context.Context, prefID ids.ID) error { switch prefID { case coreBlk1.ID(): wronglyPreferredcoreBlk = coreBlk2 @@ -425,7 +479,7 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { return nil } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreBlk1.Bytes()): return coreBlk1, nil @@ -437,7 +491,7 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { } } - if err := proVM.SetPreference(proBlk2.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), proBlk2.ID()); err != nil { t.Fatal("Could not set preference") } @@ -452,15 +506,17 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { HeightV: wronglyPreferredcoreBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk3, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk3, nil + } proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - blk, err := proVM.BuildBlock() + blk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err == nil { + if err := blk.Verify(context.Background()); err == nil { t.Fatal("coreVM does not build on preferred coreBlock. 
It should err") } } @@ -473,17 +529,17 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { BytesV: []byte{1}, TimestampV: proVM.Time(), } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { return nil, errMarshallingFailed } - slb, err := statelessblock.BuildApricot( + slb, err := statelessblock.Build( proVM.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") @@ -499,7 +555,7 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { // test - if _, err := proVM.ParseBlock(proBlk.Bytes()); err == nil { + if _, err := proVM.ParseBlock(context.Background(), proBlk.Bytes()); err == nil { t.Fatal("failed parsing proposervm.Block. Error:", err) } } @@ -514,21 +570,21 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { HeightV: gencoreBlk.Height() + 1, TimestampV: proVM.Time(), } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if !bytes.Equal(b, innerBlk.Bytes()) { t.Fatalf("Wrong bytes") } return innerBlk, nil } - slb1, err := statelessblock.BuildApricot( + slb1, err := statelessblock.Build( proVM.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - proVM.ctx.StakingCertLeaf, + proVM.stakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") @@ -542,14 +598,14 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { }, } - slb2, err := statelessblock.BuildApricot( + slb2, err := statelessblock.Build( proVM.preferred, innerBlk.Timestamp(), 200, // pChainHeight, - proVM.ctx.StakingCertLeaf, 
+ proVM.stakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.ctx.StakingLeafSigner, + proVM.stakingLeafSigner, ) if err != nil { t.Fatal("could not build stateless block") @@ -568,11 +624,11 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { } // Show that both can be parsed and retrieved - parsedBlk1, err := proVM.ParseBlock(proBlk1.Bytes()) + parsedBlk1, err := proVM.ParseBlock(context.Background(), proBlk1.Bytes()) if err != nil { t.Fatal("proposerVM could not parse parsedBlk1") } - parsedBlk2, err := proVM.ParseBlock(proBlk2.Bytes()) + parsedBlk2, err := proVM.ParseBlock(context.Background(), proBlk2.Bytes()) if err != nil { t.Fatal("proposerVM could not parse parsedBlk2") } @@ -596,15 +652,15 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: genesisTimestamp, } - coreVM.BuildBlockF = func() (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return localcoreBlk, nil } - builtBlk, err := proVM.BuildBlock() + builtBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("Could not build block") } - if err = builtBlk.Verify(); err != nil { + if err := builtBlk.Verify(context.Background()); err != nil { t.Fatal("Built block does not verify") } @@ -615,7 +671,7 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: genesisTimestamp, } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -629,12 +685,12 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { } } - pChainHeight, err := proVM.ctx.ValidatorState.GetCurrentHeight() + pChainHeight, err := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) if err != nil { t.Fatal("could not retrieve pChain height") } - 
netSlb, err := statelessblock.BuildUnsignedApricot( + netSlb, err := statelessblock.BuildUnsigned( proVM.preferred, netcoreBlk.Timestamp(), pChainHeight, @@ -653,7 +709,7 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { } // prove that also block from network verifies - if err = netProBlk.Verify(); err != nil { + if err := netProBlk.Verify(context.Background()); err != nil { t.Fatal("block from network does not verify") } } @@ -663,12 +719,12 @@ func TestPreFork_Initialize(t *testing.T) { _, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks // checks - blkID, err := proVM.LastAccepted() + blkID, err := proVM.LastAccepted(context.Background()) if err != nil { t.Fatal("failed to retrieve last accepted block") } - rtvdBlk, err := proVM.GetBlock(blkID) + rtvdBlk, err := proVM.GetBlock(context.Background(), blkID) if err != nil { t.Fatal("Block should be returned without calling core vm") } @@ -695,10 +751,12 @@ func TestPreFork_BuildBlock(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp().Add(proposer.MaxDelay), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk, nil } + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk, nil + } // test - builtBlk, err := proVM.BuildBlock() + builtBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("proposerVM could not build block") } @@ -713,8 +771,10 @@ func TestPreFork_BuildBlock(t *testing.T) { } // test - coreVM.GetBlockF = func(id ids.ID) (snowman.Block, error) { return coreBlk, nil } - storedBlk, err := proVM.GetBlock(builtBlk.ID()) + coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { + return coreBlk, nil + } + storedBlk, err := proVM.GetBlock(context.Background(), builtBlk.ID()) if err != nil { t.Fatal("proposerVM has not cached built block") } @@ -734,14 +794,14 @@ func TestPreFork_ParseBlock(t *testing.T) { BytesV: []byte{1}, } - 
coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { if !bytes.Equal(b, coreBlk.Bytes()) { t.Fatalf("Wrong bytes") } return coreBlk, nil } - parsedBlk, err := proVM.ParseBlock(coreBlk.Bytes()) + parsedBlk, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) if err != nil { t.Fatal("Could not parse naked core block") } @@ -755,13 +815,13 @@ func TestPreFork_ParseBlock(t *testing.T) { t.Fatal("Parsed block does not match expected block") } - coreVM.GetBlockF = func(id ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { if id != coreBlk.ID() { t.Fatalf("Unknown core block") } return coreBlk, nil } - storedBlk, err := proVM.GetBlock(parsedBlk.ID()) + storedBlk, err := proVM.GetBlock(context.Background(), parsedBlk.ID()) if err != nil { t.Fatal("proposerVM has not cached parsed block") } @@ -783,13 +843,15 @@ func TestPreFork_SetPreference(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk0, nil } - builtBlk, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk0, nil + } + builtBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("Could not build proposer block") } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -799,7 +861,7 @@ func TestPreFork_SetPreference(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -809,7 +871,7 @@ func 
TestPreFork_SetPreference(t *testing.T) { return nil, errUnknownBlock } } - if err = proVM.SetPreference(builtBlk.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), builtBlk.ID()); err != nil { t.Fatal("Could not set preference on proposer Block") } @@ -823,8 +885,10 @@ func TestPreFork_SetPreference(t *testing.T) { HeightV: coreBlk0.Height() + 1, TimestampV: coreBlk0.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return coreBlk1, nil } - nextBlk, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return coreBlk1, nil + } + nextBlk, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("Could not build proposer block %s", err) } @@ -847,8 +911,10 @@ func TestExpiredBuildBlock(t *testing.T) { coreVM := &block.TestVM{} coreVM.T = t - coreVM.LastAcceptedF = func() (ids.ID, error) { return coreGenBlk.ID(), nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { + return coreGenBlk.ID(), nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -856,7 +922,7 @@ func TestExpiredBuildBlock(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -865,23 +931,35 @@ func TestExpiredBuildBlock(t *testing.T) { } } - proVM := New(coreVM, time.Time{}, 0, time.Time{}) + proVM := New( + coreVM, + time.Time{}, + 0, + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, + ) valState := &validators.TestState{ T: t, } - valState.GetMinimumHeightF = func() (uint64, error) { return coreGenBlk.Height(), nil } - valState.GetCurrentHeightF = func() (uint64, error) { return 
defaultPChainHeight, nil } - valState.GetValidatorSetF = func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - return map[ids.NodeID]uint64{ - {1}: 100, + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return coreGenBlk.Height(), nil + } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return defaultPChainHeight, nil + } + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + {1}: { + NodeID: ids.NodeID{1}, + Weight: 100, + }, }, nil } ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - ctx.StakingCertLeaf = pTestCert.Leaf - ctx.StakingLeafSigner = pTestCert.PrivateKey.(crypto.Signer) ctx.ValidatorState = valState dbManager := manager.NewMemDB(version.Semantic1_0_0) @@ -889,6 +967,7 @@ func TestExpiredBuildBlock(t *testing.T) { var toScheduler chan<- common.Message coreVM.InitializeF = func( + _ context.Context, _ *snow.Context, _ manager.Manager, _ []byte, @@ -903,18 +982,29 @@ func TestExpiredBuildBlock(t *testing.T) { } // make sure that DBs are compressed correctly - if err := proVM.Initialize(ctx, dbManager, nil, nil, nil, toEngine, nil, nil); err != nil { + err := proVM.Initialize( + context.Background(), + ctx, + dbManager, + nil, + nil, + nil, + toEngine, + nil, + nil, + ) + if err != nil { t.Fatalf("failed to initialize proposerVM with %s", err) } // Initialize shouldn't be called again coreVM.InitializeF = nil - if err := proVM.SetState(snow.NormalOp); err != nil { + if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(coreGenBlk.IDV); err != nil { + if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { t.Fatal(err) } @@ -935,7 +1025,7 @@ func TestExpiredBuildBlock(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: 
coreGenBlk.Timestamp(), } - statelessBlock, err := statelessblock.BuildUnsignedApricot( + statelessBlock, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), coreBlk.Timestamp(), 0, @@ -945,7 +1035,7 @@ func TestExpiredBuildBlock(t *testing.T) { t.Fatal(err) } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -955,7 +1045,7 @@ func TestExpiredBuildBlock(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -968,20 +1058,20 @@ func TestExpiredBuildBlock(t *testing.T) { proVM.Clock.Set(statelessBlock.Timestamp()) - parsedBlock, err := proVM.ParseBlock(statelessBlock.Bytes()) + parsedBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) if err != nil { t.Fatal(err) } - if err := parsedBlock.Verify(); err != nil { + if err := parsedBlock.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(parsedBlock.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), parsedBlock.ID()); err != nil { t.Fatal(err) } - coreVM.BuildBlockF = func() (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { t.Fatal("unexpectedly called build block") panic("unexpectedly called build block") } @@ -989,7 +1079,7 @@ func TestExpiredBuildBlock(t *testing.T) { // The first notification will be read from the consensus engine <-toEngine - if _, err := proVM.BuildBlock(); err == nil { + if _, err := proVM.BuildBlock(context.Background()); err == nil { t.Fatal("build block when the proposer window hasn't started") } @@ -1006,15 +1096,15 @@ type wrappedBlock struct { verified bool } -func (b *wrappedBlock) 
Accept() error { +func (b *wrappedBlock) Accept(ctx context.Context) error { if !b.verified { return errUnverifiedBlock } - return b.Block.Accept() + return b.Block.Accept(ctx) } -func (b *wrappedBlock) Verify() error { - if err := b.Block.Verify(); err != nil { +func (b *wrappedBlock) Verify(ctx context.Context) error { + if err := b.Block.Verify(ctx); err != nil { return err } b.verified = true @@ -1040,7 +1130,7 @@ func TestInnerBlockDeduplication(t *testing.T) { coreBlk1 := &wrappedBlock{ Block: coreBlk, } - statelessBlock0, err := statelessblock.BuildUnsignedApricot( + statelessBlock0, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), coreBlk.Timestamp(), 0, @@ -1049,7 +1139,7 @@ func TestInnerBlockDeduplication(t *testing.T) { if err != nil { t.Fatal(err) } - statelessBlock1, err := statelessblock.BuildUnsignedApricot( + statelessBlock1, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), coreBlk.Timestamp(), 1, @@ -1059,7 +1149,7 @@ func TestInnerBlockDeduplication(t *testing.T) { t.Fatal(err) } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -1069,7 +1159,7 @@ func TestInnerBlockDeduplication(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -1080,20 +1170,20 @@ func TestInnerBlockDeduplication(t *testing.T) { } } - parsedBlock0, err := proVM.ParseBlock(statelessBlock0.Bytes()) + parsedBlock0, err := proVM.ParseBlock(context.Background(), statelessBlock0.Bytes()) if err != nil { t.Fatal(err) } - if err := parsedBlock0.Verify(); err != nil { + if err := parsedBlock0.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(parsedBlock0.ID()); 
err != nil { + if err := proVM.SetPreference(context.Background(), parsedBlock0.ID()); err != nil { t.Fatal(err) } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -1103,7 +1193,7 @@ func TestInnerBlockDeduplication(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -1114,20 +1204,20 @@ func TestInnerBlockDeduplication(t *testing.T) { } } - parsedBlock1, err := proVM.ParseBlock(statelessBlock1.Bytes()) + parsedBlock1, err := proVM.ParseBlock(context.Background(), statelessBlock1.Bytes()) if err != nil { t.Fatal(err) } - if err := parsedBlock1.Verify(); err != nil { + if err := parsedBlock1.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(parsedBlock1.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), parsedBlock1.ID()); err != nil { t.Fatal(err) } - if err := parsedBlock1.Accept(); err != nil { + if err := parsedBlock1.Accept(context.Background()); err != nil { t.Fatal(err) } } @@ -1146,18 +1236,25 @@ func TestInnerVMRollback(t *testing.T) { valState := &validators.TestState{ T: t, } - valState.GetCurrentHeightF = func() (uint64, error) { return defaultPChainHeight, nil } - valState.GetValidatorSetF = func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - return map[ids.NodeID]uint64{ - {1}: 100, + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return defaultPChainHeight, nil + } + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + {1}: { + NodeID: 
ids.NodeID{1}, + Weight: 100, + }, }, nil } coreVM := &block.TestVM{} coreVM.T = t - coreVM.LastAcceptedF = func() (ids.ID, error) { return coreGenBlk.ID(), nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { + return coreGenBlk.ID(), nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -1165,7 +1262,7 @@ func TestInnerVMRollback(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -1176,11 +1273,10 @@ func TestInnerVMRollback(t *testing.T) { ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - ctx.StakingCertLeaf = pTestCert.Leaf - ctx.StakingLeafSigner = pTestCert.PrivateKey.(crypto.Signer) ctx.ValidatorState = valState coreVM.InitializeF = func( + context.Context, *snow.Context, manager.Manager, []byte, @@ -1195,17 +1291,35 @@ func TestInnerVMRollback(t *testing.T) { dbManager := manager.NewMemDB(version.Semantic1_0_0) - proVM := New(coreVM, time.Time{}, 0, time.Time{}) + proVM := New( + coreVM, + time.Time{}, + 0, + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, + ) - if err := proVM.Initialize(ctx, dbManager, nil, nil, nil, nil, nil, nil); err != nil { + err := proVM.Initialize( + context.Background(), + ctx, + dbManager, + nil, + nil, + nil, + nil, + nil, + nil, + ) + if err != nil { t.Fatalf("failed to initialize proposerVM with %s", err) } - if err := proVM.SetState(snow.NormalOp); err != nil { + if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(coreGenBlk.IDV); err != nil { + if err := proVM.SetPreference(context.Background(), 
coreGenBlk.IDV); err != nil { t.Fatal(err) } @@ -1219,7 +1333,7 @@ func TestInnerVMRollback(t *testing.T) { HeightV: coreGenBlk.Height() + 1, TimestampV: coreGenBlk.Timestamp(), } - statelessBlock, err := statelessblock.BuildUnsignedApricot( + statelessBlock, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), coreBlk.Timestamp(), 0, @@ -1229,7 +1343,7 @@ func TestInnerVMRollback(t *testing.T) { t.Fatal(err) } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -1239,7 +1353,7 @@ func TestInnerVMRollback(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -1252,7 +1366,7 @@ func TestInnerVMRollback(t *testing.T) { proVM.Clock.Set(statelessBlock.Timestamp()) - parsedBlock, err := proVM.ParseBlock(statelessBlock.Bytes()) + parsedBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) if err != nil { t.Fatal(err) } @@ -1261,19 +1375,19 @@ func TestInnerVMRollback(t *testing.T) { t.Fatalf("expected status to be %s but was %s", choices.Processing, status) } - if err := parsedBlock.Verify(); err != nil { + if err := parsedBlock.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(parsedBlock.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), parsedBlock.ID()); err != nil { t.Fatal(err) } - if err := parsedBlock.Accept(); err != nil { + if err := parsedBlock.Accept(context.Background()); err != nil { t.Fatal(err) } - fetchedBlock, err := proVM.GetBlock(parsedBlock.ID()) + fetchedBlock, err := proVM.GetBlock(context.Background(), parsedBlock.ID()) if err != nil { t.Fatal(err) } @@ -1286,13 +1400,31 @@ func 
TestInnerVMRollback(t *testing.T) { coreBlk.StatusV = choices.Processing - proVM = New(coreVM, time.Time{}, 0, time.Time{}) + proVM = New( + coreVM, + time.Time{}, + 0, + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, + ) - if err := proVM.Initialize(ctx, dbManager, nil, nil, nil, nil, nil, nil); err != nil { + err = proVM.Initialize( + context.Background(), + ctx, + dbManager, + nil, + nil, + nil, + nil, + nil, + nil, + ) + if err != nil { t.Fatalf("failed to initialize proposerVM with %s", err) } - lastAcceptedID, err := proVM.LastAccepted() + lastAcceptedID, err := proVM.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -1301,7 +1433,7 @@ func TestInnerVMRollback(t *testing.T) { t.Fatalf("failed to roll back the VM to the last accepted block") } - parsedBlock, err = proVM.ParseBlock(statelessBlock.Bytes()) + parsedBlock, err = proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) if err != nil { t.Fatal(err) } @@ -1314,9 +1446,12 @@ func TestInnerVMRollback(t *testing.T) { func TestBuildBlockDuringWindow(t *testing.T) { coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks - valState.GetValidatorSetF = func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - return map[ids.NodeID]uint64{ - proVM.ctx.NodeID: 10, + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + proVM.ctx.NodeID: { + NodeID: proVM.ctx.NodeID, + Weight: 10, + }, }, nil } @@ -1340,7 +1475,7 @@ func TestBuildBlockDuringWindow(t *testing.T) { HeightV: coreBlk0.Height() + 1, TimestampV: coreBlk0.Timestamp(), } - statelessBlock0, err := statelessblock.BuildUnsignedApricot( + statelessBlock0, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), coreBlk0.Timestamp(), 0, @@ -1350,7 +1485,7 @@ func TestBuildBlockDuringWindow(t *testing.T) { t.Fatal(err) } - 
coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreGenBlk.ID(): return coreGenBlk, nil @@ -1362,7 +1497,7 @@ func TestBuildBlockDuringWindow(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -1377,41 +1512,41 @@ func TestBuildBlockDuringWindow(t *testing.T) { proVM.Clock.Set(statelessBlock0.Timestamp()) - statefulBlock0, err := proVM.ParseBlock(statelessBlock0.Bytes()) + statefulBlock0, err := proVM.ParseBlock(context.Background(), statelessBlock0.Bytes()) if err != nil { t.Fatal(err) } - if err := statefulBlock0.Verify(); err != nil { + if err := statefulBlock0.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(statefulBlock0.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), statefulBlock0.ID()); err != nil { t.Fatal(err) } - coreVM.BuildBlockF = func() (snowman.Block, error) { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - statefulBlock1, err := proVM.BuildBlock() + statefulBlock1, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := statefulBlock1.Verify(); err != nil { + if err := statefulBlock1.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := proVM.SetPreference(statefulBlock1.ID()); err != nil { + if err := proVM.SetPreference(context.Background(), statefulBlock1.ID()); err != nil { t.Fatal(err) } - if err := statefulBlock0.Accept(); err != nil { + if err := statefulBlock0.Accept(context.Background()); err != nil { t.Fatal(err) } - if err := statefulBlock1.Accept(); err != nil { + if err := statefulBlock1.Accept(context.Background()); err != nil { t.Fatal(err) } 
} @@ -1419,11 +1554,11 @@ func TestBuildBlockDuringWindow(t *testing.T) { // Ensure that Accepting a PostForkBlock (A) containing core block (X) causes // core block (Y) and (Z) to also be rejected. // -// G -// / \ -// A(X) B(Y) -// | -// C(Z) +// G +// / \ +// A(X) B(Y) +// | +// C(Z) func TestTwoForks_OneIsAccepted(t *testing.T) { forkTime := time.Unix(0, 0) coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -1440,13 +1575,15 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { TimestampV: gBlock.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } - aBlock, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } + aBlock, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("proposerVM could not build block due to %s", err) } coreVM.BuildBlockF = nil - if err := aBlock.Verify(); err != nil { + if err := aBlock.Verify(context.Background()); err != nil { t.Fatalf("could not verify valid block due to %s", err) } @@ -1462,7 +1599,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { TimestampV: gBlock.Timestamp(), } - ySlb, err := statelessblock.BuildUnsignedApricot( + ySlb, err := statelessblock.BuildUnsigned( gBlock.ID(), gBlock.Timestamp(), defaultPChainHeight, @@ -1481,7 +1618,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { }, } - if err := bBlock.Verify(); err != nil { + if err := bBlock.Verify(context.Background()); err != nil { t.Fatalf("could not verify valid block due to %s", err) } @@ -1497,17 +1634,19 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { TimestampV: yBlock.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return zBlock, nil } - if err := proVM.SetPreference(bBlock.ID()); err != nil { + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return zBlock, nil + } + if err := proVM.SetPreference(context.Background(), bBlock.ID()); err != nil { t.Fatal(err) } - cBlock, 
err := proVM.BuildBlock() + cBlock, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("proposerVM could not build block due to %s", err) } coreVM.BuildBlockF = nil - if err := cBlock.Verify(); err != nil { + if err := cBlock.Verify(context.Background()); err != nil { t.Fatalf("could not verify valid block due to %s", err) } @@ -1522,7 +1661,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { } // accept A - if err := aBlock.Accept(); err != nil { + if err := aBlock.Accept(context.Background()); err != nil { t.Fatalf("could not accept valid block due to %s", err) } @@ -1564,16 +1703,18 @@ func TestTooFarAdvanced(t *testing.T) { TimestampV: xBlock.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } - aBlock, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } + aBlock, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatalf("proposerVM could not build block due to %s", err) } - if err := aBlock.Verify(); err != nil { + if err := aBlock.Verify(context.Background()); err != nil { t.Fatalf("could not verify valid block due to %s", err) } - ySlb, err := statelessblock.BuildUnsignedApricot( + ySlb, err := statelessblock.BuildUnsigned( aBlock.ID(), aBlock.Timestamp().Add(maxSkew), defaultPChainHeight, @@ -1592,11 +1733,11 @@ func TestTooFarAdvanced(t *testing.T) { }, } - if err = bBlock.Verify(); err != errProposerWindowNotStarted { + if err := bBlock.Verify(context.Background()); err != errProposerWindowNotStarted { t.Fatal("should have errored errProposerWindowNotStarted") } - ySlb, err = statelessblock.BuildUnsignedApricot( + ySlb, err = statelessblock.BuildUnsigned( aBlock.ID(), aBlock.Timestamp().Add(proposer.MaxDelay), defaultPChainHeight, @@ -1616,7 +1757,7 @@ func TestTooFarAdvanced(t *testing.T) { }, } - if err = bBlock.Verify(); err != errTimeTooAdvanced { + if err := bBlock.Verify(context.Background()); err != 
errTimeTooAdvanced { t.Fatal("should have errored errTimeTooAdvanced") } } @@ -1624,11 +1765,11 @@ func TestTooFarAdvanced(t *testing.T) { // Ensure that Accepting a PostForkOption (B) causes both the other option and // the core block in the other option to be rejected. // -// G -// | -// A(X) -// /====\ -// B(...) C(...) +// G +// | +// A(X) +// /====\ +// B(...) C(...) // // B(...) is B(X.opts[0]) // B(...) is C(X.opts[1]) @@ -1669,8 +1810,10 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } - aBlockIntf, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } + aBlockIntf, err := proVM.BuildBlock(context.Background()) if err != nil { t.Fatal("could not build post fork oracle block") } @@ -1680,28 +1823,28 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { t.Fatal("expected post fork block") } - opts, err := aBlock.Options() + opts, err := aBlock.Options(context.Background()) if err != nil { t.Fatal("could not retrieve options from post fork oracle block") } - if err := aBlock.Verify(); err != nil { + if err := aBlock.Verify(context.Background()); err != nil { t.Fatal(err) } bBlock := opts[0] - if err := bBlock.Verify(); err != nil { + if err := bBlock.Verify(context.Background()); err != nil { t.Fatal(err) } cBlock := opts[1] - if err := cBlock.Verify(); err != nil { + if err := cBlock.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := aBlock.Accept(); err != nil { + if err := aBlock.Accept(context.Background()); err != nil { t.Fatal(err) } - if err := bBlock.Accept(); err != nil { + if err := bBlock.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -1711,7 +1854,7 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { } // the other post-fork option should also be rejected - if err := cBlock.Reject(); err != nil { + if err := cBlock.Reject(context.Background()); err != nil { 
t.Fatal("the post-fork option block should have be rejected") } @@ -1738,8 +1881,10 @@ func TestLaggedPChainHeight(t *testing.T) { TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return innerBlock, nil } - blockIntf, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return innerBlock, nil + } + blockIntf, err := proVM.BuildBlock(context.Background()) require.NoError(err) block, ok := blockIntf.(*postForkBlock) @@ -1777,25 +1922,29 @@ func TestRejectedHeightNotIndexed(t *testing.T) { }, }, TestHeightIndexedVM: block.TestHeightIndexedVM{ - T: t, - VerifyHeightIndexF: func() error { return nil }, - GetBlockIDAtHeightF: func(height uint64) (ids.ID, error) { + T: t, + VerifyHeightIndexF: func(context.Context) error { + return nil + }, + GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { if height >= uint64(len(coreHeights)) { - return ids.ID{}, errors.New("too high") + return ids.ID{}, errTooHigh } return coreHeights[height], nil }, }, } - coreVM.InitializeF = func(*snow.Context, manager.Manager, + coreVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { return nil } - coreVM.LastAcceptedF = func() (ids.ID, error) { return coreGenBlk.ID(), nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { + return coreGenBlk.ID(), nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreGenBlk.ID(): return coreGenBlk, nil @@ -1803,7 +1952,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): 
return coreGenBlk, nil @@ -1812,45 +1961,76 @@ func TestRejectedHeightNotIndexed(t *testing.T) { } } - proVM := New(coreVM, time.Time{}, 0, time.Time{}) + proVM := New( + coreVM, + time.Time{}, + 0, + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, + ) valState := &validators.TestState{ T: t, } - valState.GetMinimumHeightF = func() (uint64, error) { return coreGenBlk.HeightV, nil } - valState.GetCurrentHeightF = func() (uint64, error) { return defaultPChainHeight, nil } - valState.GetValidatorSetF = func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - res := make(map[ids.NodeID]uint64) - res[proVM.ctx.NodeID] = uint64(10) - res[ids.NodeID{1}] = uint64(5) - res[ids.NodeID{2}] = uint64(6) - res[ids.NodeID{3}] = uint64(7) - return res, nil + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return coreGenBlk.HeightV, nil + } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return defaultPChainHeight, nil + } + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + proVM.ctx.NodeID: { + NodeID: proVM.ctx.NodeID, + Weight: 10, + }, + {1}: { + NodeID: ids.NodeID{1}, + Weight: 5, + }, + {2}: { + NodeID: ids.NodeID{2}, + Weight: 6, + }, + {3}: { + NodeID: ids.NodeID{3}, + Weight: 7, + }, + }, nil } ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - ctx.StakingCertLeaf = pTestCert.Leaf - ctx.StakingLeafSigner = pTestCert.PrivateKey.(crypto.Signer) ctx.ValidatorState = valState dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) // make sure that DBs are compressed correctly dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - err := proVM.Initialize(ctx, dummyDBManager, initialState, nil, nil, nil, nil, nil) + err := proVM.Initialize( + context.Background(), + ctx, + dummyDBManager, + initialState, + nil, + 
nil, + nil, + nil, + nil, + ) require.NoError(err) // Initialize shouldn't be called again coreVM.InitializeF = nil - err = proVM.SetState(snow.NormalOp) + err = proVM.SetState(context.Background(), snow.NormalOp) require.NoError(err) - err = proVM.SetPreference(coreGenBlk.IDV) + err = proVM.SetPreference(context.Background(), coreGenBlk.IDV) require.NoError(err) ctx.Lock.Lock() - for proVM.VerifyHeightIndex() != nil { + for proVM.VerifyHeightIndex(context.Background()) != nil { ctx.Lock.Unlock() time.Sleep(time.Millisecond) ctx.Lock.Lock() @@ -1869,12 +2049,14 @@ func TestRejectedHeightNotIndexed(t *testing.T) { TimestampV: coreGenBlk.Timestamp(), } - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } - aBlock, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } + aBlock, err := proVM.BuildBlock(context.Background()) require.NoError(err) coreVM.BuildBlockF = nil - err = aBlock.Verify() + err = aBlock.Verify(context.Background()) require.NoError(err) // use a different way to construct inner block Y and outer block B @@ -1889,7 +2071,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { TimestampV: coreGenBlk.Timestamp(), } - ySlb, err := statelessblock.BuildUnsignedApricot( + ySlb, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), coreGenBlk.Timestamp(), defaultPChainHeight, @@ -1906,23 +2088,23 @@ func TestRejectedHeightNotIndexed(t *testing.T) { }, } - err = bBlock.Verify() + err = bBlock.Verify(context.Background()) require.NoError(err) // accept A - err = aBlock.Accept() + err = aBlock.Accept(context.Background()) require.NoError(err) coreHeights = append(coreHeights, xBlock.ID()) - blkID, err := proVM.GetBlockIDAtHeight(aBlock.Height()) + blkID, err := proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) require.NoError(err) require.Equal(aBlock.ID(), blkID) // reject B - err = bBlock.Reject() + err = bBlock.Reject(context.Background()) 
require.NoError(err) - blkID, err = proVM.GetBlockIDAtHeight(aBlock.Height()) + blkID, err = proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) require.NoError(err) require.Equal(aBlock.ID(), blkID) } @@ -1955,25 +2137,29 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { }, }, TestHeightIndexedVM: block.TestHeightIndexedVM{ - T: t, - VerifyHeightIndexF: func() error { return nil }, - GetBlockIDAtHeightF: func(height uint64) (ids.ID, error) { + T: t, + VerifyHeightIndexF: func(context.Context) error { + return nil + }, + GetBlockIDAtHeightF: func(_ context.Context, height uint64) (ids.ID, error) { if height >= uint64(len(coreHeights)) { - return ids.ID{}, errors.New("too high") + return ids.ID{}, errTooHigh } return coreHeights[height], nil }, }, } - coreVM.InitializeF = func(*snow.Context, manager.Manager, + coreVM.InitializeF = func(context.Context, *snow.Context, manager.Manager, []byte, []byte, []byte, chan<- common.Message, []*common.Fx, common.AppSender, ) error { return nil } - coreVM.LastAcceptedF = func() (ids.ID, error) { return coreGenBlk.ID(), nil } - coreVM.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { + return coreGenBlk.ID(), nil + } + coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreGenBlk.ID(): return coreGenBlk, nil @@ -1981,7 +2167,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { return nil, errUnknownBlock } } - coreVM.ParseBlockF = func(b []byte) (snowman.Block, error) { + coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { case bytes.Equal(b, coreGenBlk.Bytes()): return coreGenBlk, nil @@ -1990,45 +2176,76 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { } } - proVM := New(coreVM, time.Time{}, 0, time.Time{}) + proVM := New( + coreVM, + time.Time{}, + 0, + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + 
pTestCert.Leaf, + ) valState := &validators.TestState{ T: t, } - valState.GetMinimumHeightF = func() (uint64, error) { return coreGenBlk.HeightV, nil } - valState.GetCurrentHeightF = func() (uint64, error) { return defaultPChainHeight, nil } - valState.GetValidatorSetF = func(height uint64, subnetID ids.ID) (map[ids.NodeID]uint64, error) { - res := make(map[ids.NodeID]uint64) - res[proVM.ctx.NodeID] = uint64(10) - res[ids.NodeID{1}] = uint64(5) - res[ids.NodeID{2}] = uint64(6) - res[ids.NodeID{3}] = uint64(7) - return res, nil + valState.GetMinimumHeightF = func(context.Context) (uint64, error) { + return coreGenBlk.HeightV, nil + } + valState.GetCurrentHeightF = func(context.Context) (uint64, error) { + return defaultPChainHeight, nil + } + valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + proVM.ctx.NodeID: { + NodeID: proVM.ctx.NodeID, + Weight: 10, + }, + {1}: { + NodeID: ids.NodeID{1}, + Weight: 5, + }, + {2}: { + NodeID: ids.NodeID{2}, + Weight: 6, + }, + {3}: { + NodeID: ids.NodeID{3}, + Weight: 7, + }, + }, nil } ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - ctx.StakingCertLeaf = pTestCert.Leaf - ctx.StakingLeafSigner = pTestCert.PrivateKey.(crypto.Signer) ctx.ValidatorState = valState dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) // make sure that DBs are compressed correctly dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - err := proVM.Initialize(ctx, dummyDBManager, initialState, nil, nil, nil, nil, nil) + err := proVM.Initialize( + context.Background(), + ctx, + dummyDBManager, + initialState, + nil, + nil, + nil, + nil, + nil, + ) require.NoError(err) // Initialize shouldn't be called again coreVM.InitializeF = nil - err = proVM.SetState(snow.NormalOp) + err = proVM.SetState(context.Background(), snow.NormalOp) require.NoError(err) - err = 
proVM.SetPreference(coreGenBlk.IDV) + err = proVM.SetPreference(context.Background(), coreGenBlk.IDV) require.NoError(err) ctx.Lock.Lock() - for proVM.VerifyHeightIndex() != nil { + for proVM.VerifyHeightIndex(context.Background()) != nil { ctx.Lock.Unlock() time.Sleep(time.Millisecond) ctx.Lock.Lock() @@ -2068,50 +2285,52 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { }, } - coreVM.BuildBlockF = func() (snowman.Block, error) { return xBlock, nil } - aBlockIntf, err := proVM.BuildBlock() + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } + aBlockIntf, err := proVM.BuildBlock(context.Background()) require.NoError(err) aBlock, ok := aBlockIntf.(*postForkBlock) require.True(ok) - opts, err := aBlock.Options() + opts, err := aBlock.Options(context.Background()) require.NoError(err) - err = aBlock.Verify() + err = aBlock.Verify(context.Background()) require.NoError(err) bBlock := opts[0] - err = bBlock.Verify() + err = bBlock.Verify(context.Background()) require.NoError(err) cBlock := opts[1] - err = cBlock.Verify() + err = cBlock.Verify(context.Background()) require.NoError(err) // accept A - err = aBlock.Accept() + err = aBlock.Accept(context.Background()) require.NoError(err) coreHeights = append(coreHeights, xBlock.ID()) - blkID, err := proVM.GetBlockIDAtHeight(aBlock.Height()) + blkID, err := proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) require.NoError(err) require.Equal(aBlock.ID(), blkID) // accept B - err = bBlock.Accept() + err = bBlock.Accept(context.Background()) require.NoError(err) coreHeights = append(coreHeights, xBlock.opts[0].ID()) - blkID, err = proVM.GetBlockIDAtHeight(bBlock.Height()) + blkID, err = proVM.GetBlockIDAtHeight(context.Background(), bBlock.Height()) require.NoError(err) require.Equal(bBlock.ID(), blkID) // reject C - err = cBlock.Reject() + err = cBlock.Reject(context.Background()) require.NoError(err) - blkID, err = proVM.GetBlockIDAtHeight(cBlock.Height()) + 
blkID, err = proVM.GetBlockIDAtHeight(context.Background(), cBlock.Height()) require.NoError(err) require.Equal(bBlock.ID(), blkID) } @@ -2127,7 +2346,9 @@ func TestVMInnerBlkCache(t *testing.T) { innerVM, time.Time{}, // fork is active 0, // minimum P-Chain height - time.Time{}, // fork is active + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, ) dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) @@ -2143,14 +2364,14 @@ func TestVMInnerBlkCache(t *testing.T) { gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), ).Return(nil) ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - ctx.StakingCertLeaf = pTestCert.Leaf - ctx.StakingLeafSigner = pTestCert.PrivateKey.(crypto.Signer) err := vm.Initialize( + context.Background(), ctx, dummyDBManager, nil, @@ -2166,14 +2387,14 @@ func TestVMInnerBlkCache(t *testing.T) { // Create a block near the tip (0). blkNearTipInnerBytes := []byte{1} - blkNearTip, err := statelessblock.BuildBanff( - ids.GenerateTestID(), // parent - time.Time{}, // timestamp - 1, // pChainHeight, - vm.ctx.StakingCertLeaf, // cert - blkNearTipInnerBytes, // inner blk bytes - vm.ctx.ChainID, // chain ID - vm.ctx.StakingLeafSigner, // key + blkNearTip, err := statelessblock.Build( + ids.GenerateTestID(), // parent + time.Time{}, // timestamp + 1, // pChainHeight, + vm.stakingCertLeaf, // cert + blkNearTipInnerBytes, // inner blk bytes + vm.ctx.ChainID, // chain ID + vm.stakingLeafSigner, // key ) require.NoError(err) @@ -2183,8 +2404,8 @@ func TestVMInnerBlkCache(t *testing.T) { // We will ask the inner VM to parse. 
mockInnerBlkNearTip := snowman.NewMockBlock(ctrl) mockInnerBlkNearTip.EXPECT().Height().Return(uint64(1)).Times(2) - innerVM.EXPECT().ParseBlock(blkNearTipInnerBytes).Return(mockInnerBlkNearTip, nil).Times(2) - _, err = vm.ParseBlock(blkNearTip.Bytes()) + innerVM.EXPECT().ParseBlock(gomock.Any(), blkNearTipInnerBytes).Return(mockInnerBlkNearTip, nil).Times(2) + _, err = vm.ParseBlock(context.Background(), blkNearTip.Bytes()) require.NoError(err) // Block should now be in cache because it's a post-fork block @@ -2192,7 +2413,7 @@ func TestVMInnerBlkCache(t *testing.T) { gotBlk, ok := vm.innerBlkCache.Get(blkNearTip.ID()) require.True(ok) require.Equal(mockInnerBlkNearTip, gotBlk) - require.Equal(uint64(0), vm.lastAcceptedHeight) + require.EqualValues(0, vm.lastAcceptedHeight) // Clear the cache vm.innerBlkCache.Flush() @@ -2202,9 +2423,236 @@ func TestVMInnerBlkCache(t *testing.T) { // Parse the block again. This time it shouldn't be cached // because it's not close to the tip. - _, err = vm.ParseBlock(blkNearTip.Bytes()) + _, err = vm.ParseBlock(context.Background(), blkNearTip.Bytes()) require.NoError(err) _, ok = vm.innerBlkCache.Get(blkNearTip.ID()) require.False(ok) } + +func TestVMInnerBlkCacheDeduplicationRegression(t *testing.T) { + require := require.New(t) + forkTime := time.Unix(0, 0) + coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) + + // create pre-fork block X and post-fork block A + xBlock := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: ids.GenerateTestID(), + StatusV: choices.Processing, + }, + BytesV: []byte{1}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, + TimestampV: gBlock.Timestamp(), + } + + coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { + return xBlock, nil + } + aBlock, err := proVM.BuildBlock(context.Background()) + require.NoError(err) + coreVM.BuildBlockF = nil + + bStatelessBlock, err := statelessblock.BuildUnsigned( + gBlock.ID(), + gBlock.Timestamp(), + 
defaultPChainHeight, + xBlock.Bytes(), + ) + require.NoError(err) + + xBlockCopy := &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: xBlock.IDV, + StatusV: choices.Processing, + }, + BytesV: []byte{1}, + ParentV: gBlock.ID(), + HeightV: gBlock.Height() + 1, + TimestampV: gBlock.Timestamp(), + } + coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { + return xBlockCopy, nil + } + + bBlockBytes := bStatelessBlock.Bytes() + bBlock, err := proVM.ParseBlock(context.Background(), bBlockBytes) + require.NoError(err) + + err = aBlock.Verify(context.Background()) + require.NoError(err) + + err = bBlock.Verify(context.Background()) + require.NoError(err) + + err = aBlock.Accept(context.Background()) + require.NoError(err) + + err = bBlock.Reject(context.Background()) + require.NoError(err) + + require.Equal( + choices.Accepted, + aBlock.(*postForkBlock).innerBlk.Status(), + ) + + require.Equal( + choices.Accepted, + bBlock.(*postForkBlock).innerBlk.Status(), + ) + + cachedXBlock, ok := proVM.innerBlkCache.Get(bBlock.ID()) + require.True(ok) + require.Equal( + choices.Accepted, + cachedXBlock.Status(), + ) +} + +type blockWithVerifyContext struct { + *snowman.MockBlock + *mocks.MockWithVerifyContext +} + +// Ensures that we call [VerifyWithContext] rather than [Verify] on blocks that +// implement [block.WithVerifyContext] and that returns true for +// [ShouldVerifyWithContext]. 
+func TestVM_VerifyBlockWithContext(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Create a VM + innerVM := mocks.NewMockChainVM(ctrl) + vm := New( + innerVM, + time.Time{}, // fork is active + 0, // minimum P-Chain height + DefaultMinBlockDelay, + pTestCert.PrivateKey.(crypto.Signer), + pTestCert.Leaf, + ) + + dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) + // make sure that DBs are compressed correctly + dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) + + innerVM.EXPECT().Initialize( + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + gomock.Any(), + ).Return(nil) + + snowCtx := snow.DefaultContextTest() + snowCtx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) + + err := vm.Initialize( + context.Background(), + snowCtx, + dummyDBManager, + nil, + nil, + nil, + nil, + nil, + nil, + ) + require.NoError(err) + + { + pChainHeight := uint64(0) + innerBlk := blockWithVerifyContext{ + MockBlock: snowman.NewMockBlock(ctrl), + MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + } + innerBlk.MockWithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(true, nil).Times(2) + innerBlk.MockWithVerifyContext.EXPECT().VerifyWithContext(context.Background(), + &block.Context{ + PChainHeight: pChainHeight, + }, + ).Return(nil) + innerBlk.MockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + innerBlk.MockBlock.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() + + blk := NewMockPostForkBlock(ctrl) + blk.EXPECT().getInnerBlk().Return(innerBlk).AnyTimes() + blkID := ids.GenerateTestID() + blk.EXPECT().ID().Return(blkID).AnyTimes() + + err = vm.verifyAndRecordInnerBlk( + context.Background(), + &block.Context{ + PChainHeight: pChainHeight, + }, + blk, + ) + require.NoError(err) + + // Call VerifyWithContext again but with a different P-Chain height + 
blk.EXPECT().setInnerBlk(innerBlk).AnyTimes() + pChainHeight++ + innerBlk.MockWithVerifyContext.EXPECT().VerifyWithContext(context.Background(), + &block.Context{ + PChainHeight: pChainHeight, + }, + ).Return(nil) + + err = vm.verifyAndRecordInnerBlk( + context.Background(), + &block.Context{ + PChainHeight: pChainHeight, + }, + blk, + ) + require.NoError(err) + } + + { + // Ensure we call Verify on a block that returns + // false for ShouldVerifyWithContext + innerBlk := blockWithVerifyContext{ + MockBlock: snowman.NewMockBlock(ctrl), + MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + } + innerBlk.MockWithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(false, nil) + innerBlk.MockBlock.EXPECT().Verify(gomock.Any()).Return(nil) + innerBlk.MockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + innerBlk.MockBlock.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() + blk := NewMockPostForkBlock(ctrl) + blk.EXPECT().getInnerBlk().Return(innerBlk).AnyTimes() + blkID := ids.GenerateTestID() + blk.EXPECT().ID().Return(blkID).AnyTimes() + err = vm.verifyAndRecordInnerBlk( + context.Background(), + &block.Context{ + PChainHeight: 1, + }, + blk, + ) + require.NoError(err) + } + + { + // Ensure we call Verify on a block that doesn't have a valid context + innerBlk := blockWithVerifyContext{ + MockBlock: snowman.NewMockBlock(ctrl), + MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + } + innerBlk.MockBlock.EXPECT().Verify(gomock.Any()).Return(nil) + innerBlk.MockBlock.EXPECT().Parent().Return(ids.GenerateTestID()).AnyTimes() + innerBlk.MockBlock.EXPECT().ID().Return(ids.GenerateTestID()).AnyTimes() + blk := NewMockPostForkBlock(ctrl) + blk.EXPECT().getInnerBlk().Return(innerBlk).AnyTimes() + blkID := ids.GenerateTestID() + blk.EXPECT().ID().Return(blkID).AnyTimes() + err = vm.verifyAndRecordInnerBlk(context.Background(), nil, blk) + require.NoError(err) + } +} diff --git a/avalanchego/vms/registry/mock_vm_getter.go 
b/avalanchego/vms/registry/mock_vm_getter.go index f8affd2d..c6b4e1dd 100644 --- a/avalanchego/vms/registry/mock_vm_getter.go +++ b/avalanchego/vms/registry/mock_vm_getter.go @@ -1,8 +1,8 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Code generated by MockGen. DO NOT EDIT. -// Source: vms/registry/vm_getter.go +// Source: github.com/ava-labs/avalanchego/vms/registry (interfaces: VMGetter) // Package registry is a generated GoMock package. package registry diff --git a/avalanchego/vms/registry/mock_vm_registerer.go b/avalanchego/vms/registry/mock_vm_registerer.go index 2d88894e..f068d77a 100644 --- a/avalanchego/vms/registry/mock_vm_registerer.go +++ b/avalanchego/vms/registry/mock_vm_registerer.go @@ -1,13 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Code generated by MockGen. DO NOT EDIT. -// Source: vms/registry/vm_registerer.go +// Source: github.com/ava-labs/avalanchego/vms/registry (interfaces: VMRegisterer) -// Package mock_registry is a generated GoMock package. +// Package registry is a generated GoMock package. package registry import ( + context "context" reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" @@ -39,66 +40,29 @@ func (m *MockVMRegisterer) EXPECT() *MockVMRegistererMockRecorder { } // Register mocks base method. -func (m *MockVMRegisterer) Register(arg0 ids.ID, arg1 vms.Factory) error { +func (m *MockVMRegisterer) Register(arg0 context.Context, arg1 ids.ID, arg2 vms.Factory) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Register", arg0, arg1) + ret := m.ctrl.Call(m, "Register", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // Register indicates an expected call of Register. 
-func (mr *MockVMRegistererMockRecorder) Register(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockVMRegistererMockRecorder) Register(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockVMRegisterer)(nil).Register), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*MockVMRegisterer)(nil).Register), arg0, arg1, arg2) } // RegisterWithReadLock mocks base method. -func (m *MockVMRegisterer) RegisterWithReadLock(arg0 ids.ID, arg1 vms.Factory) error { +func (m *MockVMRegisterer) RegisterWithReadLock(arg0 context.Context, arg1 ids.ID, arg2 vms.Factory) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RegisterWithReadLock", arg0, arg1) + ret := m.ctrl.Call(m, "RegisterWithReadLock", arg0, arg1, arg2) ret0, _ := ret[0].(error) return ret0 } // RegisterWithReadLock indicates an expected call of RegisterWithReadLock. -func (mr *MockVMRegistererMockRecorder) RegisterWithReadLock(arg0, arg1 interface{}) *gomock.Call { +func (mr *MockVMRegistererMockRecorder) RegisterWithReadLock(arg0, arg1, arg2 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterWithReadLock", reflect.TypeOf((*MockVMRegisterer)(nil).RegisterWithReadLock), arg0, arg1) -} - -// Mockregisterer is a mock of registerer interface. -type Mockregisterer struct { - ctrl *gomock.Controller - recorder *MockregistererMockRecorder -} - -// MockregistererMockRecorder is the mock recorder for Mockregisterer. -type MockregistererMockRecorder struct { - mock *Mockregisterer -} - -// NewMockregisterer creates a new mock instance. -func NewMockregisterer(ctrl *gomock.Controller) *Mockregisterer { - mock := &Mockregisterer{ctrl: ctrl} - mock.recorder = &MockregistererMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. 
-func (m *Mockregisterer) EXPECT() *MockregistererMockRecorder { - return m.recorder -} - -// Register mocks base method. -func (m *Mockregisterer) Register(arg0 ids.ID, arg1 vms.Factory) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Register", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Register indicates an expected call of Register. -func (mr *MockregistererMockRecorder) Register(arg0, arg1 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Register", reflect.TypeOf((*Mockregisterer)(nil).Register), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterWithReadLock", reflect.TypeOf((*MockVMRegisterer)(nil).RegisterWithReadLock), arg0, arg1, arg2) } diff --git a/avalanchego/vms/registry/mock_vm_registry.go b/avalanchego/vms/registry/mock_vm_registry.go index 50b1009a..6d6dd047 100644 --- a/avalanchego/vms/registry/mock_vm_registry.go +++ b/avalanchego/vms/registry/mock_vm_registry.go @@ -1,13 +1,14 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. // Code generated by MockGen. DO NOT EDIT. -// Source: vms/registry/vm_registry.go +// Source: github.com/ava-labs/avalanchego/vms/registry (interfaces: VMRegistry) // Package registry is a generated GoMock package. package registry import ( + context "context" reflect "reflect" ids "github.com/ava-labs/avalanchego/ids" @@ -38,9 +39,9 @@ func (m *MockVMRegistry) EXPECT() *MockVMRegistryMockRecorder { } // Reload mocks base method. 
-func (m *MockVMRegistry) Reload() ([]ids.ID, map[ids.ID]error, error) { +func (m *MockVMRegistry) Reload(arg0 context.Context) ([]ids.ID, map[ids.ID]error, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Reload") + ret := m.ctrl.Call(m, "Reload", arg0) ret0, _ := ret[0].([]ids.ID) ret1, _ := ret[1].(map[ids.ID]error) ret2, _ := ret[2].(error) @@ -48,15 +49,15 @@ func (m *MockVMRegistry) Reload() ([]ids.ID, map[ids.ID]error, error) { } // Reload indicates an expected call of Reload. -func (mr *MockVMRegistryMockRecorder) Reload() *gomock.Call { +func (mr *MockVMRegistryMockRecorder) Reload(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reload", reflect.TypeOf((*MockVMRegistry)(nil).Reload)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reload", reflect.TypeOf((*MockVMRegistry)(nil).Reload), arg0) } // ReloadWithReadLock mocks base method. -func (m *MockVMRegistry) ReloadWithReadLock() ([]ids.ID, map[ids.ID]error, error) { +func (m *MockVMRegistry) ReloadWithReadLock(arg0 context.Context) ([]ids.ID, map[ids.ID]error, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReloadWithReadLock") + ret := m.ctrl.Call(m, "ReloadWithReadLock", arg0) ret0, _ := ret[0].([]ids.ID) ret1, _ := ret[1].(map[ids.ID]error) ret2, _ := ret[2].(error) @@ -64,7 +65,7 @@ func (m *MockVMRegistry) ReloadWithReadLock() ([]ids.ID, map[ids.ID]error, error } // ReloadWithReadLock indicates an expected call of ReloadWithReadLock. 
-func (mr *MockVMRegistryMockRecorder) ReloadWithReadLock() *gomock.Call { +func (mr *MockVMRegistryMockRecorder) ReloadWithReadLock(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReloadWithReadLock", reflect.TypeOf((*MockVMRegistry)(nil).ReloadWithReadLock)) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReloadWithReadLock", reflect.TypeOf((*MockVMRegistry)(nil).ReloadWithReadLock), arg0) } diff --git a/avalanchego/vms/registry/vm_getter.go b/avalanchego/vms/registry/vm_getter.go index 18601135..5115af9e 100644 --- a/avalanchego/vms/registry/vm_getter.go +++ b/avalanchego/vms/registry/vm_getter.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry @@ -13,10 +13,11 @@ import ( "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/rpcchainvm" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" ) var ( - _ VMGetter = &vmGetter{} + _ VMGetter = (*vmGetter)(nil) errInvalidVMID = errors.New("invalid vmID") ) @@ -38,6 +39,7 @@ type VMGetterConfig struct { Manager vms.Manager PluginDirectory string CPUTracker resource.ProcessTracker + RuntimeTracker runtime.Tracker } type vmGetter struct { @@ -100,6 +102,7 @@ func (getter *vmGetter) Get() (map[ids.ID]vms.Factory, map[ids.ID]vms.Factory, e unregisteredVMs[vmID] = rpcchainvm.NewFactory( filepath.Join(getter.config.PluginDirectory, file.Name()), getter.config.CPUTracker, + getter.config.RuntimeTracker, ) } return registeredVMs, unregisteredVMs, nil diff --git a/avalanchego/vms/registry/vm_getter_test.go b/avalanchego/vms/registry/vm_getter_test.go index 4322aa33..ce659cf4 100644 --- a/avalanchego/vms/registry/vm_getter_test.go +++ b/avalanchego/vms/registry/vm_getter_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, 
Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry @@ -23,7 +23,7 @@ var ( pluginDir = "plugin getter" // errors - errOops = errors.New("oops") + errTest = errors.New("non-nil error") // vm names registeredVMName = "mgj786NP7uDwBCcq6YwThhaN8FLyybkCa4zBWTQbNgmK6k9A6" @@ -66,10 +66,10 @@ func TestGet_ReadDirFails(t *testing.T) { defer resources.ctrl.Finish() // disk read fails - resources.mockReader.EXPECT().ReadDir(pluginDir).Times(1).Return(nil, errOops) + resources.mockReader.EXPECT().ReadDir(pluginDir).Times(1).Return(nil, errTest) _, _, err := resources.getter.Get() - require.Equal(t, errOops, err) + require.ErrorIs(t, err, errTest) } // Get should fail if we see an invalid VM id @@ -79,7 +79,7 @@ func TestGet_InvalidVMName(t *testing.T) { resources.mockReader.EXPECT().ReadDir(pluginDir).Times(1).Return(invalidVMs, nil) // didn't find an alias, so we'll try using this invalid vm name - resources.mockManager.EXPECT().Lookup("invalid-vm").Times(1).Return(ids.Empty, errOops) + resources.mockManager.EXPECT().Lookup("invalid-vm").Times(1).Return(ids.Empty, errTest) _, _, err := resources.getter.Get() require.ErrorIs(t, err, errInvalidVMID) @@ -95,10 +95,10 @@ func TestGet_GetFactoryFails(t *testing.T) { resources.mockReader.EXPECT().ReadDir(pluginDir).Times(1).Return(oneValidVM, nil) resources.mockManager.EXPECT().Lookup(registeredVMName).Times(1).Return(vm, nil) // Getting the factory fails - resources.mockManager.EXPECT().GetFactory(vm).Times(1).Return(nil, errOops) + resources.mockManager.EXPECT().GetFactory(vm).Times(1).Return(nil, errTest) _, _, err := resources.getter.Get() - require.Equal(t, errOops, err) + require.ErrorIs(t, err, errTest) } // Get should return the correct registered and unregistered VMs. 
diff --git a/avalanchego/vms/registry/vm_registerer.go b/avalanchego/vms/registry/vm_registerer.go index 40d373e3..6a6fdd85 100644 --- a/avalanchego/vms/registry/vm_registerer.go +++ b/avalanchego/vms/registry/vm_registerer.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry import ( + "context" "fmt" "path" "sync" @@ -18,26 +19,27 @@ import ( "github.com/ava-labs/avalanchego/vms" ) -var _ VMRegisterer = &vmRegisterer{} +var _ VMRegisterer = (*vmRegisterer)(nil) // VMRegisterer defines functionality to install a virtual machine. type VMRegisterer interface { registerer // RegisterWithReadLock installs the VM assuming that the http read-lock is // held. - RegisterWithReadLock(ids.ID, vms.Factory) error + RegisterWithReadLock(context.Context, ids.ID, vms.Factory) error } type registerer interface { // Register installs the VM. - Register(ids.ID, vms.Factory) error + Register(context.Context, ids.ID, vms.Factory) error } // VMRegistererConfig configures settings for VMRegisterer. 
type VMRegistererConfig struct { - APIServer server.Server - Log logging.Logger - VMManager vms.Manager + APIServer server.Server + Log logging.Logger + VMFactoryLog logging.Logger + VMManager vms.Manager } type vmRegisterer struct { @@ -51,19 +53,19 @@ func NewVMRegisterer(config VMRegistererConfig) VMRegisterer { } } -func (r *vmRegisterer) Register(vmID ids.ID, factory vms.Factory) error { - return r.register(r.config.APIServer, vmID, factory) +func (r *vmRegisterer) Register(ctx context.Context, vmID ids.ID, factory vms.Factory) error { + return r.register(ctx, r.config.APIServer, vmID, factory) } -func (r *vmRegisterer) RegisterWithReadLock(vmID ids.ID, factory vms.Factory) error { - return r.register(server.PathWriterFromWithReadLock(r.config.APIServer), vmID, factory) +func (r *vmRegisterer) RegisterWithReadLock(ctx context.Context, vmID ids.ID, factory vms.Factory) error { + return r.register(ctx, server.PathWriterFromWithReadLock(r.config.APIServer), vmID, factory) } -func (r *vmRegisterer) register(pathAdder server.PathAdder, vmID ids.ID, factory vms.Factory) error { - if err := r.config.VMManager.RegisterFactory(vmID, factory); err != nil { +func (r *vmRegisterer) register(ctx context.Context, pathAdder server.PathAdder, vmID ids.ID, factory vms.Factory) error { + if err := r.config.VMManager.RegisterFactory(ctx, vmID, factory); err != nil { return err } - handlers, err := r.createStaticHandlers(vmID, factory) + handlers, err := r.createStaticHandlers(ctx, vmID, factory) if err != nil { return err } @@ -81,8 +83,14 @@ func (r *vmRegisterer) register(pathAdder server.PathAdder, vmID ids.ID, factory return pathAdder.AddAliases(defaultEndpoint, urlAliases...) } -func (r *vmRegisterer) createStaticHandlers(vmID ids.ID, factory vms.Factory) (map[string]*common.HTTPHandler, error) { - vm, err := factory.New(nil) +// Creates a dedicated VM instance for the sole purpose of serving the static +// handlers. 
+func (r *vmRegisterer) createStaticHandlers( + ctx context.Context, + vmID ids.ID, + factory vms.Factory, +) (map[string]*common.HTTPHandler, error) { + vm, err := factory.New(r.config.VMFactoryLog) if err != nil { return nil, err } @@ -92,14 +100,14 @@ func (r *vmRegisterer) createStaticHandlers(vmID ids.ID, factory vms.Factory) (m return nil, fmt.Errorf("%s doesn't implement VM", vmID) } - handlers, err := commonVM.CreateStaticHandlers() + handlers, err := commonVM.CreateStaticHandlers(ctx) if err != nil { r.config.Log.Error("failed to create static API endpoints", zap.Stringer("vmID", vmID), zap.Error(err), ) - if err := commonVM.Shutdown(); err != nil { + if err := commonVM.Shutdown(ctx); err != nil { return nil, fmt.Errorf("shutting down VM errored with: %w", err) } return nil, err @@ -118,7 +126,7 @@ func (r *vmRegisterer) createStaticEndpoints(pathAdder server.PathAdder, handler ) if err := pathAdder.AddRoute(service, lock, defaultEndpoint, extension); err != nil { return fmt.Errorf( - "failed to add static API endpoint %s%s: %s", + "failed to add static API endpoint %s%s: %w", defaultEndpoint, extension, err, @@ -148,6 +156,6 @@ type readRegisterer struct { registerer VMRegisterer } -func (r readRegisterer) Register(vmID ids.ID, factory vms.Factory) error { - return r.registerer.RegisterWithReadLock(vmID, factory) +func (r readRegisterer) Register(ctx context.Context, vmID ids.ID, factory vms.Factory) error { + return r.registerer.RegisterWithReadLock(ctx, vmID, factory) } diff --git a/avalanchego/vms/registry/vm_registerer_test.go b/avalanchego/vms/registry/vm_registerer_test.go index 1812a60a..e469277b 100644 --- a/avalanchego/vms/registry/vm_registerer_test.go +++ b/avalanchego/vms/registry/vm_registerer_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package registry import ( + "context" "path" "testing" @@ -30,9 +31,9 @@ func TestRegisterRegisterVMFails(t *testing.T) { vmFactory := vms.NewMockFactory(resources.ctrl) // We fail to register the VM - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(errOops) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(errTest) - require.Error(t, errOops, resources.registerer.Register(id, vmFactory)) + require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) } // Tests Register if a VM doesn't actually implement VM. @@ -43,11 +44,11 @@ func TestRegisterBadVM(t *testing.T) { vmFactory := vms.NewMockFactory(resources.ctrl) vm := "this is not a vm..." - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) // Since this factory produces a bad vm, we should get an error. 
- vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - require.Error(t, errOops, resources.registerer.Register(id, vmFactory)) + require.Error(t, resources.registerer.Register(context.Background(), id, vmFactory)) } // Tests Register if creating endpoints for a VM fails + shutdown fails @@ -58,13 +59,13 @@ func TestRegisterCreateHandlersAndShutdownFails(t *testing.T) { vmFactory := vms.NewMockFactory(resources.ctrl) vm := mocks.NewMockChainVM(resources.ctrl) - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) // We fail to create handlers + fail to shutdown - vm.EXPECT().CreateStaticHandlers().Return(nil, errOops).Times(1) - vm.EXPECT().Shutdown().Return(errOops).Times(1) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) + vm.EXPECT().Shutdown(gomock.Any()).Return(errTest).Times(1) - require.Error(t, errOops, resources.registerer.Register(id, vmFactory)) + require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) } // Tests Register if creating endpoints for a VM fails + shutdown succeeds @@ -75,16 +76,16 @@ func TestRegisterCreateHandlersFails(t *testing.T) { vmFactory := vms.NewMockFactory(resources.ctrl) vm := mocks.NewMockChainVM(resources.ctrl) - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) // We fail to create handlers + but succeed our shutdown - vm.EXPECT().CreateStaticHandlers().Return(nil, errOops).Times(1) - 
vm.EXPECT().Shutdown().Return(nil).Times(1) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) + vm.EXPECT().Shutdown(gomock.Any()).Return(nil).Times(1) - require.Error(t, errOops, resources.registerer.Register(id, vmFactory)) + require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) } -// Tests Register if we fail to regsiter the new endpoint on the server. +// Tests Register if we fail to register the new endpoint on the server. func TestRegisterAddRouteFails(t *testing.T) { resources := initRegistererTest(t) defer resources.ctrl.Finish() @@ -96,9 +97,9 @@ func TestRegisterAddRouteFails(t *testing.T) { "foo": {}, } - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers().Return(handlers, nil).Times(1) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) // We fail to create an endpoint for the handler resources.mockServer.EXPECT(). AddRoute( @@ -108,9 +109,9 @@ func TestRegisterAddRouteFails(t *testing.T) { "foo", ). Times(1). 
- Return(errOops) + Return(errTest) - require.Error(t, errOops, resources.registerer.Register(id, vmFactory)) + require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) } // Tests Register we can't find the alias for the newly registered vm @@ -125,9 +126,9 @@ func TestRegisterAliasLookupFails(t *testing.T) { "foo": {}, } - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers().Return(handlers, nil).Times(1) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) // Registering the route fails resources.mockServer.EXPECT(). AddRoute( @@ -138,9 +139,9 @@ func TestRegisterAliasLookupFails(t *testing.T) { ). Times(1). Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(nil, errOops) + resources.mockManager.EXPECT().Aliases(id).Times(1).Return(nil, errTest) - require.Error(t, errOops, resources.registerer.Register(id, vmFactory)) + require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) } // Tests Register if adding aliases for the newly registered vm fails @@ -156,9 +157,9 @@ func TestRegisterAddAliasesFails(t *testing.T) { } aliases := []string{"alias-1", "alias-2"} - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers().Return(handlers, nil).Times(1) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) resources.mockServer.EXPECT(). 
AddRoute( handlers["foo"], @@ -176,9 +177,9 @@ func TestRegisterAddAliasesFails(t *testing.T) { path.Join(constants.VMAliasPrefix, aliases[0]), path.Join(constants.VMAliasPrefix, aliases[1]), ). - Return(errOops) + Return(errTest) - require.Error(t, errOops, resources.registerer.Register(id, vmFactory)) + require.ErrorIs(t, resources.registerer.Register(context.Background(), id, vmFactory), errTest) } // Tests Register if no errors are thrown @@ -194,9 +195,9 @@ func TestRegisterHappyCase(t *testing.T) { } aliases := []string{"alias-1", "alias-2"} - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers().Return(handlers, nil).Times(1) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) resources.mockServer.EXPECT(). AddRoute( handlers["foo"], @@ -216,7 +217,7 @@ func TestRegisterHappyCase(t *testing.T) { Times(1). Return(nil) - require.Nil(t, resources.registerer.Register(id, vmFactory)) + require.NoError(t, resources.registerer.Register(context.Background(), id, vmFactory)) } // RegisterWithReadLock should succeed even if we can't register a VM @@ -227,9 +228,9 @@ func TestRegisterWithReadLockRegisterVMFails(t *testing.T) { vmFactory := vms.NewMockFactory(resources.ctrl) // We fail to register the VM - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(errOops) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(errTest) - require.Error(t, errOops, resources.registerer.RegisterWithReadLock(id, vmFactory)) + require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) } // Tests RegisterWithReadLock if a VM doesn't actually implement VM. 
@@ -240,11 +241,11 @@ func TestRegisterWithReadLockBadVM(t *testing.T) { vmFactory := vms.NewMockFactory(resources.ctrl) vm := "this is not a vm..." - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) // Since this factory produces a bad vm, we should get an error. - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) - require.Error(t, errOops, resources.registerer.RegisterWithReadLock(id, vmFactory)) + require.Error(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory)) } // Tests RegisterWithReadLock if creating endpoints for a VM fails + shutdown fails @@ -255,13 +256,13 @@ func TestRegisterWithReadLockCreateHandlersAndShutdownFails(t *testing.T) { vmFactory := vms.NewMockFactory(resources.ctrl) vm := mocks.NewMockChainVM(resources.ctrl) - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) // We fail to create handlers + fail to shutdown - vm.EXPECT().CreateStaticHandlers().Return(nil, errOops).Times(1) - vm.EXPECT().Shutdown().Return(errOops).Times(1) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) + vm.EXPECT().Shutdown(gomock.Any()).Return(errTest).Times(1) - require.Error(t, errOops, resources.registerer.RegisterWithReadLock(id, vmFactory)) + require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) } // Tests RegisterWithReadLock if creating endpoints for a VM fails + shutdown succeeds @@ -272,16 +273,16 @@ func TestRegisterWithReadLockCreateHandlersFails(t *testing.T) { vmFactory := 
vms.NewMockFactory(resources.ctrl) vm := mocks.NewMockChainVM(resources.ctrl) - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) // We fail to create handlers + but succeed our shutdown - vm.EXPECT().CreateStaticHandlers().Return(nil, errOops).Times(1) - vm.EXPECT().Shutdown().Return(nil).Times(1) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(nil, errTest).Times(1) + vm.EXPECT().Shutdown(gomock.Any()).Return(nil).Times(1) - require.Error(t, errOops, resources.registerer.RegisterWithReadLock(id, vmFactory)) + require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) } -// Tests RegisterWithReadLock if we fail to regsiter the new endpoint on the server. +// Tests RegisterWithReadLock if we fail to register the new endpoint on the server. func TestRegisterWithReadLockAddRouteWithReadLockFails(t *testing.T) { resources := initRegistererTest(t) defer resources.ctrl.Finish() @@ -293,9 +294,9 @@ func TestRegisterWithReadLockAddRouteWithReadLockFails(t *testing.T) { "foo": {}, } - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers().Return(handlers, nil).Times(1) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) // We fail to create an endpoint for the handler resources.mockServer.EXPECT(). AddRouteWithReadLock( @@ -305,9 +306,9 @@ func TestRegisterWithReadLockAddRouteWithReadLockFails(t *testing.T) { "foo", ). Times(1). 
- Return(errOops) + Return(errTest) - require.Error(t, errOops, resources.registerer.RegisterWithReadLock(id, vmFactory)) + require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) } // Tests RegisterWithReadLock we can't find the alias for the newly registered vm @@ -322,9 +323,9 @@ func TestRegisterWithReadLockAliasLookupFails(t *testing.T) { "foo": {}, } - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers().Return(handlers, nil).Times(1) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) // RegisterWithReadLocking the route fails resources.mockServer.EXPECT(). AddRouteWithReadLock( @@ -335,9 +336,9 @@ func TestRegisterWithReadLockAliasLookupFails(t *testing.T) { ). Times(1). 
Return(nil) - resources.mockManager.EXPECT().Aliases(id).Times(1).Return(nil, errOops) + resources.mockManager.EXPECT().Aliases(id).Times(1).Return(nil, errTest) - require.Error(t, errOops, resources.registerer.RegisterWithReadLock(id, vmFactory)) + require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) } // Tests RegisterWithReadLock if adding aliases for the newly registered vm fails @@ -353,9 +354,9 @@ func TestRegisterWithReadLockAddAliasesFails(t *testing.T) { } aliases := []string{"alias-1", "alias-2"} - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers().Return(handlers, nil).Times(1) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) resources.mockServer.EXPECT(). AddRouteWithReadLock( handlers["foo"], @@ -373,9 +374,9 @@ func TestRegisterWithReadLockAddAliasesFails(t *testing.T) { path.Join(constants.VMAliasPrefix, aliases[0]), path.Join(constants.VMAliasPrefix, aliases[1]), ). 
- Return(errOops) + Return(errTest) - require.Error(t, errOops, resources.registerer.RegisterWithReadLock(id, vmFactory)) + require.ErrorIs(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory), errTest) } // Tests RegisterWithReadLock if no errors are thrown @@ -391,9 +392,9 @@ func TestRegisterWithReadLockHappyCase(t *testing.T) { } aliases := []string{"alias-1", "alias-2"} - resources.mockManager.EXPECT().RegisterFactory(id, vmFactory).Times(1).Return(nil) - vmFactory.EXPECT().New(nil).Times(1).Return(vm, nil) - vm.EXPECT().CreateStaticHandlers().Return(handlers, nil).Times(1) + resources.mockManager.EXPECT().RegisterFactory(gomock.Any(), id, vmFactory).Times(1).Return(nil) + vmFactory.EXPECT().New(logging.NoLog{}).Times(1).Return(vm, nil) + vm.EXPECT().CreateStaticHandlers(gomock.Any()).Return(handlers, nil).Times(1) resources.mockServer.EXPECT(). AddRouteWithReadLock( handlers["foo"], @@ -413,7 +414,7 @@ func TestRegisterWithReadLockHappyCase(t *testing.T) { Times(1). Return(nil) - require.Nil(t, resources.registerer.RegisterWithReadLock(id, vmFactory)) + require.NoError(t, resources.registerer.RegisterWithReadLock(context.Background(), id, vmFactory)) } type vmRegistererTestResources struct { @@ -432,9 +433,10 @@ func initRegistererTest(t *testing.T) *vmRegistererTestResources { mockLog := logging.NewMockLogger(ctrl) registerer := NewVMRegisterer(VMRegistererConfig{ - APIServer: mockServer, - Log: mockLog, - VMManager: mockManager, + APIServer: mockServer, + Log: mockLog, + VMFactoryLog: logging.NoLog{}, + VMManager: mockManager, }) mockLog.EXPECT().Error(gomock.Any(), gomock.Any()).AnyTimes() diff --git a/avalanchego/vms/registry/vm_registry.go b/avalanchego/vms/registry/vm_registry.go index ff54a362..dd6f96d4 100644 --- a/avalanchego/vms/registry/vm_registry.go +++ b/avalanchego/vms/registry/vm_registry.go @@ -1,20 +1,24 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry -import "github.com/ava-labs/avalanchego/ids" +import ( + "context" -var _ VMRegistry = &vmRegistry{} + "github.com/ava-labs/avalanchego/ids" +) + +var _ VMRegistry = (*vmRegistry)(nil) // VMRegistry defines functionality to get any new virtual machines on the node, // and install them if they're not already installed. type VMRegistry interface { // Reload installs all non-installed vms on the node. - Reload() ([]ids.ID, map[ids.ID]error, error) + Reload(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) // ReloadWithReadLock installs all non-installed vms on the node assuming // the http read lock is currently held. - ReloadWithReadLock() ([]ids.ID, map[ids.ID]error, error) + ReloadWithReadLock(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) } // VMRegistryConfig defines configurations for VMRegistry @@ -34,17 +38,17 @@ func NewVMRegistry(config VMRegistryConfig) VMRegistry { } } -func (r *vmRegistry) Reload() ([]ids.ID, map[ids.ID]error, error) { - return r.reload(r.config.VMRegisterer) +func (r *vmRegistry) Reload(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) { + return r.reload(ctx, r.config.VMRegisterer) } -func (r *vmRegistry) ReloadWithReadLock() ([]ids.ID, map[ids.ID]error, error) { - return r.reload(readRegisterer{ +func (r *vmRegistry) ReloadWithReadLock(ctx context.Context) ([]ids.ID, map[ids.ID]error, error) { + return r.reload(ctx, readRegisterer{ registerer: r.config.VMRegisterer, }) } -func (r *vmRegistry) reload(registerer registerer) ([]ids.ID, map[ids.ID]error, error) { +func (r *vmRegistry) reload(ctx context.Context, registerer registerer) ([]ids.ID, map[ids.ID]error, error) { _, unregisteredVMs, err := r.config.VMGetter.Get() if err != nil { return nil, nil, err @@ -54,7 +58,7 @@ func (r *vmRegistry) reload(registerer registerer) ([]ids.ID, map[ids.ID]error, failedVMs := make(map[ids.ID]error) 
for vmID, factory := range unregisteredVMs { - if err := registerer.Register(vmID, factory); err != nil { + if err := registerer.Register(ctx, vmID, factory); err != nil { failedVMs[vmID] = err continue } diff --git a/avalanchego/vms/registry/vm_registry_test.go b/avalanchego/vms/registry/vm_registry_test.go index ffbe3ef9..fd8f096a 100644 --- a/avalanchego/vms/registry/vm_registry_test.go +++ b/avalanchego/vms/registry/vm_registry_test.go @@ -1,9 +1,10 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package registry import ( + "context" "testing" "github.com/golang/mock/gomock" @@ -46,18 +47,18 @@ func TestReload_Success(t *testing.T) { Times(1). Return(registeredVms, unregisteredVms, nil) resources.mockVMRegisterer.EXPECT(). - Register(id3, factory3). + Register(gomock.Any(), id3, factory3). Times(1). Return(nil) resources.mockVMRegisterer.EXPECT(). - Register(id4, factory4). + Register(gomock.Any(), id4, factory4). Times(1). 
Return(nil) - installedVMs, failedVMs, err := resources.vmRegistry.Reload() + installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) require.ElementsMatch(t, []ids.ID{id3, id4}, installedVMs) require.Empty(t, failedVMs) - require.Nil(t, err) + require.NoError(t, err) } // Tests that we fail if we're not able to get the vms on disk @@ -65,12 +66,12 @@ func TestReload_GetNewVMsFails(t *testing.T) { resources := initVMRegistryTest(t) defer resources.ctrl.Finish() - resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errOops) + resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) - installedVMs, failedVMs, err := resources.vmRegistry.Reload() - require.Nil(t, installedVMs) + installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) + require.Empty(t, installedVMs) require.Empty(t, failedVMs) - require.Equal(t, err, errOops) + require.ErrorIs(t, err, errTest) } // Tests that if we fail to register a VM, we fail. @@ -98,21 +99,21 @@ func TestReload_PartialRegisterFailure(t *testing.T) { Times(1). Return(registeredVms, unregisteredVms, nil) resources.mockVMRegisterer.EXPECT(). - Register(id3, factory3). + Register(gomock.Any(), id3, factory3). Times(1). - Return(errOops) + Return(errTest) resources.mockVMRegisterer.EXPECT(). - Register(id4, factory4). + Register(gomock.Any(), id4, factory4). Times(1). Return(nil) - installedVMs, failedVMs, err := resources.vmRegistry.Reload() + installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) require.Len(t, failedVMs, 1) - require.Equal(t, failedVMs[id3], errOops) + require.ErrorIs(t, failedVMs[id3], errTest) require.Len(t, installedVMs, 1) - require.Equal(t, installedVMs[0], id4) - require.Nil(t, err) + require.Equal(t, id4, installedVMs[0]) + require.NoError(t, err) } // Tests the happy case where Reload succeeds. @@ -140,18 +141,18 @@ func TestReloadWithReadLock_Success(t *testing.T) { Times(1). 
Return(registeredVms, unregisteredVms, nil) resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(id3, factory3). + RegisterWithReadLock(gomock.Any(), id3, factory3). Times(1). Return(nil) resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(id4, factory4). + RegisterWithReadLock(gomock.Any(), id4, factory4). Times(1). Return(nil) - installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock() + installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) require.ElementsMatch(t, []ids.ID{id3, id4}, installedVMs) require.Empty(t, failedVMs) - require.Nil(t, err) + require.NoError(t, err) } // Tests that we fail if we're not able to get the vms on disk @@ -159,12 +160,12 @@ func TestReloadWithReadLock_GetNewVMsFails(t *testing.T) { resources := initVMRegistryTest(t) defer resources.ctrl.Finish() - resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errOops) + resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) - installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock() - require.Nil(t, installedVMs) + installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) + require.Empty(t, installedVMs) require.Empty(t, failedVMs) - require.Equal(t, err, errOops) + require.ErrorIs(t, err, errTest) } // Tests that if we fail to register a VM, we fail. @@ -192,21 +193,21 @@ func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) { Times(1). Return(registeredVms, unregisteredVms, nil) resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(id3, factory3). + RegisterWithReadLock(gomock.Any(), id3, factory3). Times(1). - Return(errOops) + Return(errTest) resources.mockVMRegisterer.EXPECT(). - RegisterWithReadLock(id4, factory4). + RegisterWithReadLock(gomock.Any(), id4, factory4). Times(1). 
Return(nil) - installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock() + installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) require.Len(t, failedVMs, 1) - require.Equal(t, failedVMs[id3], errOops) + require.ErrorIs(t, failedVMs[id3], errTest) require.Len(t, installedVMs, 1) - require.Equal(t, installedVMs[0], id4) - require.Nil(t, err) + require.Equal(t, id4, installedVMs[0]) + require.NoError(t, err) } type registryTestResources struct { diff --git a/avalanchego/vms/rpcchainvm/batched_vm_test.go b/avalanchego/vms/rpcchainvm/batched_vm_test.go new file mode 100644 index 00000000..cd554af9 --- /dev/null +++ b/avalanchego/vms/rpcchainvm/batched_vm_test.go @@ -0,0 +1,129 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package rpcchainvm + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/components/chain" +) + +var ( + blkBytes1 = []byte{1} + blkBytes2 = []byte{2} + + blkID0 = ids.ID{0} + blkID1 = ids.ID{1} + blkID2 = ids.ID{2} + + status1 = choices.Accepted + status2 = choices.Processing + + time1 = time.Unix(1, 0) + time2 = time.Unix(2, 0) +) + +func batchedParseBlockCachingTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { + // test key is "batchedParseBlockCachingTestKey" + + // create mock + ctrl := gomock.NewController(t) + vm := mocks.NewMockChainVM(ctrl) + + if loadExpectations { + 
blk1 := snowman.NewMockBlock(ctrl) + blk2 := snowman.NewMockBlock(ctrl) + gomock.InOrder( + // Initialize + vm.EXPECT().Initialize( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), + ).Return(nil).Times(1), + vm.EXPECT().LastAccepted(gomock.Any()).Return(preSummaryBlk.ID(), nil).Times(1), + vm.EXPECT().GetBlock(gomock.Any(), gomock.Any()).Return(preSummaryBlk, nil).Times(1), + + // Parse Block 1 + vm.EXPECT().ParseBlock(gomock.Any(), blkBytes1).Return(blk1, nil).Times(1), + blk1.EXPECT().ID().Return(blkID1).Times(1), + blk1.EXPECT().Parent().Return(blkID0).Times(1), + blk1.EXPECT().Status().Return(status1).Times(1), + blk1.EXPECT().Height().Return(uint64(1)).Times(1), + blk1.EXPECT().Timestamp().Return(time1).Times(1), + + // Parse Block 2 + vm.EXPECT().ParseBlock(gomock.Any(), blkBytes2).Return(blk2, nil).Times(1), + blk2.EXPECT().ID().Return(blkID2).Times(1), + blk2.EXPECT().Parent().Return(blkID1).Times(1), + blk2.EXPECT().Status().Return(status2).Times(1), + blk2.EXPECT().Height().Return(uint64(2)).Times(1), + blk2.EXPECT().Timestamp().Return(time2).Times(1), + ) + } + + return vm, ctrl +} + +func TestBatchedParseBlockCaching(t *testing.T) { + require := require.New(t) + testKey := batchedParseBlockCachingTestKey + + // Create and start the plugin + vm, stopper := buildClientHelper(require, testKey) + defer stopper.Stop(context.Background()) + + ctx := snow.DefaultContextTest() + dbManager := manager.NewMemDB(version.Semantic1_0_0) + + err := vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil) + require.NoError(err) + + // Call should parse the first block + blk, err := vm.ParseBlock(context.Background(), blkBytes1) + require.NoError(err) + require.Equal(blkID1, blk.ID()) + + _, typeChecked := blk.(*chain.BlockWrapper) + require.True(typeChecked) + + // Call should cache the first block and parse the second block + blks, err := 
vm.BatchedParseBlock(context.Background(), [][]byte{blkBytes1, blkBytes2}) + require.NoError(err) + require.Len(blks, 2) + require.Equal(blkID1, blks[0].ID()) + require.Equal(blkID2, blks[1].ID()) + + _, typeChecked = blks[0].(*chain.BlockWrapper) + require.True(typeChecked) + + _, typeChecked = blks[1].(*chain.BlockWrapper) + require.True(typeChecked) + + // Call should be fully cached and not result in a grpc call + blks, err = vm.BatchedParseBlock(context.Background(), [][]byte{blkBytes1, blkBytes2}) + require.NoError(err) + require.Len(blks, 2) + require.Equal(blkID1, blks[0].ID()) + require.Equal(blkID2, blks[1].ID()) + + _, typeChecked = blks[0].(*chain.BlockWrapper) + require.True(typeChecked) + + _, typeChecked = blks[1].(*chain.BlockWrapper) + require.True(typeChecked) +} diff --git a/avalanchego/vms/rpcchainvm/errors.go b/avalanchego/vms/rpcchainvm/errors.go index aee26427..e1456d87 100644 --- a/avalanchego/vms/rpcchainvm/errors.go +++ b/avalanchego/vms/rpcchainvm/errors.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm @@ -6,27 +6,29 @@ package rpcchainvm import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + + vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" ) var ( - errCodeToError = map[uint32]error{ - 1: database.ErrClosed, - 2: database.ErrNotFound, - 3: block.ErrHeightIndexedVMNotImplemented, - 4: block.ErrIndexIncomplete, - 5: block.ErrStateSyncableVMNotImplemented, + errEnumToError = map[vmpb.Error]error{ + vmpb.Error_ERROR_CLOSED: database.ErrClosed, + vmpb.Error_ERROR_NOT_FOUND: database.ErrNotFound, + vmpb.Error_ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED: block.ErrHeightIndexedVMNotImplemented, + vmpb.Error_ERROR_HEIGHT_INDEX_INCOMPLETE: block.ErrIndexIncomplete, + vmpb.Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED: block.ErrStateSyncableVMNotImplemented, } - errorToErrCode = map[error]uint32{ - database.ErrClosed: 1, - database.ErrNotFound: 2, - block.ErrHeightIndexedVMNotImplemented: 3, - block.ErrIndexIncomplete: 4, - block.ErrStateSyncableVMNotImplemented: 5, + errorToErrEnum = map[error]vmpb.Error{ + database.ErrClosed: vmpb.Error_ERROR_CLOSED, + database.ErrNotFound: vmpb.Error_ERROR_NOT_FOUND, + block.ErrHeightIndexedVMNotImplemented: vmpb.Error_ERROR_HEIGHT_INDEX_NOT_IMPLEMENTED, + block.ErrIndexIncomplete: vmpb.Error_ERROR_HEIGHT_INDEX_INCOMPLETE, + block.ErrStateSyncableVMNotImplemented: vmpb.Error_ERROR_STATE_SYNC_NOT_IMPLEMENTED, } ) func errorToRPCError(err error) error { - if _, ok := errorToErrCode[err]; ok { + if _, ok := errorToErrEnum[err]; ok { return nil } return err diff --git a/avalanchego/vms/rpcchainvm/factory.go b/avalanchego/vms/rpcchainvm/factory.go index f55945b0..f121c7bb 100644 --- a/avalanchego/vms/rpcchainvm/factory.go +++ b/avalanchego/vms/rpcchainvm/factory.go @@ -1,103 +1,70 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm import ( - "errors" + "context" "fmt" - "io" - "log" - "path/filepath" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/resource" - "github.com/ava-labs/avalanchego/utils/subprocess" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" -) + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime/subprocess" -var ( - errWrongVM = errors.New("wrong vm type") - - _ vms.Factory = &factory{} + vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" ) +var _ vms.Factory = (*factory)(nil) + type factory struct { path string processTracker resource.ProcessTracker + runtimeTracker runtime.Tracker } -func NewFactory(path string, processTracker resource.ProcessTracker) vms.Factory { +func NewFactory(path string, processTracker resource.ProcessTracker, runtimeTracker runtime.Tracker) vms.Factory { return &factory{ path: path, processTracker: processTracker, + runtimeTracker: runtimeTracker, } } -func (f *factory) New(ctx *snow.Context) (interface{}, error) { - config := &plugin.ClientConfig{ - HandshakeConfig: Handshake, - Plugins: PluginMap, - Cmd: subprocess.New(f.path), - AllowedProtocols: []plugin.Protocol{ - plugin.ProtocolGRPC, - }, - // We kill this client by calling kill() when the chain running this VM - // shuts down. However, there are some cases where the VM's Shutdown - // method is not called. Namely, if: - // 1) The node shuts down after the client is created but before the - // chain is registered with the message router. - // 2) The chain doesn't handle a shutdown message before the node times - // out on the chain's shutdown and dies, leaving the shutdown message - // unhandled. 
- // We set managed to true so that we can call plugin.CleanupClients on - // node shutdown to ensure every plugin subprocess is killed. - Managed: true, - GRPCDialOptions: grpcutils.DefaultDialOptions, +func (f *factory) New(log logging.Logger) (interface{}, error) { + config := &subprocess.Config{ + Stderr: log, + Stdout: log, + HandshakeTimeout: runtime.DefaultHandshakeTimeout, + Log: log, } - if ctx != nil { - log.SetOutput(ctx.Log) - config.Stderr = ctx.Log - config.Logger = hclog.New(&hclog.LoggerOptions{ - Output: ctx.Log, - Level: hclog.Info, - }) - } else { - log.SetOutput(io.Discard) - config.Stderr = io.Discard - config.Logger = hclog.New(&hclog.LoggerOptions{ - Output: io.Discard, - }) - } - client := plugin.NewClient(config) - pluginName := filepath.Base(f.path) - pluginErr := func(err error) error { - return fmt.Errorf("plugin: %q: %w", pluginName, err) + listener, err := grpcutils.NewListener() + if err != nil { + return nil, fmt.Errorf("failed to create listener: %w", err) } - rpcClient, err := client.Client() + status, stopper, err := subprocess.Bootstrap( + context.TODO(), + listener, + subprocess.NewCmd(f.path), + config, + ) if err != nil { - client.Kill() - return nil, pluginErr(err) + return nil, err } - raw, err := rpcClient.Dispense("vm") + clientConn, err := grpcutils.Dial(status.Addr) if err != nil { - client.Kill() - return nil, pluginErr(err) + return nil, err } - vm, ok := raw.(*VMClient) - if !ok { - client.Kill() - return nil, pluginErr(errWrongVM) - } + vm := NewClient(vmpb.NewVMClient(clientConn)) + vm.SetProcess(stopper, status.Pid, f.processTracker) + + f.runtimeTracker.TrackRuntime(stopper) - vm.SetProcess(ctx, client, f.processTracker) return vm, nil } diff --git a/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_client.go b/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_client.go index cc91e682..b4bc5a5a 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_client.go @@ 
-1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gconn @@ -17,7 +17,7 @@ import ( connpb "github.com/ava-labs/avalanchego/proto/pb/net/conn" ) -var _ net.Conn = &Client{} +var _ net.Conn = (*Client)(nil) // Client is an implementation of a connection that talks over RPC. type Client struct { @@ -47,8 +47,8 @@ func (c *Client) Read(p []byte) (int, error) { copy(p, resp.Read) - if resp.Errored { - err = errors.New(resp.Error) + if resp.Error != nil { + err = errors.New(*resp.Error) } return len(resp.Read), err } @@ -61,8 +61,8 @@ func (c *Client) Write(b []byte) (int, error) { return 0, err } - if resp.Errored { - err = errors.New(resp.Error) + if resp.Error != nil { + err = errors.New(*resp.Error) } return int(resp.Length), err } @@ -77,8 +77,13 @@ func (c *Client) Close() error { return errs.Err } -func (c *Client) LocalAddr() net.Addr { return c.local } -func (c *Client) RemoteAddr() net.Addr { return c.remote } +func (c *Client) LocalAddr() net.Addr { + return c.local +} + +func (c *Client) RemoteAddr() net.Addr { + return c.remote +} func (c *Client) SetDeadline(t time.Time) error { bytes, err := t.MarshalBinary() diff --git a/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_server.go b/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_server.go index 6576e5c4..07ca0f5a 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gconn/conn_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gconn @@ -15,7 +15,7 @@ import ( connpb "github.com/ava-labs/avalanchego/proto/pb/net/conn" ) -var _ connpb.ConnServer = &Server{} +var _ connpb.ConnServer = (*Server)(nil) // Server is an http.Conn that is managed over RPC. 
type Server struct { @@ -32,20 +32,20 @@ func NewServer(conn net.Conn, closer *grpcutils.ServerCloser) *Server { } } -func (s *Server) Read(ctx context.Context, req *connpb.ReadRequest) (*connpb.ReadResponse, error) { +func (s *Server) Read(_ context.Context, req *connpb.ReadRequest) (*connpb.ReadResponse, error) { buf := make([]byte, int(req.Length)) n, err := s.conn.Read(buf) resp := &connpb.ReadResponse{ Read: buf[:n], } if err != nil { - resp.Errored = true - resp.Error = err.Error() + errStr := err.Error() + resp.Error = &errStr } return resp, nil } -func (s *Server) Write(ctx context.Context, req *connpb.WriteRequest) (*connpb.WriteResponse, error) { +func (s *Server) Write(_ context.Context, req *connpb.WriteRequest) (*connpb.WriteResponse, error) { n, err := s.conn.Write(req.Payload) if err != nil { return nil, err @@ -55,13 +55,13 @@ func (s *Server) Write(ctx context.Context, req *connpb.WriteRequest) (*connpb.W }, nil } -func (s *Server) Close(ctx context.Context, req *emptypb.Empty) (*emptypb.Empty, error) { +func (s *Server) Close(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { err := s.conn.Close() s.closer.Stop() return &emptypb.Empty{}, err } -func (s *Server) SetDeadline(ctx context.Context, req *connpb.SetDeadlineRequest) (*emptypb.Empty, error) { +func (s *Server) SetDeadline(_ context.Context, req *connpb.SetDeadlineRequest) (*emptypb.Empty, error) { deadline := time.Time{} err := deadline.UnmarshalBinary(req.Time) if err != nil { @@ -70,7 +70,7 @@ func (s *Server) SetDeadline(ctx context.Context, req *connpb.SetDeadlineRequest return &emptypb.Empty{}, s.conn.SetDeadline(deadline) } -func (s *Server) SetReadDeadline(ctx context.Context, req *connpb.SetDeadlineRequest) (*emptypb.Empty, error) { +func (s *Server) SetReadDeadline(_ context.Context, req *connpb.SetDeadlineRequest) (*emptypb.Empty, error) { deadline := time.Time{} err := deadline.UnmarshalBinary(req.Time) if err != nil { @@ -79,7 +79,7 @@ func (s *Server) 
SetReadDeadline(ctx context.Context, req *connpb.SetDeadlineReq return &emptypb.Empty{}, s.conn.SetReadDeadline(deadline) } -func (s *Server) SetWriteDeadline(ctx context.Context, req *connpb.SetDeadlineRequest) (*emptypb.Empty, error) { +func (s *Server) SetWriteDeadline(_ context.Context, req *connpb.SetDeadlineRequest) (*emptypb.Empty, error) { deadline := time.Time{} err := deadline.UnmarshalBinary(req.Time) if err != nil { diff --git a/avalanchego/vms/rpcchainvm/ghttp/greader/reader_client.go b/avalanchego/vms/rpcchainvm/ghttp/greader/reader_client.go index ce0702d5..c06bdce9 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/greader/reader_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/greader/reader_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package greader @@ -11,7 +11,7 @@ import ( readerpb "github.com/ava-labs/avalanchego/proto/pb/io/reader" ) -var _ io.Reader = &Client{} +var _ io.Reader = (*Client)(nil) // Client is a reader that talks over RPC. type Client struct{ client readerpb.ReaderClient } @@ -31,8 +31,8 @@ func (c *Client) Read(p []byte) (int, error) { copy(p, resp.Read) - if resp.Errored { - err = errors.New(resp.Error) + if resp.Error != nil { + err = errors.New(*resp.Error) } return len(resp.Read), err } diff --git a/avalanchego/vms/rpcchainvm/ghttp/greader/reader_server.go b/avalanchego/vms/rpcchainvm/ghttp/greader/reader_server.go index 0fd5a2b5..a5f8f5d7 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/greader/reader_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/greader/reader_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package greader @@ -10,7 +10,7 @@ import ( readerpb "github.com/ava-labs/avalanchego/proto/pb/io/reader" ) -var _ readerpb.ReaderServer = &Server{} +var _ readerpb.ReaderServer = (*Server)(nil) // Server is an io.Reader that is managed over RPC. type Server struct { @@ -23,15 +23,15 @@ func NewServer(reader io.Reader) *Server { return &Server{reader: reader} } -func (s *Server) Read(ctx context.Context, req *readerpb.ReadRequest) (*readerpb.ReadResponse, error) { +func (s *Server) Read(_ context.Context, req *readerpb.ReadRequest) (*readerpb.ReadResponse, error) { buf := make([]byte, int(req.Length)) n, err := s.reader.Read(buf) resp := &readerpb.ReadResponse{ Read: buf[:n], } if err != nil { - resp.Errored = true - resp.Error = err.Error() + errStr := err.Error() + resp.Error = &errStr } return resp, nil } diff --git a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go index 5a3290e8..c89eb509 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/locked_writer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package gresponsewriter @@ -11,9 +11,9 @@ import ( ) var ( - _ http.ResponseWriter = &lockedWriter{} - _ http.Flusher = &lockedWriter{} - _ http.Hijacker = &lockedWriter{} + _ http.ResponseWriter = (*lockedWriter)(nil) + _ http.Flusher = (*lockedWriter)(nil) + _ http.Hijacker = (*lockedWriter)(nil) ) type lockedWriter struct { @@ -22,7 +22,9 @@ type lockedWriter struct { headerWritten bool } -func NewLockedWriter(w http.ResponseWriter) http.ResponseWriter { return &lockedWriter{writer: w} } +func NewLockedWriter(w http.ResponseWriter) http.ResponseWriter { + return &lockedWriter{writer: w} +} func (lw *lockedWriter) Header() http.Header { lw.lock.Lock() diff --git a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go index f34a393a..769d8edc 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gresponsewriter @@ -23,9 +23,9 @@ import ( ) var ( - _ http.ResponseWriter = &Client{} - _ http.Flusher = &Client{} - _ http.Hijacker = &Client{} + _ http.ResponseWriter = (*Client)(nil) + _ http.Flusher = (*Client)(nil) + _ http.Hijacker = (*Client)(nil) ) // Client is an http.ResponseWriter that talks over RPC. 
@@ -42,7 +42,9 @@ func NewClient(header http.Header, client responsewriterpb.WriterClient) *Client } } -func (c *Client) Header() http.Header { return c.header } +func (c *Client) Header() http.Header { + return c.header +} func (c *Client) Write(payload []byte) (int, error) { req := &responsewriterpb.WriteRequest{ @@ -87,8 +89,13 @@ type addr struct { str string } -func (a *addr) Network() string { return a.network } -func (a *addr) String() string { return a.str } +func (a *addr) Network() string { + return a.network +} + +func (a *addr) String() string { + return a.str +} func (c *Client) Hijack() (net.Conn, *bufio.ReadWriter, error) { resp, err := c.client.Hijack(context.Background(), &emptypb.Empty{}) diff --git a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go index 38c1ad63..a78e6b00 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gresponsewriter @@ -8,7 +8,8 @@ import ( "errors" "net/http" - "google.golang.org/grpc" + "golang.org/x/exp/maps" + "google.golang.org/protobuf/types/known/emptypb" "github.com/ava-labs/avalanchego/vms/rpcchainvm/ghttp/gconn" @@ -26,7 +27,7 @@ var ( errUnsupportedFlushing = errors.New("response writer doesn't support flushing") errUnsupportedHijacking = errors.New("response writer doesn't support hijacking") - _ responsewriterpb.WriterServer = &Server{} + _ responsewriterpb.WriterServer = (*Server)(nil) ) // Server is an http.ResponseWriter that is managed over RPC. 
@@ -42,11 +43,12 @@ func NewServer(writer http.ResponseWriter) *Server { } } -func (s *Server) Write(ctx context.Context, req *responsewriterpb.WriteRequest) (*responsewriterpb.WriteResponse, error) { +func (s *Server) Write( + _ context.Context, + req *responsewriterpb.WriteRequest, +) (*responsewriterpb.WriteResponse, error) { headers := s.writer.Header() - for key := range headers { - delete(headers, key) - } + maps.Clear(headers) for _, header := range req.Headers { headers[header.Key] = header.Values } @@ -60,11 +62,12 @@ func (s *Server) Write(ctx context.Context, req *responsewriterpb.WriteRequest) }, nil } -func (s *Server) WriteHeader(ctx context.Context, req *responsewriterpb.WriteHeaderRequest) (*emptypb.Empty, error) { +func (s *Server) WriteHeader( + _ context.Context, + req *responsewriterpb.WriteHeaderRequest, +) (*emptypb.Empty, error) { headers := s.writer.Header() - for key := range headers { - delete(headers, key) - } + maps.Clear(headers) for _, header := range req.Headers { headers[header.Key] = header.Values } @@ -72,7 +75,7 @@ func (s *Server) WriteHeader(ctx context.Context, req *responsewriterpb.WriteHea return &emptypb.Empty{}, nil } -func (s *Server) Flush(ctx context.Context, req *emptypb.Empty) (*emptypb.Empty, error) { +func (s *Server) Flush(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { flusher, ok := s.writer.(http.Flusher) if !ok { return nil, errUnsupportedFlushing @@ -81,7 +84,7 @@ func (s *Server) Flush(ctx context.Context, req *emptypb.Empty) (*emptypb.Empty, return &emptypb.Empty{}, nil } -func (s *Server) Hijack(ctx context.Context, req *emptypb.Empty) (*responsewriterpb.HijackResponse, error) { +func (s *Server) Hijack(context.Context, *emptypb.Empty) (*responsewriterpb.HijackResponse, error) { hijacker, ok := s.writer.(http.Hijacker) if !ok { return nil, errUnsupportedHijacking @@ -95,20 +98,16 @@ func (s *Server) Hijack(ctx context.Context, req *emptypb.Empty) (*responsewrite if err != nil { return nil, err } - 
serverAddr := serverListener.Addr().String() + server := grpcutils.NewServer() closer := grpcutils.ServerCloser{} - go grpcutils.Serve(serverListener, func(opts []grpc.ServerOption) *grpc.Server { - if len(opts) == 0 { - opts = append(opts, grpcutils.DefaultServerOptions...) - } - server := grpc.NewServer(opts...) - closer.Add(server) - connpb.RegisterConnServer(server, gconn.NewServer(conn, &closer)) - readerpb.RegisterReaderServer(server, greader.NewServer(readWriter)) - writerpb.RegisterWriterServer(server, gwriter.NewServer(readWriter)) - return server - }) + closer.Add(server) + + connpb.RegisterConnServer(server, gconn.NewServer(conn, &closer)) + readerpb.RegisterReaderServer(server, greader.NewServer(readWriter)) + writerpb.RegisterWriterServer(server, gwriter.NewServer(readWriter)) + + go grpcutils.Serve(serverListener, server) local := conn.LocalAddr() remote := conn.RemoteAddr() @@ -118,6 +117,6 @@ func (s *Server) Hijack(ctx context.Context, req *emptypb.Empty) (*responsewrite LocalString: local.String(), RemoteNetwork: remote.Network(), RemoteString: remote.String(), - ServerAddr: serverAddr, + ServerAddr: serverListener.Addr().String(), }, nil } diff --git a/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_client.go b/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_client.go index 5fdc224b..d9a561f2 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gwriter @@ -11,7 +11,7 @@ import ( writerpb "github.com/ava-labs/avalanchego/proto/pb/io/writer" ) -var _ io.Writer = &Client{} +var _ io.Writer = (*Client)(nil) // Client is an io.Writer that talks over RPC. 
type Client struct{ client writerpb.WriterClient } @@ -29,8 +29,8 @@ func (c *Client) Write(p []byte) (int, error) { return 0, err } - if resp.Errored { - err = errors.New(resp.Error) + if resp.Error != nil { + err = errors.New(*resp.Error) } return int(resp.Written), err } diff --git a/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_server.go b/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_server.go index 52f57ebe..ce85aace 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/gwriter/writer_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package gwriter @@ -10,7 +10,7 @@ import ( writerpb "github.com/ava-labs/avalanchego/proto/pb/io/writer" ) -var _ writerpb.WriterServer = &Server{} +var _ writerpb.WriterServer = (*Server)(nil) // Server is an http.Handler that is managed over RPC. type Server struct { @@ -23,14 +23,14 @@ func NewServer(writer io.Writer) *Server { return &Server{writer: writer} } -func (s *Server) Write(ctx context.Context, req *writerpb.WriteRequest) (*writerpb.WriteResponse, error) { +func (s *Server) Write(_ context.Context, req *writerpb.WriteRequest) (*writerpb.WriteResponse, error) { n, err := s.writer.Write(req.Payload) resp := &writerpb.WriteResponse{ Written: int32(n), } if err != nil { - resp.Errored = true - resp.Error = err.Error() + errStr := err.Error() + resp.Error = &errStr } return resp, nil } diff --git a/avalanchego/vms/rpcchainvm/ghttp/http_client.go b/avalanchego/vms/rpcchainvm/ghttp/http_client.go index 5119dcf2..62a6b705 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/http_client.go +++ b/avalanchego/vms/rpcchainvm/ghttp/http_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package ghttp @@ -7,8 +7,6 @@ import ( "io" "net/http" - "google.golang.org/grpc" - "github.com/ava-labs/avalanchego/vms/rpcchainvm/ghttp/gresponsewriter" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" @@ -16,7 +14,7 @@ import ( responsewriterpb "github.com/ava-labs/avalanchego/proto/pb/http/responsewriter" ) -var _ http.Handler = &Client{} +var _ http.Handler = (*Client)(nil) // Client is an http.Handler that talks over RPC. type Client struct { @@ -52,18 +50,13 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - serverAddr := serverListener.Addr().String() + + server := grpcutils.NewServer() + closer.Add(server) + responsewriterpb.RegisterWriterServer(server, gresponsewriter.NewServer(w)) // Start responsewriter gRPC service. - go grpcutils.Serve(serverListener, func(opts []grpc.ServerOption) *grpc.Server { - if len(opts) == 0 { - opts = append(opts, grpcutils.DefaultServerOptions...) - } - server := grpc.NewServer(opts...) - closer.Add(server) - responsewriterpb.RegisterWriterServer(server, gresponsewriter.NewServer(w)) - return server - }) + go grpcutils.Serve(serverListener, server) body, err := io.ReadAll(r.Body) if err != nil { @@ -73,7 +66,7 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { req := &httppb.HTTPRequest{ ResponseWriter: &httppb.ResponseWriter{ - ServerAddr: serverAddr, + ServerAddr: serverListener.Addr().String(), Header: make([]*httppb.Element, 0, len(r.Header)), }, Request: &httppb.Request{ diff --git a/avalanchego/vms/rpcchainvm/ghttp/http_server.go b/avalanchego/vms/rpcchainvm/ghttp/http_server.go index 440e433b..adece6f9 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/http_server.go +++ b/avalanchego/vms/rpcchainvm/ghttp/http_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. 
All rights reserved. // See the file LICENSE for licensing terms. package ghttp @@ -21,8 +21,8 @@ import ( ) var ( - _ httppb.HTTPServer = &Server{} - _ http.ResponseWriter = &ResponseWriter{} + _ httppb.HTTPServer = (*Server)(nil) + _ http.ResponseWriter = (*ResponseWriter)(nil) ) // Server is an http.Handler that is managed over RPC. @@ -191,8 +191,7 @@ func (w *ResponseWriter) Header() http.Header { } func (w *ResponseWriter) Write(buf []byte) (int, error) { - w.body.Write(buf) - return len(buf), nil + return w.body.Write(buf) } func (w *ResponseWriter) WriteHeader(code int) { diff --git a/avalanchego/vms/rpcchainvm/ghttp/http_test.go b/avalanchego/vms/rpcchainvm/ghttp/http_test.go index 73e51e6c..7cafe62a 100644 --- a/avalanchego/vms/rpcchainvm/ghttp/http_test.go +++ b/avalanchego/vms/rpcchainvm/ghttp/http_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package ghttp @@ -12,7 +12,7 @@ import ( httppb "github.com/ava-labs/avalanchego/proto/pb/http" ) -func Test_convertWriteResponse(t *testing.T) { +func TestConvertWriteResponse(t *testing.T) { require := require.New(t) scenerios := map[string]struct { diff --git a/avalanchego/vms/rpcchainvm/grpcutils/client.go b/avalanchego/vms/rpcchainvm/grpcutils/client.go new file mode 100644 index 00000000..0a9dfcff --- /dev/null +++ b/avalanchego/vms/rpcchainvm/grpcutils/client.go @@ -0,0 +1,105 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package grpcutils + +import ( + "fmt" + "math" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/keepalive" +) + +const ( + // After a duration of this time if the client doesn't see any activity it + // pings the server to see if the transport is still alive. 
+ // If set below 10s, a minimum value of 10s will be used instead. + // grpc-go default infinity + defaultClientKeepAliveTime = 30 * time.Second + // After having pinged for keepalive check, the client waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. grpc-go default 20s + defaultClientKeepAliveTimeOut = 10 * time.Second + // If true, client sends keepalive pings even with no active RPCs. If false, + // when there are no active RPCs, Time and Timeout will be ignored and no + // keepalive pings will be sent. grpc-go default false + defaultPermitWithoutStream = true + // WaitForReady configures the action to take when an RPC is attempted on + // broken connections or unreachable servers. If waitForReady is false and + // the connection is in the TRANSIENT_FAILURE state, the RPC will fail + // immediately. Otherwise, the RPC client will block the call until a + // connection is available (or the call is canceled or times out) and will + // retry the call if it fails due to a transient error. gRPC will not retry + // if data was written to the wire unless the server indicates it did not + // process the data. Please refer to + // https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. + // + // gRPC default behavior is to NOT "wait for ready". + defaultWaitForReady = true +) + +var DefaultDialOptions = []grpc.DialOption{ + grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt), + grpc.MaxCallSendMsgSize(math.MaxInt), + grpc.WaitForReady(defaultWaitForReady), + ), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: defaultClientKeepAliveTime, + Timeout: defaultClientKeepAliveTimeOut, + PermitWithoutStream: defaultPermitWithoutStream, + }), + grpc.WithTransportCredentials(insecure.NewCredentials()), +} + +// gRPC clients created from this ClientConn will wait forever for the Server to +// become Ready. 
If you desire a dial timeout ensure context is properly plumbed +// to the client and use context.WithTimeout. +// +// Dial returns a gRPC ClientConn with the dial options as defined by +// DefaultDialOptions. DialOption can also optionally be passed. +func Dial(addr string, opts ...DialOption) (*grpc.ClientConn, error) { + return grpc.Dial(fmt.Sprintf("passthrough:///%s", addr), newDialOpts(opts...)...) +} + +// DialOptions are options which can be applied to a gRPC client in addition to +// the defaults set by DefaultDialOptions. +type DialOptions struct { + opts []grpc.DialOption +} + +// append(DefaultDialOptions, ...) will always allocate a new slice and will +// not overwrite any potential data that may have previously been appended to +// DefaultServerOptions https://go.dev/ref/spec#Composite_literals +func newDialOpts(opts ...DialOption) []grpc.DialOption { + d := &DialOptions{opts: DefaultDialOptions} + d.applyOpts(opts) + return d.opts +} + +func (d *DialOptions) applyOpts(opts []DialOption) { + for _, opt := range opts { + opt(d) + } +} + +type DialOption func(*DialOptions) + +// WithChainUnaryInterceptor takes a list of unary client interceptors which +// are added to the dial options. +func WithChainUnaryInterceptor(interceptors ...grpc.UnaryClientInterceptor) DialOption { + return func(d *DialOptions) { + d.opts = append(d.opts, grpc.WithChainUnaryInterceptor(interceptors...)) + } +} + +// WithChainStreamInterceptor takes a list of stream client interceptors which +// are added to the dial options. 
+func WithChainStreamInterceptor(interceptors ...grpc.StreamClientInterceptor) DialOption { + return func(d *DialOptions) { + d.opts = append(d.opts, grpc.WithChainStreamInterceptor(interceptors...)) + } +} diff --git a/avalanchego/vms/rpcchainvm/grpcutils/client_test.go b/avalanchego/vms/rpcchainvm/grpcutils/client_test.go new file mode 100644 index 00000000..0e48a837 --- /dev/null +++ b/avalanchego/vms/rpcchainvm/grpcutils/client_test.go @@ -0,0 +1,87 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package grpcutils + +import ( + "testing" + "time" + + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/status" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/database/rpcdb" + + pb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" +) + +func TestDialOptsSmoke(t *testing.T) { + require := require.New(t) + + opts := newDialOpts() + require.Len(opts, 3) + + opts = newDialOpts( + WithChainUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor), + WithChainStreamInterceptor(grpc_prometheus.StreamClientInterceptor), + ) + require.Len(opts, 5) +} + +// Test_WaitForReady shows the expected results from the DialOption during +// client creation. If true the client will block and wait forever for the +// server to become Ready even if the listener is closed. +// ref. 
https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md +func TestWaitForReady(t *testing.T) { + require := require.New(t) + + listener, err := NewListener() + require.NoError(err) + defer listener.Close() + + server := NewServer() + defer server.Stop() + pb.RegisterDatabaseServer(server, rpcdb.NewServer(memdb.New())) + + go func() { + time.Sleep(100 * time.Millisecond) + Serve(listener, server) + }() + + // The default includes grpc.WaitForReady(true). + conn, err := Dial(listener.Addr().String()) + require.NoError(err) + + db := rpcdb.NewClient(pb.NewDatabaseClient(conn)) + + err = db.Put([]byte("foo"), []byte("bar")) + require.NoError(err) + + noWaitListener, err := NewListener() + require.NoError(err) + // close listener causes RPC to fail fast. + // The client would timeout otherwise. + _ = noWaitListener.Close() + + // By directly calling `grpc.Dial` rather than `Dial`, the default does not + // include setting grpc.WaitForReady(true). + noWaitConn, err := grpc.Dial( + noWaitListener.Addr().String(), + grpc.WithTransportCredentials(insecure.NewCredentials()), + ) + require.NoError(err) + + db = rpcdb.NewClient(pb.NewDatabaseClient(noWaitConn)) + + err = db.Put([]byte("foo"), []byte("bar")) + status, ok := status.FromError(err) + require.True(ok) + require.Equal(codes.Unavailable, status.Code()) +} diff --git a/avalanchego/vms/rpcchainvm/grpcutils/listener.go b/avalanchego/vms/rpcchainvm/grpcutils/listener.go deleted file mode 100644 index 23663f39..00000000 --- a/avalanchego/vms/rpcchainvm/grpcutils/listener.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package grpcutils - -import ( - "net" - - "google.golang.org/grpc" -) - -func NewListener() (net.Listener, error) { - return net.Listen("tcp", "127.0.0.1:") -} - -func Dial(addr string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - if len(opts) == 0 { - opts = append(opts, DefaultDialOptions...) - } - return createClientConn(addr, opts...) -} diff --git a/avalanchego/vms/rpcchainvm/grpcutils/server.go b/avalanchego/vms/rpcchainvm/grpcutils/server.go new file mode 100644 index 00000000..b262cf74 --- /dev/null +++ b/avalanchego/vms/rpcchainvm/grpcutils/server.go @@ -0,0 +1,123 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package grpcutils + +import ( + "math" + "net" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +const ( + // MinTime is the minimum amount of time a client should wait before sending + // a keepalive ping. grpc-go default 5 mins + defaultServerKeepAliveMinTime = 5 * time.Second + // After a duration of this time if the server doesn't see any activity it + // pings the client to see if the transport is still alive. + // If set below 1s, a minimum value of 1s will be used instead. + // grpc-go default 2h + defaultServerKeepAliveInterval = 2 * time.Hour + // After having pinged for keepalive check, the server waits for a duration + // of Timeout and if no activity is seen even after that the connection is + // closed. grpc-go default 20s + defaultServerKeepAliveTimeout = 20 * time.Second + // Duration for the maximum amount of time a http2 connection can exist + // before sending GOAWAY. Internally in gRPC a +-10% jitter is added to + // mitigate retry storms. + defaultServerMaxConnectionAge = 10 * time.Minute + // After MaxConnectionAge, MaxConnectionAgeGrace specifies the amount of time + // between when the server sends a GOAWAY to the client to initiate graceful + // shutdown, and when the server closes the connection. 
+ // + // The server expects that this grace period will allow the client to complete + // any ongoing requests, after which it will forcefully terminate the connection. + // If a request takes longer than this grace period, it will *fail*. + // We *never* want an RPC to live longer than this value. + // + // invariant: Any value < 1 second will be internally overridden by gRPC. + defaultServerMaxConnectionAgeGrace = math.MaxInt64 +) + +var DefaultServerOptions = []grpc.ServerOption{ + grpc.MaxRecvMsgSize(math.MaxInt), + grpc.MaxSendMsgSize(math.MaxInt), + grpc.MaxConcurrentStreams(math.MaxUint32), + grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ + MinTime: defaultServerKeepAliveMinTime, + PermitWithoutStream: defaultPermitWithoutStream, + }), + grpc.KeepaliveParams(keepalive.ServerParameters{ + Time: defaultServerKeepAliveInterval, + Timeout: defaultServerKeepAliveTimeout, + MaxConnectionAge: defaultServerMaxConnectionAge, + MaxConnectionAgeGrace: defaultServerMaxConnectionAgeGrace, + }), +} + +// NewServer will return a gRPC server with server options as defined by +// DefaultServerOptions. ServerOption can also optionally be passed. +func NewServer(opts ...ServerOption) *grpc.Server { + return grpc.NewServer(newServerOpts(opts)...) +} + +type ServerOptions struct { + opts []grpc.ServerOption +} + +// append(DefaultServerOptions, ...) will always allocate a new slice and will +// not overwrite any potential data that may have previously been appended to +// DefaultServerOptions https://go.dev/ref/spec#Composite_literals +func newServerOpts(opts []ServerOption) []grpc.ServerOption { + s := &ServerOptions{opts: DefaultServerOptions} + s.applyOpts(opts) + return s.opts +} + +func (s *ServerOptions) applyOpts(opts []ServerOption) { + for _, opt := range opts { + opt(s) + } +} + +// ServerOption are options which can be applied to a gRPC server in addition to +// the defaults set by DefaultServerOPtions. 
+type ServerOption func(*ServerOptions) + +// WithUnaryInterceptor adds a single unary interceptor to the gRPC server +// options. +func WithUnaryInterceptor(unaryInterceptor grpc.UnaryServerInterceptor) ServerOption { + return func(s *ServerOptions) { + s.opts = append(s.opts, grpc.UnaryInterceptor(unaryInterceptor)) + } +} + +// WithStreamInterceptor adds a single stream interceptor to the gRPC server +// options. +func WithStreamInterceptor(streamInterceptor grpc.StreamServerInterceptor) ServerOption { + return func(s *ServerOptions) { + s.opts = append(s.opts, grpc.StreamInterceptor(streamInterceptor)) + } +} + +// NewListener returns a TCP listener listening against the next available port +// on the system bound to localhost. +func NewListener() (net.Listener, error) { + return net.Listen("tcp", "127.0.0.1:") +} + +// Serve will start a gRPC server and block until it errors or is shutdown. +func Serve(listener net.Listener, grpcServer *grpc.Server) { + // TODO: While errors will be reported later, it could be useful to somehow + // log this if it is the primary error. + // + // There is nothing to with the error returned by serve here. Later requests + // will propegate their error if they occur. + _ = grpcServer.Serve(listener) + + // Similarly, there is nothing to with an error when the listener is closed. + _ = listener.Close() +} diff --git a/avalanchego/vms/rpcchainvm/grpcutils/server_closer.go b/avalanchego/vms/rpcchainvm/grpcutils/server_closer.go index a7b0b453..35ca2b73 100644 --- a/avalanchego/vms/rpcchainvm/grpcutils/server_closer.go +++ b/avalanchego/vms/rpcchainvm/grpcutils/server_closer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package grpcutils diff --git a/avalanchego/vms/rpcchainvm/grpcutils/util.go b/avalanchego/vms/rpcchainvm/grpcutils/util.go index 9cd2f183..8ad042ea 100644 --- a/avalanchego/vms/rpcchainvm/grpcutils/util.go +++ b/avalanchego/vms/rpcchainvm/grpcutils/util.go @@ -1,20 +1,13 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package grpcutils import ( "fmt" - "math" - "net" "net/http" "time" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - "google.golang.org/grpc/keepalive" "google.golang.org/grpc/status" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" @@ -26,96 +19,6 @@ import ( httppb "github.com/ava-labs/avalanchego/proto/pb/http" ) -const ( - // Server: - - // MinTime is the minimum amount of time a client should wait before sending - // a keepalive ping. grpc-go default 5 mins - defaultServerKeepAliveMinTime = 5 * time.Second - // After a duration of this time if the server doesn't see any activity it - // pings the client to see if the transport is still alive. - // If set below 1s, a minimum value of 1s will be used instead. - // grpc-go default 2h - defaultServerKeepAliveInterval = 2 * time.Hour - // After having pinged for keepalive check, the server waits for a duration - // of Timeout and if no activity is seen even after that the connection is - // closed. grpc-go default 20s - defaultServerKeepAliveTimeout = 20 * time.Second - // Duration for the maximum amount of time a http2 connection can exist - // before sending GOAWAY. Internally in gRPC a +-10% jitter is added to - // mitigate retry storms. 
- defaultServerMaxConnectionAge = 10 * time.Minute - // After MaxConnectionAge, MaxConnectionAgeGrace specifies the amount of time - // between when the server sends a GOAWAY to the client to initiate graceful - // shutdown, and when the server closes the connection. - // - // The server expects that this grace period will allow the client to complete - // any ongoing requests, after which it will forcefully terminate the connection. - // If a request takes longer than this grace period, it will *fail*. - // We *never* want an RPC to live longer than this value. - // - // invariant: Any value < 1 second will be internally overridden by gRPC. - defaultServerMaxConnectionAgeGrace = math.MaxInt64 - - // Client: - - // After a duration of this time if the client doesn't see any activity it - // pings the server to see if the transport is still alive. - // If set below 10s, a minimum value of 10s will be used instead. - // grpc-go default infinity - defaultClientKeepAliveTime = 30 * time.Second - // After having pinged for keepalive check, the client waits for a duration - // of Timeout and if no activity is seen even after that the connection is - // closed. grpc-go default 20s - defaultClientKeepAliveTimeOut = 10 * time.Second - // If true, client sends keepalive pings even with no active RPCs. If false, - // when there are no active RPCs, Time and Timeout will be ignored and no - // keepalive pings will be sent. 
grpc-go default false - defaultPermitWithoutStream = true -) - -var ( - DefaultDialOptions = []grpc.DialOption{ - grpc.WithDefaultCallOptions( - grpc.MaxCallRecvMsgSize(math.MaxInt), - grpc.MaxCallSendMsgSize(math.MaxInt), - grpc.WaitForReady(true), - ), - grpc.WithKeepaliveParams(keepalive.ClientParameters{ - Time: defaultClientKeepAliveTime, - Timeout: defaultClientKeepAliveTimeOut, - PermitWithoutStream: defaultPermitWithoutStream, - }), - } - - DefaultServerOptions = []grpc.ServerOption{ - grpc.MaxRecvMsgSize(math.MaxInt), - grpc.MaxSendMsgSize(math.MaxInt), - grpc.MaxConcurrentStreams(math.MaxUint32), - grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{ - MinTime: defaultServerKeepAliveMinTime, - PermitWithoutStream: defaultPermitWithoutStream, - }), - grpc.KeepaliveParams(keepalive.ServerParameters{ - Time: defaultServerKeepAliveInterval, - Timeout: defaultServerKeepAliveTimeout, - MaxConnectionAge: defaultServerMaxConnectionAge, - MaxConnectionAgeGrace: defaultServerMaxConnectionAgeGrace, - }), - } -) - -// DialOptsWithMetrics registers gRPC client metrics via chain interceptors. -func DialOptsWithMetrics(clientMetrics *grpc_prometheus.ClientMetrics) []grpc.DialOption { - return append(DefaultDialOptions, - // Use chain interceptors to ensure custom/default interceptors are - // applied correctly. - // ref. 
https://github.com/kubernetes/kubernetes/pull/105069 - grpc.WithChainStreamInterceptor(clientMetrics.StreamClientInterceptor()), - grpc.WithChainUnaryInterceptor(clientMetrics.UnaryClientInterceptor()), - ) -} - func Errorf(code int, tmpl string, args ...interface{}) error { return GetGRPCErrorFromHTTPResponse(&httppb.HandleSimpleHTTPResponse{ Code: int32(code), @@ -177,36 +80,6 @@ func MergeHTTPHeader(hs []*httppb.Element, header http.Header) { } } -func Serve(listener net.Listener, grpcServerFunc func([]grpc.ServerOption) *grpc.Server) { - var opts []grpc.ServerOption - grpcServer := grpcServerFunc(opts) - - // TODO: While errors will be reported later, it could be useful to somehow - // log this if it is the primary error. - // - // There is nothing to with the error returned by serve here. Later requests - // will propegate their error if they occur. - _ = grpcServer.Serve(listener) - - // Similarly, there is nothing to with an error when the listener is closed. - _ = listener.Close() -} - -func createClientConn(addr string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { - opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) - return grpc.Dial(addr, opts...) -} - -// NewDefaultServer ensures the plugin service is served with proper -// defaults. This should always be passed to GRPCServer field of -// plugin.ServeConfig. -func NewDefaultServer(opts []grpc.ServerOption) *grpc.Server { - if len(opts) == 0 { - opts = append(opts, DefaultServerOptions...) - } - return grpc.NewServer(opts...) -} - // TimestampAsTime validates timestamppb timestamp and returns time.Time. 
func TimestampAsTime(ts *tspb.Timestamp) (time.Time, error) { if err := ts.CheckValid(); err != nil { diff --git a/avalanchego/vms/rpcchainvm/gruntime/runtime_client.go b/avalanchego/vms/rpcchainvm/gruntime/runtime_client.go new file mode 100644 index 00000000..67a1e986 --- /dev/null +++ b/avalanchego/vms/rpcchainvm/gruntime/runtime_client.go @@ -0,0 +1,31 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gruntime + +import ( + "context" + + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" + + pb "github.com/ava-labs/avalanchego/proto/pb/vm/runtime" +) + +var _ runtime.Initializer = (*Client)(nil) + +// Client is a VM runtime initializer. +type Client struct { + client pb.RuntimeClient +} + +func NewClient(client pb.RuntimeClient) *Client { + return &Client{client: client} +} + +func (c *Client) Initialize(ctx context.Context, protocolVersion uint, vmAddr string) error { + _, err := c.client.Initialize(ctx, &pb.InitializeRequest{ + ProtocolVersion: uint32(protocolVersion), + Addr: vmAddr, + }) + return err +} diff --git a/avalanchego/vms/rpcchainvm/gruntime/runtime_server.go b/avalanchego/vms/rpcchainvm/gruntime/runtime_server.go new file mode 100644 index 00000000..25954ee9 --- /dev/null +++ b/avalanchego/vms/rpcchainvm/gruntime/runtime_server.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gruntime + +import ( + "context" + + "google.golang.org/protobuf/types/known/emptypb" + + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" + + pb "github.com/ava-labs/avalanchego/proto/pb/vm/runtime" +) + +var _ pb.RuntimeServer = &Server{} + +// Server is a VM runtime initializer controlled by RPC. 
+type Server struct { + pb.UnsafeRuntimeServer + runtime runtime.Initializer +} + +func NewServer(runtime runtime.Initializer) *Server { + return &Server{ + runtime: runtime, + } +} + +func (s *Server) Initialize(ctx context.Context, req *pb.InitializeRequest) (*emptypb.Empty, error) { + return &emptypb.Empty{}, s.runtime.Initialize(ctx, uint(req.ProtocolVersion), req.Addr) +} diff --git a/avalanchego/vms/rpcchainvm/gsubnetlookup/subnet_lookup_client.go b/avalanchego/vms/rpcchainvm/gsubnetlookup/subnet_lookup_client.go deleted file mode 100644 index ddb77cb2..00000000 --- a/avalanchego/vms/rpcchainvm/gsubnetlookup/subnet_lookup_client.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package gsubnetlookup - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - - subnetlookuppb "github.com/ava-labs/avalanchego/proto/pb/subnetlookup" -) - -var _ snow.SubnetLookup = &Client{} - -// Client is a subnet lookup that talks over RPC. -type Client struct { - client subnetlookuppb.SubnetLookupClient -} - -// NewClient returns an alias lookup connected to a remote alias lookup -func NewClient(client subnetlookuppb.SubnetLookupClient) *Client { - return &Client{client: client} -} - -func (c *Client) SubnetID(chainID ids.ID) (ids.ID, error) { - resp, err := c.client.SubnetID(context.Background(), &subnetlookuppb.SubnetIDRequest{ - ChainId: chainID[:], - }) - if err != nil { - return ids.ID{}, err - } - return ids.ToID(resp.Id) -} diff --git a/avalanchego/vms/rpcchainvm/gsubnetlookup/subnet_lookup_server.go b/avalanchego/vms/rpcchainvm/gsubnetlookup/subnet_lookup_server.go deleted file mode 100644 index 78cfde3f..00000000 --- a/avalanchego/vms/rpcchainvm/gsubnetlookup/subnet_lookup_server.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package gsubnetlookup - -import ( - "context" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" - - subnetlookuppb "github.com/ava-labs/avalanchego/proto/pb/subnetlookup" -) - -var _ subnetlookuppb.SubnetLookupServer = &Server{} - -// Server is a subnet lookup that is managed over RPC. -type Server struct { - subnetlookuppb.UnsafeSubnetLookupServer - aliaser snow.SubnetLookup -} - -// NewServer returns a subnet lookup connected to a remote subnet lookup -func NewServer(aliaser snow.SubnetLookup) *Server { - return &Server{aliaser: aliaser} -} - -func (s *Server) SubnetID( - _ context.Context, - req *subnetlookuppb.SubnetIDRequest, -) (*subnetlookuppb.SubnetIDResponse, error) { - chainID, err := ids.ToID(req.ChainId) - if err != nil { - return nil, err - } - id, err := s.aliaser.SubnetID(chainID) - if err != nil { - return nil, err - } - return &subnetlookuppb.SubnetIDResponse{ - Id: id[:], - }, nil -} diff --git a/avalanchego/vms/rpcchainvm/messenger/messenger_client.go b/avalanchego/vms/rpcchainvm/messenger/messenger_client.go index 718fa739..e7910eb0 100644 --- a/avalanchego/vms/rpcchainvm/messenger/messenger_client.go +++ b/avalanchego/vms/rpcchainvm/messenger/messenger_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package messenger @@ -23,7 +23,7 @@ func NewClient(client messengerpb.MessengerClient) *Client { func (c *Client) Notify(msg common.Message) error { _, err := c.client.Notify(context.Background(), &messengerpb.NotifyRequest{ - Message: uint32(msg), + Message: messengerpb.Message(msg), }) return err } diff --git a/avalanchego/vms/rpcchainvm/messenger/messenger_server.go b/avalanchego/vms/rpcchainvm/messenger/messenger_server.go index 2e6ed5df..273ffdfd 100644 --- a/avalanchego/vms/rpcchainvm/messenger/messenger_server.go +++ b/avalanchego/vms/rpcchainvm/messenger/messenger_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package messenger @@ -15,7 +15,7 @@ import ( var ( errFullQueue = errors.New("full message queue") - _ messengerpb.MessengerServer = &Server{} + _ messengerpb.MessengerServer = (*Server)(nil) ) // Server is a messenger that is managed over RPC. diff --git a/avalanchego/vms/rpcchainvm/mock_factory.go b/avalanchego/vms/rpcchainvm/mock_factory.go deleted file mode 100644 index 495e29f8..00000000 --- a/avalanchego/vms/rpcchainvm/mock_factory.go +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: vms/rpcchainvm/factory.go - -// Package rpcchainvm is a generated GoMock package. -package rpcchainvm - -import ( - reflect "reflect" - - snow "github.com/ava-labs/avalanchego/snow" - gomock "github.com/golang/mock/gomock" -) - -// MockFactory is a mock of Factory interface. -type MockFactory struct { - ctrl *gomock.Controller - recorder *MockFactoryMockRecorder -} - -// MockFactoryMockRecorder is the mock recorder for MockFactory. -type MockFactoryMockRecorder struct { - mock *MockFactory -} - -// NewMockFactory creates a new mock instance. 
-func NewMockFactory(ctrl *gomock.Controller) *MockFactory { - mock := &MockFactory{ctrl: ctrl} - mock.recorder = &MockFactoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockFactory) EXPECT() *MockFactoryMockRecorder { - return m.recorder -} - -// New mocks base method. -func (m *MockFactory) New(arg0 *snow.Context) (interface{}, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "New", arg0) - ret0, _ := ret[0].(interface{}) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// New indicates an expected call of New. -func (mr *MockFactoryMockRecorder) New(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "New", reflect.TypeOf((*MockFactory)(nil).New), arg0) -} diff --git a/avalanchego/vms/rpcchainvm/mock_plugin_directory.go b/avalanchego/vms/rpcchainvm/mock_plugin_directory.go deleted file mode 100644 index 516e7bd9..00000000 --- a/avalanchego/vms/rpcchainvm/mock_plugin_directory.go +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: vms/rpcchainvm/plugin_directory.go - -// Package rpcchainvm is a generated GoMock package. -package rpcchainvm - -import ( - reflect "reflect" - - ids "github.com/ava-labs/avalanchego/ids" - vms "github.com/ava-labs/avalanchego/vms" - gomock "github.com/golang/mock/gomock" -) - -// MockPluginDirectory is a mock of PluginDirectory interface. -type MockPluginDirectory struct { - ctrl *gomock.Controller - recorder *MockPluginDirectoryMockRecorder -} - -// MockPluginDirectoryMockRecorder is the mock recorder for MockPluginDirectory. -type MockPluginDirectoryMockRecorder struct { - mock *MockPluginDirectory -} - -// NewMockPluginDirectory creates a new mock instance. 
-func NewMockPluginDirectory(ctrl *gomock.Controller) *MockPluginDirectory { - mock := &MockPluginDirectory{ctrl: ctrl} - mock.recorder = &MockPluginDirectoryMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockPluginDirectory) EXPECT() *MockPluginDirectoryMockRecorder { - return m.recorder -} - -// GetVMs mocks base method. -func (m *MockPluginDirectory) GetVMs(manager vms.Manager) (map[ids.ID]vms.Factory, map[ids.ID]vms.Factory, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetVMs", manager) - ret0, _ := ret[0].(map[ids.ID]vms.Factory) - ret1, _ := ret[1].(map[ids.ID]vms.Factory) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// GetVMs indicates an expected call of GetVMs. -func (mr *MockPluginDirectoryMockRecorder) GetVMs(manager interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVMs", reflect.TypeOf((*MockPluginDirectory)(nil).GetVMs), manager) -} diff --git a/avalanchego/vms/rpcchainvm/plugin_test.go b/avalanchego/vms/rpcchainvm/plugin_test.go deleted file mode 100644 index 87645d07..00000000 --- a/avalanchego/vms/rpcchainvm/plugin_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package rpcchainvm - -import ( - "fmt" - "os" - "os/exec" - "testing" - - gomock "github.com/golang/mock/gomock" - - plugin "github.com/hashicorp/go-plugin" - - "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" -) - -// plugin_test collects objects and helpers generally helpful for various rpc tests - -const ( - chainVMTestKey = "chainVMTest" - stateSyncEnabledTestKey = "stateSyncEnabledTest" - getOngoingSyncStateSummaryTestKey = "getOngoingSyncStateSummaryTest" - getLastStateSummaryTestKey = "getLastStateSummaryTest" - parseStateSummaryTestKey = "parseStateSummaryTest" - getStateSummaryTestKey = "getStateSummaryTest" - acceptStateSummaryTestKey = "acceptStateSummaryTest" - lastAcceptedBlockPostStateSummaryAcceptTestKey = "lastAcceptedBlockPostStateSummaryAcceptTest" -) - -var ( - TestHandshake = plugin.HandshakeConfig{ - ProtocolVersion: protocolVersion, - MagicCookieKey: "VM_PLUGIN", - MagicCookieValue: "dynamic", - } - - TestClientPluginMap = map[string]plugin.Plugin{ - chainVMTestKey: &testVMPlugin{}, - } - - TestServerPluginMap = map[string]func(*testing.T, bool) (plugin.Plugin, *gomock.Controller){ - chainVMTestKey: chainVMTestPlugin, - stateSyncEnabledTestKey: stateSyncEnabledTestPlugin, - getOngoingSyncStateSummaryTestKey: getOngoingSyncStateSummaryTestPlugin, - getLastStateSummaryTestKey: getLastStateSummaryTestPlugin, - parseStateSummaryTestKey: parseStateSummaryTestPlugin, - getStateSummaryTestKey: getStateSummaryTestPlugin, - acceptStateSummaryTestKey: acceptStateSummaryTestPlugin, - lastAcceptedBlockPostStateSummaryAcceptTestKey: lastAcceptedBlockPostStateSummaryAcceptTestPlugin, - } -) - -// helperProcess helps with creating the plugin binary for testing. -func helperProcess(s ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcess", "--"} - cs = append(cs, s...) - env := []string{ - "TEST_PROCESS=1", - } - run := os.Args[0] - cmd := exec.Command(run, cs...) - env = append(env, os.Environ()...) 
- cmd.Env = env - return cmd -} - -func TestHelperProcess(t *testing.T) { - if os.Getenv("TEST_PROCESS") != "1" { - return - } - - args := os.Args - for len(args) > 0 { - if args[0] == "--" { - args = args[1:] - break - } - - args = args[1:] - } - - if len(args) == 0 { - fmt.Fprintf(os.Stderr, "failed to receive command\n") - os.Exit(2) - } - - plugins := make(map[string]plugin.Plugin) - controllersList := make([]*gomock.Controller, 0, len(args)) - for _, testKey := range args { - mockedPlugin, ctrl := TestServerPluginMap[testKey](t, true /*loadExpectations*/) - controllersList = append(controllersList, ctrl) - plugins[testKey] = mockedPlugin - } - - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: TestHandshake, - Plugins: plugins, - - // A non-nil value here enables gRPC serving for this plugin. - GRPCServer: grpcutils.NewDefaultServer, - }) - - for _, ctrl := range controllersList { - ctrl.Finish() - } - os.Exit(0) -} diff --git a/avalanchego/vms/rpcchainvm/runtime/README.md b/avalanchego/vms/rpcchainvm/runtime/README.md new file mode 100644 index 00000000..6e09e41f --- /dev/null +++ b/avalanchego/vms/rpcchainvm/runtime/README.md @@ -0,0 +1,45 @@ +# Virtual Machine Runtime Engine (VMRE) + +The `VMRE` handles the lifecycle, compatibility and logging IO of a managed VM process. + +## How it works + +The `runtime.Initializer` interface could be implemented to manage local or remote VM processes. +This implementation is consumed by a gRPC server which serves the `Runtime` +service. The server interacts with the underlying process and allows for the VM +binary to communicate with AvalancheGo. + +### Subprocess VM management + +The `subprocess` is currently the only supported `Runtime` implementation. +It works by starting the VM's as a subprocess of AvalancheGo by `os.Exec`. + +## Workflow + +- `VMRegistry` calls the RPC Chain VM `Factory`. +- Factory Starts an instanace of a `VMRE` server that consumes a `runtime.Initializer` interface implementation. 
+- The address of this server is passed as an ENV variable `AVALANCHE_VM_RUNTIME_ENGINE_ADDR` via `os.Exec` which starts the VM binary.
+- The VM uses the address of the `VMRE` server to create a client.
+- Client sends an `Initialize` RPC informing the server of the `Protocol Version` and future `Address` of the RPC Chain VM server allowing it to perform a validation `Handshake`.
+ +package runtime + +import ( + "context" + "sync" +) + +// Manages tracking and shutdown of VM runtimes. +type manager struct { + lock sync.Mutex + runtimes []Stopper +} + +// NewManager returns manager of VM runtimes. +// +// TODO: If a runtime exits before the call to `manager.Stop`, it would be nice +// to remove it from the current set. +func NewManager() Manager { + return &manager{} +} + +func (m *manager) Stop(ctx context.Context) { + var wg sync.WaitGroup + m.lock.Lock() + defer func() { + m.lock.Unlock() + wg.Wait() + }() + + wg.Add(len(m.runtimes)) + for _, rt := range m.runtimes { + go func(runtime Stopper) { + defer wg.Done() + runtime.Stop(ctx) + }(rt) + } + m.runtimes = nil +} + +func (m *manager) TrackRuntime(runtime Stopper) { + m.lock.Lock() + defer m.lock.Unlock() + + m.runtimes = append(m.runtimes, runtime) +} diff --git a/avalanchego/vms/rpcchainvm/runtime/runtime.go b/avalanchego/vms/rpcchainvm/runtime/runtime.go new file mode 100644 index 00000000..f5d9666e --- /dev/null +++ b/avalanchego/vms/rpcchainvm/runtime/runtime.go @@ -0,0 +1,51 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package runtime + +import ( + "context" + "errors" + "time" +) + +const ( + // Address of the runtime engine server. + EngineAddressKey = "AVALANCHE_VM_RUNTIME_ENGINE_ADDR" + + // Duration before handshake timeout during bootstrap. + DefaultHandshakeTimeout = 5 * time.Second + + // Duration of time to wait for graceful termination to complete. + DefaultGracefulTimeout = 5 * time.Second +) + +var ( + ErrProtocolVersionMismatch = errors.New("protocol version mismatch") + ErrHandshakeFailed = errors.New("handshake failed") + ErrInvalidConfig = errors.New("invalid config") + ErrProcessNotFound = errors.New("vm process not found") +) + +type Initializer interface { + // Initialize provides AvalancheGo with compatibility, networking and + // process information of a VM. 
+ Initialize(ctx context.Context, protocolVersion uint, vmAddr string) error +} + +type Stopper interface { + // Stop begins shutdown of a VM. This method must not block + // and multiple calls to this method will result in no-op. + Stop(ctx context.Context) +} + +type Tracker interface { + // TrackRuntime adds a VM stopper to the manager. + TrackRuntime(runtime Stopper) +} + +type Manager interface { + Tracker + // Stop all managed VMs. + Stop(ctx context.Context) +} diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/initializer.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/initializer.go new file mode 100644 index 00000000..47a50984 --- /dev/null +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/initializer.go @@ -0,0 +1,48 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package subprocess + +import ( + "context" + "fmt" + "sync" + + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" +) + +var _ runtime.Initializer = (*initializer)(nil) + +// Subprocess VM Runtime intializer. 
+type initializer struct { + once sync.Once + // Address of the RPC Chain VM server + vmAddr string + // Error, if one occurred, during Initialization + err error + // Initialized is closed once Initialize is called + initialized chan struct{} +} + +func newInitializer() *initializer { + return &initializer{ + initialized: make(chan struct{}), + } +} + +func (i *initializer) Initialize(_ context.Context, protocolVersion uint, vmAddr string) error { + i.once.Do(func() { + if version.RPCChainVMProtocol != protocolVersion { + i.err = fmt.Errorf( + "%w avalanchego: %d, vm: %d", + runtime.ErrProtocolVersionMismatch, + version.RPCChainVMProtocol, + protocolVersion, + ) + } + i.vmAddr = vmAddr + close(i.initialized) + }) + return i.err +} diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/linux_stopper.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/linux_stopper.go new file mode 100644 index 00000000..e056b82f --- /dev/null +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/linux_stopper.go @@ -0,0 +1,62 @@ +//go:build linux +// +build linux + +// ^ SIGTERM signal is not available on Windows +// ^ syscall.SysProcAttr only has field Pdeathsig on Linux + +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package subprocess + +import ( + "context" + "os/exec" + "syscall" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" +) + +func NewCmd(path string, args ...string) *exec.Cmd { + cmd := exec.Command(path, args...) 
+ cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGTERM} + return cmd +} + +func stop(ctx context.Context, log logging.Logger, cmd *exec.Cmd) { + waitChan := make(chan error) + go func() { + // attempt graceful shutdown + errs := wrappers.Errs{} + err := cmd.Process.Signal(syscall.SIGTERM) + errs.Add(err) + _, err = cmd.Process.Wait() + errs.Add(err) + waitChan <- errs.Err + close(waitChan) + }() + + ctx, cancel := context.WithTimeout(ctx, runtime.DefaultGracefulTimeout) + defer cancel() + + select { + case err := <-waitChan: + if err == nil { + log.Debug("subprocess gracefully shutdown") + } else { + log.Error("subprocess graceful shutdown failed", + zap.Error(err), + ) + } + case <-ctx.Done(): + // force kill + err := cmd.Process.Kill() + log.Error("subprocess was killed", + zap.Error(err), + ) + } +} diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go new file mode 100644 index 00000000..facbf58b --- /dev/null +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/non_linux_stopper.go @@ -0,0 +1,31 @@ +//go:build !linux +// +build !linux + +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package subprocess + +import ( + "context" + "os/exec" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/utils/logging" +) + +func NewCmd(path string, args ...string) *exec.Cmd { + return exec.Command(path, args...) 
+} + +func stop(_ context.Context, log logging.Logger, cmd *exec.Cmd) { + err := cmd.Process.Kill() + if err == nil { + log.Debug("subprocess was killed") + } else { + log.Error("subprocess was killed", + zap.Error(err), + ) + } +} diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/runtime.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/runtime.go new file mode 100644 index 00000000..10f5ebed --- /dev/null +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/runtime.go @@ -0,0 +1,152 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package subprocess + +import ( + "context" + "fmt" + "io" + "net" + "os" + "os/exec" + "strings" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/gruntime" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" + + pb "github.com/ava-labs/avalanchego/proto/pb/vm/runtime" +) + +type Config struct { + // Stderr of the VM process written to this writer. + Stderr io.Writer + // Stdout of the VM process written to this writer. + Stdout io.Writer + // Duration engine server will wait for handshake success. + HandshakeTimeout time.Duration + Log logging.Logger +} + +type Status struct { + // Id of the process. + Pid int + // Address of the VM gRPC service. + Addr string +} + +// Bootstrap starts a VM as a subprocess after initialization completes and +// pipes the IO to the appropriate writers. +// +// The subprocess is expected to be stopped by the caller if a non-nil error is +// returned. If piping the IO fails then the subprocess will be stopped. 
+// +// TODO: create the listener inside this method once we refactor the tests +func Bootstrap( + ctx context.Context, + listener net.Listener, + cmd *exec.Cmd, + config *Config, +) (*Status, runtime.Stopper, error) { + defer listener.Close() + + switch { + case cmd == nil: + return nil, nil, fmt.Errorf("%w: cmd required", runtime.ErrInvalidConfig) + case config.Log == nil: + return nil, nil, fmt.Errorf("%w: logger required", runtime.ErrInvalidConfig) + case config.Stderr == nil, config.Stdout == nil: + return nil, nil, fmt.Errorf("%w: stderr and stdout required", runtime.ErrInvalidConfig) + } + + intitializer := newInitializer() + + server := grpcutils.NewServer() + defer server.GracefulStop() + pb.RegisterRuntimeServer(server, gruntime.NewServer(intitializer)) + + go grpcutils.Serve(listener, server) + + serverAddr := listener.Addr() + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", runtime.EngineAddressKey, serverAddr.String())) + // pass golang debug env to subprocess + for _, env := range os.Environ() { + if strings.HasPrefix(env, "GRPC_") || strings.HasPrefix(env, "GODEBUG") { + cmd.Env = append(cmd.Env, env) + } + } + + stdoutPipe, err := cmd.StdoutPipe() + if err != nil { + return nil, nil, fmt.Errorf("failed to create stdout pipe: %w", err) + } + stderrPipe, err := cmd.StderrPipe() + if err != nil { + return nil, nil, fmt.Errorf("failed to create stderr pipe: %w", err) + } + + // start subproccess + if err := cmd.Start(); err != nil { + return nil, nil, fmt.Errorf("failed to start process: %w", err) + } + + log := config.Log + stopper := NewStopper(log, cmd) + + // start stdout collector + go func() { + _, err := io.Copy(config.Stdout, stdoutPipe) + if err != nil { + log.Error("stdout collector failed", + zap.Error(err), + ) + } + stopper.Stop(context.TODO()) + + log.Info("stdout collector shutdown") + }() + + // start stderr collector + go func() { + _, err := io.Copy(config.Stderr, stderrPipe) + if err != nil { + log.Error("stderr collector failed", + 
zap.Error(err), + ) + } + stopper.Stop(context.TODO()) + + log.Info("stderr collector shutdown") + }() + + // wait for handshake success + timeout := time.NewTimer(config.HandshakeTimeout) + defer timeout.Stop() + + select { + case <-intitializer.initialized: + case <-timeout.C: + stopper.Stop(ctx) + return nil, nil, fmt.Errorf("%w: %v", runtime.ErrHandshakeFailed, runtime.ErrProcessNotFound) + } + + if intitializer.err != nil { + stopper.Stop(ctx) + return nil, nil, fmt.Errorf("%w: %v", runtime.ErrHandshakeFailed, intitializer.err) + } + + log.Info("plugin handshake succeeded", + zap.String("addr", intitializer.vmAddr), + ) + + status := &Status{ + Pid: cmd.Process.Pid, + Addr: intitializer.vmAddr, + } + return status, stopper, nil +} diff --git a/avalanchego/vms/rpcchainvm/runtime/subprocess/stopper.go b/avalanchego/vms/rpcchainvm/runtime/subprocess/stopper.go new file mode 100644 index 00000000..b4d02659 --- /dev/null +++ b/avalanchego/vms/rpcchainvm/runtime/subprocess/stopper.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package subprocess + +import ( + "context" + "os/exec" + "sync" + + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" +) + +func NewStopper(logger logging.Logger, cmd *exec.Cmd) runtime.Stopper { + return &stopper{ + cmd: cmd, + logger: logger, + } +} + +type stopper struct { + once sync.Once + cmd *exec.Cmd + logger logging.Logger +} + +func (s *stopper) Stop(ctx context.Context) { + s.once.Do(func() { + stop(ctx, s.logger, s.cmd) + }) +} diff --git a/avalanchego/vms/rpcchainvm/state_syncable_vm_test.go b/avalanchego/vms/rpcchainvm/state_syncable_vm_test.go index 8d1a3232..d2c47856 100644 --- a/avalanchego/vms/rpcchainvm/state_syncable_vm_test.go +++ b/avalanchego/vms/rpcchainvm/state_syncable_vm_test.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. 
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm import ( + "context" "errors" + "io" "testing" "github.com/golang/mock/gomock" - "github.com/hashicorp/go-plugin" - "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database/manager" @@ -20,7 +20,13 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime/subprocess" + + vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" ) var ( @@ -66,7 +72,7 @@ type StateSyncEnabledMock struct { *mocks.MockStateSyncableVM } -func stateSyncEnabledTestPlugin(t *testing.T, loadExpectations bool) (plugin.Plugin, *gomock.Controller) { +func stateSyncEnabledTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { // test key is "stateSyncEnabledTestKey" // create mock @@ -78,17 +84,17 @@ func stateSyncEnabledTestPlugin(t *testing.T, loadExpectations bool) (plugin.Plu if loadExpectations { gomock.InOrder( - ssVM.MockStateSyncableVM.EXPECT().StateSyncEnabled().Return(false, block.ErrStateSyncableVMNotImplemented).Times(1), - ssVM.MockStateSyncableVM.EXPECT().StateSyncEnabled().Return(false, nil).Times(1), - ssVM.MockStateSyncableVM.EXPECT().StateSyncEnabled().Return(true, nil).Times(1), - ssVM.MockStateSyncableVM.EXPECT().StateSyncEnabled().Return(false, errBrokenConnectionOrSomething).Times(1), + ssVM.MockStateSyncableVM.EXPECT().StateSyncEnabled(gomock.Any()).Return(false, block.ErrStateSyncableVMNotImplemented).Times(1), + ssVM.MockStateSyncableVM.EXPECT().StateSyncEnabled(gomock.Any()).Return(false, 
nil).Times(1), + ssVM.MockStateSyncableVM.EXPECT().StateSyncEnabled(gomock.Any()).Return(true, nil).Times(1), + ssVM.MockStateSyncableVM.EXPECT().StateSyncEnabled(gomock.Any()).Return(false, errBrokenConnectionOrSomething).Times(1), ) } - return New(ssVM), ctrl + return ssVM, ctrl } -func getOngoingSyncStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (plugin.Plugin, *gomock.Controller) { +func getOngoingSyncStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { // test key is "getOngoingSyncStateSummaryTestKey" // create mock @@ -100,16 +106,16 @@ func getOngoingSyncStateSummaryTestPlugin(t *testing.T, loadExpectations bool) ( if loadExpectations { gomock.InOrder( - ssVM.MockStateSyncableVM.EXPECT().GetOngoingSyncStateSummary().Return(nil, block.ErrStateSyncableVMNotImplemented).Times(1), - ssVM.MockStateSyncableVM.EXPECT().GetOngoingSyncStateSummary().Return(mockedSummary, nil).Times(1), - ssVM.MockStateSyncableVM.EXPECT().GetOngoingSyncStateSummary().Return(nil, errBrokenConnectionOrSomething).Times(1), + ssVM.MockStateSyncableVM.EXPECT().GetOngoingSyncStateSummary(gomock.Any()).Return(nil, block.ErrStateSyncableVMNotImplemented).Times(1), + ssVM.MockStateSyncableVM.EXPECT().GetOngoingSyncStateSummary(gomock.Any()).Return(mockedSummary, nil).Times(1), + ssVM.MockStateSyncableVM.EXPECT().GetOngoingSyncStateSummary(gomock.Any()).Return(nil, errBrokenConnectionOrSomething).Times(1), ) } - return New(ssVM), ctrl + return ssVM, ctrl } -func getLastStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (plugin.Plugin, *gomock.Controller) { +func getLastStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { // test key is "getLastStateSummaryTestKey" // create mock @@ -121,16 +127,16 @@ func getLastStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (plugin. 
if loadExpectations { gomock.InOrder( - ssVM.MockStateSyncableVM.EXPECT().GetLastStateSummary().Return(nil, block.ErrStateSyncableVMNotImplemented).Times(1), - ssVM.MockStateSyncableVM.EXPECT().GetLastStateSummary().Return(mockedSummary, nil).Times(1), - ssVM.MockStateSyncableVM.EXPECT().GetLastStateSummary().Return(nil, errBrokenConnectionOrSomething).Times(1), + ssVM.MockStateSyncableVM.EXPECT().GetLastStateSummary(gomock.Any()).Return(nil, block.ErrStateSyncableVMNotImplemented).Times(1), + ssVM.MockStateSyncableVM.EXPECT().GetLastStateSummary(gomock.Any()).Return(mockedSummary, nil).Times(1), + ssVM.MockStateSyncableVM.EXPECT().GetLastStateSummary(gomock.Any()).Return(nil, errBrokenConnectionOrSomething).Times(1), ) } - return New(ssVM), ctrl + return ssVM, ctrl } -func parseStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (plugin.Plugin, *gomock.Controller) { +func parseStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { // test key is "parseStateSummaryTestKey" // create mock @@ -142,17 +148,17 @@ func parseStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (plugin.Pl if loadExpectations { gomock.InOrder( - ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any()).Return(nil, block.ErrStateSyncableVMNotImplemented).Times(1), - ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any()).Return(mockedSummary, nil).Times(1), - ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any()).Return(nil, errNothingToParse).Times(1), - ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any()).Return(nil, errBrokenConnectionOrSomething).Times(1), + ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any(), gomock.Any()).Return(nil, block.ErrStateSyncableVMNotImplemented).Times(1), + ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any(), gomock.Any()).Return(mockedSummary, nil).Times(1), + ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any(), 
gomock.Any()).Return(nil, errNothingToParse).Times(1), + ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any(), gomock.Any()).Return(nil, errBrokenConnectionOrSomething).Times(1), ) } - return New(ssVM), ctrl + return ssVM, ctrl } -func getStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (plugin.Plugin, *gomock.Controller) { +func getStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { // test key is "getStateSummaryTestKey" // create mock @@ -164,16 +170,16 @@ func getStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (plugin.Plug if loadExpectations { gomock.InOrder( - ssVM.MockStateSyncableVM.EXPECT().GetStateSummary(gomock.Any()).Return(nil, block.ErrStateSyncableVMNotImplemented).Times(1), - ssVM.MockStateSyncableVM.EXPECT().GetStateSummary(gomock.Any()).Return(mockedSummary, nil).Times(1), - ssVM.MockStateSyncableVM.EXPECT().GetStateSummary(gomock.Any()).Return(nil, errBrokenConnectionOrSomething).Times(1), + ssVM.MockStateSyncableVM.EXPECT().GetStateSummary(gomock.Any(), gomock.Any()).Return(nil, block.ErrStateSyncableVMNotImplemented).Times(1), + ssVM.MockStateSyncableVM.EXPECT().GetStateSummary(gomock.Any(), gomock.Any()).Return(mockedSummary, nil).Times(1), + ssVM.MockStateSyncableVM.EXPECT().GetStateSummary(gomock.Any(), gomock.Any()).Return(nil, errBrokenConnectionOrSomething).Times(1), ) } - return New(ssVM), ctrl + return ssVM, ctrl } -func acceptStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (plugin.Plugin, *gomock.Controller) { +func acceptStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { // test key is "acceptStateSummaryTestKey" // create mock @@ -185,35 +191,41 @@ func acceptStateSummaryTestPlugin(t *testing.T, loadExpectations bool) (plugin.P if loadExpectations { gomock.InOrder( - ssVM.MockStateSyncableVM.EXPECT().GetStateSummary(gomock.Any()).Return(mockedSummary, nil).Times(1), - 
ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any()).DoAndReturn( - func(summaryBytes []byte) (block.StateSummary, error) { + ssVM.MockStateSyncableVM.EXPECT().GetStateSummary(gomock.Any(), gomock.Any()).Return(mockedSummary, nil).Times(1), + ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any(), gomock.Any()).DoAndReturn( + func(context.Context, []byte) (block.StateSummary, error) { // setup summary to be accepted before returning it - mockedSummary.AcceptF = func() (bool, error) { return true, nil } + mockedSummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { + return block.StateSyncStatic, nil + } return mockedSummary, nil }, ).Times(1), - ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any()).DoAndReturn( - func(summaryBytes []byte) (block.StateSummary, error) { + ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any(), gomock.Any()).DoAndReturn( + func(context.Context, []byte) (block.StateSummary, error) { // setup summary to be skipped before returning it - mockedSummary.AcceptF = func() (bool, error) { return false, nil } + mockedSummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { + return block.StateSyncSkipped, nil + } return mockedSummary, nil }, ).Times(1), - ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any()).DoAndReturn( - func(summaryBytes []byte) (block.StateSummary, error) { + ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any(), gomock.Any()).DoAndReturn( + func(context.Context, []byte) (block.StateSummary, error) { // setup summary to fail accept - mockedSummary.AcceptF = func() (bool, error) { return false, errBrokenConnectionOrSomething } + mockedSummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { + return block.StateSyncSkipped, errBrokenConnectionOrSomething + } return mockedSummary, nil }, ).Times(1), ) } - return New(ssVM), ctrl + return ssVM, ctrl } -func lastAcceptedBlockPostStateSummaryAcceptTestPlugin(t 
*testing.T, loadExpectations bool) (plugin.Plugin, *gomock.Controller) { +func lastAcceptedBlockPostStateSummaryAcceptTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { // test key is "lastAcceptedBlockPostStateSummaryAcceptTestKey" // create mock @@ -228,86 +240,92 @@ func lastAcceptedBlockPostStateSummaryAcceptTestPlugin(t *testing.T, loadExpecta ssVM.MockChainVM.EXPECT().Initialize( gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), ).Return(nil).Times(1), - ssVM.MockChainVM.EXPECT().LastAccepted().Return(preSummaryBlk.ID(), nil).Times(1), - ssVM.MockChainVM.EXPECT().GetBlock(gomock.Any()).Return(preSummaryBlk, nil).Times(1), + ssVM.MockChainVM.EXPECT().LastAccepted(gomock.Any()).Return(preSummaryBlk.ID(), nil).Times(1), + ssVM.MockChainVM.EXPECT().GetBlock(gomock.Any(), gomock.Any()).Return(preSummaryBlk, nil).Times(1), - ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any()).DoAndReturn( - func(summaryBytes []byte) (block.StateSummary, error) { + ssVM.MockStateSyncableVM.EXPECT().ParseStateSummary(gomock.Any(), gomock.Any()).DoAndReturn( + func(context.Context, []byte) (block.StateSummary, error) { // setup summary to be accepted before returning it - mockedSummary.AcceptF = func() (bool, error) { return true, nil } + mockedSummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { + return block.StateSyncStatic, nil + } return mockedSummary, nil }, ).Times(2), - ssVM.MockChainVM.EXPECT().SetState(gomock.Any()).Return(nil).Times(1), - ssVM.MockChainVM.EXPECT().LastAccepted().Return(summaryBlk.ID(), nil).Times(1), - ssVM.MockChainVM.EXPECT().GetBlock(gomock.Any()).Return(summaryBlk, nil).Times(1), + ssVM.MockChainVM.EXPECT().SetState(gomock.Any(), gomock.Any()).Return(nil).Times(1), + ssVM.MockChainVM.EXPECT().LastAccepted(gomock.Any()).Return(summaryBlk.ID(), nil).Times(1), + ssVM.MockChainVM.EXPECT().GetBlock(gomock.Any(), 
gomock.Any()).Return(summaryBlk, nil).Times(1), ) } - return New(ssVM), ctrl + return ssVM, ctrl } -func buildClientHelper(require *require.Assertions, testKey string, mockedPlugin plugin.Plugin) (*VMClient, *plugin.Client) { +func buildClientHelper(require *require.Assertions, testKey string) (*VMClient, runtime.Stopper) { process := helperProcess(testKey) - c := plugin.NewClient(&plugin.ClientConfig{ - Cmd: process, - HandshakeConfig: TestHandshake, - Plugins: plugin.PluginSet{testKey: mockedPlugin}, - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - }) - - _, err := c.Start() - require.NoErrorf(err, "failed to start plugin: %v", err) - require.True(c.Protocol() == plugin.ProtocolGRPC) - - // Get the plugin client. - client, err := c.Client() - require.NoErrorf(err, "failed to get plugin client: %v", err) - - // Grab the vm implementation. - raw, err := client.Dispense(testKey) - require.NoErrorf(err, "failed to dispense plugin: %v", err) - - // Get vm client. - vm, ok := raw.(*VMClient) - require.True(ok) - - return vm, c + + log := logging.NewLogger( + testKey, + logging.NewWrappedCore( + logging.Info, + originalStderr, + logging.Colors.ConsoleEncoder(), + ), + ) + + listener, err := grpcutils.NewListener() + require.NoError(err) + + status, stopper, err := subprocess.Bootstrap( + context.Background(), + listener, + process, + &subprocess.Config{ + Stderr: log, + Stdout: io.Discard, + Log: log, + HandshakeTimeout: runtime.DefaultHandshakeTimeout, + }, + ) + require.NoError(err) + + clientConn, err := grpcutils.Dial(status.Addr) + require.NoError(err) + + return NewClient(vmpb.NewVMClient(clientConn)), stopper } func TestStateSyncEnabled(t *testing.T) { require := require.New(t) testKey := stateSyncEnabledTestKey - mockedPlugin, ctrl := stateSyncEnabledTestPlugin(t, false /*loadExpectations*/) - defer ctrl.Finish() - // Create and start the plugin - vm, c := buildClientHelper(require, testKey, mockedPlugin) - defer c.Kill() + vm, stopper := 
buildClientHelper(require, testKey) + defer stopper.Stop(context.Background()) // test state sync not implemented // Note that enabled == false is returned rather than // common.ErrStateSyncableVMNotImplemented - enabled, err := vm.StateSyncEnabled() + enabled, err := vm.StateSyncEnabled(context.Background()) require.NoError(err) require.False(enabled) // test state sync disabled - enabled, err = vm.StateSyncEnabled() + enabled, err = vm.StateSyncEnabled(context.Background()) require.NoError(err) require.False(enabled) // test state sync enabled - enabled, err = vm.StateSyncEnabled() + enabled, err = vm.StateSyncEnabled(context.Background()) require.NoError(err) require.True(enabled) // test a non-special error. // TODO: retrieve exact error - _, err = vm.StateSyncEnabled() + _, err = vm.StateSyncEnabled(context.Background()) require.Error(err) } @@ -315,19 +333,16 @@ func TestGetOngoingSyncStateSummary(t *testing.T) { require := require.New(t) testKey := getOngoingSyncStateSummaryTestKey - mockedPlugin, ctrl := getOngoingSyncStateSummaryTestPlugin(t, false /*loadExpectations*/) - defer ctrl.Finish() - // Create and start the plugin - vm, c := buildClientHelper(require, testKey, mockedPlugin) - defer c.Kill() + vm, stopper := buildClientHelper(require, testKey) + defer stopper.Stop(context.Background()) // test unimplemented case; this is just a guard - _, err := vm.GetOngoingSyncStateSummary() + _, err := vm.GetOngoingSyncStateSummary(context.Background()) require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful retrieval - summary, err := vm.GetOngoingSyncStateSummary() + summary, err := vm.GetOngoingSyncStateSummary(context.Background()) require.NoError(err) require.Equal(mockedSummary.ID(), summary.ID()) require.Equal(mockedSummary.Height(), summary.Height()) @@ -335,7 +350,7 @@ func TestGetOngoingSyncStateSummary(t *testing.T) { // test a non-special error. 
// TODO: retrieve exact error - _, err = vm.GetOngoingSyncStateSummary() + _, err = vm.GetOngoingSyncStateSummary(context.Background()) require.Error(err) } @@ -343,19 +358,16 @@ func TestGetLastStateSummary(t *testing.T) { require := require.New(t) testKey := getLastStateSummaryTestKey - mockedPlugin, ctrl := getLastStateSummaryTestPlugin(t, false /*loadExpectations*/) - defer ctrl.Finish() - // Create and start the plugin - vm, c := buildClientHelper(require, testKey, mockedPlugin) - defer c.Kill() + vm, stopper := buildClientHelper(require, testKey) + defer stopper.Stop(context.Background()) // test unimplemented case; this is just a guard - _, err := vm.GetLastStateSummary() + _, err := vm.GetLastStateSummary(context.Background()) require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful retrieval - summary, err := vm.GetLastStateSummary() + summary, err := vm.GetLastStateSummary(context.Background()) require.NoError(err) require.Equal(mockedSummary.ID(), summary.ID()) require.Equal(mockedSummary.Height(), summary.Height()) @@ -363,7 +375,7 @@ func TestGetLastStateSummary(t *testing.T) { // test a non-special error. 
// TODO: retrieve exact error - _, err = vm.GetLastStateSummary() + _, err = vm.GetLastStateSummary(context.Background()) require.Error(err) } @@ -371,31 +383,28 @@ func TestParseStateSummary(t *testing.T) { require := require.New(t) testKey := parseStateSummaryTestKey - mockedPlugin, ctrl := parseStateSummaryTestPlugin(t, false /*loadExpectations*/) - defer ctrl.Finish() - // Create and start the plugin - vm, c := buildClientHelper(require, testKey, mockedPlugin) - defer c.Kill() + vm, stopper := buildClientHelper(require, testKey) + defer stopper.Stop(context.Background()) // test unimplemented case; this is just a guard - _, err := vm.ParseStateSummary(mockedSummary.Bytes()) + _, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful parsing - summary, err := vm.ParseStateSummary(mockedSummary.Bytes()) + summary, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) require.NoError(err) require.Equal(mockedSummary.ID(), summary.ID()) require.Equal(mockedSummary.Height(), summary.Height()) require.Equal(mockedSummary.Bytes(), summary.Bytes()) // test parsing nil summary - _, err = vm.ParseStateSummary(nil) + _, err = vm.ParseStateSummary(context.Background(), nil) require.Error(err) // test a non-special error. 
// TODO: retrieve exact error - _, err = vm.ParseStateSummary(mockedSummary.Bytes()) + _, err = vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) require.Error(err) } @@ -403,19 +412,16 @@ func TestGetStateSummary(t *testing.T) { require := require.New(t) testKey := getStateSummaryTestKey - mockedPlugin, ctrl := getStateSummaryTestPlugin(t, false /*loadExpectations*/) - defer ctrl.Finish() - // Create and start the plugin - vm, c := buildClientHelper(require, testKey, mockedPlugin) - defer c.Kill() + vm, stopper := buildClientHelper(require, testKey) + defer stopper.Stop(context.Background()) // test unimplemented case; this is just a guard - _, err := vm.GetStateSummary(mockedSummary.Height()) + _, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful retrieval - summary, err := vm.GetStateSummary(mockedSummary.Height()) + summary, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) require.NoError(err) require.Equal(mockedSummary.ID(), summary.ID()) require.Equal(mockedSummary.Height(), summary.Height()) @@ -423,7 +429,7 @@ func TestGetStateSummary(t *testing.T) { // test a non-special error. 
// TODO: retrieve exact error - _, err = vm.GetStateSummary(mockedSummary.Height()) + _, err = vm.GetStateSummary(context.Background(), mockedSummary.Height()) require.Error(err) } @@ -431,30 +437,27 @@ func TestAcceptStateSummary(t *testing.T) { require := require.New(t) testKey := acceptStateSummaryTestKey - mockedPlugin, ctrl := acceptStateSummaryTestPlugin(t, false /*loadExpectations*/) - defer ctrl.Finish() - // Create and start the plugin - vm, c := buildClientHelper(require, testKey, mockedPlugin) - defer c.Kill() + vm, stopper := buildClientHelper(require, testKey) + defer stopper.Stop(context.Background()) // retrieve the summary first - summary, err := vm.GetStateSummary(mockedSummary.Height()) + summary, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) require.NoError(err) - // test accepted Summary - accepted, err := summary.Accept() + // test status Summary + status, err := summary.Accept(context.Background()) require.NoError(err) - require.True(accepted) + require.Equal(block.StateSyncStatic, status) // test skipped Summary - accepted, err = summary.Accept() + status, err = summary.Accept(context.Background()) require.NoError(err) - require.False(accepted) + require.Equal(block.StateSyncSkipped, status) // test a non-special error. 
// TODO: retrieve exact error - _, err = summary.Accept() + _, err = summary.Accept(context.Background()) require.Error(err) } @@ -464,52 +467,49 @@ func TestLastAcceptedBlockPostStateSummaryAccept(t *testing.T) { require := require.New(t) testKey := lastAcceptedBlockPostStateSummaryAcceptTestKey - mockedPlugin, ctrl := lastAcceptedBlockPostStateSummaryAcceptTestPlugin(t, false /*loadExpectations*/) - defer ctrl.Finish() - // Create and start the plugin - vm, c := buildClientHelper(require, testKey, mockedPlugin) - defer c.Kill() + vm, stopper := buildClientHelper(require, testKey) + defer stopper.Stop(context.Background()) // Step 1: initialize VM and check initial LastAcceptedBlock ctx := snow.DefaultContextTest() dbManager := manager.NewMemDB(version.Semantic1_0_0) dbManager = dbManager.NewPrefixDBManager([]byte{}) - require.NoError(vm.Initialize(ctx, dbManager, nil, nil, nil, nil, nil, nil)) + require.NoError(vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil)) - blkID, err := vm.LastAccepted() + blkID, err := vm.LastAccepted(context.Background()) require.NoError(err) require.Equal(preSummaryBlk.ID(), blkID) - lastBlk, err := vm.GetBlock(blkID) + lastBlk, err := vm.GetBlock(context.Background(), blkID) require.NoError(err) require.Equal(preSummaryBlk.Height(), lastBlk.Height()) // Step 2: pick a state summary to an higher height and accept it - summary, err := vm.ParseStateSummary(mockedSummary.Bytes()) + summary, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) require.NoError(err) - accepted, err := summary.Accept() + status, err := summary.Accept(context.Background()) require.NoError(err) - require.True(accepted) + require.Equal(block.StateSyncStatic, status) // State Sync accept does not duly update LastAccepted block information // since state sync can complete asynchronously - blkID, err = vm.LastAccepted() + blkID, err = vm.LastAccepted(context.Background()) require.NoError(err) - lastBlk, err = 
vm.GetBlock(blkID) + lastBlk, err = vm.GetBlock(context.Background(), blkID) require.NoError(err) require.Equal(preSummaryBlk.Height(), lastBlk.Height()) // Setting state to bootstrapping duly update last accepted block - require.NoError(vm.SetState(snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - blkID, err = vm.LastAccepted() + blkID, err = vm.LastAccepted(context.Background()) require.NoError(err) - lastBlk, err = vm.GetBlock(blkID) + lastBlk, err = vm.GetBlock(context.Background(), blkID) require.NoError(err) require.Equal(summary.Height(), lastBlk.Height()) } diff --git a/avalanchego/vms/rpcchainvm/vm.go b/avalanchego/vms/rpcchainvm/vm.go index b71d956e..834ab9f1 100644 --- a/avalanchego/vms/rpcchainvm/vm.go +++ b/avalanchego/vms/rpcchainvm/vm.go @@ -1,74 +1,105 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm import ( - "golang.org/x/net/context" + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" "google.golang.org/grpc" + "google.golang.org/grpc/health" - "github.com/hashicorp/go-plugin" + healthpb "google.golang.org/grpc/health/grpc_health_v1" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/gruntime" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" + runtimepb "github.com/ava-labs/avalanchego/proto/pb/vm/runtime" ) -// protocolVersion should be bumped anytime changes are made which require -// the plugin vm to upgrade to latest avalanchego release to be compatible. -const protocolVersion = 17 +const defaultRuntimeDialTimeout = 5 * time.Second -var ( - // Handshake is a common handshake that is shared by plugin and host. 
- Handshake = plugin.HandshakeConfig{ - ProtocolVersion: protocolVersion, - MagicCookieKey: "VM_PLUGIN", - MagicCookieValue: "dynamic", +// The address of the Runtime server is expected to be passed via ENV `runtime.EngineAddressKey`. +// This address is used by the Runtime client to send Initialize RPC to server. +// +// Serve starts the RPC Chain VM server and performs a handshake with the VM runtime service. +func Serve(ctx context.Context, vm block.ChainVM, opts ...grpcutils.ServerOption) error { + signals := make(chan os.Signal, 2) + signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) + + server := newVMServer(vm, opts...) + go func(ctx context.Context) { + defer func() { + server.GracefulStop() + fmt.Println("vm server: graceful termination success") + }() + + for { + select { + case s := <-signals: + switch s { + case syscall.SIGINT: + fmt.Println("runtime engine: ignoring signal: SIGINT") + case syscall.SIGTERM: + fmt.Println("runtime engine: received shutdown signal: SIGTERM") + return + } + case <-ctx.Done(): + fmt.Println("runtime engine: context has been cancelled") + return + } + } + }(ctx) + + // address of Runtime server from ENV + runtimeAddr := os.Getenv(runtime.EngineAddressKey) + if runtimeAddr == "" { + return fmt.Errorf("required env var missing: %q", runtime.EngineAddressKey) } - // PluginMap is the map of plugins we can dispense. - PluginMap = map[string]plugin.Plugin{ - "vm": &vmPlugin{}, + clientConn, err := grpcutils.Dial(runtimeAddr) + if err != nil { + return fmt.Errorf("failed to create client conn: %w", err) } - _ plugin.Plugin = &vmPlugin{} - _ plugin.GRPCPlugin = &vmPlugin{} -) + client := gruntime.NewClient(runtimepb.NewRuntimeClient(clientConn)) -type vmPlugin struct { - plugin.NetRPCUnsupportedPlugin - // Concrete implementation, written in Go. This is only used for plugins - // that are written in Go. 
- vm block.ChainVM -} + listener, err := grpcutils.NewListener() + if err != nil { + return fmt.Errorf("failed to create new listener: %w", err) + } -// New will be called by the server side of the plugin to pass into the server -// side PluginMap for dispatching. -func New(vm block.ChainVM) plugin.Plugin { - return &vmPlugin{vm: vm} -} + ctx, cancel := context.WithTimeout(ctx, defaultRuntimeDialTimeout) + defer cancel() + err = client.Initialize(ctx, version.RPCChainVMProtocol, listener.Addr().String()) + if err != nil { + _ = listener.Close() + return fmt.Errorf("failed to initialize vm runtime: %w", err) + } + + // start RPC Chain VM server + grpcutils.Serve(listener, server) -// GRPCServer registers a new GRPC server. -func (p *vmPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { - vmpb.RegisterVMServer(s, NewServer(p.vm)) return nil } -// GRPCClient returns a new GRPC client -func (p *vmPlugin) GRPCClient(ctx context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return NewClient(vmpb.NewVMClient(c)), nil -} +// Returns an RPC Chain VM server serving health and VM services. +func newVMServer(vm block.ChainVM, opts ...grpcutils.ServerOption) *grpc.Server { + server := grpcutils.NewServer(opts...) + vmpb.RegisterVMServer(server, NewServer(vm)) + + health := health.NewServer() + health.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + healthpb.RegisterHealthServer(server, health) -// Serve serves a ChainVM plugin using sane gRPC server defaults. 
-func Serve(vm block.ChainVM) { - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: Handshake, - Plugins: map[string]plugin.Plugin{ - "vm": New(vm), - }, - // ensure proper defaults - GRPCServer: grpcutils.NewDefaultServer, - }) + return server } diff --git a/avalanchego/vms/rpcchainvm/vm_client.go b/avalanchego/vms/rpcchainvm/vm_client.go index 369ae59e..2d18483d 100644 --- a/avalanchego/vms/rpcchainvm/vm_client.go +++ b/avalanchego/vms/rpcchainvm/vm_client.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package rpcchainvm @@ -12,8 +12,6 @@ import ( grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/hashicorp/go-plugin" - "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" @@ -29,6 +27,7 @@ import ( "github.com/ava-labs/avalanchego/api/keystore/gkeystore" "github.com/ava-labs/avalanchego/api/metrics" "github.com/ava-labs/avalanchego/chains/atomic/gsharedmemory" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/rpcdb" "github.com/ava-labs/avalanchego/ids" @@ -39,14 +38,17 @@ import ( "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/appsender" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/validators/gvalidators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/resource" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/chain" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/gwarp" "github.com/ava-labs/avalanchego/vms/rpcchainvm/ghttp" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" - 
"github.com/ava-labs/avalanchego/vms/rpcchainvm/gsubnetlookup" "github.com/ava-labs/avalanchego/vms/rpcchainvm/messenger" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" aliasreaderpb "github.com/ava-labs/avalanchego/proto/pb/aliasreader" appsenderpb "github.com/ava-labs/avalanchego/proto/pb/appsender" @@ -55,8 +57,9 @@ import ( messengerpb "github.com/ava-labs/avalanchego/proto/pb/messenger" rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" sharedmemorypb "github.com/ava-labs/avalanchego/proto/pb/sharedmemory" - subnetlookuppb "github.com/ava-labs/avalanchego/proto/pb/subnetlookup" + validatorstatepb "github.com/ava-labs/avalanchego/proto/pb/validatorstate" vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" + warppb "github.com/ava-labs/avalanchego/proto/pb/warp" ) const ( @@ -70,38 +73,39 @@ var ( errUnsupportedFXs = errors.New("unsupported feature extensions") errBatchedParseBlockWrongNumberOfBlocks = errors.New("BatchedParseBlock returned different number of blocks than expected") - _ block.ChainVM = &VMClient{} - _ block.BatchedChainVM = &VMClient{} - _ block.HeightIndexedChainVM = &VMClient{} - _ block.StateSyncableVM = &VMClient{} - _ prometheus.Gatherer = &VMClient{} + _ block.ChainVM = (*VMClient)(nil) + _ block.BuildBlockWithContextChainVM = (*VMClient)(nil) + _ block.BatchedChainVM = (*VMClient)(nil) + _ block.HeightIndexedChainVM = (*VMClient)(nil) + _ block.StateSyncableVM = (*VMClient)(nil) + _ prometheus.Gatherer = (*VMClient)(nil) - _ snowman.Block = &blockClient{} + _ snowman.Block = (*blockClient)(nil) + _ block.WithVerifyContext = (*blockClient)(nil) - _ block.StateSummary = &summaryClient{} + _ block.StateSummary = (*summaryClient)(nil) ) // VMClient is an implementation of a VM that talks over RPC. 
type VMClient struct { *chain.State client vmpb.VMClient - proc *plugin.Client + runtime runtime.Stopper pid int processTracker resource.ProcessTracker - messenger *messenger.Server - keystore *gkeystore.Server - sharedMemory *gsharedmemory.Server - bcLookup *galiasreader.Server - snLookup *gsubnetlookup.Server - appSender *appsender.Server + messenger *messenger.Server + keystore *gkeystore.Server + sharedMemory *gsharedmemory.Server + bcLookup *galiasreader.Server + appSender *appsender.Server + validatorStateServer *gvalidators.Server + warpSignerServer *gwarp.Server serverCloser grpcutils.ServerCloser conns []*grpc.ClientConn grpcServerMetrics *grpc_prometheus.ServerMetrics - - ctx *snow.Context } // NewClient returns a VM connected to a remote VM @@ -112,16 +116,16 @@ func NewClient(client vmpb.VMClient) *VMClient { } // SetProcess gives ownership of the server process to the client. -func (vm *VMClient) SetProcess(ctx *snow.Context, proc *plugin.Client, processTracker resource.ProcessTracker) { - vm.ctx = ctx - vm.proc = proc +func (vm *VMClient) SetProcess(runtime runtime.Stopper, pid int, processTracker resource.ProcessTracker) { + vm.runtime = runtime vm.processTracker = processTracker - vm.pid = proc.ReattachConfig().Pid + vm.pid = pid processTracker.TrackProcess(vm.pid) } func (vm *VMClient) Initialize( - ctx *snow.Context, + ctx context.Context, + chainCtx *snow.Context, dbManager manager.Manager, genesisBytes []byte, upgradeBytes []byte, @@ -134,8 +138,6 @@ func (vm *VMClient) Initialize( return errUnsupportedFXs } - vm.ctx = ctx - // Register metrics registerer := prometheus.NewRegistry() multiGatherer := metrics.NewMultiGatherer() @@ -155,7 +157,6 @@ func (vm *VMClient) Initialize( versionedDBs := dbManager.GetDatabases() versionedDBServers := make([]*vmpb.VersionedDBServer, len(versionedDBs)) for i, semDB := range versionedDBs { - db := rpcdb.NewServer(semDB.Database) dbVersion := semDB.Version.String() serverListener, err := grpcutils.NewListener() 
if err != nil { @@ -163,8 +164,8 @@ func (vm *VMClient) Initialize( } serverAddr := serverListener.Addr().String() - go grpcutils.Serve(serverListener, vm.getDBServerFunc(db)) - vm.ctx.Log.Info("grpc: serving database", + go grpcutils.Serve(serverListener, vm.newDBServer(semDB.Database)) + chainCtx.Log.Info("grpc: serving database", zap.String("version", dbVersion), zap.String("address", serverAddr), ) @@ -176,11 +177,12 @@ func (vm *VMClient) Initialize( } vm.messenger = messenger.NewServer(toEngine) - vm.keystore = gkeystore.NewServer(ctx.Keystore) - vm.sharedMemory = gsharedmemory.NewServer(ctx.SharedMemory, dbManager.Current().Database) - vm.bcLookup = galiasreader.NewServer(ctx.BCLookup) - vm.snLookup = gsubnetlookup.NewServer(ctx.SNLookup) + vm.keystore = gkeystore.NewServer(chainCtx.Keystore) + vm.sharedMemory = gsharedmemory.NewServer(chainCtx.SharedMemory, dbManager.Current().Database) + vm.bcLookup = galiasreader.NewServer(chainCtx.BCLookup) vm.appSender = appsender.NewServer(appSender) + vm.validatorStateServer = gvalidators.NewServer(chainCtx.ValidatorState) + vm.warpSignerServer = gwarp.NewServer(chainCtx.WarpSigner) serverListener, err := grpcutils.NewListener() if err != nil { @@ -188,18 +190,21 @@ func (vm *VMClient) Initialize( } serverAddr := serverListener.Addr().String() - go grpcutils.Serve(serverListener, vm.getInitServer) - vm.ctx.Log.Info("grpc: serving vm services", + go grpcutils.Serve(serverListener, vm.newInitServer()) + chainCtx.Log.Info("grpc: serving vm services", zap.String("address", serverAddr), ) - resp, err := vm.client.Initialize(context.Background(), &vmpb.InitializeRequest{ - NetworkId: ctx.NetworkID, - SubnetId: ctx.SubnetID[:], - ChainId: ctx.ChainID[:], - NodeId: ctx.NodeID.Bytes(), - XChainId: ctx.XChainID[:], - AvaxAssetId: ctx.AVAXAssetID[:], + resp, err := vm.client.Initialize(ctx, &vmpb.InitializeRequest{ + NetworkId: chainCtx.NetworkID, + SubnetId: chainCtx.SubnetID[:], + ChainId: chainCtx.ChainID[:], + NodeId: 
chainCtx.NodeID.Bytes(), + PublicKey: bls.PublicKeyToBytes(chainCtx.PublicKey), + XChainId: chainCtx.XChainID[:], + CChainId: chainCtx.CChainID[:], + AvaxAssetId: chainCtx.AVAXAssetID[:], + ChainDataDir: chainCtx.ChainDataDir, GenesisBytes: genesisBytes, UpgradeBytes: upgradeBytes, ConfigBytes: configBytes, @@ -224,6 +229,8 @@ func (vm *VMClient) Initialize( return err } + // We don't need to check whether this is a block.WithVerifyContext because + // we'll never Verify this block. lastAcceptedBlk := &blockClient{ vm: vm, id: id, @@ -237,14 +244,16 @@ func (vm *VMClient) Initialize( chainState, err := chain.NewMeteredState( registerer, &chain.Config{ - DecidedCacheSize: decidedCacheSize, - MissingCacheSize: missingCacheSize, - UnverifiedCacheSize: unverifiedCacheSize, - BytesToIDCacheSize: bytesToIDCacheSize, - LastAcceptedBlock: lastAcceptedBlk, - GetBlock: vm.getBlock, - UnmarshalBlock: vm.parseBlock, - BuildBlock: vm.buildBlock, + DecidedCacheSize: decidedCacheSize, + MissingCacheSize: missingCacheSize, + UnverifiedCacheSize: unverifiedCacheSize, + BytesToIDCacheSize: bytesToIDCacheSize, + LastAcceptedBlock: lastAcceptedBlk, + GetBlock: vm.getBlock, + UnmarshalBlock: vm.parseBlock, + BatchedUnmarshalBlock: vm.batchedParseBlock, + BuildBlock: vm.buildBlock, + BuildBlockWithContext: vm.buildBlockWithContext, }, ) if err != nil { @@ -252,74 +261,52 @@ func (vm *VMClient) Initialize( } vm.State = chainState - return vm.ctx.Metrics.Register(multiGatherer) + return chainCtx.Metrics.Register(multiGatherer) } -func (vm *VMClient) getDBServerFunc(db rpcdbpb.DatabaseServer) func(opts []grpc.ServerOption) *grpc.Server { // #nolint - return func(opts []grpc.ServerOption) *grpc.Server { - if len(opts) == 0 { - opts = append(opts, grpcutils.DefaultServerOptions...) 
- } - - // Collect gRPC serving metrics - opts = append(opts, grpc.UnaryInterceptor(vm.grpcServerMetrics.UnaryServerInterceptor())) - opts = append(opts, grpc.StreamInterceptor(vm.grpcServerMetrics.StreamServerInterceptor())) - - server := grpc.NewServer(opts...) +func (vm *VMClient) newDBServer(db database.Database) *grpc.Server { + server := grpcutils.NewServer( + grpcutils.WithUnaryInterceptor(vm.grpcServerMetrics.UnaryServerInterceptor()), + grpcutils.WithStreamInterceptor(vm.grpcServerMetrics.StreamServerInterceptor()), + ) - grpcHealth := health.NewServer() - // The server should use an empty string as the key for server's overall - // health status. - // See https://github.com/grpc/grpc/blob/master/doc/health-checking.md - grpcHealth.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + // See https://github.com/grpc/grpc/blob/master/doc/health-checking.md + grpcHealth := health.NewServer() + grpcHealth.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) - vm.serverCloser.Add(server) + vm.serverCloser.Add(server) - // register database service - rpcdbpb.RegisterDatabaseServer(server, db) - // register health service - healthpb.RegisterHealthServer(server, grpcHealth) + // Register services + rpcdbpb.RegisterDatabaseServer(server, rpcdb.NewServer(db)) + healthpb.RegisterHealthServer(server, grpcHealth) - // Ensure metric counters are zeroed on restart - grpc_prometheus.Register(server) + // Ensure metric counters are zeroed on restart + grpc_prometheus.Register(server) - return server - } + return server } -func (vm *VMClient) getInitServer(opts []grpc.ServerOption) *grpc.Server { - if len(opts) == 0 { - opts = append(opts, grpcutils.DefaultServerOptions...) - } - - // Collect gRPC serving metrics - opts = append(opts, grpc.UnaryInterceptor(vm.grpcServerMetrics.UnaryServerInterceptor())) - opts = append(opts, grpc.StreamInterceptor(vm.grpcServerMetrics.StreamServerInterceptor())) - - server := grpc.NewServer(opts...) 
+func (vm *VMClient) newInitServer() *grpc.Server { + server := grpcutils.NewServer( + grpcutils.WithUnaryInterceptor(vm.grpcServerMetrics.UnaryServerInterceptor()), + grpcutils.WithStreamInterceptor(vm.grpcServerMetrics.StreamServerInterceptor()), + ) - grpcHealth := health.NewServer() - // The server should use an empty string as the key for server's overall - // health status. // See https://github.com/grpc/grpc/blob/master/doc/health-checking.md + grpcHealth := health.NewServer() grpcHealth.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) vm.serverCloser.Add(server) - // register the messenger service + // Register services messengerpb.RegisterMessengerServer(server, vm.messenger) - // register the keystore service keystorepb.RegisterKeystoreServer(server, vm.keystore) - // register the shared memory service sharedmemorypb.RegisterSharedMemoryServer(server, vm.sharedMemory) - // register the blockchain alias service aliasreaderpb.RegisterAliasReaderServer(server, vm.bcLookup) - // register the subnet alias service - subnetlookuppb.RegisterSubnetLookupServer(server, vm.snLookup) - // register the app sender service appsenderpb.RegisterAppSenderServer(server, vm.appSender) - // register the health service healthpb.RegisterHealthServer(server, grpcHealth) + validatorstatepb.RegisterValidatorStateServer(server, vm.validatorStateServer) + warppb.RegisterSignerServer(server, vm.warpSignerServer) // Ensure metric counters are zeroed on restart grpc_prometheus.Register(server) @@ -327,9 +314,9 @@ func (vm *VMClient) getInitServer(opts []grpc.ServerOption) *grpc.Server { return server } -func (vm *VMClient) SetState(state snow.State) error { - resp, err := vm.client.SetState(context.Background(), &vmpb.SetStateRequest{ - State: uint32(state), +func (vm *VMClient) SetState(ctx context.Context, state snow.State) error { + resp, err := vm.client.SetState(ctx, &vmpb.SetStateRequest{ + State: vmpb.State(state), }) if err != nil { return err @@ -350,6 +337,8 @@ func 
(vm *VMClient) SetState(state snow.State) error { return err } + // We don't need to check whether this is a block.WithVerifyContext because + // we'll never Verify this block. return vm.State.SetLastAcceptedBlock(&blockClient{ vm: vm, id: id, @@ -361,9 +350,9 @@ func (vm *VMClient) SetState(state snow.State) error { }) } -func (vm *VMClient) Shutdown() error { +func (vm *VMClient) Shutdown(ctx context.Context) error { errs := wrappers.Errs{} - _, err := vm.client.Shutdown(context.Background(), &emptypb.Empty{}) + _, err := vm.client.Shutdown(ctx, &emptypb.Empty{}) errs.Add(err) vm.serverCloser.Stop() @@ -371,13 +360,14 @@ func (vm *VMClient) Shutdown() error { errs.Add(conn.Close()) } - vm.proc.Kill() + vm.runtime.Stop(ctx) + vm.processTracker.UntrackProcess(vm.pid) return errs.Err } -func (vm *VMClient) CreateHandlers() (map[string]*common.HTTPHandler, error) { - resp, err := vm.client.CreateHandlers(context.Background(), &emptypb.Empty{}) +func (vm *VMClient) CreateHandlers(ctx context.Context) (map[string]*common.HTTPHandler, error) { + resp, err := vm.client.CreateHandlers(ctx, &emptypb.Empty{}) if err != nil { return nil, err } @@ -398,8 +388,8 @@ func (vm *VMClient) CreateHandlers() (map[string]*common.HTTPHandler, error) { return handlers, nil } -func (vm *VMClient) CreateStaticHandlers() (map[string]*common.HTTPHandler, error) { - resp, err := vm.client.CreateStaticHandlers(context.Background(), &emptypb.Empty{}) +func (vm *VMClient) CreateStaticHandlers(ctx context.Context) (map[string]*common.HTTPHandler, error) { + resp, err := vm.client.CreateStaticHandlers(ctx, &emptypb.Empty{}) if err != nil { return nil, err } @@ -420,51 +410,43 @@ func (vm *VMClient) CreateStaticHandlers() (map[string]*common.HTTPHandler, erro return handlers, nil } -func (vm *VMClient) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { - _, err := vm.client.Connected(context.Background(), &vmpb.ConnectedRequest{ +func (vm *VMClient) Connected(ctx 
context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + _, err := vm.client.Connected(ctx, &vmpb.ConnectedRequest{ NodeId: nodeID[:], Version: nodeVersion.String(), }) return err } -func (vm *VMClient) Disconnected(nodeID ids.NodeID) error { - _, err := vm.client.Disconnected(context.Background(), &vmpb.DisconnectedRequest{ +func (vm *VMClient) Disconnected(ctx context.Context, nodeID ids.NodeID) error { + _, err := vm.client.Disconnected(ctx, &vmpb.DisconnectedRequest{ NodeId: nodeID[:], }) return err } -func (vm *VMClient) buildBlock() (snowman.Block, error) { - resp, err := vm.client.BuildBlock(context.Background(), &emptypb.Empty{}) - if err != nil { - return nil, err - } - - id, err := ids.ToID(resp.Id) +// If the underlying VM doesn't actually implement this method, its [BuildBlock] +// method will be called instead. +func (vm *VMClient) buildBlockWithContext(ctx context.Context, blockCtx *block.Context) (snowman.Block, error) { + resp, err := vm.client.BuildBlock(ctx, &vmpb.BuildBlockRequest{ + PChainHeight: &blockCtx.PChainHeight, + }) if err != nil { return nil, err } + return vm.newBlockFromBuildBlock(resp) +} - parentID, err := ids.ToID(resp.ParentId) +func (vm *VMClient) buildBlock(ctx context.Context) (snowman.Block, error) { + resp, err := vm.client.BuildBlock(ctx, &vmpb.BuildBlockRequest{}) if err != nil { return nil, err } - - time, err := grpcutils.TimestampAsTime(resp.Timestamp) - return &blockClient{ - vm: vm, - id: id, - parentID: parentID, - status: choices.Processing, - bytes: resp.Bytes, - height: resp.Height, - time: time, - }, err + return vm.newBlockFromBuildBlock(resp) } -func (vm *VMClient) parseBlock(bytes []byte) (snowman.Block, error) { - resp, err := vm.client.ParseBlock(context.Background(), &vmpb.ParseBlockRequest{ +func (vm *VMClient) parseBlock(ctx context.Context, bytes []byte) (snowman.Block, error) { + resp, err := vm.client.ParseBlock(ctx, &vmpb.ParseBlockRequest{ Bytes: bytes, }) if err != nil { @@ 
-487,26 +469,30 @@ func (vm *VMClient) parseBlock(bytes []byte) (snowman.Block, error) { } time, err := grpcutils.TimestampAsTime(resp.Timestamp) + if err != nil { + return nil, err + } return &blockClient{ - vm: vm, - id: id, - parentID: parentID, - status: status, - bytes: bytes, - height: resp.Height, - time: time, - }, err -} - -func (vm *VMClient) getBlock(id ids.ID) (snowman.Block, error) { - resp, err := vm.client.GetBlock(context.Background(), &vmpb.GetBlockRequest{ - Id: id[:], + vm: vm, + id: id, + parentID: parentID, + status: status, + bytes: bytes, + height: resp.Height, + time: time, + shouldVerifyWithCtx: resp.VerifyWithContext, + }, nil +} + +func (vm *VMClient) getBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { + resp, err := vm.client.GetBlock(ctx, &vmpb.GetBlockRequest{ + Id: blkID[:], }) if err != nil { return nil, err } - if errCode := resp.Err; errCode != 0 { - return nil, errCodeToError[errCode] + if errEnum := resp.Err; errEnum != vmpb.Error_ERROR_UNSPECIFIED { + return nil, errEnumToError[errEnum] } parentID, err := ids.ToID(resp.ParentId) @@ -521,25 +507,26 @@ func (vm *VMClient) getBlock(id ids.ID) (snowman.Block, error) { time, err := grpcutils.TimestampAsTime(resp.Timestamp) return &blockClient{ - vm: vm, - id: id, - parentID: parentID, - status: status, - bytes: resp.Bytes, - height: resp.Height, - time: time, + vm: vm, + id: blkID, + parentID: parentID, + status: status, + bytes: resp.Bytes, + height: resp.Height, + time: time, + shouldVerifyWithCtx: resp.VerifyWithContext, }, err } -func (vm *VMClient) SetPreference(id ids.ID) error { - _, err := vm.client.SetPreference(context.Background(), &vmpb.SetPreferenceRequest{ - Id: id[:], +func (vm *VMClient) SetPreference(ctx context.Context, blkID ids.ID) error { + _, err := vm.client.SetPreference(ctx, &vmpb.SetPreferenceRequest{ + Id: blkID[:], }) return err } -func (vm *VMClient) HealthCheck() (interface{}, error) { - health, err := 
vm.client.Health(context.Background(), &emptypb.Empty{}) +func (vm *VMClient) HealthCheck(ctx context.Context) (interface{}, error) { + health, err := vm.client.Health(ctx, &emptypb.Empty{}) if err != nil { return nil, fmt.Errorf("health check failed: %w", err) } @@ -547,20 +534,53 @@ func (vm *VMClient) HealthCheck() (interface{}, error) { return json.RawMessage(health.Details), nil } -func (vm *VMClient) Version() (string, error) { - resp, err := vm.client.Version( - context.Background(), - &emptypb.Empty{}, - ) +func (vm *VMClient) Version(ctx context.Context) (string, error) { + resp, err := vm.client.Version(ctx, &emptypb.Empty{}) if err != nil { return "", err } return resp.Version, nil } -func (vm *VMClient) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { +func (vm *VMClient) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + _, err := vm.client.CrossChainAppRequest( + ctx, + &vmpb.CrossChainAppRequestMsg{ + ChainId: chainID[:], + RequestId: requestID, + Deadline: grpcutils.TimestampFromTime(deadline), + Request: request, + }, + ) + return err +} + +func (vm *VMClient) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { + _, err := vm.client.CrossChainAppRequestFailed( + ctx, + &vmpb.CrossChainAppRequestFailedMsg{ + ChainId: chainID[:], + RequestId: requestID, + }, + ) + return err +} + +func (vm *VMClient) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { + _, err := vm.client.CrossChainAppResponse( + ctx, + &vmpb.CrossChainAppResponseMsg{ + ChainId: chainID[:], + RequestId: requestID, + Response: response, + }, + ) + return err +} + +func (vm *VMClient) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { _, err := vm.client.AppRequest( - context.Background(), + ctx, &vmpb.AppRequestMsg{ NodeId: 
nodeID[:], RequestId: requestID, @@ -571,9 +591,9 @@ func (vm *VMClient) AppRequest(nodeID ids.NodeID, requestID uint32, deadline tim return err } -func (vm *VMClient) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { +func (vm *VMClient) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { _, err := vm.client.AppResponse( - context.Background(), + ctx, &vmpb.AppResponseMsg{ NodeId: nodeID[:], RequestId: requestID, @@ -583,9 +603,9 @@ func (vm *VMClient) AppResponse(nodeID ids.NodeID, requestID uint32, response [] return err } -func (vm *VMClient) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { +func (vm *VMClient) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { _, err := vm.client.AppRequestFailed( - context.Background(), + ctx, &vmpb.AppRequestFailedMsg{ NodeId: nodeID[:], RequestId: requestID, @@ -594,9 +614,9 @@ func (vm *VMClient) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error return err } -func (vm *VMClient) AppGossip(nodeID ids.NodeID, msg []byte) error { +func (vm *VMClient) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { _, err := vm.client.AppGossip( - context.Background(), + ctx, &vmpb.AppGossipMsg{ NodeId: nodeID[:], Msg: msg, @@ -614,12 +634,13 @@ func (vm *VMClient) Gather() ([]*dto.MetricFamily, error) { } func (vm *VMClient) GetAncestors( + ctx context.Context, blkID ids.ID, maxBlocksNum int, maxBlocksSize int, maxBlocksRetrivalTime time.Duration, ) ([][]byte, error) { - resp, err := vm.client.GetAncestors(context.Background(), &vmpb.GetAncestorsRequest{ + resp, err := vm.client.GetAncestors(ctx, &vmpb.GetAncestorsRequest{ BlkId: blkID[:], MaxBlocksNum: int32(maxBlocksNum), MaxBlocksSize: int32(maxBlocksSize), @@ -631,8 +652,8 @@ func (vm *VMClient) GetAncestors( return resp.BlksBytes, nil } -func (vm *VMClient) BatchedParseBlock(blksBytes [][]byte) ([]snowman.Block, error) { - resp, err := 
vm.client.BatchedParseBlock(context.Background(), &vmpb.BatchedParseBlockRequest{ +func (vm *VMClient) batchedParseBlock(ctx context.Context, blksBytes [][]byte) ([]snowman.Block, error) { + resp, err := vm.client.BatchedParseBlock(ctx, &vmpb.BatchedParseBlockRequest{ Request: blksBytes, }) if err != nil { @@ -665,66 +686,61 @@ func (vm *VMClient) BatchedParseBlock(blksBytes [][]byte) ([]snowman.Block, erro } res = append(res, &blockClient{ - vm: vm, - id: id, - parentID: parentID, - status: status, - bytes: blksBytes[idx], - height: blkResp.Height, - time: time, + vm: vm, + id: id, + parentID: parentID, + status: status, + bytes: blksBytes[idx], + height: blkResp.Height, + time: time, + shouldVerifyWithCtx: blkResp.VerifyWithContext, }) } return res, nil } -func (vm *VMClient) VerifyHeightIndex() error { - resp, err := vm.client.VerifyHeightIndex( - context.Background(), - &emptypb.Empty{}, - ) +func (vm *VMClient) VerifyHeightIndex(ctx context.Context) error { + resp, err := vm.client.VerifyHeightIndex(ctx, &emptypb.Empty{}) if err != nil { return err } - return errCodeToError[resp.Err] + return errEnumToError[resp.Err] } -func (vm *VMClient) GetBlockIDAtHeight(height uint64) (ids.ID, error) { +func (vm *VMClient) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { resp, err := vm.client.GetBlockIDAtHeight( - context.Background(), + ctx, &vmpb.GetBlockIDAtHeightRequest{Height: height}, ) if err != nil { return ids.Empty, err } - if errCode := resp.Err; errCode != 0 { - return ids.Empty, errCodeToError[errCode] + if errEnum := resp.Err; errEnum != vmpb.Error_ERROR_UNSPECIFIED { + return ids.Empty, errEnumToError[errEnum] } return ids.ToID(resp.BlkId) } -func (vm *VMClient) StateSyncEnabled() (bool, error) { - resp, err := vm.client.StateSyncEnabled( - context.Background(), - &emptypb.Empty{}, - ) +func (vm *VMClient) StateSyncEnabled(ctx context.Context) (bool, error) { + resp, err := vm.client.StateSyncEnabled(ctx, &emptypb.Empty{}) if err != 
nil { return false, err } - err = errCodeToError[resp.Err] + err = errEnumToError[resp.Err] if err == block.ErrStateSyncableVMNotImplemented { return false, nil } return resp.Enabled, err } -func (vm *VMClient) GetOngoingSyncStateSummary() (block.StateSummary, error) { - resp, err := vm.client.GetOngoingSyncStateSummary(context.Background(), &emptypb.Empty{}) +func (vm *VMClient) GetOngoingSyncStateSummary(ctx context.Context) (block.StateSummary, error) { + resp, err := vm.client.GetOngoingSyncStateSummary(ctx, &emptypb.Empty{}) if err != nil { return nil, err } - if errCode := resp.Err; errCode != 0 { - return nil, errCodeToError[errCode] + if errEnum := resp.Err; errEnum != vmpb.Error_ERROR_UNSPECIFIED { + return nil, errEnumToError[errEnum] } summaryID, err := ids.ToID(resp.Id) @@ -736,13 +752,13 @@ func (vm *VMClient) GetOngoingSyncStateSummary() (block.StateSummary, error) { }, err } -func (vm *VMClient) GetLastStateSummary() (block.StateSummary, error) { - resp, err := vm.client.GetLastStateSummary(context.Background(), &emptypb.Empty{}) +func (vm *VMClient) GetLastStateSummary(ctx context.Context) (block.StateSummary, error) { + resp, err := vm.client.GetLastStateSummary(ctx, &emptypb.Empty{}) if err != nil { return nil, err } - if errCode := resp.Err; errCode != 0 { - return nil, errCodeToError[errCode] + if errEnum := resp.Err; errEnum != vmpb.Error_ERROR_UNSPECIFIED { + return nil, errEnumToError[errEnum] } summaryID, err := ids.ToID(resp.Id) @@ -754,9 +770,9 @@ func (vm *VMClient) GetLastStateSummary() (block.StateSummary, error) { }, err } -func (vm *VMClient) ParseStateSummary(summaryBytes []byte) (block.StateSummary, error) { +func (vm *VMClient) ParseStateSummary(ctx context.Context, summaryBytes []byte) (block.StateSummary, error) { resp, err := vm.client.ParseStateSummary( - context.Background(), + ctx, &vmpb.ParseStateSummaryRequest{ Bytes: summaryBytes, }, @@ -764,8 +780,8 @@ func (vm *VMClient) ParseStateSummary(summaryBytes []byte) 
(block.StateSummary, if err != nil { return nil, err } - if errCode := resp.Err; errCode != 0 { - return nil, errCodeToError[errCode] + if errEnum := resp.Err; errEnum != vmpb.Error_ERROR_UNSPECIFIED { + return nil, errEnumToError[errEnum] } summaryID, err := ids.ToID(resp.Id) @@ -777,9 +793,9 @@ func (vm *VMClient) ParseStateSummary(summaryBytes []byte) (block.StateSummary, }, err } -func (vm *VMClient) GetStateSummary(summaryHeight uint64) (block.StateSummary, error) { +func (vm *VMClient) GetStateSummary(ctx context.Context, summaryHeight uint64) (block.StateSummary, error) { resp, err := vm.client.GetStateSummary( - context.Background(), + ctx, &vmpb.GetStateSummaryRequest{ Height: summaryHeight, }, @@ -787,8 +803,8 @@ func (vm *VMClient) GetStateSummary(summaryHeight uint64) (block.StateSummary, e if err != nil { return nil, err } - if errCode := resp.Err; errCode != 0 { - return nil, errCodeToError[errCode] + if errEnum := resp.Err; errEnum != vmpb.Error_ERROR_UNSPECIFIED { + return nil, errEnumToError[errEnum] } summaryID, err := ids.ToID(resp.Id) @@ -800,41 +816,72 @@ func (vm *VMClient) GetStateSummary(summaryHeight uint64) (block.StateSummary, e }, err } +func (vm *VMClient) newBlockFromBuildBlock(resp *vmpb.BuildBlockResponse) (*blockClient, error) { + id, err := ids.ToID(resp.Id) + if err != nil { + return nil, err + } + + parentID, err := ids.ToID(resp.ParentId) + if err != nil { + return nil, err + } + + time, err := grpcutils.TimestampAsTime(resp.Timestamp) + return &blockClient{ + vm: vm, + id: id, + parentID: parentID, + status: choices.Processing, + bytes: resp.Bytes, + height: resp.Height, + time: time, + shouldVerifyWithCtx: resp.VerifyWithContext, + }, err +} + type blockClient struct { vm *VMClient - id ids.ID - parentID ids.ID - status choices.Status - bytes []byte - height uint64 - time time.Time + id ids.ID + parentID ids.ID + status choices.Status + bytes []byte + height uint64 + time time.Time + shouldVerifyWithCtx bool } -func (b 
*blockClient) ID() ids.ID { return b.id } +func (b *blockClient) ID() ids.ID { + return b.id +} -func (b *blockClient) Accept() error { +func (b *blockClient) Accept(ctx context.Context) error { b.status = choices.Accepted - _, err := b.vm.client.BlockAccept(context.Background(), &vmpb.BlockAcceptRequest{ + _, err := b.vm.client.BlockAccept(ctx, &vmpb.BlockAcceptRequest{ Id: b.id[:], }) return err } -func (b *blockClient) Reject() error { +func (b *blockClient) Reject(ctx context.Context) error { b.status = choices.Rejected - _, err := b.vm.client.BlockReject(context.Background(), &vmpb.BlockRejectRequest{ + _, err := b.vm.client.BlockReject(ctx, &vmpb.BlockRejectRequest{ Id: b.id[:], }) return err } -func (b *blockClient) Status() choices.Status { return b.status } +func (b *blockClient) Status() choices.Status { + return b.status +} -func (b *blockClient) Parent() ids.ID { return b.parentID } +func (b *blockClient) Parent() ids.ID { + return b.parentID +} -func (b *blockClient) Verify() error { - resp, err := b.vm.client.BlockVerify(context.Background(), &vmpb.BlockVerifyRequest{ +func (b *blockClient) Verify(ctx context.Context) error { + resp, err := b.vm.client.BlockVerify(ctx, &vmpb.BlockVerifyRequest{ Bytes: b.bytes, }) if err != nil { @@ -845,9 +892,34 @@ func (b *blockClient) Verify() error { return err } -func (b *blockClient) Bytes() []byte { return b.bytes } -func (b *blockClient) Height() uint64 { return b.height } -func (b *blockClient) Timestamp() time.Time { return b.time } +func (b *blockClient) Bytes() []byte { + return b.bytes +} + +func (b *blockClient) Height() uint64 { + return b.height +} + +func (b *blockClient) Timestamp() time.Time { + return b.time +} + +func (b *blockClient) ShouldVerifyWithContext(context.Context) (bool, error) { + return b.shouldVerifyWithCtx, nil +} + +func (b *blockClient) VerifyWithContext(ctx context.Context, blockCtx *block.Context) error { + resp, err := b.vm.client.BlockVerify(ctx, &vmpb.BlockVerifyRequest{ + 
Bytes: b.bytes, + PChainHeight: &blockCtx.PChainHeight, + }) + if err != nil { + return err + } + + b.time, err = grpcutils.TimestampAsTime(resp.Timestamp) + return err +} type summaryClient struct { vm *VMClient @@ -857,19 +929,27 @@ type summaryClient struct { bytes []byte } -func (s *summaryClient) ID() ids.ID { return s.id } -func (s *summaryClient) Height() uint64 { return s.height } -func (s *summaryClient) Bytes() []byte { return s.bytes } +func (s *summaryClient) ID() ids.ID { + return s.id +} + +func (s *summaryClient) Height() uint64 { + return s.height +} -func (s *summaryClient) Accept() (bool, error) { +func (s *summaryClient) Bytes() []byte { + return s.bytes +} + +func (s *summaryClient) Accept(ctx context.Context) (block.StateSyncMode, error) { resp, err := s.vm.client.StateSummaryAccept( - context.Background(), + ctx, &vmpb.StateSummaryAcceptRequest{ Bytes: s.bytes, }, ) if err != nil { - return false, err + return block.StateSyncSkipped, err } - return resp.Accepted, errCodeToError[resp.Err] + return block.StateSyncMode(resp.Mode), errEnumToError[resp.Err] } diff --git a/avalanchego/vms/rpcchainvm/vm_server.go b/avalanchego/vms/rpcchainvm/vm_server.go index aab9195f..b6701eff 100644 --- a/avalanchego/vms/rpcchainvm/vm_server.go +++ b/avalanchego/vms/rpcchainvm/vm_server.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm @@ -6,7 +6,9 @@ package rpcchainvm import ( "context" "encoding/json" + "errors" "fmt" + "os" "time" grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" @@ -14,7 +16,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/collectors" - "google.golang.org/grpc" "google.golang.org/protobuf/types/known/emptypb" "github.com/ava-labs/avalanchego/api/keystore/gkeystore" @@ -26,15 +27,18 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/ids/galiasreader" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/common/appsender" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/validators/gvalidators" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/vms/platformvm/warp/gwarp" "github.com/ava-labs/avalanchego/vms/rpcchainvm/ghttp" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" - "github.com/ava-labs/avalanchego/vms/rpcchainvm/gsubnetlookup" "github.com/ava-labs/avalanchego/vms/rpcchainvm/messenger" aliasreaderpb "github.com/ava-labs/avalanchego/proto/pb/aliasreader" @@ -44,18 +48,29 @@ import ( messengerpb "github.com/ava-labs/avalanchego/proto/pb/messenger" rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb" sharedmemorypb "github.com/ava-labs/avalanchego/proto/pb/sharedmemory" - subnetlookuppb "github.com/ava-labs/avalanchego/proto/pb/subnetlookup" + validatorstatepb "github.com/ava-labs/avalanchego/proto/pb/validatorstate" vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" + warppb "github.com/ava-labs/avalanchego/proto/pb/warp" ) -var _ vmpb.VMServer = &VMServer{} +var ( + _ 
vmpb.VMServer = (*VMServer)(nil) + + originalStderr = os.Stderr + + errExpectedBlockWithVerifyContext = errors.New("expected block.WithVerifyContext") +) // VMServer is a VM that is managed over RPC. type VMServer struct { vmpb.UnsafeVMServer - vm block.ChainVM - hVM block.HeightIndexedChainVM + vm block.ChainVM + // If nil, the underlying VM doesn't implement the interface. + bVM block.BuildBlockWithContextChainVM + // If nil, the underlying VM doesn't implement the interface. + hVM block.HeightIndexedChainVM + // If nil, the underlying VM doesn't implement the interface. ssVM block.StateSyncableVM processMetrics prometheus.Gatherer @@ -70,16 +85,18 @@ type VMServer struct { // NewServer returns a vm instance connected to a remote vm instance func NewServer(vm block.ChainVM) *VMServer { + bVM, _ := vm.(block.BuildBlockWithContextChainVM) hVM, _ := vm.(block.HeightIndexedChainVM) ssVM, _ := vm.(block.StateSyncableVM) return &VMServer{ vm: vm, + bVM: bVM, hVM: hVM, ssVM: ssVM, } } -func (vm *VMServer) Initialize(_ context.Context, req *vmpb.InitializeRequest) (*vmpb.InitializeResponse, error) { +func (vm *VMServer) Initialize(ctx context.Context, req *vmpb.InitializeRequest) (*vmpb.InitializeResponse, error) { subnetID, err := ids.ToID(req.SubnetId) if err != nil { return nil, err @@ -92,10 +109,18 @@ func (vm *VMServer) Initialize(_ context.Context, req *vmpb.InitializeRequest) ( if err != nil { return nil, err } + publicKey, err := bls.PublicKeyFromBytes(req.PublicKey) + if err != nil { + return nil, err + } xChainID, err := ids.ToID(req.XChainId) if err != nil { return nil, err } + cChainID, err := ids.ToID(req.CChainId) + if err != nil { + return nil, err + } avaxAssetID, err := ids.ToID(req.AvaxAssetId) if err != nil { return nil, err @@ -134,7 +159,11 @@ func (vm *VMServer) Initialize(_ context.Context, req *vmpb.InitializeRequest) ( return nil, err } - clientConn, err := grpcutils.Dial(vDBReq.ServerAddr, grpcutils.DialOptsWithMetrics(grpcClientMetrics)...) 
+ clientConn, err := grpcutils.Dial( + vDBReq.ServerAddr, + grpcutils.WithChainUnaryInterceptor(grpcClientMetrics.UnaryClientInterceptor()), + grpcutils.WithChainStreamInterceptor(grpcClientMetrics.StreamClientInterceptor()), + ) if err != nil { // Ignore closing errors to return the original error _ = vm.connCloser.Close() @@ -155,7 +184,11 @@ func (vm *VMServer) Initialize(_ context.Context, req *vmpb.InitializeRequest) ( } vm.dbManager = dbManager - clientConn, err := grpcutils.Dial(req.ServerAddr, grpcutils.DialOptsWithMetrics(grpcClientMetrics)...) + clientConn, err := grpcutils.Dial( + req.ServerAddr, + grpcutils.WithChainUnaryInterceptor(grpcClientMetrics.UnaryClientInterceptor()), + grpcutils.WithChainStreamInterceptor(grpcClientMetrics.StreamClientInterceptor()), + ) if err != nil { // Ignore closing errors to return the original error _ = vm.connCloser.Close() @@ -168,8 +201,9 @@ func (vm *VMServer) Initialize(_ context.Context, req *vmpb.InitializeRequest) ( keystoreClient := gkeystore.NewClient(keystorepb.NewKeystoreClient(clientConn)) sharedMemoryClient := gsharedmemory.NewClient(sharedmemorypb.NewSharedMemoryClient(clientConn)) bcLookupClient := galiasreader.NewClient(aliasreaderpb.NewAliasReaderClient(clientConn)) - snLookupClient := gsubnetlookup.NewClient(subnetlookuppb.NewSubnetLookupClient(clientConn)) appSenderClient := appsender.NewClient(appsenderpb.NewAppSenderClient(clientConn)) + validatorStateClient := gvalidators.NewClient(validatorstatepb.NewValidatorStateClient(clientConn)) + warpSignerClient := gwarp.NewClient(warppb.NewSignerClient(clientConn)) toEngine := make(chan common.Message, 1) vm.closed = make(chan struct{}) @@ -193,40 +227,55 @@ func (vm *VMServer) Initialize(_ context.Context, req *vmpb.InitializeRequest) ( SubnetID: subnetID, ChainID: chainID, NodeID: nodeID, + PublicKey: publicKey, XChainID: xChainID, + CChainID: cChainID, AVAXAssetID: avaxAssetID, - Log: logging.NoLog{}, + // TODO: Allow the logger to be configured by the 
client + Log: logging.NewLogger( + fmt.Sprintf("<%s Chain>", chainID), + logging.NewWrappedCore( + logging.Info, + originalStderr, + logging.Colors.ConsoleEncoder(), + ), + ), Keystore: keystoreClient, SharedMemory: sharedMemoryClient, BCLookup: bcLookupClient, - SNLookup: snLookupClient, Metrics: metrics.NewOptionalGatherer(), - // TODO: support snowman++ fields + // Signs warp messages + WarpSigner: warpSignerClient, + + ValidatorState: validatorStateClient, + // TODO: support remaining snowman++ fields + + ChainDataDir: req.ChainDataDir, } - if err := vm.vm.Initialize(vm.ctx, dbManager, req.GenesisBytes, req.UpgradeBytes, req.ConfigBytes, toEngine, nil, appSenderClient); err != nil { + if err := vm.vm.Initialize(ctx, vm.ctx, dbManager, req.GenesisBytes, req.UpgradeBytes, req.ConfigBytes, toEngine, nil, appSenderClient); err != nil { // Ignore errors closing resources to return the original error _ = vm.connCloser.Close() close(vm.closed) return nil, err } - lastAccepted, err := vm.vm.LastAccepted() + lastAccepted, err := vm.vm.LastAccepted(ctx) if err != nil { // Ignore errors closing resources to return the original error - _ = vm.vm.Shutdown() + _ = vm.vm.Shutdown(ctx) _ = vm.connCloser.Close() close(vm.closed) return nil, err } - blk, err := vm.vm.GetBlock(lastAccepted) + blk, err := vm.vm.GetBlock(ctx, lastAccepted) if err != nil { // Ignore errors closing resources to return the original error - _ = vm.vm.Shutdown() + _ = vm.vm.Shutdown(ctx) _ = vm.connCloser.Close() close(vm.closed) return nil, err @@ -241,18 +290,18 @@ func (vm *VMServer) Initialize(_ context.Context, req *vmpb.InitializeRequest) ( }, nil } -func (vm *VMServer) SetState(_ context.Context, stateReq *vmpb.SetStateRequest) (*vmpb.SetStateResponse, error) { - err := vm.vm.SetState(snow.State(stateReq.State)) +func (vm *VMServer) SetState(ctx context.Context, stateReq *vmpb.SetStateRequest) (*vmpb.SetStateResponse, error) { + err := vm.vm.SetState(ctx, snow.State(stateReq.State)) if err != nil 
{ return nil, err } - lastAccepted, err := vm.vm.LastAccepted() + lastAccepted, err := vm.vm.LastAccepted(ctx) if err != nil { return nil, err } - blk, err := vm.vm.GetBlock(lastAccepted) + blk, err := vm.vm.GetBlock(ctx, lastAccepted) if err != nil { return nil, err } @@ -267,20 +316,20 @@ func (vm *VMServer) SetState(_ context.Context, stateReq *vmpb.SetStateRequest) }, nil } -func (vm *VMServer) Shutdown(context.Context, *emptypb.Empty) (*emptypb.Empty, error) { +func (vm *VMServer) Shutdown(ctx context.Context, _ *emptypb.Empty) (*emptypb.Empty, error) { if vm.closed == nil { return &emptypb.Empty{}, nil } errs := wrappers.Errs{} - errs.Add(vm.vm.Shutdown()) + errs.Add(vm.vm.Shutdown(ctx)) close(vm.closed) vm.serverCloser.Stop() errs.Add(vm.connCloser.Close()) return &emptypb.Empty{}, errs.Err } -func (vm *VMServer) CreateHandlers(context.Context, *emptypb.Empty) (*vmpb.CreateHandlersResponse, error) { - handlers, err := vm.vm.CreateHandlers() +func (vm *VMServer) CreateHandlers(ctx context.Context, _ *emptypb.Empty) (*vmpb.CreateHandlersResponse, error) { + handlers, err := vm.vm.CreateHandlers(ctx) if err != nil { return nil, err } @@ -292,30 +341,24 @@ func (vm *VMServer) CreateHandlers(context.Context, *emptypb.Empty) (*vmpb.Creat if err != nil { return nil, err } - serverAddr := serverListener.Addr().String() + server := grpcutils.NewServer() + vm.serverCloser.Add(server) + httppb.RegisterHTTPServer(server, ghttp.NewServer(handler.Handler)) - // Start the gRPC server which serves the HTTP service - go grpcutils.Serve(serverListener, func(opts []grpc.ServerOption) *grpc.Server { - if len(opts) == 0 { - opts = append(opts, grpcutils.DefaultServerOptions...) - } - server := grpc.NewServer(opts...) 
- vm.serverCloser.Add(server) - httppb.RegisterHTTPServer(server, ghttp.NewServer(handler.Handler)) - return server - }) + // Start HTTP service + go grpcutils.Serve(serverListener, server) resp.Handlers = append(resp.Handlers, &vmpb.Handler{ Prefix: prefix, LockOptions: uint32(handler.LockOptions), - ServerAddr: serverAddr, + ServerAddr: serverListener.Addr().String(), }) } return resp, nil } -func (vm *VMServer) CreateStaticHandlers(context.Context, *emptypb.Empty) (*vmpb.CreateStaticHandlersResponse, error) { - handlers, err := vm.vm.CreateStaticHandlers() +func (vm *VMServer) CreateStaticHandlers(ctx context.Context, _ *emptypb.Empty) (*vmpb.CreateStaticHandlersResponse, error) { + handlers, err := vm.vm.CreateStaticHandlers(ctx) if err != nil { return nil, err } @@ -327,29 +370,23 @@ func (vm *VMServer) CreateStaticHandlers(context.Context, *emptypb.Empty) (*vmpb if err != nil { return nil, err } - serverAddr := serverListener.Addr().String() + server := grpcutils.NewServer() + vm.serverCloser.Add(server) + httppb.RegisterHTTPServer(server, ghttp.NewServer(handler.Handler)) - // Start the gRPC server which serves the HTTP service - go grpcutils.Serve(serverListener, func(opts []grpc.ServerOption) *grpc.Server { - if len(opts) == 0 { - opts = append(opts, grpcutils.DefaultServerOptions...) - } - server := grpc.NewServer(opts...) 
- vm.serverCloser.Add(server) - httppb.RegisterHTTPServer(server, ghttp.NewServer(handler.Handler)) - return server - }) + // Start HTTP service + go grpcutils.Serve(serverListener, server) resp.Handlers = append(resp.Handlers, &vmpb.Handler{ Prefix: prefix, LockOptions: uint32(handler.LockOptions), - ServerAddr: serverAddr, + ServerAddr: serverListener.Addr().String(), }) } return resp, nil } -func (vm *VMServer) Connected(_ context.Context, req *vmpb.ConnectedRequest) (*emptypb.Empty, error) { +func (vm *VMServer) Connected(ctx context.Context, req *vmpb.ConnectedRequest) (*emptypb.Empty, error) { nodeID, err := ids.ToNodeID(req.NodeId) if err != nil { return nil, err @@ -360,85 +397,130 @@ func (vm *VMServer) Connected(_ context.Context, req *vmpb.ConnectedRequest) (*e return nil, err } - return &emptypb.Empty{}, vm.vm.Connected(nodeID, peerVersion) + return &emptypb.Empty{}, vm.vm.Connected(ctx, nodeID, peerVersion) } -func (vm *VMServer) Disconnected(_ context.Context, req *vmpb.DisconnectedRequest) (*emptypb.Empty, error) { +func (vm *VMServer) Disconnected(ctx context.Context, req *vmpb.DisconnectedRequest) (*emptypb.Empty, error) { nodeID, err := ids.ToNodeID(req.NodeId) if err != nil { return nil, err } - return &emptypb.Empty{}, vm.vm.Disconnected(nodeID) + return &emptypb.Empty{}, vm.vm.Disconnected(ctx, nodeID) } -func (vm *VMServer) BuildBlock(context.Context, *emptypb.Empty) (*vmpb.BuildBlockResponse, error) { - blk, err := vm.vm.BuildBlock() +// If the underlying VM doesn't actually implement this method, its [BuildBlock] +// method will be called instead. 
+func (vm *VMServer) BuildBlock(ctx context.Context, req *vmpb.BuildBlockRequest) (*vmpb.BuildBlockResponse, error) { + var ( + blk snowman.Block + err error + ) + if vm.bVM == nil || req.PChainHeight == nil { + blk, err = vm.vm.BuildBlock(ctx) + } else { + blk, err = vm.bVM.BuildBlockWithContext(ctx, &block.Context{ + PChainHeight: *req.PChainHeight, + }) + } if err != nil { return nil, err } - blkID := blk.ID() - parentID := blk.Parent() + + blkWithCtx, verifyWithCtx := blk.(block.WithVerifyContext) + if verifyWithCtx { + verifyWithCtx, err = blkWithCtx.ShouldVerifyWithContext(ctx) + if err != nil { + return nil, err + } + } + + var ( + blkID = blk.ID() + parentID = blk.Parent() + ) return &vmpb.BuildBlockResponse{ - Id: blkID[:], - ParentId: parentID[:], - Bytes: blk.Bytes(), - Height: blk.Height(), - Timestamp: grpcutils.TimestampFromTime(blk.Timestamp()), + Id: blkID[:], + ParentId: parentID[:], + Bytes: blk.Bytes(), + Height: blk.Height(), + Timestamp: grpcutils.TimestampFromTime(blk.Timestamp()), + VerifyWithContext: verifyWithCtx, }, nil } -func (vm *VMServer) ParseBlock(_ context.Context, req *vmpb.ParseBlockRequest) (*vmpb.ParseBlockResponse, error) { - blk, err := vm.vm.ParseBlock(req.Bytes) +func (vm *VMServer) ParseBlock(ctx context.Context, req *vmpb.ParseBlockRequest) (*vmpb.ParseBlockResponse, error) { + blk, err := vm.vm.ParseBlock(ctx, req.Bytes) if err != nil { return nil, err } - blkID := blk.ID() - parentID := blk.Parent() + + blkWithCtx, verifyWithCtx := blk.(block.WithVerifyContext) + if verifyWithCtx { + verifyWithCtx, err = blkWithCtx.ShouldVerifyWithContext(ctx) + if err != nil { + return nil, err + } + } + + var ( + blkID = blk.ID() + parentID = blk.Parent() + ) return &vmpb.ParseBlockResponse{ - Id: blkID[:], - ParentId: parentID[:], - Status: uint32(blk.Status()), - Height: blk.Height(), - Timestamp: grpcutils.TimestampFromTime(blk.Timestamp()), + Id: blkID[:], + ParentId: parentID[:], + Status: vmpb.Status(blk.Status()), + Height: 
blk.Height(), + Timestamp: grpcutils.TimestampFromTime(blk.Timestamp()), + VerifyWithContext: verifyWithCtx, }, nil } -func (vm *VMServer) GetBlock(_ context.Context, req *vmpb.GetBlockRequest) (*vmpb.GetBlockResponse, error) { +func (vm *VMServer) GetBlock(ctx context.Context, req *vmpb.GetBlockRequest) (*vmpb.GetBlockResponse, error) { id, err := ids.ToID(req.Id) if err != nil { return nil, err } - blk, err := vm.vm.GetBlock(id) + blk, err := vm.vm.GetBlock(ctx, id) if err != nil { return &vmpb.GetBlockResponse{ - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } + blkWithCtx, verifyWithCtx := blk.(block.WithVerifyContext) + if verifyWithCtx { + verifyWithCtx, err = blkWithCtx.ShouldVerifyWithContext(ctx) + if err != nil { + return nil, err + } + } + parentID := blk.Parent() return &vmpb.GetBlockResponse{ - ParentId: parentID[:], - Bytes: blk.Bytes(), - Status: uint32(blk.Status()), - Height: blk.Height(), - Timestamp: grpcutils.TimestampFromTime(blk.Timestamp()), + ParentId: parentID[:], + Bytes: blk.Bytes(), + Status: vmpb.Status(blk.Status()), + Height: blk.Height(), + Timestamp: grpcutils.TimestampFromTime(blk.Timestamp()), + VerifyWithContext: verifyWithCtx, }, nil } -func (vm *VMServer) SetPreference(_ context.Context, req *vmpb.SetPreferenceRequest) (*emptypb.Empty, error) { +func (vm *VMServer) SetPreference(ctx context.Context, req *vmpb.SetPreferenceRequest) (*emptypb.Empty, error) { id, err := ids.ToID(req.Id) if err != nil { return nil, err } - return &emptypb.Empty{}, vm.vm.SetPreference(id) + return &emptypb.Empty{}, vm.vm.SetPreference(ctx, id) } -func (vm *VMServer) Health(ctx context.Context, req *emptypb.Empty) (*vmpb.HealthResponse, error) { - vmHealth, err := vm.vm.HealthCheck() +func (vm *VMServer) Health(ctx context.Context, _ *emptypb.Empty) (*vmpb.HealthResponse, error) { + vmHealth, err := vm.vm.HealthCheck(ctx) if err != nil { return &vmpb.HealthResponse{}, err } - dbHealth, err := vm.dbHealthChecks() + 
dbHealth, err := vm.dbHealthChecks(ctx) if err != nil { return &vmpb.HealthResponse{}, err } @@ -453,13 +535,13 @@ func (vm *VMServer) Health(ctx context.Context, req *emptypb.Empty) (*vmpb.Healt }, err } -func (vm *VMServer) dbHealthChecks() (interface{}, error) { +func (vm *VMServer) dbHealthChecks(ctx context.Context) (interface{}, error) { details := make(map[string]interface{}, len(vm.dbManager.GetDatabases())) // Check Database health for _, client := range vm.dbManager.GetDatabases() { // Shared gRPC client don't close - health, err := client.Database.HealthCheck() + health, err := client.Database.HealthCheck(ctx) if err != nil { return nil, fmt.Errorf("failed to check db health %q: %w", client.Version.String(), err) } @@ -469,14 +551,42 @@ func (vm *VMServer) dbHealthChecks() (interface{}, error) { return details, nil } -func (vm *VMServer) Version(context.Context, *emptypb.Empty) (*vmpb.VersionResponse, error) { - version, err := vm.vm.Version() +func (vm *VMServer) Version(ctx context.Context, _ *emptypb.Empty) (*vmpb.VersionResponse, error) { + version, err := vm.vm.Version(ctx) return &vmpb.VersionResponse{ Version: version, }, err } -func (vm *VMServer) AppRequest(_ context.Context, req *vmpb.AppRequestMsg) (*emptypb.Empty, error) { +func (vm *VMServer) CrossChainAppRequest(ctx context.Context, msg *vmpb.CrossChainAppRequestMsg) (*emptypb.Empty, error) { + chainID, err := ids.ToID(msg.ChainId) + if err != nil { + return nil, err + } + deadline, err := grpcutils.TimestampAsTime(msg.Deadline) + if err != nil { + return nil, err + } + return &emptypb.Empty{}, vm.vm.CrossChainAppRequest(ctx, chainID, msg.RequestId, deadline, msg.Request) +} + +func (vm *VMServer) CrossChainAppRequestFailed(ctx context.Context, msg *vmpb.CrossChainAppRequestFailedMsg) (*emptypb.Empty, error) { + chainID, err := ids.ToID(msg.ChainId) + if err != nil { + return nil, err + } + return &emptypb.Empty{}, vm.vm.CrossChainAppRequestFailed(ctx, chainID, msg.RequestId) +} + +func (vm 
*VMServer) CrossChainAppResponse(ctx context.Context, msg *vmpb.CrossChainAppResponseMsg) (*emptypb.Empty, error) { + chainID, err := ids.ToID(msg.ChainId) + if err != nil { + return nil, err + } + return &emptypb.Empty{}, vm.vm.CrossChainAppResponse(ctx, chainID, msg.RequestId, msg.Response) +} + +func (vm *VMServer) AppRequest(ctx context.Context, req *vmpb.AppRequestMsg) (*emptypb.Empty, error) { nodeID, err := ids.ToNodeID(req.NodeId) if err != nil { return nil, err @@ -485,31 +595,31 @@ func (vm *VMServer) AppRequest(_ context.Context, req *vmpb.AppRequestMsg) (*emp if err != nil { return nil, err } - return &emptypb.Empty{}, vm.vm.AppRequest(nodeID, req.RequestId, deadline, req.Request) + return &emptypb.Empty{}, vm.vm.AppRequest(ctx, nodeID, req.RequestId, deadline, req.Request) } -func (vm *VMServer) AppRequestFailed(_ context.Context, req *vmpb.AppRequestFailedMsg) (*emptypb.Empty, error) { +func (vm *VMServer) AppRequestFailed(ctx context.Context, req *vmpb.AppRequestFailedMsg) (*emptypb.Empty, error) { nodeID, err := ids.ToNodeID(req.NodeId) if err != nil { return nil, err } - return &emptypb.Empty{}, vm.vm.AppRequestFailed(nodeID, req.RequestId) + return &emptypb.Empty{}, vm.vm.AppRequestFailed(ctx, nodeID, req.RequestId) } -func (vm *VMServer) AppResponse(_ context.Context, req *vmpb.AppResponseMsg) (*emptypb.Empty, error) { +func (vm *VMServer) AppResponse(ctx context.Context, req *vmpb.AppResponseMsg) (*emptypb.Empty, error) { nodeID, err := ids.ToNodeID(req.NodeId) if err != nil { return nil, err } - return &emptypb.Empty{}, vm.vm.AppResponse(nodeID, req.RequestId, req.Response) + return &emptypb.Empty{}, vm.vm.AppResponse(ctx, nodeID, req.RequestId, req.Response) } -func (vm *VMServer) AppGossip(_ context.Context, req *vmpb.AppGossipMsg) (*emptypb.Empty, error) { +func (vm *VMServer) AppGossip(ctx context.Context, req *vmpb.AppGossipMsg) (*emptypb.Empty, error) { nodeID, err := ids.ToNodeID(req.NodeId) if err != nil { return nil, err } - return 
&emptypb.Empty{}, vm.vm.AppGossip(nodeID, req.Msg) + return &emptypb.Empty{}, vm.vm.AppGossip(ctx, nodeID, req.Msg) } func (vm *VMServer) Gather(context.Context, *emptypb.Empty) (*vmpb.GatherResponse, error) { @@ -531,7 +641,7 @@ func (vm *VMServer) Gather(context.Context, *emptypb.Empty) (*vmpb.GatherRespons return &vmpb.GatherResponse{MetricFamilies: mfs}, err } -func (vm *VMServer) GetAncestors(_ context.Context, req *vmpb.GetAncestorsRequest) (*vmpb.GetAncestorsResponse, error) { +func (vm *VMServer) GetAncestors(ctx context.Context, req *vmpb.GetAncestorsRequest) (*vmpb.GetAncestorsResponse, error) { blkID, err := ids.ToID(req.BlkId) if err != nil { return nil, err @@ -541,6 +651,7 @@ func (vm *VMServer) GetAncestors(_ context.Context, req *vmpb.GetAncestorsReques maxBlocksRetrivalTime := time.Duration(req.MaxBlocksRetrivalTime) blocks, err := block.GetAncestors( + ctx, vm.vm, blkID, maxBlksNum, @@ -571,68 +682,71 @@ func (vm *VMServer) BatchedParseBlock( }, nil } -func (vm *VMServer) VerifyHeightIndex(context.Context, *emptypb.Empty) (*vmpb.VerifyHeightIndexResponse, error) { +func (vm *VMServer) VerifyHeightIndex(ctx context.Context, _ *emptypb.Empty) (*vmpb.VerifyHeightIndexResponse, error) { var err error if vm.hVM != nil { - err = vm.hVM.VerifyHeightIndex() + err = vm.hVM.VerifyHeightIndex(ctx) } else { err = block.ErrHeightIndexedVMNotImplemented } return &vmpb.VerifyHeightIndexResponse{ - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } -func (vm *VMServer) GetBlockIDAtHeight(ctx context.Context, req *vmpb.GetBlockIDAtHeightRequest) (*vmpb.GetBlockIDAtHeightResponse, error) { +func (vm *VMServer) GetBlockIDAtHeight( + ctx context.Context, + req *vmpb.GetBlockIDAtHeightRequest, +) (*vmpb.GetBlockIDAtHeightResponse, error) { var ( blkID ids.ID err error ) if vm.hVM != nil { - blkID, err = vm.hVM.GetBlockIDAtHeight(req.Height) + blkID, err = vm.hVM.GetBlockIDAtHeight(ctx, req.Height) } else { err = 
block.ErrHeightIndexedVMNotImplemented } return &vmpb.GetBlockIDAtHeightResponse{ BlkId: blkID[:], - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } -func (vm *VMServer) StateSyncEnabled(context.Context, *emptypb.Empty) (*vmpb.StateSyncEnabledResponse, error) { +func (vm *VMServer) StateSyncEnabled(ctx context.Context, _ *emptypb.Empty) (*vmpb.StateSyncEnabledResponse, error) { var ( enabled bool err error ) if vm.ssVM != nil { - enabled, err = vm.ssVM.StateSyncEnabled() + enabled, err = vm.ssVM.StateSyncEnabled(ctx) } return &vmpb.StateSyncEnabledResponse{ Enabled: enabled, - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } func (vm *VMServer) GetOngoingSyncStateSummary( - context.Context, - *emptypb.Empty, + ctx context.Context, + _ *emptypb.Empty, ) (*vmpb.GetOngoingSyncStateSummaryResponse, error) { var ( summary block.StateSummary err error ) if vm.ssVM != nil { - summary, err = vm.ssVM.GetOngoingSyncStateSummary() + summary, err = vm.ssVM.GetOngoingSyncStateSummary(ctx) } else { err = block.ErrStateSyncableVMNotImplemented } if err != nil { return &vmpb.GetOngoingSyncStateSummaryResponse{ - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } @@ -644,23 +758,20 @@ func (vm *VMServer) GetOngoingSyncStateSummary( }, nil } -func (vm *VMServer) GetLastStateSummary( - ctx context.Context, - empty *emptypb.Empty, -) (*vmpb.GetLastStateSummaryResponse, error) { +func (vm *VMServer) GetLastStateSummary(ctx context.Context, _ *emptypb.Empty) (*vmpb.GetLastStateSummaryResponse, error) { var ( summary block.StateSummary err error ) if vm.ssVM != nil { - summary, err = vm.ssVM.GetLastStateSummary() + summary, err = vm.ssVM.GetLastStateSummary(ctx) } else { err = block.ErrStateSyncableVMNotImplemented } if err != nil { return &vmpb.GetLastStateSummaryResponse{ - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } @@ -681,14 +792,14 @@ func (vm *VMServer) 
ParseStateSummary( err error ) if vm.ssVM != nil { - summary, err = vm.ssVM.ParseStateSummary(req.Bytes) + summary, err = vm.ssVM.ParseStateSummary(ctx, req.Bytes) } else { err = block.ErrStateSyncableVMNotImplemented } if err != nil { return &vmpb.ParseStateSummaryResponse{ - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } @@ -708,14 +819,14 @@ func (vm *VMServer) GetStateSummary( err error ) if vm.ssVM != nil { - summary, err = vm.ssVM.GetStateSummary(req.Height) + summary, err = vm.ssVM.GetStateSummary(ctx, req.Height) } else { err = block.ErrStateSyncableVMNotImplemented } if err != nil { return &vmpb.GetStateSummaryResponse{ - Err: errorToErrCode[err], + Err: errorToErrEnum[err], }, errorToRPCError(err) } @@ -726,69 +837,83 @@ func (vm *VMServer) GetStateSummary( }, nil } -func (vm *VMServer) BlockVerify(_ context.Context, req *vmpb.BlockVerifyRequest) (*vmpb.BlockVerifyResponse, error) { - blk, err := vm.vm.ParseBlock(req.Bytes) +func (vm *VMServer) BlockVerify(ctx context.Context, req *vmpb.BlockVerifyRequest) (*vmpb.BlockVerifyResponse, error) { + blk, err := vm.vm.ParseBlock(ctx, req.Bytes) if err != nil { return nil, err } - if err := blk.Verify(); err != nil { + + if req.PChainHeight == nil { + err = blk.Verify(ctx) + } else { + blkWithCtx, ok := blk.(block.WithVerifyContext) + if !ok { + return nil, fmt.Errorf("%w but got %T", errExpectedBlockWithVerifyContext, blk) + } + blockCtx := &block.Context{ + PChainHeight: *req.PChainHeight, + } + err = blkWithCtx.VerifyWithContext(ctx, blockCtx) + } + if err != nil { return nil, err } + return &vmpb.BlockVerifyResponse{ Timestamp: grpcutils.TimestampFromTime(blk.Timestamp()), }, nil } -func (vm *VMServer) BlockAccept(_ context.Context, req *vmpb.BlockAcceptRequest) (*emptypb.Empty, error) { +func (vm *VMServer) BlockAccept(ctx context.Context, req *vmpb.BlockAcceptRequest) (*emptypb.Empty, error) { id, err := ids.ToID(req.Id) if err != nil { return nil, err } - blk, err := 
vm.vm.GetBlock(id) + blk, err := vm.vm.GetBlock(ctx, id) if err != nil { return nil, err } - if err := blk.Accept(); err != nil { + if err := blk.Accept(ctx); err != nil { return nil, err } return &emptypb.Empty{}, nil } -func (vm *VMServer) BlockReject(_ context.Context, req *vmpb.BlockRejectRequest) (*emptypb.Empty, error) { +func (vm *VMServer) BlockReject(ctx context.Context, req *vmpb.BlockRejectRequest) (*emptypb.Empty, error) { id, err := ids.ToID(req.Id) if err != nil { return nil, err } - blk, err := vm.vm.GetBlock(id) + blk, err := vm.vm.GetBlock(ctx, id) if err != nil { return nil, err } - if err := blk.Reject(); err != nil { + if err := blk.Reject(ctx); err != nil { return nil, err } return &emptypb.Empty{}, nil } func (vm *VMServer) StateSummaryAccept( - _ context.Context, + ctx context.Context, req *vmpb.StateSummaryAcceptRequest, ) (*vmpb.StateSummaryAcceptResponse, error) { var ( - accepted bool - err error + mode = block.StateSyncSkipped + err error ) if vm.ssVM != nil { var summary block.StateSummary - summary, err = vm.ssVM.ParseStateSummary(req.Bytes) + summary, err = vm.ssVM.ParseStateSummary(ctx, req.Bytes) if err == nil { - accepted, err = summary.Accept() + mode, err = summary.Accept(ctx) } } else { err = block.ErrStateSyncableVMNotImplemented } return &vmpb.StateSummaryAcceptResponse{ - Accepted: accepted, - Err: errorToErrCode[err], + Mode: vmpb.StateSummaryAcceptResponse_Mode(mode), + Err: errorToErrEnum[err], }, errorToRPCError(err) } diff --git a/avalanchego/vms/rpcchainvm/vm_test.go b/avalanchego/vms/rpcchainvm/vm_test.go index f92decdb..5c1953d3 100644 --- a/avalanchego/vms/rpcchainvm/vm_test.go +++ b/avalanchego/vms/rpcchainvm/vm_test.go @@ -1,418 +1,209 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package rpcchainvm import ( - "bytes" "context" "fmt" - "io" - "net" - "net/http" "os" + "os/exec" "reflect" - "sort" "testing" + "time" - stdjson "encoding/json" + "golang.org/x/exp/slices" "github.com/golang/mock/gomock" - "github.com/gorilla/mux" - "github.com/gorilla/websocket" - - gorillarpc "github.com/gorilla/rpc/v2" - - hclog "github.com/hashicorp/go-hclog" - plugin "github.com/hashicorp/go-plugin" - "github.com/stretchr/testify/require" - "google.golang.org/grpc" - "google.golang.org/protobuf/types/known/emptypb" - - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/vms/rpcchainvm/ghttp" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/rpcchainvm/grpcutils" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" + "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime/subprocess" - httppb "github.com/ava-labs/avalanchego/proto/pb/http" vmpb "github.com/ava-labs/avalanchego/proto/pb/vm" ) -var ( - _ plugin.Plugin = &testVMPlugin{} - _ plugin.GRPCPlugin = &testVMPlugin{} +const ( + chainVMTestKey = "chainVMTest" + stateSyncEnabledTestKey = "stateSyncEnabledTest" + getOngoingSyncStateSummaryTestKey = "getOngoingSyncStateSummaryTest" + getLastStateSummaryTestKey = "getLastStateSummaryTest" + parseStateSummaryTestKey = "parseStateSummaryTest" + getStateSummaryTestKey = "getStateSummaryTest" + acceptStateSummaryTestKey = "acceptStateSummaryTest" + lastAcceptedBlockPostStateSummaryAcceptTestKey = "lastAcceptedBlockPostStateSummaryAcceptTest" + contextTestKey = "contextTest" + batchedParseBlockCachingTestKey = "batchedParseBlockCachingTest" ) -// Test_VMServerInterface ensures that the RPCs methods defined by VMServer +var TestServerPluginMap = map[string]func(*testing.T, bool) (block.ChainVM, 
*gomock.Controller){ + stateSyncEnabledTestKey: stateSyncEnabledTestPlugin, + getOngoingSyncStateSummaryTestKey: getOngoingSyncStateSummaryTestPlugin, + getLastStateSummaryTestKey: getLastStateSummaryTestPlugin, + parseStateSummaryTestKey: parseStateSummaryTestPlugin, + getStateSummaryTestKey: getStateSummaryTestPlugin, + acceptStateSummaryTestKey: acceptStateSummaryTestPlugin, + lastAcceptedBlockPostStateSummaryAcceptTestKey: lastAcceptedBlockPostStateSummaryAcceptTestPlugin, + contextTestKey: contextEnabledTestPlugin, + batchedParseBlockCachingTestKey: batchedParseBlockCachingTestPlugin, +} + +// helperProcess helps with creating the subnet binary for testing. +func helperProcess(s ...string) *exec.Cmd { + cs := []string{"-test.run=TestHelperProcess", "--"} + cs = append(cs, s...) + env := []string{ + "TEST_PROCESS=1", + } + run := os.Args[0] + cmd := exec.Command(run, cs...) + env = append(env, os.Environ()...) + cmd.Env = env + return cmd +} + +func TestHelperProcess(t *testing.T) { + if os.Getenv("TEST_PROCESS") != "1" { + return + } + + args := os.Args + for len(args) > 0 { + if args[0] == "--" { + args = args[1:] + break + } + args = args[1:] + } + + if len(args) == 0 { + fmt.Fprintf(os.Stderr, "failed to receive testKey\n") + os.Exit(2) + } + + testKey := args[0] + if testKey == "dummy" { + // block till killed + select {} + } + + mockedVM, ctrl := TestServerPluginMap[testKey](t, true /*loadExpectations*/) + err := Serve(context.Background(), mockedVM) + if err != nil { + os.Exit(1) + } + ctrl.Finish() + + os.Exit(0) +} + +// TestVMServerInterface ensures that the RPCs methods defined by VMServer // interface are implemented. 
-func Test_VMServerInterface(t *testing.T) { +func TestVMServerInterface(t *testing.T) { var wantMethods, gotMethods []string pb := reflect.TypeOf((*vmpb.VMServer)(nil)).Elem() for i := 0; i < pb.NumMethod()-1; i++ { wantMethods = append(wantMethods, pb.Method(i).Name) } - sort.Strings(wantMethods) + slices.Sort(wantMethods) impl := reflect.TypeOf(&VMServer{}) for i := 0; i < impl.NumMethod(); i++ { gotMethods = append(gotMethods, impl.Method(i).Name) } - sort.Strings(gotMethods) + slices.Sort(gotMethods) if !reflect.DeepEqual(gotMethods, wantMethods) { t.Errorf("\ngot: %q\nwant: %q", gotMethods, wantMethods) } } -// chainVMTestPlugin creates the server plugin needed for the test -func chainVMTestPlugin(t *testing.T, _ bool) (plugin.Plugin, *gomock.Controller) { - // test key is "chainVMTest" - ctrl := gomock.NewController(t) - - return NewTestVM(&TestSubnetVM{ - logger: hclog.New(&hclog.LoggerOptions{ - Level: hclog.Trace, - Output: os.Stderr, - JSONFormat: true, - }), - }), ctrl -} - -// Test_VMCreateHandlers tests the Handle and HandleSimple RPCs by creating a plugin and -// serving the handlers exposed by the subnet. The test then will exercise the service -// as a regression test. 
-func Test_VMCreateHandlers(t *testing.T) { - require := require.New(t) - pr := &pingRequest{ - Version: "2.0", - Method: "subnet.ping", - Params: []string{}, - ID: "1", - } - pingBody, err := stdjson.Marshal(pr) - require.NoError(err) - - scenarios := []struct { - name string - payload []byte +func TestRuntimeSubprocessBootstrap(t *testing.T) { + tests := []struct { + name string + config *subprocess.Config + assertErr func(require *require.Assertions, err error) + // if false vm initialize bootstrap will fail + serveVM bool }{ { - name: "test HTTP gRPC service", - payload: pingBody, + name: "happy path", + config: &subprocess.Config{ + Stderr: logging.NoLog{}, + Stdout: logging.NoLog{}, + Log: logging.NoLog{}, + HandshakeTimeout: runtime.DefaultHandshakeTimeout, + }, + assertErr: func(require *require.Assertions, err error) { + require.NoError(err) + }, + serveVM: true, + }, + { + name: "invalid stderr", + config: &subprocess.Config{ + Stdout: logging.NoLog{}, + Log: logging.NoLog{}, + HandshakeTimeout: runtime.DefaultHandshakeTimeout, + }, + assertErr: func(require *require.Assertions, err error) { + require.ErrorIs(err, runtime.ErrInvalidConfig) + }, + serveVM: true, + }, + { + name: "handshake timeout", + config: &subprocess.Config{ + Stderr: logging.NoLog{}, + Stdout: logging.NoLog{}, + Log: logging.NoLog{}, + HandshakeTimeout: time.Microsecond, + }, + assertErr: func(require *require.Assertions, err error) { + require.ErrorIs(err, runtime.ErrHandshakeFailed) + }, + serveVM: false, }, } - for _, scenario := range scenarios { - t.Run(scenario.name, func(t *testing.T) { - process := helperProcess(chainVMTestKey) - c := plugin.NewClient(&plugin.ClientConfig{ - Cmd: process, - HandshakeConfig: TestHandshake, - Plugins: TestClientPluginMap, - AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC}, - }) - defer c.Kill() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) - _, err := c.Start() - require.NoErrorf(err, 
"failed to start plugin: %v", err) - - if v := c.Protocol(); v != plugin.ProtocolGRPC { - require.NoErrorf(err, "invalid protocol %q: :%v", c.Protocol(), err) - } - - // Get the plugin client. - client, err := c.Client() - require.NoErrorf(err, "failed to get plugin client: %v", err) - - // Grab the vm implementation. - raw, err := client.Dispense(chainVMTestKey) - require.NoErrorf(err, "failed to dispense plugin: %v", err) - - // Get vm client. - vm, ok := raw.(*TestVMClient) - require.True(ok) - - // Get the handlers exposed by the subnet vm. - handlers, err := vm.CreateHandlers() - require.NoErrorf(err, "failed to get handlers: %v", err) - - r := mux.NewRouter() - for ep, handler := range handlers { - r.Handle(ep, handler.Handler) - } - listener, err := net.Listen("tcp", "localhost:0") - require.NoErrorf(err, "failed to create listener: %v", err) + ctrl := gomock.NewController(t) + vm := mocks.NewMockChainVM(ctrl) + defer ctrl.Finish() - go func() { - err := http.Serve(listener, r) // #nosec G114 - require.NoErrorf(err, "failed to serve HTTP: %v", err) - }() + listener, err := grpcutils.NewListener() + require.NoError(err) - target := listener.Addr().String() + err = os.Setenv(runtime.EngineAddressKey, listener.Addr().String()) + require.NoError(err) - for endpoint := range handlers { - switch endpoint { - case "/rpc": - err := testHTTPPingRequest(target, endpoint, scenario.payload) - require.NoErrorf(err, "%s rpc ping failed: %v", endpoint, err) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - case "/ws": - // expected number of msg echos to receive from websocket server. - // This test is sanity for conn hijack and server push. 
- expectedMsgCount := 5 - err := testWebsocketEchoRequest(target, endpoint, expectedMsgCount, scenario.payload) - require.NoErrorf(err, "%s websocket echo failed: %v", endpoint, err) - default: - t.Fatal("unknown handler") - } + if test.serveVM { + go func() { + _ = Serve(ctx, vm) + }() } - }) - } -} - -func testHTTPPingRequest(target, endpoint string, payload []byte) error { - req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s%s", target, endpoint), bytes.NewBuffer(payload)) - if err != nil { - return err - } - req.Header.Set("Content-Type", "application/json") - httpClient := new(http.Client) - resp, err := httpClient.Do(req) - if err != nil { - return fmt.Errorf("failed to dial test server: %v", err) - } - defer resp.Body.Close() - - pb, err := io.ReadAll(resp.Body) - if err != nil { - return err - } - var ping testResult - err = stdjson.Unmarshal(pb, &ping) - if err != nil { - return err - } - - if !ping.Result.Success { - return fmt.Errorf("want ping success: true: got %v", ping.Result.Success) - } - return nil -} - -func testWebsocketEchoRequest(target, endpoint string, expectedMsgCount int, payload []byte) error { - dialTarget := fmt.Sprintf("ws://%s%s", target, endpoint) - cli, _, err := websocket.DefaultDialer.Dial(dialTarget, nil) - if err != nil { - return err - } - defer cli.Close() - - err = cli.WriteMessage(websocket.TextMessage, payload) - if err != nil { - return err - } - - i := 0 - for i < expectedMsgCount { - i++ - // TODO: verify message body - _, _, err := cli.ReadMessage() - if err != nil { - return err - } - } - - // TODO more robust test... 
- if i != expectedMsgCount { - return fmt.Errorf("want (%d) messages got (%d)", expectedMsgCount, i) - } - return nil -} - -type TestVM interface { - CreateHandlers() (map[string]*common.HTTPHandler, error) -} - -func NewTestServer(vm TestVM) *TestVMServer { - return &TestVMServer{ - vm: vm, - } -} - -type TestVMServer struct { - vmpb.UnimplementedVMServer - vm TestVM - serverCloser grpcutils.ServerCloser -} - -func (vm *TestVMServer) CreateHandlers(context.Context, *emptypb.Empty) (*vmpb.CreateHandlersResponse, error) { - handlers, err := vm.vm.CreateHandlers() - if err != nil { - return nil, err - } - - resp := &vmpb.CreateHandlersResponse{} - for prefix, h := range handlers { - handler := h - - // start the http server - serverListener, err := grpcutils.NewListener() - if err != nil { - return nil, err - } - serverAddr := serverListener.Addr().String() - - go grpcutils.Serve(serverListener, func(opts []grpc.ServerOption) *grpc.Server { - if len(opts) == 0 { - opts = append(opts, grpcutils.DefaultServerOptions...) + status, stopper, err := subprocess.Bootstrap( + context.Background(), + listener, + helperProcess("dummy"), + test.config, + ) + if err == nil { + require.NotEmpty(status.Addr) + stopper.Stop(ctx) } - server := grpc.NewServer(opts...) 
- vm.serverCloser.Add(server) - httppb.RegisterHTTPServer(server, ghttp.NewServer(handler.Handler)) - return server - }) - - resp.Handlers = append(resp.Handlers, &vmpb.Handler{ - Prefix: prefix, - LockOptions: uint32(handler.LockOptions), - ServerAddr: serverAddr, + test.assertErr(require, err) }) } - return resp, nil -} - -type TestVMClient struct { - client vmpb.VMClient - conns []*grpc.ClientConn -} - -func NewTestClient(client vmpb.VMClient) *TestVMClient { - return &TestVMClient{ - client: client, - } -} - -func (vm *TestVMClient) CreateHandlers() (map[string]*common.HTTPHandler, error) { - resp, err := vm.client.CreateHandlers(context.Background(), &emptypb.Empty{}) - if err != nil { - return nil, err - } - - handlers := make(map[string]*common.HTTPHandler, len(resp.Handlers)) - for _, handler := range resp.Handlers { - clientConn, err := grpcutils.Dial(handler.ServerAddr) - if err != nil { - return nil, err - } - - vm.conns = append(vm.conns, clientConn) - handlers[handler.Prefix] = &common.HTTPHandler{ - LockOptions: common.LockOption(handler.LockOptions), - Handler: ghttp.NewClient(httppb.NewHTTPClient(clientConn)), - } - } - return handlers, nil -} - -type testVMPlugin struct { - plugin.NetRPCUnsupportedPlugin - vm TestVM -} - -func NewTestVM(vm *TestSubnetVM) plugin.Plugin { - return &testVMPlugin{vm: vm} -} - -func (p *testVMPlugin) GRPCServer(_ *plugin.GRPCBroker, s *grpc.Server) error { - vmpb.RegisterVMServer(s, NewTestServer(p.vm)) - return nil -} - -func (p *testVMPlugin) GRPCClient(_ context.Context, _ *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return NewTestClient(vmpb.NewVMClient(c)), nil -} - -type TestSubnetVM struct { - logger hclog.Logger -} - -func (vm *TestSubnetVM) CreateHandlers() (map[string]*common.HTTPHandler, error) { - apis := make(map[string]*common.HTTPHandler) - - testEchoMsgCount := 5 - apis["/ws"] = &common.HTTPHandler{ - LockOptions: common.NoLock, Handler: websocketEchoHandler(testEchoMsgCount), - } - 
rpcServer, err := getTestRPCServer() - if err != nil { - return nil, err - } - - apis["/rpc"] = &common.HTTPHandler{ - LockOptions: common.NoLock, Handler: rpcServer, - } - return apis, nil -} - -type PingService struct{} - -type PingReply struct { - Success bool `json:"success"` -} - -type pingRequest struct { - Version string `json:"jsonrpc"` - Method string `json:"method"` - Params []string `json:"params"` - ID string `json:"id"` -} - -type testResult struct { - Result PingReply `json:"result"` -} - -func (p *PingService) Ping(_ *http.Request, _ *struct{}, reply *PingReply) (err error) { - reply.Success = true - return nil -} - -func getTestRPCServer() (*gorillarpc.Server, error) { - server := gorillarpc.NewServer() - server.RegisterCodec(json.NewCodec(), "application/json") - server.RegisterCodec(json.NewCodec(), "application/json;charset=UTF-8") - if err := server.RegisterService(&PingService{}, "subnet"); err != nil { - return nil, fmt.Errorf("failed to create rpc server %v", err) - } - return server, nil -} - -// websocketEchoHandler upgrades the request and sends back N(msgCount) -// echos. -func websocketEchoHandler(msgCount int) http.Handler { - upgrader := websocket.Upgrader{} // use default options - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - c, err := upgrader.Upgrade(w, r, nil) - if err != nil { - return - } - defer c.Close() - - mt, b, err := c.ReadMessage() - if err != nil { - if err != io.EOF { - return - } - return - } - for i := 0; i < msgCount; i++ { - err = c.WriteMessage(mt, b) - if err != nil { - return - } - } - }) } diff --git a/avalanchego/vms/rpcchainvm/with_context_vm_test.go b/avalanchego/vms/rpcchainvm/with_context_vm_test.go new file mode 100644 index 00000000..8fd85f30 --- /dev/null +++ b/avalanchego/vms/rpcchainvm/with_context_vm_test.go @@ -0,0 +1,120 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package rpcchainvm + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block/mocks" + "github.com/ava-labs/avalanchego/version" +) + +var ( + _ block.ChainVM = ContextEnabledVMMock{} + _ block.BuildBlockWithContextChainVM = ContextEnabledVMMock{} + + _ snowman.Block = ContextEnabledBlockMock{} + _ block.WithVerifyContext = ContextEnabledBlockMock{} + + blockContext = &block.Context{ + PChainHeight: 1, + } + + blkID = ids.ID{1} + parentID = ids.ID{0} + blkBytes = []byte{0} +) + +type ContextEnabledVMMock struct { + *mocks.MockChainVM + *mocks.MockBuildBlockWithContextChainVM +} + +type ContextEnabledBlockMock struct { + *snowman.MockBlock + *mocks.MockWithVerifyContext +} + +func contextEnabledTestPlugin(t *testing.T, loadExpectations bool) (block.ChainVM, *gomock.Controller) { + // test key is "contextTestKey" + + // create mock + ctrl := gomock.NewController(t) + ctxVM := ContextEnabledVMMock{ + MockChainVM: mocks.NewMockChainVM(ctrl), + MockBuildBlockWithContextChainVM: mocks.NewMockBuildBlockWithContextChainVM(ctrl), + } + + if loadExpectations { + ctxBlock := ContextEnabledBlockMock{ + MockBlock: snowman.NewMockBlock(ctrl), + MockWithVerifyContext: mocks.NewMockWithVerifyContext(ctrl), + } + gomock.InOrder( + // Initialize + ctxVM.MockChainVM.EXPECT().Initialize( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + gomock.Any(), + ).Return(nil).Times(1), + ctxVM.MockChainVM.EXPECT().LastAccepted(gomock.Any()).Return(preSummaryBlk.ID(), nil).Times(1), + ctxVM.MockChainVM.EXPECT().GetBlock(gomock.Any(), 
gomock.Any()).Return(preSummaryBlk, nil).Times(1), + + // BuildBlockWithContext + ctxVM.MockBuildBlockWithContextChainVM.EXPECT().BuildBlockWithContext(gomock.Any(), blockContext).Return(ctxBlock, nil).Times(1), + ctxBlock.MockWithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(true, nil).Times(1), + ctxBlock.MockBlock.EXPECT().ID().Return(blkID).Times(1), + ctxBlock.MockBlock.EXPECT().Parent().Return(parentID).Times(1), + ctxBlock.MockBlock.EXPECT().Bytes().Return(blkBytes).Times(1), + ctxBlock.MockBlock.EXPECT().Height().Return(uint64(1)).Times(1), + ctxBlock.MockBlock.EXPECT().Timestamp().Return(time.Now()).Times(1), + + // VerifyWithContext + ctxVM.MockChainVM.EXPECT().ParseBlock(gomock.Any(), blkBytes).Return(ctxBlock, nil).Times(1), + ctxBlock.MockWithVerifyContext.EXPECT().VerifyWithContext(gomock.Any(), blockContext).Return(nil).Times(1), + ctxBlock.MockBlock.EXPECT().Timestamp().Return(time.Now()).Times(1), + ) + } + + return ctxVM, ctrl +} + +func TestContextVMSummary(t *testing.T) { + require := require.New(t) + testKey := contextTestKey + + // Create and start the plugin + vm, stopper := buildClientHelper(require, testKey) + defer stopper.Stop(context.Background()) + + ctx := snow.DefaultContextTest() + dbManager := manager.NewMemDB(version.Semantic1_0_0) + + err := vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil) + require.NoError(err) + + blkIntf, err := vm.BuildBlockWithContext(context.Background(), blockContext) + require.NoError(err) + + blk, ok := blkIntf.(block.WithVerifyContext) + require.True(ok) + + shouldVerify, err := blk.ShouldVerifyWithContext(context.Background()) + require.NoError(err) + require.True(shouldVerify) + + err = blk.VerifyWithContext(context.Background(), blockContext) + require.NoError(err) +} diff --git a/avalanchego/vms/secp256k1fx/credential.go b/avalanchego/vms/secp256k1fx/credential.go index 6d2e4803..707a6b3f 100644 --- a/avalanchego/vms/secp256k1fx/credential.go +++ 
b/avalanchego/vms/secp256k1fx/credential.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -8,14 +8,14 @@ import ( "errors" "fmt" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" ) -var errNilCredential = errors.New("nil credential") +var ErrNilCredential = errors.New("nil credential") type Credential struct { - Sigs [][crypto.SECP256K1RSigLen]byte `serialize:"true" json:"signatures"` + Sigs [][secp256k1.SignatureLen]byte `serialize:"true" json:"signatures"` } // MarshalJSON marshals [cr] to JSON @@ -38,7 +38,7 @@ func (cr *Credential) MarshalJSON() ([]byte, error) { func (cr *Credential) Verify() error { switch { case cr == nil: - return errNilCredential + return ErrNilCredential default: return nil } diff --git a/avalanchego/vms/secp256k1fx/credential_test.go b/avalanchego/vms/secp256k1fx/credential_test.go index 6a6e660b..a17daa3c 100644 --- a/avalanchego/vms/secp256k1fx/credential_test.go +++ b/avalanchego/vms/secp256k1fx/credential_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/verify" ) @@ -23,7 +23,7 @@ func TestCredentialVerify(t *testing.T) { func TestCredentialVerifyNil(t *testing.T) { require := require.New(t) cred := (*Credential)(nil) - require.ErrorIs(cred.Verify(), errNilCredential) + require.ErrorIs(cred.Verify(), ErrNilCredential) } func TestCredentialSerialize(t *testing.T) { @@ -58,7 +58,7 @@ func TestCredentialSerialize(t *testing.T) { 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x00, } - cred := Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{ + cred := Credential{Sigs: [][secp256k1.SignatureLen]byte{ { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, diff --git a/avalanchego/vms/secp256k1fx/factory.go b/avalanchego/vms/secp256k1fx/factory.go index 9952fb66..ae2463a1 100644 --- a/avalanchego/vms/secp256k1fx/factory.go +++ b/avalanchego/vms/secp256k1fx/factory.go @@ -1,16 +1,16 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" ) var ( - _ vms.Factory = &Factory{} + _ vms.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'s', 'e', 'c', 'p', '2', '5', '6', 'k', '1', 'f', 'x'} @@ -18,4 +18,6 @@ var ( type Factory struct{} -func (f *Factory) New(*snow.Context) (interface{}, error) { return &Fx{}, nil } +func (*Factory) New(logging.Logger) (interface{}, error) { + return &Fx{}, nil +} diff --git a/avalanchego/vms/secp256k1fx/factory_test.go b/avalanchego/vms/secp256k1fx/factory_test.go index 644fe21f..43516499 100644 --- a/avalanchego/vms/secp256k1fx/factory_test.go +++ b/avalanchego/vms/secp256k1fx/factory_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -7,12 +7,14 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { require := require.New(t) factory := Factory{} - fx, err := factory.New(nil) + fx, err := factory.New(logging.NoLog{}) require.NoError(err) require.NotNil(fx) } diff --git a/avalanchego/vms/secp256k1fx/fx.go b/avalanchego/vms/secp256k1fx/fx.go index d2e9053f..15e10617 100644 --- a/avalanchego/vms/secp256k1fx/fx.go +++ b/avalanchego/vms/secp256k1fx/fx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/avalanchego/cache" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -22,26 +22,28 @@ const ( ) var ( - errWrongVMType = errors.New("wrong vm type") - errWrongTxType = errors.New("wrong tx type") - errWrongOpType = errors.New("wrong operation type") - errWrongUTXOType = errors.New("wrong utxo type") - errWrongInputType = errors.New("wrong input type") - errWrongCredentialType = errors.New("wrong credential type") - errWrongOwnerType = errors.New("wrong owner type") - errWrongNumberOfUTXOs = errors.New("wrong number of utxos for the operation") - errWrongMintCreated = errors.New("wrong mint output created from the operation") - errTimelocked = errors.New("output is time locked") - errTooManySigners = errors.New("input has more signers than expected") - errTooFewSigners = errors.New("input has less signers than expected") - errInputOutputIndexOutOfBounds = errors.New("input referenced a nonexistent address in the output") - errInputCredentialSignersMismatch = errors.New("input expected a different number of signers than provided in the credential") + ErrWrongVMType = errors.New("wrong vm type") + ErrWrongTxType = errors.New("wrong tx type") + ErrWrongOpType = errors.New("wrong operation type") + ErrWrongUTXOType = errors.New("wrong utxo type") + ErrWrongInputType = errors.New("wrong input type") + ErrWrongCredentialType = errors.New("wrong credential type") + ErrWrongOwnerType = errors.New("wrong owner type") + ErrMismatchedAmounts = errors.New("utxo amount and input amount are not equal") + ErrWrongNumberOfUTXOs = errors.New("wrong number of utxos for the operation") + ErrWrongMintCreated = errors.New("wrong mint output created from the operation") + 
ErrTimelocked = errors.New("output is time locked") + ErrTooManySigners = errors.New("input has more signers than expected") + ErrTooFewSigners = errors.New("input has less signers than expected") + ErrInputOutputIndexOutOfBounds = errors.New("input referenced a nonexistent address in the output") + ErrInputCredentialSignersMismatch = errors.New("input expected a different number of signers than provided in the credential") + ErrWrongSig = errors.New("wrong signature") ) // Fx describes the secp256k1 feature extension type Fx struct { VM VM - SECPFactory crypto.FactorySECP256K1R + SECPFactory secp256k1.Factory bootstrapped bool } @@ -53,8 +55,10 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { log := fx.VM.Logger() log.Debug("initializing secp256k1 fx") - fx.SECPFactory = crypto.FactorySECP256K1R{ - Cache: cache.LRU{Size: defaultCacheSize}, + fx.SECPFactory = secp256k1.Factory{ + Cache: cache.LRU[ids.ID, *secp256k1.PublicKey]{ + Size: defaultCacheSize, + }, } c := fx.VM.CodecRegistry() errs := wrappers.Errs{} @@ -71,33 +75,38 @@ func (fx *Fx) Initialize(vmIntf interface{}) error { func (fx *Fx) InitializeVM(vmIntf interface{}) error { vm, ok := vmIntf.(VM) if !ok { - return errWrongVMType + return ErrWrongVMType } fx.VM = vm return nil } -func (fx *Fx) Bootstrapping() error { return nil } +func (*Fx) Bootstrapping() error { + return nil +} -func (fx *Fx) Bootstrapped() error { fx.bootstrapped = true; return nil } +func (fx *Fx) Bootstrapped() error { + fx.bootstrapped = true + return nil +} // VerifyPermission returns nil iff [credIntf] proves that [controlGroup] assents to [txIntf] func (fx *Fx) VerifyPermission(txIntf, inIntf, credIntf, ownerIntf interface{}) error { tx, ok := txIntf.(UnsignedTx) if !ok { - return errWrongTxType + return ErrWrongTxType } in, ok := inIntf.(*Input) if !ok { - return errWrongInputType + return ErrWrongInputType } cred, ok := credIntf.(*Credential) if !ok { - return errWrongCredentialType + return ErrWrongCredentialType } 
owner, ok := ownerIntf.(*OutputOwners) if !ok { - return errWrongOwnerType + return ErrWrongOwnerType } if err := verify.All(in, cred, owner); err != nil { return err @@ -108,22 +117,22 @@ func (fx *Fx) VerifyPermission(txIntf, inIntf, credIntf, ownerIntf interface{}) func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { tx, ok := txIntf.(UnsignedTx) if !ok { - return errWrongTxType + return ErrWrongTxType } op, ok := opIntf.(*MintOperation) if !ok { - return errWrongOpType + return ErrWrongOpType } cred, ok := credIntf.(*Credential) if !ok { - return errWrongCredentialType + return ErrWrongCredentialType } if len(utxosIntf) != 1 { - return errWrongNumberOfUTXOs + return ErrWrongNumberOfUTXOs } out, ok := utxosIntf[0].(*MintOutput) if !ok { - return errWrongUTXOType + return ErrWrongUTXOType } return fx.verifyOperation(tx, op, cred, out) } @@ -133,7 +142,7 @@ func (fx *Fx) verifyOperation(tx UnsignedTx, op *MintOperation, cred *Credential return err } if !utxo.Equals(&op.MintOutput.OutputOwners) { - return errWrongMintCreated + return ErrWrongMintCreated } return fx.VerifyCredentials(tx, &op.MintInput, cred, &utxo.OutputOwners) } @@ -141,19 +150,19 @@ func (fx *Fx) verifyOperation(tx UnsignedTx, op *MintOperation, cred *Credential func (fx *Fx) VerifyTransfer(txIntf, inIntf, credIntf, utxoIntf interface{}) error { tx, ok := txIntf.(UnsignedTx) if !ok { - return errWrongTxType + return ErrWrongTxType } in, ok := inIntf.(*TransferInput) if !ok { - return errWrongInputType + return ErrWrongInputType } cred, ok := credIntf.(*Credential) if !ok { - return errWrongCredentialType + return ErrWrongCredentialType } out, ok := utxoIntf.(*TransferOutput) if !ok { - return errWrongUTXOType + return ErrWrongUTXOType } return fx.VerifySpend(tx, in, cred, out) } @@ -163,7 +172,7 @@ func (fx *Fx) VerifySpend(utx UnsignedTx, in *TransferInput, cred *Credential, u if err := verify.All(utxo, in, cred); err != nil { return err } else if 
utxo.Amt != in.Amt { - return fmt.Errorf("utxo amount and input amount should be same but are %d and %d", utxo.Amt, in.Amt) + return fmt.Errorf("%w: %d != %d", ErrMismatchedAmounts, utxo.Amt, in.Amt) } return fx.VerifyCredentials(utx, &in.Input, cred, &utxo.OutputOwners) @@ -175,13 +184,13 @@ func (fx *Fx) VerifyCredentials(utx UnsignedTx, in *Input, cred *Credential, out numSigs := len(in.SigIndices) switch { case out.Locktime > fx.VM.Clock().Unix(): - return errTimelocked + return ErrTimelocked case out.Threshold < uint32(numSigs): - return errTooManySigners + return ErrTooManySigners case out.Threshold > uint32(numSigs): - return errTooFewSigners + return ErrTooFewSigners case numSigs != len(cred.Sigs): - return errInputCredentialSignersMismatch + return ErrInputCredentialSignersMismatch case !fx.bootstrapped: // disable signature verification during bootstrapping return nil } @@ -193,7 +202,7 @@ func (fx *Fx) VerifyCredentials(utx UnsignedTx, in *Input, cred *Credential, out for i, index := range in.SigIndices { // Make sure the input references an address that exists if index >= uint32(len(out.Addrs)) { - return errInputOutputIndexOutOfBounds + return ErrInputOutputIndexOutOfBounds } // Make sure each signature in the signature list is from an owner of // the output being consumed @@ -222,7 +231,7 @@ func (fx *Fx) VerifyCredentials(utx UnsignedTx, in *Input, cred *Credential, out } } - return fmt.Errorf("expected signature from %s", expectedAddress) + return ErrWrongSig } return nil @@ -230,10 +239,10 @@ func (fx *Fx) VerifyCredentials(utx UnsignedTx, in *Input, cred *Credential, out // CreateOutput creates a new output with the provided control group worth // the specified amount -func (fx *Fx) CreateOutput(amount uint64, ownerIntf interface{}) (interface{}, error) { +func (*Fx) CreateOutput(amount uint64, ownerIntf interface{}) (interface{}, error) { owner, ok := ownerIntf.(*OutputOwners) if !ok { - return nil, errWrongOwnerType + return nil, 
ErrWrongOwnerType } if err := owner.Verify(); err != nil { return nil, err diff --git a/avalanchego/vms/secp256k1fx/fx_test.go b/avalanchego/vms/secp256k1fx/fx_test.go index 930863a0..cc48fb64 100644 --- a/avalanchego/vms/secp256k1fx/fx_test.go +++ b/avalanchego/vms/secp256k1fx/fx_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -12,13 +12,13 @@ import ( "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/cb58" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" ) var ( txBytes = []byte{0, 1, 2, 3, 4, 5} - sigBytes = [crypto.SECP256K1RSigLen]byte{ // signature of addr on txBytes + sigBytes = [secp256k1.SignatureLen]byte{ // signature of addr on txBytes 0x0e, 0x33, 0x4e, 0xbc, 0x67, 0xa7, 0x3f, 0xe8, 0x24, 0x33, 0xac, 0xa3, 0x47, 0x88, 0xa6, 0x3d, 0x58, 0xe5, 0x8e, 0xf0, 0x3a, 0xd5, 0x84, 0xf1, @@ -35,7 +35,7 @@ var ( 0x39, 0x1a, 0xe7, 0xf0, } addr2 ids.ShortID - sig2Bytes [crypto.SECP256K1RSigLen]byte // signature of addr2 on txBytes + sig2Bytes [secp256k1.SignatureLen]byte // signature of addr2 on txBytes ) func init() { @@ -64,7 +64,7 @@ func TestFxInitialize(t *testing.T) { func TestFxInitializeInvalid(t *testing.T) { require := require.New(t) fx := Fx{} - require.ErrorIs(fx.Initialize(nil), errWrongVMType) + require.ErrorIs(fx.Initialize(nil), ErrWrongVMType) } func TestFxVerifyTransfer(t *testing.T) { @@ -74,7 +74,7 @@ func TestFxVerifyTransfer(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) require.NoError(fx.Bootstrapping()) @@ -97,7 +97,7 @@ func TestFxVerifyTransfer(t 
*testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } @@ -112,7 +112,7 @@ func TestFxVerifyTransferNilTx(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) out := &TransferOutput{ @@ -132,12 +132,12 @@ func TestFxVerifyTransferNilTx(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } - require.ErrorIs(fx.VerifyTransfer(nil, in, cred, out), errWrongTxType) + require.ErrorIs(fx.VerifyTransfer(nil, in, cred, out), ErrWrongTxType) } func TestFxVerifyTransferNilOutput(t *testing.T) { @@ -147,7 +147,7 @@ func TestFxVerifyTransferNilOutput(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -158,12 +158,12 @@ func TestFxVerifyTransferNilOutput(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, nil), errWrongUTXOType) + require.ErrorIs(fx.VerifyTransfer(tx, in, cred, nil), ErrWrongUTXOType) } func TestFxVerifyTransferNilInput(t *testing.T) { @@ -173,7 +173,7 @@ func TestFxVerifyTransferNilInput(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -188,12 +188,12 @@ func TestFxVerifyTransferNilInput(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } - require.ErrorIs(fx.VerifyTransfer(tx, nil, 
cred, out), errWrongInputType) + require.ErrorIs(fx.VerifyTransfer(tx, nil, cred, out), ErrWrongInputType) } func TestFxVerifyTransferNilCredential(t *testing.T) { @@ -203,7 +203,7 @@ func TestFxVerifyTransferNilCredential(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -224,7 +224,7 @@ func TestFxVerifyTransferNilCredential(t *testing.T) { }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, nil, out), errWrongCredentialType) + require.ErrorIs(fx.VerifyTransfer(tx, in, nil, out), ErrWrongCredentialType) } func TestFxVerifyTransferInvalidOutput(t *testing.T) { @@ -234,7 +234,7 @@ func TestFxVerifyTransferInvalidOutput(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -255,7 +255,7 @@ func TestFxVerifyTransferInvalidOutput(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } @@ -270,7 +270,7 @@ func TestFxVerifyTransferWrongAmounts(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -291,7 +291,7 @@ func TestFxVerifyTransferWrongAmounts(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } @@ -306,7 +306,7 @@ func TestFxVerifyTransferTimelocked(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: 
txBytes} @@ -327,12 +327,12 @@ func TestFxVerifyTransferTimelocked(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), errTimelocked) + require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrTimelocked) } func TestFxVerifyTransferTooManySigners(t *testing.T) { @@ -342,7 +342,7 @@ func TestFxVerifyTransferTooManySigners(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -363,13 +363,13 @@ func TestFxVerifyTransferTooManySigners(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, {}, }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), errTooManySigners) + require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrTooManySigners) } func TestFxVerifyTransferTooFewSigners(t *testing.T) { @@ -379,7 +379,7 @@ func TestFxVerifyTransferTooFewSigners(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -400,10 +400,10 @@ func TestFxVerifyTransferTooFewSigners(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{}, + Sigs: [][secp256k1.SignatureLen]byte{}, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), errTooFewSigners) + require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrTooFewSigners) } func TestFxVerifyTransferMismatchedSigners(t *testing.T) { @@ -413,7 +413,7 @@ func TestFxVerifyTransferMismatchedSigners(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + 
vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -434,13 +434,13 @@ func TestFxVerifyTransferMismatchedSigners(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, {}, }, } - require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), errInputCredentialSignersMismatch) + require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrInputCredentialSignersMismatch) } func TestFxVerifyTransferInvalidSignature(t *testing.T) { @@ -450,7 +450,7 @@ func TestFxVerifyTransferInvalidSignature(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) require.NoError(fx.Bootstrapping()) @@ -472,7 +472,7 @@ func TestFxVerifyTransferInvalidSignature(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ {}, }, } @@ -489,7 +489,7 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) require.NoError(fx.Bootstrapping()) @@ -511,7 +511,7 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } @@ -521,6 +521,45 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) { require.Error(fx.VerifyTransfer(tx, in, cred, out)) } +func TestFxVerifyTransferSigIndexOOB(t *testing.T) { + require := require.New(t) + vm := TestVM{ + Codec: linearcodec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.Clk.Set(date) + fx := Fx{} + require.NoError(fx.Initialize(&vm)) + require.NoError(fx.Bootstrapping()) + tx := 
&TestTx{UnsignedBytes: txBytes} + out := &TransferOutput{ + Amt: 1, + OutputOwners: OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + addr, + }, + }, + } + in := &TransferInput{ + Amt: 1, + Input: Input{ + SigIndices: []uint32{1}, // There is no address at index 1 + }, + } + cred := &Credential{ + Sigs: [][secp256k1.SignatureLen]byte{ + sigBytes, + }, + } + + require.NoError(fx.VerifyTransfer(tx, in, cred, out)) + require.NoError(fx.Bootstrapped()) + require.ErrorIs(fx.VerifyTransfer(tx, in, cred, out), ErrInputOutputIndexOutOfBounds) +} + func TestFxVerifyOperation(t *testing.T) { require := require.New(t) vm := TestVM{ @@ -528,7 +567,7 @@ func TestFxVerifyOperation(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -564,7 +603,7 @@ func TestFxVerifyOperation(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } @@ -580,7 +619,7 @@ func TestFxVerifyOperationUnknownTx(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) utxo := &MintOutput{ @@ -615,13 +654,13 @@ func TestFxVerifyOperationUnknownTx(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } utxos := []interface{}{utxo} - require.ErrorIs(fx.VerifyOperation(nil, op, cred, utxos), errWrongTxType) + require.ErrorIs(fx.VerifyOperation(nil, op, cred, utxos), ErrWrongTxType) } func TestFxVerifyOperationUnknownOperation(t *testing.T) { @@ -631,7 +670,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - 
vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -644,13 +683,13 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } utxos := []interface{}{utxo} - require.ErrorIs(fx.VerifyOperation(tx, nil, cred, utxos), errWrongOpType) + require.ErrorIs(fx.VerifyOperation(tx, nil, cred, utxos), ErrWrongOpType) } func TestFxVerifyOperationUnknownCredential(t *testing.T) { @@ -660,7 +699,7 @@ func TestFxVerifyOperationUnknownCredential(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -697,7 +736,7 @@ func TestFxVerifyOperationUnknownCredential(t *testing.T) { } utxos := []interface{}{utxo} - require.ErrorIs(fx.VerifyOperation(tx, op, nil, utxos), errWrongCredentialType) + require.ErrorIs(fx.VerifyOperation(tx, op, nil, utxos), ErrWrongCredentialType) } func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { @@ -707,7 +746,7 @@ func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -743,13 +782,13 @@ func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } utxos := []interface{}{utxo, utxo} - require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), errWrongNumberOfUTXOs) + require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), ErrWrongNumberOfUTXOs) } func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { @@ -759,7 +798,7 @@ func 
TestFxVerifyOperationUnknownUTXOType(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -787,13 +826,13 @@ func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } utxos := []interface{}{nil} - require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), errWrongUTXOType) + require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), ErrWrongUTXOType) } func TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { @@ -803,7 +842,7 @@ func TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -836,7 +875,7 @@ func TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } @@ -852,7 +891,7 @@ func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { Log: logging.NoLog{}, } date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.CLK.Set(date) + vm.Clk.Set(date) fx := Fx{} require.NoError(fx.Initialize(&vm)) tx := &TestTx{UnsignedBytes: txBytes} @@ -883,25 +922,27 @@ func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { }, } cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ + Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, }, } utxos := []interface{}{utxo} - require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), errWrongMintCreated) + require.ErrorIs(fx.VerifyOperation(tx, op, cred, utxos), ErrWrongMintCreated) } func TestVerifyPermission(t *testing.T) { - require := require.New(t) vm := 
TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } fx := Fx{} - require.NoError(fx.Initialize(&vm)) - require.NoError(fx.Bootstrapping()) - require.NoError(fx.Bootstrapped()) + require.NoError(t, fx.Initialize(&vm)) + require.NoError(t, fx.Bootstrapping()) + require.NoError(t, fx.Bootstrapped()) + + now := time.Now() + fx.VM.Clock().Set(now) type test struct { description string @@ -909,138 +950,170 @@ func TestVerifyPermission(t *testing.T) { in *Input cred *Credential cg *OutputOwners - shouldErr bool + expectedErr error } tests := []test{ { "threshold 0, no sigs, has addrs", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{}}, &OutputOwners{ Threshold: 0, Addrs: []ids.ShortID{addr}, }, - true, + errOutputUnoptimized, }, { "threshold 0, no sigs, no addrs", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{}}, &OutputOwners{ Threshold: 0, Addrs: []ids.ShortID{}, }, - false, + nil, }, { "threshold 1, 1 sig", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{0}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{sigBytes}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes}}, &OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }, - false, + nil, }, { "threshold 0, 1 sig (too many sigs)", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{0}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{sigBytes}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes}}, &OutputOwners{ Threshold: 0, Addrs: []ids.ShortID{addr}, }, - true, + errOutputUnoptimized, }, { "threshold 1, 0 sigs (too few sigs)", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{}}, 
&OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }, - true, + ErrTooFewSigners, }, { "threshold 1, 1 incorrect sig", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{0}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{sigBytes}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes}}, &OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ids.GenerateTestShortID()}, }, - true, + ErrWrongSig, }, { "repeated sig", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{0, 0}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{sigBytes, sigBytes}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes, sigBytes}}, &OutputOwners{ Threshold: 2, Addrs: []ids.ShortID{addr, addr2}, }, - true, + errNotSortedUnique, }, { "threshold 2, repeated address and repeated sig", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{0, 1}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{sigBytes, sigBytes}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes, sigBytes}}, &OutputOwners{ Threshold: 2, Addrs: []ids.ShortID{addr, addr}, }, - true, + errAddrsNotSortedUnique, }, { "threshold 2, 2 sigs", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{0, 1}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{sigBytes, sig2Bytes}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes, sig2Bytes}}, &OutputOwners{ Threshold: 2, Addrs: []ids.ShortID{addr, addr2}, }, - false, + nil, }, { "threshold 2, 2 sigs reversed (should be sorted)", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{1, 0}}, - &Credential{Sigs: [][crypto.SECP256K1RSigLen]byte{sig2Bytes, sigBytes}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sig2Bytes, sigBytes}}, &OutputOwners{ Threshold: 2, Addrs: []ids.ShortID{addr, addr2}, }, - true, + errNotSortedUnique, }, { "threshold 1, 1 sig, index out of bounds", &TestTx{UnsignedBytes: txBytes}, &Input{SigIndices: []uint32{1}}, - &Credential{Sigs: 
[][crypto.SECP256K1RSigLen]byte{sigBytes}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes}}, &OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{addr}, }, - true, + ErrInputOutputIndexOutOfBounds, + }, + { + "too many signers", + &TestTx{UnsignedBytes: txBytes}, + &Input{SigIndices: []uint32{0, 1}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes, sig2Bytes}}, + &OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr, addr2}, + }, + ErrTooManySigners, + }, + { + "number of signatures doesn't match", + &TestTx{UnsignedBytes: txBytes}, + &Input{SigIndices: []uint32{0}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes, sig2Bytes}}, + &OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr, addr2}, + }, + ErrInputCredentialSignersMismatch, + }, + { + "output is locked", + &TestTx{UnsignedBytes: txBytes}, + &Input{SigIndices: []uint32{0}}, + &Credential{Sigs: [][secp256k1.SignatureLen]byte{sigBytes, sig2Bytes}}, + &OutputOwners{ + Threshold: 1, + Locktime: uint64(now.Add(time.Second).Unix()), + Addrs: []ids.ShortID{addr, addr2}, + }, + ErrTimelocked, }, } for _, test := range tests { - err := fx.VerifyPermission(test.tx, test.in, test.cred, test.cg) - if test.shouldErr { - require.Errorf(err, "test '%s' should have errored but didn't", test.description) - } else { - require.NoErrorf(err, "test '%s' errored but it shouldn't have", test.description) - } + t.Run(test.description, func(t *testing.T) { + err := fx.VerifyPermission(test.tx, test.in, test.cred, test.cg) + require.ErrorIs(t, err, test.expectedErr) + }) } } diff --git a/avalanchego/vms/secp256k1fx/input.go b/avalanchego/vms/secp256k1fx/input.go index 4c1fda2c..5659727c 100644 --- a/avalanchego/vms/secp256k1fx/input.go +++ b/avalanchego/vms/secp256k1fx/input.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -36,7 +36,7 @@ func (in *Input) Verify() error { switch { case in == nil: return errNilInput - case !utils.IsSortedAndUniqueUint32(in.SigIndices): + case !utils.IsSortedAndUniqueOrdered(in.SigIndices): return errNotSortedUnique default: return nil diff --git a/avalanchego/vms/secp256k1fx/input_test.go b/avalanchego/vms/secp256k1fx/input_test.go index d9f44ee2..72088530 100644 --- a/avalanchego/vms/secp256k1fx/input_test.go +++ b/avalanchego/vms/secp256k1fx/input_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -10,7 +10,65 @@ import ( ) func TestInputVerifyNil(t *testing.T) { - require := require.New(t) - in := (*Input)(nil) - require.ErrorIs(in.Verify(), errNilInput) + tests := []struct { + name string + in *Input + expectedErr error + }{ + { + name: "nil input", + in: nil, + expectedErr: errNilInput, + }, + { + name: "not sorted", + in: &Input{SigIndices: []uint32{2, 1}}, + expectedErr: errNotSortedUnique, + }, + { + name: "not unique", + in: &Input{SigIndices: []uint32{2, 2}}, + expectedErr: errNotSortedUnique, + }, + { + name: "passes verification", + in: &Input{SigIndices: []uint32{1, 2}}, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + require.ErrorIs(tt.in.Verify(), tt.expectedErr) + }) + } +} + +func TestInputCost(t *testing.T) { + tests := []struct { + name string + in *Input + expectedCost uint64 + }{ + { + name: "2 sigs", + in: &Input{SigIndices: []uint32{1, 2}}, + expectedCost: 2 * CostPerSignature, + }, + { + name: "3 sigs", + in: &Input{SigIndices: []uint32{1, 2, 3}}, + expectedCost: 3 * CostPerSignature, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + cost, err := tt.in.Cost() + require.NoError(err) + 
require.Equal(tt.expectedCost, cost) + }) + } } diff --git a/avalanchego/vms/secp256k1fx/keychain.go b/avalanchego/vms/secp256k1fx/keychain.go index 92496cf4..b09c29eb 100644 --- a/avalanchego/vms/secp256k1fx/keychain.go +++ b/avalanchego/vms/secp256k1fx/keychain.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -9,27 +9,33 @@ import ( "strings" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/keychain" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/verify" ) -var errCantSpend = errors.New("unable to spend this UTXO") +var ( + errCantSpend = errors.New("unable to spend this UTXO") + + _ keychain.Keychain = (*Keychain)(nil) +) // Keychain is a collection of keys that can be used to spend outputs type Keychain struct { - factory *crypto.FactorySECP256K1R + factory *secp256k1.Factory addrToKeyIndex map[ids.ShortID]int // These can be used to iterate over. However, they should not be modified externally. 
- Addrs ids.ShortSet - Keys []*crypto.PrivateKeySECP256K1R + Addrs set.Set[ids.ShortID] + Keys []*secp256k1.PrivateKey } // NewKeychain returns a new keychain containing [keys] -func NewKeychain(keys ...*crypto.PrivateKeySECP256K1R) *Keychain { +func NewKeychain(keys ...*secp256k1.PrivateKey) *Keychain { kc := &Keychain{ - factory: &crypto.FactorySECP256K1R{}, + factory: &secp256k1.Factory{}, addrToKeyIndex: make(map[ids.ShortID]int), } for _, key := range keys { @@ -39,7 +45,7 @@ func NewKeychain(keys ...*crypto.PrivateKeySECP256K1R) *Keychain { } // Add a new key to the key chain -func (kc *Keychain) Add(key *crypto.PrivateKeySECP256K1R) { +func (kc *Keychain) Add(key *secp256k1.PrivateKey) { addr := key.PublicKey().Address() if _, ok := kc.addrToKeyIndex[addr]; !ok { kc.addrToKeyIndex[addr] = len(kc.Keys) @@ -48,31 +54,30 @@ func (kc *Keychain) Add(key *crypto.PrivateKeySECP256K1R) { } } -// Get a key from the keychain. If the key is unknown, the -func (kc Keychain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { - if i, ok := kc.addrToKeyIndex[id]; ok { - return kc.Keys[i], true - } - return &crypto.PrivateKeySECP256K1R{}, false +// Get a key from the keychain. If the key is unknown, return a pointer to an empty key. +// In both cases also return a boolean telling whether the key is known. 
+func (kc Keychain) Get(id ids.ShortID) (keychain.Signer, bool) { + return kc.get(id) } // Addresses returns a list of addresses this keychain manages -func (kc Keychain) Addresses() ids.ShortSet { return kc.Addrs } +func (kc Keychain) Addresses() set.Set[ids.ShortID] { + return kc.Addrs +} // New returns a newly generated private key -func (kc *Keychain) New() (*crypto.PrivateKeySECP256K1R, error) { - skGen, err := kc.factory.NewPrivateKey() +func (kc *Keychain) New() (*secp256k1.PrivateKey, error) { + sk, err := kc.factory.NewPrivateKey() if err != nil { return nil, err } - sk := skGen.(*crypto.PrivateKeySECP256K1R) kc.Add(sk) return sk, nil } // Spend attempts to create an input -func (kc *Keychain) Spend(out verify.Verifiable, time uint64) (verify.Verifiable, []*crypto.PrivateKeySECP256K1R, error) { +func (kc *Keychain) Spend(out verify.Verifiable, time uint64) (verify.Verifiable, []*secp256k1.PrivateKey, error) { switch out := out.(type) { case *MintOutput: if sigIndices, keys, able := kc.Match(&out.OutputOwners, time); able { @@ -96,14 +101,14 @@ func (kc *Keychain) Spend(out verify.Verifiable, time uint64) (verify.Verifiable } // Match attempts to match a list of addresses up to the provided threshold -func (kc *Keychain) Match(owners *OutputOwners, time uint64) ([]uint32, []*crypto.PrivateKeySECP256K1R, bool) { +func (kc *Keychain) Match(owners *OutputOwners, time uint64) ([]uint32, []*secp256k1.PrivateKey, bool) { if time < owners.Locktime { return nil, nil, false } sigs := make([]uint32, 0, owners.Threshold) - keys := make([]*crypto.PrivateKeySECP256K1R, 0, owners.Threshold) + keys := make([]*secp256k1.PrivateKey, 0, owners.Threshold) for i := uint32(0); i < uint32(len(owners.Addrs)) && uint32(len(keys)) < owners.Threshold; i++ { - if key, exists := kc.Get(owners.Addrs[i]); exists { + if key, exists := kc.get(owners.Addrs[i]); exists { sigs = append(sigs, i) keys = append(keys, key) } @@ -114,21 +119,32 @@ func (kc *Keychain) Match(owners *OutputOwners, 
time uint64) ([]uint32, []*crypt // PrefixedString returns the key chain as a string representation with [prefix] // added before every line. func (kc *Keychain) PrefixedString(prefix string) string { - s := strings.Builder{} + sb := strings.Builder{} format := fmt.Sprintf("%%sKey[%s]: Key: %%s Address: %%s\n", formatting.IntFormat(len(kc.Keys)-1)) for i, key := range kc.Keys { // We assume that the maximum size of a byte slice that // can be stringified is at least the length of a SECP256K1 private key keyStr, _ := formatting.Encode(formatting.HexNC, key.Bytes()) - s.WriteString(fmt.Sprintf(format, + sb.WriteString(fmt.Sprintf(format, prefix, i, keyStr, - key.PublicKey().Address())) + key.PublicKey().Address(), + )) } - return strings.TrimSuffix(s.String(), "\n") + return strings.TrimSuffix(sb.String(), "\n") } -func (kc *Keychain) String() string { return kc.PrefixedString("") } +func (kc *Keychain) String() string { + return kc.PrefixedString("") +} + +// to avoid internals type assertions +func (kc Keychain) get(id ids.ShortID) (*secp256k1.PrivateKey, bool) { + if i, ok := kc.addrToKeyIndex[id]; ok { + return kc.Keys[i], true + } + return nil, false +} diff --git a/avalanchego/vms/secp256k1fx/keychain_test.go b/avalanchego/vms/secp256k1fx/keychain_test.go index 604d4bfd..56a3a592 100644 --- a/avalanchego/vms/secp256k1fx/keychain_test.go +++ b/avalanchego/vms/secp256k1fx/keychain_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" ) @@ -47,16 +47,16 @@ func TestKeychainAdd(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keys[0]) require.NoError(err) - skIntff, err := kc.factory.ToPrivateKey(skBytes) + sk, err := kc.factory.ToPrivateKey(skBytes) require.NoError(err) - sk, ok := skIntff.(*crypto.PrivateKeySECP256K1R) - require.True(ok, "Factory should have returned secp256k1r private key") kc.Add(sk) addr, _ := ids.ShortFromString(addrs[0]) rsk, exists := kc.Get(addr) require.True(exists) - require.Equal(sk.Bytes(), rsk.Bytes()) + rsksecp, ok := rsk.(*secp256k1.PrivateKey) + require.True(ok, "Factory should have returned secp256k1r private key") + require.Equal(sk.Bytes(), rsksecp.Bytes()) addrs := kc.Addresses() require.Equal(1, addrs.Len()) @@ -83,15 +83,13 @@ func TestKeychainMatch(t *testing.T) { require := require.New(t) kc := NewKeychain() - sks := []*crypto.PrivateKeySECP256K1R{} + sks := []*secp256k1.PrivateKey{} for _, keyStr := range keys { skBytes, err := formatting.Decode(formatting.HexNC, keyStr) require.NoError(err) - skIntf, err := kc.factory.ToPrivateKey(skBytes) + sk, err := kc.factory.ToPrivateKey(skBytes) require.NoError(err) - sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) - require.True(ok, "Factory should have returned secp256k1r private key") sks = append(sks, sk) } @@ -130,15 +128,13 @@ func TestKeychainSpendMint(t *testing.T) { require := require.New(t) kc := NewKeychain() - sks := []*crypto.PrivateKeySECP256K1R{} + sks := []*secp256k1.PrivateKey{} for _, keyStr := range keys { skBytes, err := formatting.Decode(formatting.HexNC, keyStr) require.NoError(err) - skIntf, err := kc.factory.ToPrivateKey(skBytes) + sk, err := kc.factory.ToPrivateKey(skBytes) require.NoError(err) - 
sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) - require.True(ok, "Factory should have returned secp256k1r private key") sks = append(sks, sk) } @@ -174,15 +170,13 @@ func TestKeychainSpendTransfer(t *testing.T) { require := require.New(t) kc := NewKeychain() - sks := []*crypto.PrivateKeySECP256K1R{} + sks := []*secp256k1.PrivateKey{} for _, keyStr := range keys { skBytes, err := formatting.Decode(formatting.HexNC, keyStr) require.NoError(err) - skIntf, err := kc.factory.ToPrivateKey(skBytes) + sk, err := kc.factory.ToPrivateKey(skBytes) require.NoError(err) - sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) - require.True(ok, "Factory should have returned secp256k1r private key") sks = append(sks, sk) } @@ -229,10 +223,8 @@ func TestKeychainString(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keys[0]) require.NoError(err) - skIntf, err := kc.factory.ToPrivateKey(skBytes) + sk, err := kc.factory.ToPrivateKey(skBytes) require.NoError(err) - sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) - require.True(ok, "Factory should have returned secp256k1r private key") kc.Add(sk) expected := "Key[0]: Key: 0xb1ed77ad48555d49f03a7465f0685a7d86bfd5f3a3ccf1be01971ea8dec5471c Address: B6D4v1VtPYLbiUvYXtW4Px8oE9imC2vGW" @@ -246,10 +238,8 @@ func TestKeychainPrefixedString(t *testing.T) { skBytes, err := formatting.Decode(formatting.HexNC, keys[0]) require.NoError(err) - skIntf, err := kc.factory.ToPrivateKey(skBytes) + sk, err := kc.factory.ToPrivateKey(skBytes) require.NoError(err) - sk, ok := skIntf.(*crypto.PrivateKeySECP256K1R) - require.True(ok, "Factory should have returned secp256k1r private key") kc.Add(sk) expected := "xDKey[0]: Key: 0xb1ed77ad48555d49f03a7465f0685a7d86bfd5f3a3ccf1be01971ea8dec5471c Address: B6D4v1VtPYLbiUvYXtW4Px8oE9imC2vGW" diff --git a/avalanchego/vms/secp256k1fx/mint_operation.go b/avalanchego/vms/secp256k1fx/mint_operation.go index 7b04380d..a21f3061 100644 --- a/avalanchego/vms/secp256k1fx/mint_operation.go +++ 
b/avalanchego/vms/secp256k1fx/mint_operation.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx diff --git a/avalanchego/vms/secp256k1fx/mint_operation_test.go b/avalanchego/vms/secp256k1fx/mint_operation_test.go index 7793a480..9b68b1c8 100644 --- a/avalanchego/vms/secp256k1fx/mint_operation_test.go +++ b/avalanchego/vms/secp256k1fx/mint_operation_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -12,10 +12,121 @@ import ( "github.com/ava-labs/avalanchego/vms/components/verify" ) -func TestMintOperationVerifyNil(t *testing.T) { - require := require.New(t) - op := (*MintOperation)(nil) - require.ErrorIs(op.Verify(), errNilMintOperation) +func TestMintOperationVerify(t *testing.T) { + var ( + validOutputOwners = OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + } + validMintInput = Input{ + SigIndices: []uint32{0}, + } + validMintOutput = MintOutput{ + OutputOwners: validOutputOwners, + } + validTransferOutput = TransferOutput{ + Amt: 1, + OutputOwners: validOutputOwners, + } + ) + + tests := []struct { + name string + op *MintOperation + expectedErr error + }{ + { + name: "nil", + op: nil, + expectedErr: errNilMintOperation, + }, + { + name: "invalid mint input", + op: &MintOperation{ + MintInput: Input{ + SigIndices: []uint32{0, 0}, + }, + MintOutput: validMintOutput, + TransferOutput: validTransferOutput, + }, + expectedErr: errNotSortedUnique, + }, + { + name: "invalid mint output", + op: &MintOperation{ + MintInput: validMintInput, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 2, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + TransferOutput: validTransferOutput, 
+ }, + expectedErr: errOutputUnspendable, + }, + { + name: "invalid transfer output", + op: &MintOperation{ + MintInput: validMintInput, + MintOutput: validMintOutput, + TransferOutput: TransferOutput{ + Amt: 1, + OutputOwners: OutputOwners{ + Threshold: 0, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + }, + expectedErr: errOutputUnoptimized, + }, + { + name: "addresses not unique", + op: &MintOperation{ + MintInput: validMintInput, + MintOutput: validMintOutput, + TransferOutput: TransferOutput{ + Amt: 1, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty, ids.ShortEmpty}, + }, + }, + }, + expectedErr: errAddrsNotSortedUnique, + }, + { + name: "addresses not sorted", + op: &MintOperation{ + MintInput: validMintInput, + MintOutput: validMintOutput, + TransferOutput: TransferOutput{ + Amt: 1, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{{2}, {1}}, + }, + }, + }, + expectedErr: errAddrsNotSortedUnique, + }, + { + name: "passes verification", + op: &MintOperation{ + MintInput: validMintInput, + MintOutput: validMintOutput, + TransferOutput: validTransferOutput, + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + require.ErrorIs(tt.op.Verify(), tt.expectedErr) + }) + } } func TestMintOperationOuts(t *testing.T) { diff --git a/avalanchego/vms/secp256k1fx/mint_output.go b/avalanchego/vms/secp256k1fx/mint_output.go index 7f293f21..7655fc5c 100644 --- a/avalanchego/vms/secp256k1fx/mint_output.go +++ b/avalanchego/vms/secp256k1fx/mint_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -7,7 +7,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/verify" ) -var _ verify.State = &MintOutput{} +var _ verify.State = (*MintOutput)(nil) type MintOutput struct { OutputOwners `serialize:"true"` @@ -22,4 +22,6 @@ func (out *MintOutput) Verify() error { } } -func (out *MintOutput) VerifyState() error { return out.Verify() } +func (out *MintOutput) VerifyState() error { + return out.Verify() +} diff --git a/avalanchego/vms/secp256k1fx/mint_output_test.go b/avalanchego/vms/secp256k1fx/mint_output_test.go index 98f917e7..7bd55cc0 100644 --- a/avalanchego/vms/secp256k1fx/mint_output_test.go +++ b/avalanchego/vms/secp256k1fx/mint_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -8,18 +8,46 @@ import ( "github.com/stretchr/testify/require" - "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/ids" ) -func TestMintOutputVerifyNil(t *testing.T) { - require := require.New(t) - out := (*MintOutput)(nil) - require.ErrorIs(out.Verify(), errNilOutput) -} +func TestMintOutputVerify(t *testing.T) { + tests := []struct { + name string + out *MintOutput + expectedErr error + }{ + { + name: "nil", + out: nil, + expectedErr: errNilOutput, + }, + { + name: "invalid output owners", + out: &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 2, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + expectedErr: errOutputUnspendable, + }, + { + name: "passes verification", + out: &MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + expectedErr: nil, + }, + } -func TestMintOutputState(t *testing.T) { - require := require.New(t) - intf := interface{}(&MintOutput{}) - _, ok := intf.(verify.State) - require.True(ok) + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + require.ErrorIs(t, tt.out.Verify(), tt.expectedErr) + require.ErrorIs(t, tt.out.VerifyState(), tt.expectedErr) + }) + } } diff --git a/avalanchego/vms/secp256k1fx/output_owners.go b/avalanchego/vms/secp256k1fx/output_owners.go index a2962135..eb601eb3 100644 --- a/avalanchego/vms/secp256k1fx/output_owners.go +++ b/avalanchego/vms/secp256k1fx/output_owners.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -9,8 +9,10 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/verify" ) @@ -21,17 +23,18 @@ var ( errAddrsNotSortedUnique = errors.New("addresses not sorted and unique") errMarshal = errors.New("cannot marshal without ctx") - _ verify.State = &OutputOwners{} + _ verify.State = (*OutputOwners)(nil) ) type OutputOwners struct { Locktime uint64 `serialize:"true" json:"locktime"` Threshold uint32 `serialize:"true" json:"threshold"` Addrs []ids.ShortID `serialize:"true" json:"addresses"` + // ctx is used in MarshalJSON to convert Addrs into human readable // format with ChainID and NetworkID. Unexported because we don't use // it outside this object. 
- ctx *snow.Context `serialize:"false"` + ctx *snow.Context } // InitCtx assigns the OutputOwners.ctx object to given [ctx] object @@ -94,8 +97,8 @@ func (out *OutputOwners) Addresses() [][]byte { } // AddressesSet returns addresses as a set -func (out *OutputOwners) AddressesSet() ids.ShortSet { - set := ids.NewShortSet(len(out.Addrs)) +func (out *OutputOwners) AddressesSet() set.Set[ids.ShortID] { + set := set.NewSet[ids.ShortID](len(out.Addrs)) set.Add(out.Addrs...) return set } @@ -125,16 +128,20 @@ func (out *OutputOwners) Verify() error { return errOutputUnspendable case out.Threshold == 0 && len(out.Addrs) > 0: return errOutputUnoptimized - case !ids.IsSortedAndUniqueShortIDs(out.Addrs): + case !utils.IsSortedAndUniqueSortable(out.Addrs): return errAddrsNotSortedUnique default: return nil } } -func (out *OutputOwners) VerifyState() error { return out.Verify() } +func (out *OutputOwners) VerifyState() error { + return out.Verify() +} -func (out *OutputOwners) Sort() { ids.SortShortIDs(out.Addrs) } +func (out *OutputOwners) Sort() { + utils.Sort(out.Addrs) +} // formatAddress formats a given [addr] into human readable format using // [ChainID] and [NetworkID] from the provided [ctx]. diff --git a/avalanchego/vms/secp256k1fx/output_owners_test.go b/avalanchego/vms/secp256k1fx/output_owners_test.go index ac268d21..db8e8436 100644 --- a/avalanchego/vms/secp256k1fx/output_owners_test.go +++ b/avalanchego/vms/secp256k1fx/output_owners_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -11,48 +11,142 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -func TestMintOutputOwnersVerifyNil(t *testing.T) { - require := require.New(t) - out := (*OutputOwners)(nil) - require.ErrorIs(out.Verify(), errNilOutput) -} +func TestOutputOwnersVerify(t *testing.T) { + tests := []struct { + name string + out *OutputOwners + expectedErr error + }{ + { + name: "nil", + out: nil, + expectedErr: errNilOutput, + }, + { + name: "threshold > num addrs", + out: &OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{}, + }, + expectedErr: errOutputUnspendable, + }, + { + name: "unoptimized", + out: &OutputOwners{ + Threshold: 0, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + expectedErr: errOutputUnoptimized, + }, + { + name: "not sorted", + out: &OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{{2}, {1}}, + }, + expectedErr: errAddrsNotSortedUnique, + }, + { + name: "not unique", + out: &OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{{2}, {2}}, + }, + expectedErr: errAddrsNotSortedUnique, + }, + { + name: "passes verification", + out: &OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{{2}}, + }, + expectedErr: nil, + }, + } -func TestMintOutputOwnersExactEquals(t *testing.T) { - require := require.New(t) - out0 := (*OutputOwners)(nil) - out1 := (*OutputOwners)(nil) - require.True(out0.Equals(out1)) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + require.ErrorIs(tt.out.Verify(), tt.expectedErr) + require.ErrorIs(tt.out.VerifyState(), tt.expectedErr) + }) + } } -func TestMintOutputOwnersNotEqual(t *testing.T) { - require := require.New(t) - out0 := &OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.ShortEmpty, +func TestOutputOwnerEquals(t *testing.T) { + addr1, addr2 := ids.GenerateTestShortID(), ids.GenerateTestShortID() + tests := []struct { + name string + out1 *OutputOwners + out2 *OutputOwners + shouldEqual bool + }{ + { + name: "both nil", + out1: nil, + out2: 
nil, + shouldEqual: true, }, - } - out1 := &OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - {1}, + { + name: "different locktimes", + out1: &OutputOwners{ + Locktime: 1, + Addrs: []ids.ShortID{addr1, addr2}, + }, + out2: &OutputOwners{ + Locktime: 2, + Addrs: []ids.ShortID{addr1, addr2}, + }, + shouldEqual: false, + }, + { + name: "different thresholds", + out1: &OutputOwners{ + Threshold: 1, + Locktime: 1, + Addrs: []ids.ShortID{addr1, addr2}, + }, + out2: &OutputOwners{ + Locktime: 1, + Addrs: []ids.ShortID{addr1, addr2}, + }, + shouldEqual: false, + }, + { + name: "different addresses", + out1: &OutputOwners{ + Locktime: 1, + Addrs: []ids.ShortID{addr1, ids.GenerateTestShortID()}, + }, + out2: &OutputOwners{ + Locktime: 1, + Addrs: []ids.ShortID{addr1, addr2}, + }, + shouldEqual: false, + }, + { + name: "equal", + out1: &OutputOwners{ + Locktime: 1, + Addrs: []ids.ShortID{addr1, addr2}, + }, + out2: &OutputOwners{ + Locktime: 1, + Addrs: []ids.ShortID{addr1, addr2}, + }, + shouldEqual: true, }, } - require.False(out0.Equals(out1)) -} -func TestMintOutputOwnersNotSorted(t *testing.T) { - require := require.New(t) - out := &OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - {1}, - {0}, - }, + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + require.Equal(tt.shouldEqual, tt.out1.Equals(tt.out2)) + require.Equal(tt.shouldEqual, tt.out2.Equals(tt.out1)) + require.True(tt.out1.Equals(tt.out1)) //nolint:gocritic + require.True(tt.out2.Equals(tt.out2)) //nolint:gocritic + }) } - require.ErrorIs(out.Verify(), errAddrsNotSortedUnique) - out.Sort() - require.NoError(out.Verify()) } func TestMarshalJSONRequiresCtxWhenAddrsArePresent(t *testing.T) { diff --git a/avalanchego/vms/secp256k1fx/transfer_input.go b/avalanchego/vms/secp256k1fx/transfer_input.go index 58e33913..2fb69c64 100644 --- a/avalanchego/vms/secp256k1fx/transfer_input.go +++ b/avalanchego/vms/secp256k1fx/transfer_input.go @@ -1,4 +1,4 @@ -// Copyright 
(C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -9,17 +9,19 @@ import ( "github.com/ava-labs/avalanchego/snow" ) -var errNoValueInput = errors.New("input has no value") +var ErrNoValueInput = errors.New("input has no value") type TransferInput struct { Amt uint64 `serialize:"true" json:"amount"` Input `serialize:"true"` } -func (in *TransferInput) InitCtx(*snow.Context) {} +func (*TransferInput) InitCtx(*snow.Context) {} // Amount returns the quantity of the asset this input produces -func (in *TransferInput) Amount() uint64 { return in.Amt } +func (in *TransferInput) Amount() uint64 { + return in.Amt +} // Verify this input is syntactically valid func (in *TransferInput) Verify() error { @@ -27,7 +29,7 @@ func (in *TransferInput) Verify() error { case in == nil: return errNilInput case in.Amt == 0: - return errNoValueInput + return ErrNoValueInput default: return in.Input.Verify() } diff --git a/avalanchego/vms/secp256k1fx/transfer_input_test.go b/avalanchego/vms/secp256k1fx/transfer_input_test.go index 2c923ebd..96d2c40e 100644 --- a/avalanchego/vms/secp256k1fx/transfer_input_test.go +++ b/avalanchego/vms/secp256k1fx/transfer_input_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package secp256k1fx @@ -49,7 +49,7 @@ func TestTransferInputVerifyNoValue(t *testing.T) { SigIndices: []uint32{0, 1}, }, } - require.ErrorIs(in.Verify(), errNoValueInput) + require.ErrorIs(in.Verify(), ErrNoValueInput) } func TestTransferInputVerifyDuplicated(t *testing.T) { diff --git a/avalanchego/vms/secp256k1fx/transfer_output.go b/avalanchego/vms/secp256k1fx/transfer_output.go index 6327d774..285ffef5 100644 --- a/avalanchego/vms/secp256k1fx/transfer_output.go +++ b/avalanchego/vms/secp256k1fx/transfer_output.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -11,9 +11,9 @@ import ( ) var ( - _ verify.State = &OutputOwners{} + _ verify.State = (*OutputOwners)(nil) - errNoValueOutput = errors.New("output has no value") + ErrNoValueOutput = errors.New("output has no value") ) type TransferOutput struct { @@ -24,7 +24,7 @@ type TransferOutput struct { // MarshalJSON marshals Amt and the embedded OutputOwners struct // into a JSON readable format -// If OutputOwners cannot be serialised then this will return error +// If OutputOwners cannot be serialized then this will return error func (out *TransferOutput) MarshalJSON() ([]byte, error) { result, err := out.OutputOwners.Fields() if err != nil { @@ -36,19 +36,25 @@ func (out *TransferOutput) MarshalJSON() ([]byte, error) { } // Amount returns the quantity of the asset this output consumes -func (out *TransferOutput) Amount() uint64 { return out.Amt } +func (out *TransferOutput) Amount() uint64 { + return out.Amt +} func (out *TransferOutput) Verify() error { switch { case out == nil: return errNilOutput case out.Amt == 0: - return errNoValueOutput + return ErrNoValueOutput default: return out.OutputOwners.Verify() } } -func (out *TransferOutput) VerifyState() error { return out.Verify() } +func (out *TransferOutput) VerifyState() error { + return 
out.Verify() +} -func (out *TransferOutput) Owners() interface{} { return &out.OutputOwners } +func (out *TransferOutput) Owners() interface{} { + return &out.OutputOwners +} diff --git a/avalanchego/vms/secp256k1fx/transfer_output_test.go b/avalanchego/vms/secp256k1fx/transfer_output_test.go index 21a117ac..08bfd173 100644 --- a/avalanchego/vms/secp256k1fx/transfer_output_test.go +++ b/avalanchego/vms/secp256k1fx/transfer_output_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -62,7 +62,7 @@ func TestOutputVerifyNoValue(t *testing.T) { }, }, } - require.ErrorIs(out.Verify(), errNoValueOutput) + require.ErrorIs(out.Verify(), ErrNoValueOutput) } func TestOutputVerifyUnspendable(t *testing.T) { diff --git a/avalanchego/vms/secp256k1fx/tx.go b/avalanchego/vms/secp256k1fx/tx.go index fe7179b5..81f4ee4d 100644 --- a/avalanchego/vms/secp256k1fx/tx.go +++ b/avalanchego/vms/secp256k1fx/tx.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package secp256k1fx @@ -8,10 +8,12 @@ type UnsignedTx interface { Bytes() []byte } -var _ UnsignedTx = &TestTx{} +var _ UnsignedTx = (*TestTx)(nil) // TestTx is a minimal implementation of a Tx type TestTx struct{ UnsignedBytes []byte } // UnsignedBytes returns Bytes -func (tx *TestTx) Bytes() []byte { return tx.UnsignedBytes } +func (tx *TestTx) Bytes() []byte { + return tx.UnsignedBytes +} diff --git a/avalanchego/vms/secp256k1fx/vm.go b/avalanchego/vms/secp256k1fx/vm.go index 3092b865..9fe16d58 100644 --- a/avalanchego/vms/secp256k1fx/vm.go +++ b/avalanchego/vms/secp256k1fx/vm.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package secp256k1fx @@ -17,16 +17,27 @@ type VM interface { EthVerificationEnabled() bool } -var _ VM = &TestVM{} +var _ VM = (*TestVM)(nil) // TestVM is a minimal implementation of a VM type TestVM struct { - CLK mockable.Clock + Clk mockable.Clock Codec codec.Registry Log logging.Logger } -func (vm *TestVM) Clock() *mockable.Clock { return &vm.CLK } -func (vm *TestVM) CodecRegistry() codec.Registry { return vm.Codec } -func (vm *TestVM) Logger() logging.Logger { return vm.Log } -func (vm *TestVM) EthVerificationEnabled() bool { return false } +func (vm *TestVM) Clock() *mockable.Clock { + return &vm.Clk +} + +func (vm *TestVM) CodecRegistry() codec.Registry { + return vm.Codec +} + +func (vm *TestVM) Logger() logging.Logger { + return vm.Log +} +func (vm *TestVM) EthVerificationEnabled() bool { + return false +} + diff --git a/avalanchego/vms/tracedvm/batched_vm.go b/avalanchego/vms/tracedvm/batched_vm.go new file mode 100644 index 00000000..47f81c9f --- /dev/null +++ b/avalanchego/vms/tracedvm/batched_vm.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tracedvm + +import ( + "context" + "time" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +func (vm *blockVM) GetAncestors( + ctx context.Context, + blkID ids.ID, + maxBlocksNum int, + maxBlocksSize int, + maxBlocksRetrivalTime time.Duration, +) ([][]byte, error) { + if vm.batchedVM == nil { + return nil, block.ErrRemoteVMNotImplemented + } + + ctx, span := vm.tracer.Start(ctx, vm.getAncestorsTag, oteltrace.WithAttributes( + attribute.Stringer("blkID", blkID), + attribute.Int("maxBlocksNum", maxBlocksNum), + attribute.Int("maxBlocksSize", maxBlocksSize), + attribute.Int64("maxBlocksRetrivalTime", int64(maxBlocksRetrivalTime)), + )) + defer span.End() + + return vm.batchedVM.GetAncestors( + ctx, + blkID, + maxBlocksNum, + maxBlocksSize, + maxBlocksRetrivalTime, + ) +} + +func (vm *blockVM) BatchedParseBlock(ctx context.Context, blks [][]byte) ([]snowman.Block, error) { + if vm.batchedVM == nil { + return nil, block.ErrRemoteVMNotImplemented + } + + ctx, span := vm.tracer.Start(ctx, vm.batchedParseBlockTag, oteltrace.WithAttributes( + attribute.Int("numBlocks", len(blks)), + )) + defer span.End() + + blocks, err := vm.batchedVM.BatchedParseBlock(ctx, blks) + if err != nil { + return nil, err + } + + wrappedBlocks := make([]snowman.Block, len(blocks)) + for i, block := range blocks { + wrappedBlocks[i] = &tracedBlock{ + Block: block, + vm: vm, + } + } + return wrappedBlocks, nil +} diff --git a/avalanchego/vms/tracedvm/block.go b/avalanchego/vms/tracedvm/block.go new file mode 100644 index 00000000..a90a1103 --- /dev/null +++ b/avalanchego/vms/tracedvm/block.go @@ -0,0 +1,120 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tracedvm + +import ( + "context" + "errors" + "fmt" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +var ( + _ snowman.Block = (*tracedBlock)(nil) + _ snowman.OracleBlock = (*tracedBlock)(nil) + _ block.WithVerifyContext = (*tracedBlock)(nil) + + errExpectedBlockWithVerifyContext = errors.New("expected block.WithVerifyContext") +) + +type tracedBlock struct { + snowman.Block + + vm *blockVM +} + +func (b *tracedBlock) Verify(ctx context.Context) error { + ctx, span := b.vm.tracer.Start(ctx, b.vm.verifyTag, oteltrace.WithAttributes( + attribute.Stringer("blkID", b.ID()), + attribute.Int64("height", int64(b.Height())), + )) + defer span.End() + + return b.Block.Verify(ctx) +} + +func (b *tracedBlock) Accept(ctx context.Context) error { + ctx, span := b.vm.tracer.Start(ctx, b.vm.acceptTag, oteltrace.WithAttributes( + attribute.Stringer("blkID", b.ID()), + attribute.Int64("height", int64(b.Height())), + )) + defer span.End() + + return b.Block.Accept(ctx) +} + +func (b *tracedBlock) Reject(ctx context.Context) error { + ctx, span := b.vm.tracer.Start(ctx, b.vm.rejectTag, oteltrace.WithAttributes( + attribute.Stringer("blkID", b.ID()), + attribute.Int64("height", int64(b.Height())), + )) + defer span.End() + + return b.Block.Reject(ctx) +} + +func (b *tracedBlock) Options(ctx context.Context) ([2]snowman.Block, error) { + oracleBlock, ok := b.Block.(snowman.OracleBlock) + if !ok { + return [2]snowman.Block{}, snowman.ErrNotOracle + } + + ctx, span := b.vm.tracer.Start(ctx, b.vm.optionsTag, oteltrace.WithAttributes( + attribute.Stringer("blkID", b.ID()), + attribute.Int64("height", int64(b.Height())), + )) + defer span.End() + + blks, err := oracleBlock.Options(ctx) + if err != nil { + return [2]snowman.Block{}, err + } + return [2]snowman.Block{ + &tracedBlock{ + Block: blks[0], + vm: b.vm, + 
}, + &tracedBlock{ + Block: blks[1], + vm: b.vm, + }, + }, nil +} + +func (b *tracedBlock) ShouldVerifyWithContext(ctx context.Context) (bool, error) { + blkWithCtx, ok := b.Block.(block.WithVerifyContext) + if !ok { + return false, nil + } + + ctx, span := b.vm.tracer.Start(ctx, b.vm.shouldVerifyWithContextTag, oteltrace.WithAttributes( + attribute.Stringer("blkID", b.ID()), + attribute.Int64("height", int64(b.Height())), + )) + defer span.End() + + return blkWithCtx.ShouldVerifyWithContext(ctx) +} + +func (b *tracedBlock) VerifyWithContext(ctx context.Context, blockCtx *block.Context) error { + blkWithCtx, ok := b.Block.(block.WithVerifyContext) + if !ok { + return fmt.Errorf("%w but got %T", errExpectedBlockWithVerifyContext, b.Block) + } + + ctx, span := b.vm.tracer.Start(ctx, b.vm.verifyWithContextTag, oteltrace.WithAttributes( + attribute.Stringer("blkID", b.ID()), + attribute.Int64("height", int64(b.Height())), + attribute.Int64("pChainHeight", int64(blockCtx.PChainHeight)), + )) + defer span.End() + + return blkWithCtx.VerifyWithContext(ctx, blockCtx) +} diff --git a/avalanchego/vms/tracedvm/block_vm.go b/avalanchego/vms/tracedvm/block_vm.go new file mode 100644 index 00000000..1092c252 --- /dev/null +++ b/avalanchego/vms/tracedvm/block_vm.go @@ -0,0 +1,182 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tracedvm + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/trace" +) + +var ( + _ block.ChainVM = (*blockVM)(nil) + _ block.BuildBlockWithContextChainVM = (*blockVM)(nil) + _ block.BatchedChainVM = (*blockVM)(nil) + _ block.HeightIndexedChainVM = (*blockVM)(nil) + _ block.StateSyncableVM = (*blockVM)(nil) +) + +type blockVM struct { + block.ChainVM + buildBlockVM block.BuildBlockWithContextChainVM + batchedVM block.BatchedChainVM + hVM block.HeightIndexedChainVM + ssVM block.StateSyncableVM + // ChainVM tags + initializeTag string + buildBlockTag string + parseBlockTag string + getBlockTag string + setPreferenceTag string + lastAcceptedTag string + verifyTag string + acceptTag string + rejectTag string + optionsTag string + shouldVerifyWithContextTag string + verifyWithContextTag string + // BuildBlockWithContextChainVM tags + buildBlockWithContextTag string + // BatchedChainVM tags + getAncestorsTag string + batchedParseBlockTag string + // HeightIndexedChainVM tags + verifyHeightIndexTag string + getBlockIDAtHeightTag string + // StateSyncableVM tags + stateSyncEnabledTag string + getOngoingSyncStateSummaryTag string + getLastStateSummaryTag string + parseStateSummaryTag string + getStateSummaryTag string + tracer trace.Tracer +} + +func NewBlockVM(vm block.ChainVM, name string, tracer trace.Tracer) block.ChainVM { + buildBlockVM, _ := vm.(block.BuildBlockWithContextChainVM) + batchedVM, _ := vm.(block.BatchedChainVM) + hVM, _ := vm.(block.HeightIndexedChainVM) + ssVM, _ := vm.(block.StateSyncableVM) + return &blockVM{ + ChainVM: vm, + 
buildBlockVM: buildBlockVM, + batchedVM: batchedVM, + hVM: hVM, + ssVM: ssVM, + initializeTag: fmt.Sprintf("%s.initialize", name), + buildBlockTag: fmt.Sprintf("%s.buildBlock", name), + parseBlockTag: fmt.Sprintf("%s.parseBlock", name), + getBlockTag: fmt.Sprintf("%s.getBlock", name), + setPreferenceTag: fmt.Sprintf("%s.setPreference", name), + lastAcceptedTag: fmt.Sprintf("%s.lastAccepted", name), + verifyTag: fmt.Sprintf("%s.verify", name), + acceptTag: fmt.Sprintf("%s.accept", name), + rejectTag: fmt.Sprintf("%s.reject", name), + optionsTag: fmt.Sprintf("%s.options", name), + shouldVerifyWithContextTag: fmt.Sprintf("%s.shouldVerifyWithContext", name), + verifyWithContextTag: fmt.Sprintf("%s.verifyWithContext", name), + buildBlockWithContextTag: fmt.Sprintf("%s.buildBlockWithContext", name), + getAncestorsTag: fmt.Sprintf("%s.getAncestors", name), + batchedParseBlockTag: fmt.Sprintf("%s.batchedParseBlock", name), + verifyHeightIndexTag: fmt.Sprintf("%s.verifyHeightIndex", name), + getBlockIDAtHeightTag: fmt.Sprintf("%s.getBlockIDAtHeight", name), + stateSyncEnabledTag: fmt.Sprintf("%s.stateSyncEnabled", name), + getOngoingSyncStateSummaryTag: fmt.Sprintf("%s.getOngoingSyncStateSummary", name), + getLastStateSummaryTag: fmt.Sprintf("%s.getLastStateSummary", name), + parseStateSummaryTag: fmt.Sprintf("%s.parseStateSummary", name), + getStateSummaryTag: fmt.Sprintf("%s.getStateSummary", name), + tracer: tracer, + } +} + +func (vm *blockVM) Initialize( + ctx context.Context, + chainCtx *snow.Context, + db manager.Manager, + genesisBytes, + upgradeBytes, + configBytes []byte, + toEngine chan<- common.Message, + fxs []*common.Fx, + appSender common.AppSender, +) error { + ctx, span := vm.tracer.Start(ctx, vm.initializeTag) + defer span.End() + + return vm.ChainVM.Initialize( + ctx, + chainCtx, + db, + genesisBytes, + upgradeBytes, + configBytes, + toEngine, + fxs, + appSender, + ) +} + +func (vm *blockVM) BuildBlock(ctx context.Context) (snowman.Block, error) { + ctx, 
span := vm.tracer.Start(ctx, vm.buildBlockTag) + defer span.End() + + blk, err := vm.ChainVM.BuildBlock(ctx) + return &tracedBlock{ + Block: blk, + vm: vm, + }, err +} + +func (vm *blockVM) ParseBlock(ctx context.Context, block []byte) (snowman.Block, error) { + ctx, span := vm.tracer.Start(ctx, vm.parseBlockTag, oteltrace.WithAttributes( + attribute.Int("blockLen", len(block)), + )) + defer span.End() + + blk, err := vm.ChainVM.ParseBlock(ctx, block) + return &tracedBlock{ + Block: blk, + vm: vm, + }, err +} + +func (vm *blockVM) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, error) { + ctx, span := vm.tracer.Start(ctx, vm.getBlockTag, oteltrace.WithAttributes( + attribute.Stringer("blkID", blkID), + )) + defer span.End() + + blk, err := vm.ChainVM.GetBlock(ctx, blkID) + return &tracedBlock{ + Block: blk, + vm: vm, + }, err +} + +func (vm *blockVM) SetPreference(ctx context.Context, blkID ids.ID) error { + ctx, span := vm.tracer.Start(ctx, vm.setPreferenceTag, oteltrace.WithAttributes( + attribute.Stringer("blkID", blkID), + )) + defer span.End() + + return vm.ChainVM.SetPreference(ctx, blkID) +} + +func (vm *blockVM) LastAccepted(ctx context.Context) (ids.ID, error) { + ctx, span := vm.tracer.Start(ctx, vm.lastAcceptedTag) + defer span.End() + + return vm.ChainVM.LastAccepted(ctx) +} diff --git a/avalanchego/vms/tracedvm/build_block_with_context_vm.go b/avalanchego/vms/tracedvm/build_block_with_context_vm.go new file mode 100644 index 00000000..1d9e9319 --- /dev/null +++ b/avalanchego/vms/tracedvm/build_block_with_context_vm.go @@ -0,0 +1,28 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tracedvm + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +func (vm *blockVM) BuildBlockWithContext(ctx context.Context, blockCtx *block.Context) (snowman.Block, error) { + if vm.buildBlockVM == nil { + return vm.BuildBlock(ctx) + } + + ctx, span := vm.tracer.Start(ctx, vm.buildBlockWithContextTag, oteltrace.WithAttributes( + attribute.Int64("pChainHeight", int64(blockCtx.PChainHeight)), + )) + defer span.End() + + return vm.buildBlockVM.BuildBlockWithContext(ctx, blockCtx) +} diff --git a/avalanchego/vms/tracedvm/height_indexed_vm.go b/avalanchego/vms/tracedvm/height_indexed_vm.go new file mode 100644 index 00000000..4d240461 --- /dev/null +++ b/avalanchego/vms/tracedvm/height_indexed_vm.go @@ -0,0 +1,39 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tracedvm + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +func (vm *blockVM) VerifyHeightIndex(ctx context.Context) error { + if vm.hVM == nil { + return block.ErrHeightIndexedVMNotImplemented + } + + ctx, span := vm.tracer.Start(ctx, vm.verifyHeightIndexTag) + defer span.End() + + return vm.hVM.VerifyHeightIndex(ctx) +} + +func (vm *blockVM) GetBlockIDAtHeight(ctx context.Context, height uint64) (ids.ID, error) { + if vm.hVM == nil { + return ids.Empty, block.ErrHeightIndexedVMNotImplemented + } + + ctx, span := vm.tracer.Start(ctx, vm.getBlockIDAtHeightTag, oteltrace.WithAttributes( + attribute.Int64("height", int64(height)), + )) + defer span.End() + + return vm.hVM.GetBlockIDAtHeight(ctx, height) +} diff --git a/avalanchego/vms/tracedvm/state_syncable_vm.go b/avalanchego/vms/tracedvm/state_syncable_vm.go new file mode 100644 index 00000000..75738462 --- /dev/null +++ b/avalanchego/vms/tracedvm/state_syncable_vm.go @@ -0,0 +1,73 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tracedvm + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" +) + +func (vm *blockVM) StateSyncEnabled(ctx context.Context) (bool, error) { + if vm.ssVM == nil { + return false, nil + } + + ctx, span := vm.tracer.Start(ctx, vm.stateSyncEnabledTag) + defer span.End() + + return vm.ssVM.StateSyncEnabled(ctx) +} + +func (vm *blockVM) GetOngoingSyncStateSummary(ctx context.Context) (block.StateSummary, error) { + if vm.ssVM == nil { + return nil, block.ErrStateSyncableVMNotImplemented + } + + ctx, span := vm.tracer.Start(ctx, vm.getOngoingSyncStateSummaryTag) + defer span.End() + + return vm.ssVM.GetOngoingSyncStateSummary(ctx) +} + +func (vm *blockVM) GetLastStateSummary(ctx context.Context) (block.StateSummary, error) { + if vm.ssVM == nil { + return nil, block.ErrStateSyncableVMNotImplemented + } + + ctx, span := vm.tracer.Start(ctx, vm.getLastStateSummaryTag) + defer span.End() + + return vm.ssVM.GetLastStateSummary(ctx) +} + +func (vm *blockVM) ParseStateSummary(ctx context.Context, summaryBytes []byte) (block.StateSummary, error) { + if vm.ssVM == nil { + return nil, block.ErrStateSyncableVMNotImplemented + } + + ctx, span := vm.tracer.Start(ctx, vm.parseStateSummaryTag, oteltrace.WithAttributes( + attribute.Int("summaryLen", len(summaryBytes)), + )) + defer span.End() + + return vm.ssVM.ParseStateSummary(ctx, summaryBytes) +} + +func (vm *blockVM) GetStateSummary(ctx context.Context, height uint64) (block.StateSummary, error) { + if vm.ssVM == nil { + return nil, block.ErrStateSyncableVMNotImplemented + } + + ctx, span := vm.tracer.Start(ctx, vm.getStateSummaryTag, oteltrace.WithAttributes( + attribute.Int64("height", int64(height)), + )) + defer span.End() + + return vm.ssVM.GetStateSummary(ctx, height) +} diff --git a/avalanchego/vms/tracedvm/tx.go b/avalanchego/vms/tracedvm/tx.go new file mode 100644 index 00000000..7e18efcb 
--- /dev/null +++ b/avalanchego/vms/tracedvm/tx.go @@ -0,0 +1,50 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package tracedvm + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/trace" +) + +var _ snowstorm.Tx = (*tracedTx)(nil) + +type tracedTx struct { + snowstorm.Tx + + tracer trace.Tracer +} + +func (t *tracedTx) Verify(ctx context.Context) error { + ctx, span := t.tracer.Start(ctx, "tracedTx.Verify", oteltrace.WithAttributes( + attribute.Stringer("txID", t.ID()), + )) + defer span.End() + + return t.Tx.Verify(ctx) +} + +func (t *tracedTx) Accept(ctx context.Context) error { + ctx, span := t.tracer.Start(ctx, "tracedTx.Accept", oteltrace.WithAttributes( + attribute.Stringer("txID", t.ID()), + )) + defer span.End() + + return t.Tx.Accept(ctx) +} + +func (t *tracedTx) Reject(ctx context.Context) error { + ctx, span := t.tracer.Start(ctx, "tracedTx.Reject", oteltrace.WithAttributes( + attribute.Stringer("txID", t.ID()), + )) + defer span.End() + + return t.Tx.Reject(ctx) +} diff --git a/avalanchego/vms/tracedvm/vertex_vm.go b/avalanchego/vms/tracedvm/vertex_vm.go new file mode 100644 index 00000000..465e0077 --- /dev/null +++ b/avalanchego/vms/tracedvm/vertex_vm.go @@ -0,0 +1,94 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package tracedvm + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" + "github.com/ava-labs/avalanchego/snow/engine/avalanche/vertex" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/trace" +) + +var _ vertex.LinearizableVMWithEngine = (*vertexVM)(nil) + +type vertexVM struct { + vertex.LinearizableVMWithEngine + tracer trace.Tracer +} + +func NewVertexVM(vm vertex.LinearizableVMWithEngine, tracer trace.Tracer) vertex.LinearizableVMWithEngine { + return &vertexVM{ + LinearizableVMWithEngine: vm, + tracer: tracer, + } +} + +func (vm *vertexVM) Initialize( + ctx context.Context, + chainCtx *snow.Context, + db manager.Manager, + genesisBytes, + upgradeBytes, + configBytes []byte, + toEngine chan<- common.Message, + fxs []*common.Fx, + appSender common.AppSender, +) error { + ctx, span := vm.tracer.Start(ctx, "vertexVM.Initialize") + defer span.End() + + return vm.LinearizableVMWithEngine.Initialize( + ctx, + chainCtx, + db, + genesisBytes, + upgradeBytes, + configBytes, + toEngine, + fxs, + appSender, + ) +} + +func (vm *vertexVM) PendingTxs(ctx context.Context) []snowstorm.Tx { + ctx, span := vm.tracer.Start(ctx, "vertexVM.PendingTxs") + defer span.End() + + return vm.LinearizableVMWithEngine.PendingTxs(ctx) +} + +func (vm *vertexVM) ParseTx(ctx context.Context, txBytes []byte) (snowstorm.Tx, error) { + ctx, span := vm.tracer.Start(ctx, "vertexVM.ParseTx", oteltrace.WithAttributes( + attribute.Int("txLen", len(txBytes)), + )) + defer span.End() + + tx, err := vm.LinearizableVMWithEngine.ParseTx(ctx, txBytes) + return &tracedTx{ + Tx: tx, + tracer: vm.tracer, + }, err +} + +func (vm *vertexVM) GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) { + ctx, span := 
vm.tracer.Start(ctx, "vertexVM.GetTx", oteltrace.WithAttributes( + attribute.Stringer("txID", txID), + )) + defer span.End() + + tx, err := vm.LinearizableVMWithEngine.GetTx(ctx, txID) + return &tracedTx{ + Tx: tx, + tracer: vm.tracer, + }, err +} diff --git a/avalanchego/vms/types/blob_data.go b/avalanchego/vms/types/blob_data.go index 1e6f9ed4..cf5855ad 100644 --- a/avalanchego/vms/types/blob_data.go +++ b/avalanchego/vms/types/blob_data.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package types diff --git a/avalanchego/wallet/chain/p/backend.go b/avalanchego/wallet/chain/p/backend.go index 8f267a6d..37e63b0b 100644 --- a/avalanchego/wallet/chain/p/backend.go +++ b/avalanchego/wallet/chain/p/backend.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p @@ -11,11 +11,12 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ Backend = &backend{} +var _ Backend = (*backend)(nil) type ChainUTXOs interface { AddUTXO(ctx stdcontext.Context, destinationChainID ids.ID, utxo *avax.UTXO) error @@ -84,7 +85,7 @@ func (b *backend) addUTXOs(ctx stdcontext.Context, destinationChainID ids.ID, ut return nil } -func (b *backend) removeUTXOs(ctx stdcontext.Context, sourceChain ids.ID, utxoIDs ids.Set) error { +func (b *backend) removeUTXOs(ctx stdcontext.Context, sourceChain ids.ID, utxoIDs set.Set[ids.ID]) error { for utxoID := range utxoIDs { if err := b.RemoveUTXO(ctx, sourceChain, utxoID); err != nil { return err diff --git a/avalanchego/wallet/chain/p/backend_visitor.go b/avalanchego/wallet/chain/p/backend_visitor.go index 02ae9ae1..9830d87a 100644 --- a/avalanchego/wallet/chain/p/backend_visitor.go +++ b/avalanchego/wallet/chain/p/backend_visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) -var _ txs.Visitor = &backendVisitor{} +var _ txs.Visitor = (*backendVisitor)(nil) // backendVisitor handles accepting of transactions for the backend type backendVisitor struct { @@ -21,8 +21,13 @@ type backendVisitor struct { txID ids.ID } -func (*backendVisitor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { return errUnsupportedTxType } -func (*backendVisitor) RewardValidatorTx(*txs.RewardValidatorTx) error { return errUnsupportedTxType } +func (*backendVisitor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { + return errUnsupportedTxType +} + +func (*backendVisitor) RewardValidatorTx(*txs.RewardValidatorTx) error { + return errUnsupportedTxType +} func (b *backendVisitor) AddValidatorTx(tx *txs.AddValidatorTx) error { return b.baseTx(&tx.BaseTx) diff --git a/avalanchego/wallet/chain/p/builder.go b/avalanchego/wallet/chain/p/builder.go index 6615d41f..f890790d 100644 --- a/avalanchego/wallet/chain/p/builder.go +++ b/avalanchego/wallet/chain/p/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p @@ -11,13 +11,14 @@ import ( stdcontext "context" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -29,7 +30,7 @@ var ( errInsufficientAuthorization = errors.New("insufficient authorization") errInsufficientFunds = errors.New("insufficient funds") - _ Builder = &builder{} + _ Builder = (*builder)(nil) ) // Builder provides a convenient interface for building unsigned P-chain @@ -71,7 +72,7 @@ type Builder interface { // will take from delegation rewards. If 1,000,000 is provided, 100% of // the delegation reward will be sent to the validator's [rewardsOwner]. NewAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -82,7 +83,7 @@ type Builder interface { // - [vdr] specifies all the details of the validation period such as the // startTime, endTime, sampling weight, nodeID, and subnetID. NewAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (*txs.AddSubnetValidatorTx, error) @@ -102,7 +103,7 @@ type Builder interface { // - [rewardsOwner] specifies the owner of all the rewards this delegator // may accrue at the end of its delegation period. 
NewAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddDelegatorTx, error) @@ -219,7 +220,7 @@ type Builder interface { // will take from delegation rewards. If 1,000,000 is provided, 100% of // the delegation reward will be sent to the validator's [rewardsOwner]. NewAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -237,7 +238,7 @@ type Builder interface { // - [rewardsOwner] specifies the owner of all the rewards this delegator // earns during its delegation period. NewAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, @@ -253,7 +254,7 @@ type BuilderBackend interface { } type builder struct { - addrs ids.ShortSet + addrs set.Set[ids.ShortID] backend BuilderBackend } @@ -263,7 +264,7 @@ type builder struct { // signing the transactions in the future. // - [backend] provides the required access to the chain's context and state // to build out the transactions. 
-func NewBuilder(addrs ids.ShortSet, backend BuilderBackend) Builder { +func NewBuilder(addrs set.Set[ids.ShortID], backend BuilderBackend) Builder { return &builder{ addrs: addrs, backend: backend, @@ -323,7 +324,7 @@ func (b *builder) NewBaseTx( } func (b *builder) NewAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -341,7 +342,7 @@ func (b *builder) NewAddValidatorTx( return nil, err } - ids.SortShortIDs(rewardsOwner.Addrs) + utils.Sort(rewardsOwner.Addrs) return &txs.AddValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), @@ -358,7 +359,7 @@ func (b *builder) NewAddValidatorTx( } func (b *builder) NewAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (*txs.AddSubnetValidatorTx, error) { toBurn := map[ids.ID]uint64{ @@ -384,8 +385,8 @@ func (b *builder) NewAddSubnetValidatorTx( Outs: outputs, Memo: ops.Memo(), }}, - Validator: *vdr, - SubnetAuth: subnetAuth, + SubnetValidator: *vdr, + SubnetAuth: subnetAuth, }, nil } @@ -424,7 +425,7 @@ func (b *builder) NewRemoveSubnetValidatorTx( } func (b *builder) NewAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddDelegatorTx, error) { @@ -441,7 +442,7 @@ func (b *builder) NewAddDelegatorTx( return nil, err } - ids.SortShortIDs(rewardsOwner.Addrs) + utils.Sort(rewardsOwner.Addrs) return &txs.AddDelegatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), @@ -479,7 +480,7 @@ func (b *builder) NewCreateChainTx( return nil, err } - ids.SortIDs(fxIDs) + utils.Sort(fxIDs) return &txs.CreateChainTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), @@ -511,7 +512,7 @@ func (b *builder) NewCreateSubnetTx( return nil, err } - ids.SortShortIDs(owner.Addrs) + utils.Sort(owner.Addrs) return 
&txs.CreateSubnetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), @@ -575,7 +576,7 @@ func (b *builder) NewImportTx( } importedAmounts[assetID] = newImportedAmount } - avax.SortTransferableInputs(importedInputs) // sort imported inputs + utils.Sort(importedInputs) // sort imported inputs if len(importedInputs) == 0 { return nil, fmt.Errorf( @@ -728,7 +729,7 @@ func (b *builder) NewTransformSubnetTx( } func (b *builder) NewAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -752,8 +753,8 @@ func (b *builder) NewAddPermissionlessValidatorTx( return nil, err } - ids.SortShortIDs(validationRewardsOwner.Addrs) - ids.SortShortIDs(delegationRewardsOwner.Addrs) + utils.Sort(validationRewardsOwner.Addrs) + utils.Sort(delegationRewardsOwner.Addrs) return &txs.AddPermissionlessValidatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), @@ -773,7 +774,7 @@ func (b *builder) NewAddPermissionlessValidatorTx( } func (b *builder) NewAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, @@ -794,7 +795,7 @@ func (b *builder) NewAddPermissionlessDelegatorTx( return nil, err } - ids.SortShortIDs(rewardsOwner.Addrs) + utils.Sort(rewardsOwner.Addrs) return &txs.AddPermissionlessDelegatorTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ NetworkID: b.backend.NetworkID(), @@ -946,7 +947,7 @@ func (b *builder) spend( }) // Stake any value that should be staked - amountToStake := math.Min64( + amountToStake := math.Min( remainingAmountToStake, // Amount we still need to stake out.Amt, // Amount available to stake ) @@ -1024,7 +1025,7 @@ func (b *builder) spend( }) // Burn any value that should be burned - amountToBurn := math.Min64( + amountToBurn := math.Min( remainingAmountToBurn, // Amount we 
still need to burn out.Amt, // Amount available to burn ) @@ -1032,7 +1033,7 @@ func (b *builder) spend( amountAvalibleToStake := out.Amt - amountToBurn // Burn any value that should be burned - amountToStake := math.Min64( + amountToStake := math.Min( remainingAmountToStake, // Amount we still need to stake amountAvalibleToStake, // Amount available to stake ) @@ -1080,7 +1081,7 @@ func (b *builder) spend( } } - avax.SortTransferableInputs(inputs) // sort inputs + utils.Sort(inputs) // sort inputs avax.SortTransferableOutputs(changeOutputs, txs.Codec) // sort the change outputs avax.SortTransferableOutputs(stakeOutputs, txs.Codec) // sort stake outputs return inputs, changeOutputs, stakeOutputs, nil diff --git a/avalanchego/wallet/chain/p/builder_with_options.go b/avalanchego/wallet/chain/p/builder_with_options.go index bd597e39..9060d763 100644 --- a/avalanchego/wallet/chain/p/builder_with_options.go +++ b/avalanchego/wallet/chain/p/builder_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -10,12 +10,11 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) -var _ Builder = &builderWithOptions{} +var _ Builder = (*builderWithOptions)(nil) type builderWithOptions struct { Builder @@ -26,7 +25,7 @@ type builderWithOptions struct { // given options by default. // // - [builder] is the builder that will be called to perform the underlying -// opterations. +// operations. // - [options] will be provided to the builder in addition to the options // provided in the method calls. 
func NewBuilderWithOptions(builder Builder, options ...common.Option) Builder { @@ -55,7 +54,7 @@ func (b *builderWithOptions) GetImportableBalance( } func (b *builderWithOptions) NewAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -69,7 +68,7 @@ func (b *builderWithOptions) NewAddValidatorTx( } func (b *builderWithOptions) NewAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (*txs.AddSubnetValidatorTx, error) { return b.Builder.NewAddSubnetValidatorTx( @@ -91,7 +90,7 @@ func (b *builderWithOptions) RemoveSubnetValidatorTx( } func (b *builderWithOptions) NewAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (*txs.AddDelegatorTx, error) { @@ -191,7 +190,7 @@ func (b *builderWithOptions) NewTransformSubnetTx( } func (b *builderWithOptions) NewAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -211,7 +210,7 @@ func (b *builderWithOptions) NewAddPermissionlessValidatorTx( } func (b *builderWithOptions) NewAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, diff --git a/avalanchego/wallet/chain/p/context.go b/avalanchego/wallet/chain/p/context.go index 715991a4..75bcdb19 100644 --- a/avalanchego/wallet/chain/p/context.go +++ b/avalanchego/wallet/chain/p/context.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm" ) -var _ Context = &context{} +var _ Context = (*context)(nil) type Context interface { NetworkID() uint32 @@ -105,13 +105,42 @@ func NewContext( } } -func (c *context) NetworkID() uint32 { return c.networkID } -func (c *context) AVAXAssetID() ids.ID { return c.avaxAssetID } -func (c *context) BaseTxFee() uint64 { return c.baseTxFee } -func (c *context) CreateSubnetTxFee() uint64 { return c.createSubnetTxFee } -func (c *context) TransformSubnetTxFee() uint64 { return c.transformSubnetTxFee } -func (c *context) CreateBlockchainTxFee() uint64 { return c.createBlockchainTxFee } -func (c *context) AddPrimaryNetworkValidatorFee() uint64 { return c.addPrimaryNetworkValidatorFee } -func (c *context) AddPrimaryNetworkDelegatorFee() uint64 { return c.addPrimaryNetworkDelegatorFee } -func (c *context) AddSubnetValidatorFee() uint64 { return c.addSubnetValidatorFee } -func (c *context) AddSubnetDelegatorFee() uint64 { return c.addSubnetDelegatorFee } +func (c *context) NetworkID() uint32 { + return c.networkID +} + +func (c *context) AVAXAssetID() ids.ID { + return c.avaxAssetID +} + +func (c *context) BaseTxFee() uint64 { + return c.baseTxFee +} + +func (c *context) CreateSubnetTxFee() uint64 { + return c.createSubnetTxFee +} + +func (c *context) TransformSubnetTxFee() uint64 { + return c.transformSubnetTxFee +} + +func (c *context) CreateBlockchainTxFee() uint64 { + return c.createBlockchainTxFee +} + +func (c *context) AddPrimaryNetworkValidatorFee() uint64 { + return c.addPrimaryNetworkValidatorFee +} + +func (c *context) AddPrimaryNetworkDelegatorFee() uint64 { + return c.addPrimaryNetworkDelegatorFee +} + +func (c *context) AddSubnetValidatorFee() uint64 { + return c.addSubnetValidatorFee +} + +func (c *context) AddSubnetDelegatorFee() uint64 { + return c.addSubnetDelegatorFee +} diff --git a/avalanchego/wallet/chain/p/signer.go b/avalanchego/wallet/chain/p/signer.go index 69fc5fff..a795dd63 
100644 --- a/avalanchego/wallet/chain/p/signer.go +++ b/avalanchego/wallet/chain/p/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -7,12 +7,12 @@ import ( stdcontext "context" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/crypto/keychain" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ Signer = &txSigner{} +var _ Signer = (*txSigner)(nil) type Signer interface { SignUnsigned(ctx stdcontext.Context, tx txs.UnsignedTx) (*txs.Tx, error) @@ -25,11 +25,11 @@ type SignerBackend interface { } type txSigner struct { - kc *secp256k1fx.Keychain + kc keychain.Keychain backend SignerBackend } -func NewSigner(kc *secp256k1fx.Keychain, backend SignerBackend) Signer { +func NewSigner(kc keychain.Keychain, backend SignerBackend) Signer { return &txSigner{ kc: kc, backend: backend, diff --git a/avalanchego/wallet/chain/p/signer_visitor.go b/avalanchego/wallet/chain/p/signer_visitor.go index f72683c3..52269ee6 100644 --- a/avalanchego/wallet/chain/p/signer_visitor.go +++ b/avalanchego/wallet/chain/p/signer_visitor.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p @@ -12,7 +12,8 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/keychain" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -22,7 +23,7 @@ import ( ) var ( - _ txs.Visitor = &signerVisitor{} + _ txs.Visitor = (*signerVisitor)(nil) errUnsupportedTxType = errors.New("unsupported tx type") errUnknownInputType = errors.New("unknown input type") @@ -31,26 +32,31 @@ var ( errUnknownSubnetAuthType = errors.New("unknown subnet auth type") errInvalidUTXOSigIndex = errors.New("invalid UTXO signature index") - emptySig [crypto.SECP256K1RSigLen]byte + emptySig [secp256k1.SignatureLen]byte ) // signerVisitor handles signing transactions for the signer type signerVisitor struct { - kc *secp256k1fx.Keychain + kc keychain.Keychain backend SignerBackend ctx stdcontext.Context tx *txs.Tx } -func (*signerVisitor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { return errUnsupportedTxType } -func (*signerVisitor) RewardValidatorTx(*txs.RewardValidatorTx) error { return errUnsupportedTxType } +func (*signerVisitor) AdvanceTimeTx(*txs.AdvanceTimeTx) error { + return errUnsupportedTxType +} + +func (*signerVisitor) RewardValidatorTx(*txs.RewardValidatorTx) error { + return errUnsupportedTxType +} func (s *signerVisitor) AddValidatorTx(tx *txs.AddValidatorTx) error { txSigners, err := s.getSigners(constants.PlatformChainID, tx.Ins) if err != nil { return err } - return s.sign(s.tx, txSigners) + return sign(s.tx, false, txSigners) } func (s *signerVisitor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error { @@ -58,12 +64,12 @@ func (s *signerVisitor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error if err != nil { 
return err } - subnetAuthSigners, err := s.getSubnetSigners(tx.Validator.Subnet, tx.SubnetAuth) + subnetAuthSigners, err := s.getSubnetSigners(tx.SubnetValidator.Subnet, tx.SubnetAuth) if err != nil { return err } txSigners = append(txSigners, subnetAuthSigners) - return s.sign(s.tx, txSigners) + return sign(s.tx, false, txSigners) } func (s *signerVisitor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { @@ -71,7 +77,7 @@ func (s *signerVisitor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { if err != nil { return err } - return s.sign(s.tx, txSigners) + return sign(s.tx, false, txSigners) } func (s *signerVisitor) CreateChainTx(tx *txs.CreateChainTx) error { @@ -84,7 +90,7 @@ func (s *signerVisitor) CreateChainTx(tx *txs.CreateChainTx) error { return err } txSigners = append(txSigners, subnetAuthSigners) - return s.sign(s.tx, txSigners) + return sign(s.tx, false, txSigners) } func (s *signerVisitor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { @@ -92,7 +98,7 @@ func (s *signerVisitor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { if err != nil { return err } - return s.sign(s.tx, txSigners) + return sign(s.tx, false, txSigners) } func (s *signerVisitor) ImportTx(tx *txs.ImportTx) error { @@ -105,7 +111,7 @@ func (s *signerVisitor) ImportTx(tx *txs.ImportTx) error { return err } txSigners = append(txSigners, txImportSigners...) 
- return s.sign(s.tx, txSigners) + return sign(s.tx, false, txSigners) } func (s *signerVisitor) ExportTx(tx *txs.ExportTx) error { @@ -113,7 +119,7 @@ func (s *signerVisitor) ExportTx(tx *txs.ExportTx) error { if err != nil { return err } - return s.sign(s.tx, txSigners) + return sign(s.tx, false, txSigners) } func (s *signerVisitor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { @@ -126,7 +132,7 @@ func (s *signerVisitor) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) return err } txSigners = append(txSigners, subnetAuthSigners) - return s.sign(s.tx, txSigners) + return sign(s.tx, true, txSigners) } func (s *signerVisitor) TransformSubnetTx(tx *txs.TransformSubnetTx) error { @@ -139,7 +145,7 @@ func (s *signerVisitor) TransformSubnetTx(tx *txs.TransformSubnetTx) error { return err } txSigners = append(txSigners, subnetAuthSigners) - return s.sign(s.tx, txSigners) + return sign(s.tx, true, txSigners) } func (s *signerVisitor) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { @@ -147,7 +153,7 @@ func (s *signerVisitor) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessVa if err != nil { return err } - return s.sign(s.tx, txSigners) + return sign(s.tx, true, txSigners) } func (s *signerVisitor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { @@ -155,11 +161,11 @@ func (s *signerVisitor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDe if err != nil { return err } - return s.sign(s.tx, txSigners) + return sign(s.tx, true, txSigners) } -func (s *signerVisitor) getSigners(sourceChainID ids.ID, ins []*avax.TransferableInput) ([][]*crypto.PrivateKeySECP256K1R, error) { - txSigners := make([][]*crypto.PrivateKeySECP256K1R, len(ins)) +func (s *signerVisitor) getSigners(sourceChainID ids.ID, ins []*avax.TransferableInput) ([][]keychain.Signer, error) { + txSigners := make([][]keychain.Signer, len(ins)) for credIndex, transferInput := range ins { inIntf := transferInput.In if 
stakeableIn, ok := inIntf.(*stakeable.LockIn); ok { @@ -171,7 +177,7 @@ func (s *signerVisitor) getSigners(sourceChainID ids.ID, ins []*avax.Transferabl return nil, errUnknownInputType } - inputSigners := make([]*crypto.PrivateKeySECP256K1R, len(input.SigIndices)) + inputSigners := make([]keychain.Signer, len(input.SigIndices)) txSigners[credIndex] = inputSigners utxoID := transferInput.InputID() @@ -213,7 +219,7 @@ func (s *signerVisitor) getSigners(sourceChainID ids.ID, ins []*avax.Transferabl return txSigners, nil } -func (s *signerVisitor) getSubnetSigners(subnetID ids.ID, subnetAuth verify.Verifiable) ([]*crypto.PrivateKeySECP256K1R, error) { +func (s *signerVisitor) getSubnetSigners(subnetID ids.ID, subnetAuth verify.Verifiable) ([]keychain.Signer, error) { subnetInput, ok := subnetAuth.(*secp256k1fx.Input) if !ok { return nil, errUnknownSubnetAuthType @@ -237,7 +243,7 @@ func (s *signerVisitor) getSubnetSigners(subnetID ids.ID, subnetAuth verify.Veri return nil, errUnknownOwnerType } - authSigners := make([]*crypto.PrivateKeySECP256K1R, len(subnetInput.SigIndices)) + authSigners := make([]keychain.Signer, len(subnetInput.SigIndices)) for sigIndex, addrIndex := range subnetInput.SigIndices { if addrIndex >= uint32(len(owner.Addrs)) { return nil, errInvalidUTXOSigIndex @@ -255,7 +261,8 @@ func (s *signerVisitor) getSubnetSigners(subnetID ids.ID, subnetAuth verify.Veri return authSigners, nil } -func (s *signerVisitor) sign(tx *txs.Tx, txSigners [][]*crypto.PrivateKeySECP256K1R) error { +// TODO: remove [signHash] after the ledger supports signing all transactions. 
+func sign(tx *txs.Tx, signHash bool, txSigners [][]keychain.Signer) error { unsignedBytes, err := txs.Codec.Marshal(txs.Version, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't marshal unsigned tx: %w", err) @@ -266,7 +273,7 @@ func (s *signerVisitor) sign(tx *txs.Tx, txSigners [][]*crypto.PrivateKeySECP256 tx.Creds = make([]verify.Verifiable, expectedLen) } - sigCache := make(map[ids.ShortID][crypto.SECP256K1RSigLen]byte) + sigCache := make(map[ids.ShortID][secp256k1.SignatureLen]byte) for credIndex, inputSigners := range txSigners { credIntf := tx.Creds[credIndex] if credIntf == nil { @@ -279,7 +286,7 @@ func (s *signerVisitor) sign(tx *txs.Tx, txSigners [][]*crypto.PrivateKeySECP256 return errUnknownCredentialType } if expectedLen := len(inputSigners); expectedLen != len(cred.Sigs) { - cred.Sigs = make([][crypto.SECP256K1RSigLen]byte, expectedLen) + cred.Sigs = make([][secp256k1.SignatureLen]byte, expectedLen) } for sigIndex, signer := range inputSigners { @@ -288,7 +295,7 @@ func (s *signerVisitor) sign(tx *txs.Tx, txSigners [][]*crypto.PrivateKeySECP256 // transaction. However, we can attempt to partially sign it. continue } - addr := signer.PublicKey().Address() + addr := signer.Address() if sig := cred.Sigs[sigIndex]; sig != emptySig { // If this signature has already been populated, we can just // copy the needed signature for the future. 
@@ -303,7 +310,12 @@ func (s *signerVisitor) sign(tx *txs.Tx, txSigners [][]*crypto.PrivateKeySECP256 continue } - sig, err := signer.SignHash(unsignedHash) + var sig []byte + if signHash { + sig, err = signer.SignHash(unsignedHash) + } else { + sig, err = signer.Sign(unsignedBytes) + } if err != nil { return fmt.Errorf("problem signing tx: %w", err) } @@ -316,6 +328,6 @@ func (s *signerVisitor) sign(tx *txs.Tx, txSigners [][]*crypto.PrivateKeySECP256 if err != nil { return fmt.Errorf("couldn't marshal tx: %w", err) } - tx.Initialize(unsignedBytes, signedBytes) + tx.SetBytes(unsignedBytes, signedBytes) return nil } diff --git a/avalanchego/wallet/chain/p/wallet.go b/avalanchego/wallet/chain/p/wallet.go index b5ecb9a1..e25dfac5 100644 --- a/avalanchego/wallet/chain/p/wallet.go +++ b/avalanchego/wallet/chain/p/wallet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package p @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) @@ -21,7 +20,7 @@ import ( var ( errNotCommitted = errors.New("not committed") - _ Wallet = &wallet{} + _ Wallet = (*wallet)(nil) ) type Wallet interface { @@ -55,7 +54,7 @@ type Wallet interface { // will take from delegation rewards. If 1,000,000 is provided, 100% of // the delegation reward will be sent to the validator's [rewardsOwner]. 
IssueAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -67,7 +66,7 @@ type Wallet interface { // - [vdr] specifies all the details of the validation period such as the // startTime, endTime, sampling weight, nodeID, and subnetID. IssueAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (ids.ID, error) @@ -89,7 +88,7 @@ type Wallet interface { // - [rewardsOwner] specifies the owner of all the rewards this delegator // may accrue at the end of its delegation period. IssueAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (ids.ID, error) @@ -208,7 +207,7 @@ type Wallet interface { // will take from delegation rewards. If 1,000,000 is provided, 100% of // the delegation reward will be sent to the validator's [rewardsOwner]. IssueAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -226,7 +225,7 @@ type Wallet interface { // - [rewardsOwner] specifies the owner of all the rewards this delegator // earns during its delegation period. 
IssueAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, @@ -266,9 +265,13 @@ type wallet struct { client platformvm.Client } -func (w *wallet) Builder() Builder { return w.builder } +func (w *wallet) Builder() Builder { + return w.builder +} -func (w *wallet) Signer() Signer { return w.signer } +func (w *wallet) Signer() Signer { + return w.signer +} func (w *wallet) IssueBaseTx( outputs []*avax.TransferableOutput, @@ -282,7 +285,7 @@ func (w *wallet) IssueBaseTx( } func (w *wallet) IssueAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -295,7 +298,7 @@ func (w *wallet) IssueAddValidatorTx( } func (w *wallet) IssueAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (ids.ID, error) { utx, err := w.builder.NewAddSubnetValidatorTx(vdr, options...) 
@@ -318,7 +321,7 @@ func (w *wallet) IssueRemoveSubnetValidatorTx( } func (w *wallet) IssueAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (ids.ID, error) { @@ -420,7 +423,7 @@ func (w *wallet) IssueTransformSubnetTx( } func (w *wallet) IssueAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -444,7 +447,7 @@ func (w *wallet) IssueAddPermissionlessValidatorTx( } func (w *wallet) IssueAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, diff --git a/avalanchego/wallet/chain/p/wallet_with_options.go b/avalanchego/wallet/chain/p/wallet_with_options.go index 144d7707..2d53d12c 100644 --- a/avalanchego/wallet/chain/p/wallet_with_options.go +++ b/avalanchego/wallet/chain/p/wallet_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package p @@ -10,12 +10,11 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) -var _ Wallet = &walletWithOptions{} +var _ Wallet = (*walletWithOptions)(nil) func NewWalletWithOptions( wallet Wallet, @@ -50,7 +49,7 @@ func (w *walletWithOptions) IssueBaseTx( } func (w *walletWithOptions) IssueAddValidatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, shares uint32, options ...common.Option, @@ -64,7 +63,7 @@ func (w *walletWithOptions) IssueAddValidatorTx( } func (w *walletWithOptions) IssueAddSubnetValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, options ...common.Option, ) (ids.ID, error) { return w.Wallet.IssueAddSubnetValidatorTx( @@ -86,7 +85,7 @@ func (w *walletWithOptions) IssueRemoveSubnetValidatorTx( } func (w *walletWithOptions) IssueAddDelegatorTx( - vdr *validator.Validator, + vdr *txs.Validator, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, ) (ids.ID, error) { @@ -186,7 +185,7 @@ func (w *walletWithOptions) IssueTransformSubnetTx( } func (w *walletWithOptions) IssueAddPermissionlessValidatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, signer signer.Signer, assetID ids.ID, validationRewardsOwner *secp256k1fx.OutputOwners, @@ -206,7 +205,7 @@ func (w *walletWithOptions) IssueAddPermissionlessValidatorTx( } func (w *walletWithOptions) IssueAddPermissionlessDelegatorTx( - vdr *validator.SubnetValidator, + vdr *txs.SubnetValidator, assetID ids.ID, rewardsOwner *secp256k1fx.OutputOwners, options ...common.Option, diff --git a/avalanchego/wallet/chain/x/backend.go b/avalanchego/wallet/chain/x/backend.go index 6b5c9481..194440e0 100644 --- 
a/avalanchego/wallet/chain/x/backend.go +++ b/avalanchego/wallet/chain/x/backend.go @@ -1,11 +1,9 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x import ( - "fmt" - stdcontext "context" "github.com/ava-labs/avalanchego/ids" @@ -13,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" ) -var _ Backend = &backend{} +var _ Backend = (*backend)(nil) type ChainUTXOs interface { AddUTXO(ctx stdcontext.Context, destinationChainID ids.ID, utxo *avax.UTXO) error @@ -48,38 +46,14 @@ func NewBackend(ctx Context, chainID ids.ID, utxos ChainUTXOs) Backend { } } -// TODO: implement txs.Visitor here func (b *backend) AcceptTx(ctx stdcontext.Context, tx *txs.Tx) error { - switch utx := tx.Unsigned.(type) { - case *txs.BaseTx, *txs.CreateAssetTx, *txs.OperationTx: - case *txs.ImportTx: - for _, input := range utx.ImportedIns { - utxoID := input.UTXOID.InputID() - if err := b.RemoveUTXO(ctx, utx.SourceChain, utxoID); err != nil { - return err - } - } - case *txs.ExportTx: - txID := tx.ID() - for i, out := range utx.ExportedOuts { - err := b.AddUTXO( - ctx, - utx.DestinationChain, - &avax.UTXO{ - UTXOID: avax.UTXOID{ - TxID: txID, - OutputIndex: uint32(len(utx.Outs) + i), - }, - Asset: avax.Asset{ID: out.AssetID()}, - Out: out.Out, - }, - ) - if err != nil { - return err - } - } - default: - return fmt.Errorf("%w: %T", errUnknownTxType, tx.Unsigned) + err := tx.Unsigned.Visit(&backendVisitor{ + b: b, + ctx: ctx, + txID: tx.ID(), + }) + if err != nil { + return err } inputUTXOs := tx.Unsigned.InputUTXOs() diff --git a/avalanchego/wallet/chain/x/backend_visitor.go b/avalanchego/wallet/chain/x/backend_visitor.go new file mode 100644 index 00000000..d6176384 --- /dev/null +++ b/avalanchego/wallet/chain/x/backend_visitor.go @@ -0,0 +1,64 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package x + +import ( + stdcontext "context" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/avm/txs" + "github.com/ava-labs/avalanchego/vms/components/avax" +) + +var _ txs.Visitor = (*backendVisitor)(nil) + +// backendVisitor handles accepting of transactions for the backend +type backendVisitor struct { + b *backend + ctx stdcontext.Context + txID ids.ID +} + +func (*backendVisitor) BaseTx(*txs.BaseTx) error { + return nil +} + +func (*backendVisitor) CreateAssetTx(*txs.CreateAssetTx) error { + return nil +} + +func (*backendVisitor) OperationTx(*txs.OperationTx) error { + return nil +} + +func (b *backendVisitor) ImportTx(tx *txs.ImportTx) error { + for _, in := range tx.ImportedIns { + utxoID := in.UTXOID.InputID() + if err := b.b.RemoveUTXO(b.ctx, tx.SourceChain, utxoID); err != nil { + return err + } + } + return nil +} + +func (b *backendVisitor) ExportTx(tx *txs.ExportTx) error { + for i, out := range tx.ExportedOuts { + err := b.b.AddUTXO( + b.ctx, + tx.DestinationChain, + &avax.UTXO{ + UTXOID: avax.UTXOID{ + TxID: b.txID, + OutputIndex: uint32(len(tx.Outs) + i), + }, + Asset: avax.Asset{ID: out.AssetID()}, + Out: out.Out, + }, + ) + if err != nil { + return err + } + } + return nil +} diff --git a/avalanchego/wallet/chain/x/builder.go b/avalanchego/wallet/chain/x/builder.go index d8f99602..0b639a77 100644 --- a/avalanchego/wallet/chain/x/builder.go +++ b/avalanchego/wallet/chain/x/builder.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package x @@ -10,7 +10,9 @@ import ( stdcontext "context" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -24,7 +26,7 @@ var ( errNoChangeAddress = errors.New("no possible change address") errInsufficientFunds = errors.New("insufficient funds") - _ Builder = &builder{} + _ Builder = (*builder)(nil) ) // Builder provides a convenient interface for building unsigned X-chain @@ -155,7 +157,7 @@ type BuilderBackend interface { } type builder struct { - addrs ids.ShortSet + addrs set.Set[ids.ShortID] backend BuilderBackend } @@ -165,7 +167,7 @@ type builder struct { // signing the transactions in the future. // - [backend] provides the required access to the chain's context and state // to build out the transactions. 
-func NewBuilder(addrs ids.ShortSet, backend BuilderBackend) Builder { +func NewBuilder(addrs set.Set[ids.ShortID], backend BuilderBackend) Builder { return &builder{ addrs: addrs, backend: backend, @@ -260,7 +262,7 @@ func (b *builder) NewCreateAssetTx( Denomination: denomination, States: states, } - tx.Sort() // sort the initial states + utils.Sort(tx.States) // sort the initial states return tx, nil } @@ -393,7 +395,7 @@ func (b *builder) NewImportTx( } importedAmounts[assetID] = newImportedAmount } - avax.SortTransferableInputs(importedInputs) // sort imported inputs + utils.Sort(importedInputs) // sort imported inputs if len(importedAmounts) == 0 { return nil, fmt.Errorf( @@ -585,7 +587,7 @@ func (b *builder) spend( }) // Burn any value that should be burned - amountToBurn := math.Min64( + amountToBurn := math.Min( remainingAmountToBurn, // Amount we still need to burn out.Amt, // Amount available to burn ) @@ -613,7 +615,7 @@ func (b *builder) spend( } } - avax.SortTransferableInputs(inputs) // sort inputs + utils.Sort(inputs) // sort inputs avax.SortTransferableOutputs(outputs, Parser.Codec()) // sort the change outputs return inputs, outputs, nil } diff --git a/avalanchego/wallet/chain/x/builder_with_options.go b/avalanchego/wallet/chain/x/builder_with_options.go index 2dd098fe..63d55400 100644 --- a/avalanchego/wallet/chain/x/builder_with_options.go +++ b/avalanchego/wallet/chain/x/builder_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package x @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) -var _ Builder = &builderWithOptions{} +var _ Builder = (*builderWithOptions)(nil) type builderWithOptions struct { Builder @@ -22,10 +22,10 @@ type builderWithOptions struct { // NewBuilderWithOptions returns a new transaction builder that will use the // given options by default. // -// - [builder] is the builder that will be called to perform the underlying -// opterations. -// - [options] will be provided to the builder in addition to the options -// provided in the method calls. +// - [builder] is the builder that will be called to perform the underlying +// operations. +// - [options] will be provided to the builder in addition to the options +// provided in the method calls. func NewBuilderWithOptions(builder Builder, options ...common.Option) Builder { return &builderWithOptions{ Builder: builder, diff --git a/avalanchego/wallet/chain/x/constants.go b/avalanchego/wallet/chain/x/constants.go index 6ac6e0e1..ec5ea7b4 100644 --- a/avalanchego/wallet/chain/x/constants.go +++ b/avalanchego/wallet/chain/x/constants.go @@ -1,11 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package x import ( + "github.com/ava-labs/avalanchego/vms/avm/blocks" "github.com/ava-labs/avalanchego/vms/avm/fxs" - "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -18,11 +18,11 @@ const ( ) // Parser to support serialization and deserialization -var Parser txs.Parser +var Parser blocks.Parser func init() { var err error - Parser, err = txs.NewParser([]fxs.Fx{ + Parser, err = blocks.NewParser([]fxs.Fx{ &secp256k1fx.Fx{}, &nftfx.Fx{}, &propertyfx.Fx{}, diff --git a/avalanchego/wallet/chain/x/context.go b/avalanchego/wallet/chain/x/context.go index 7f081828..bdabe2d9 100644 --- a/avalanchego/wallet/chain/x/context.go +++ b/avalanchego/wallet/chain/x/context.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package x @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/vms/avm" ) -var _ Context = &context{} +var _ Context = (*context)(nil) type Context interface { NetworkID() uint32 @@ -85,8 +85,22 @@ func NewContext( } } -func (c *context) NetworkID() uint32 { return c.networkID } -func (c *context) BlockchainID() ids.ID { return c.blockchainID } -func (c *context) AVAXAssetID() ids.ID { return c.avaxAssetID } -func (c *context) BaseTxFee() uint64 { return c.baseTxFee } -func (c *context) CreateAssetTxFee() uint64 { return c.createAssetTxFee } +func (c *context) NetworkID() uint32 { + return c.networkID +} + +func (c *context) BlockchainID() ids.ID { + return c.blockchainID +} + +func (c *context) AVAXAssetID() ids.ID { + return c.avaxAssetID +} + +func (c *context) BaseTxFee() uint64 { + return c.baseTxFee +} + +func (c *context) CreateAssetTxFee() uint64 { + return c.createAssetTxFee +} diff --git a/avalanchego/wallet/chain/x/signer.go b/avalanchego/wallet/chain/x/signer.go index ad7ce762..b393d767 100644 --- a/avalanchego/wallet/chain/x/signer.go +++ b/avalanchego/wallet/chain/x/signer.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package x @@ -11,8 +11,8 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/crypto/keychain" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/avm/fxs" "github.com/ava-labs/avalanchego/vms/avm/txs" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -31,9 +31,9 @@ var ( errUnknownOutputType = errors.New("unknown output type") errInvalidUTXOSigIndex = errors.New("invalid UTXO signature index") - emptySig [crypto.SECP256K1RSigLen]byte + emptySig [secp256k1.SignatureLen]byte - _ Signer = &signer{} + _ Signer = (*signer)(nil) ) type Signer interface { @@ -46,11 +46,11 @@ type SignerBackend interface { } type signer struct { - kc *secp256k1fx.Keychain + kc keychain.Keychain backend SignerBackend } -func NewSigner(kc *secp256k1fx.Keychain, backend SignerBackend) Signer { +func NewSigner(kc keychain.Keychain, backend SignerBackend) Signer { return &signer{ kc: kc, backend: backend, @@ -85,7 +85,7 @@ func (s *signer) signBaseTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.BaseTx) if err != nil { return err } - return s.sign(tx, txCreds, txSigners) + return sign(tx, txCreds, txSigners) } func (s *signer) signCreateAssetTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.CreateAssetTx) error { @@ -93,7 +93,7 @@ func (s *signer) signCreateAssetTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs. if err != nil { return err } - return s.sign(tx, txCreds, txSigners) + return sign(tx, txCreds, txSigners) } func (s *signer) signOperationTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.OperationTx) error { @@ -107,7 +107,7 @@ func (s *signer) signOperationTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.Op } txCreds = append(txCreds, txOpsCreds...) txSigners = append(txSigners, txOpsSigners...) 
- return s.sign(tx, txCreds, txSigners) + return sign(tx, txCreds, txSigners) } func (s *signer) signImportTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.ImportTx) error { @@ -121,7 +121,7 @@ func (s *signer) signImportTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.Impor } txCreds = append(txCreds, txImportCreds...) txSigners = append(txSigners, txImportSigners...) - return s.sign(tx, txCreds, txSigners) + return sign(tx, txCreds, txSigners) } func (s *signer) signExportTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.ExportTx) error { @@ -129,12 +129,12 @@ func (s *signer) signExportTx(ctx stdcontext.Context, tx *txs.Tx, utx *txs.Expor if err != nil { return err } - return s.sign(tx, txCreds, txSigners) + return sign(tx, txCreds, txSigners) } -func (s *signer) getSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([]verify.Verifiable, [][]*crypto.PrivateKeySECP256K1R, error) { +func (s *signer) getSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins []*avax.TransferableInput) ([]verify.Verifiable, [][]keychain.Signer, error) { txCreds := make([]verify.Verifiable, len(ins)) - txSigners := make([][]*crypto.PrivateKeySECP256K1R, len(ins)) + txSigners := make([][]keychain.Signer, len(ins)) for credIndex, transferInput := range ins { txCreds[credIndex] = &secp256k1fx.Credential{} input, ok := transferInput.In.(*secp256k1fx.TransferInput) @@ -142,7 +142,7 @@ func (s *signer) getSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins [] return nil, nil, errUnknownInputType } - inputSigners := make([]*crypto.PrivateKeySECP256K1R, len(input.SigIndices)) + inputSigners := make([]keychain.Signer, len(input.SigIndices)) txSigners[credIndex] = inputSigners utxoID := transferInput.InputID() @@ -179,9 +179,9 @@ func (s *signer) getSigners(ctx stdcontext.Context, sourceChainID ids.ID, ins [] return txCreds, txSigners, nil } -func (s *signer) getOpsSigners(ctx stdcontext.Context, sourceChainID ids.ID, ops []*txs.Operation) 
([]verify.Verifiable, [][]*crypto.PrivateKeySECP256K1R, error) { +func (s *signer) getOpsSigners(ctx stdcontext.Context, sourceChainID ids.ID, ops []*txs.Operation) ([]verify.Verifiable, [][]keychain.Signer, error) { txCreds := make([]verify.Verifiable, len(ops)) - txSigners := make([][]*crypto.PrivateKeySECP256K1R, len(ops)) + txSigners := make([][]keychain.Signer, len(ops)) for credIndex, op := range ops { var input *secp256k1fx.Input switch op := op.Op.(type) { @@ -204,7 +204,7 @@ func (s *signer) getOpsSigners(ctx stdcontext.Context, sourceChainID ids.ID, ops return nil, nil, errUnknownOpType } - inputSigners := make([]*crypto.PrivateKeySECP256K1R, len(input.SigIndices)) + inputSigners := make([]keychain.Signer, len(input.SigIndices)) txSigners[credIndex] = inputSigners if len(op.UTXOIDs) != 1 { @@ -255,19 +255,18 @@ func (s *signer) getOpsSigners(ctx stdcontext.Context, sourceChainID ids.ID, ops return txCreds, txSigners, nil } -func (s *signer) sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]*crypto.PrivateKeySECP256K1R) error { +func sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]keychain.Signer) error { codec := Parser.Codec() unsignedBytes, err := codec.Marshal(txs.CodecVersion, &tx.Unsigned) if err != nil { return fmt.Errorf("couldn't marshal unsigned tx: %w", err) } - unsignedHash := hashing.ComputeHash256(unsignedBytes) if expectedLen := len(txSigners); expectedLen != len(tx.Creds) { tx.Creds = make([]*fxs.FxCredential, expectedLen) } - sigCache := make(map[ids.ShortID][crypto.SECP256K1RSigLen]byte) + sigCache := make(map[ids.ShortID][secp256k1.SignatureLen]byte) for credIndex, inputSigners := range txSigners { fxCred := tx.Creds[credIndex] if fxCred == nil { @@ -293,7 +292,7 @@ func (s *signer) sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]*cryp } if expectedLen := len(inputSigners); expectedLen != len(cred.Sigs) { - cred.Sigs = make([][crypto.SECP256K1RSigLen]byte, expectedLen) + cred.Sigs = 
make([][secp256k1.SignatureLen]byte, expectedLen) } for sigIndex, signer := range inputSigners { @@ -302,7 +301,7 @@ func (s *signer) sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]*cryp // transaction. However, we can attempt to partially sign it. continue } - addr := signer.PublicKey().Address() + addr := signer.Address() if sig := cred.Sigs[sigIndex]; sig != emptySig { // If this signature has already been populated, we can just // copy the needed signature for the future. @@ -317,7 +316,7 @@ func (s *signer) sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]*cryp continue } - sig, err := signer.SignHash(unsignedHash) + sig, err := signer.Sign(unsignedBytes) if err != nil { return fmt.Errorf("problem signing tx: %w", err) } @@ -330,6 +329,6 @@ func (s *signer) sign(tx *txs.Tx, creds []verify.Verifiable, txSigners [][]*cryp if err != nil { return fmt.Errorf("couldn't marshal tx: %w", err) } - tx.Initialize(unsignedBytes, signedBytes) + tx.SetBytes(unsignedBytes, signedBytes) return nil } diff --git a/avalanchego/wallet/chain/x/wallet.go b/avalanchego/wallet/chain/x/wallet.go index 4bbde834..8f0562a6 100644 --- a/avalanchego/wallet/chain/x/wallet.go +++ b/avalanchego/wallet/chain/x/wallet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package x @@ -19,7 +19,7 @@ import ( var ( errNotAccepted = errors.New("not accepted") - _ Wallet = &wallet{} + _ Wallet = (*wallet)(nil) ) type Wallet interface { @@ -165,9 +165,13 @@ type wallet struct { client avm.Client } -func (w *wallet) Builder() Builder { return w.builder } +func (w *wallet) Builder() Builder { + return w.builder +} -func (w *wallet) Signer() Signer { return w.signer } +func (w *wallet) Signer() Signer { + return w.signer +} func (w *wallet) IssueBaseTx( outputs []*avax.TransferableOutput, diff --git a/avalanchego/wallet/chain/x/wallet_with_options.go b/avalanchego/wallet/chain/x/wallet_with_options.go index 64be4e20..cc22540c 100644 --- a/avalanchego/wallet/chain/x/wallet_with_options.go +++ b/avalanchego/wallet/chain/x/wallet_with_options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package x @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) -var _ Wallet = &walletWithOptions{} +var _ Wallet = (*walletWithOptions)(nil) func NewWalletWithOptions( wallet Wallet, diff --git a/avalanchego/wallet/subnet/primary/api.go b/avalanchego/wallet/subnet/primary/api.go index 4ec361af..4818178f 100644 --- a/avalanchego/wallet/subnet/primary/api.go +++ b/avalanchego/wallet/subnet/primary/api.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package primary @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/rpc" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm" @@ -26,9 +27,8 @@ const ( fetchLimit = 1024 ) -// TODO: refactor UTXOClient definition to allow the client implementations to -// -// perform their own assertions. +// TODO: Refactor UTXOClient definition to allow the client implementations to +// perform their own assertions. var ( _ UTXOClient = platformvm.Client(nil) _ UTXOClient = avm.Client(nil) @@ -46,7 +46,7 @@ type UTXOClient interface { ) ([][]byte, ids.ShortID, ids.ID, error) } -func FetchState(ctx context.Context, uri string, addrs ids.ShortSet) (p.Context, x.Context, UTXOs, error) { +func FetchState(ctx context.Context, uri string, addrs set.Set[ids.ShortID]) (p.Context, x.Context, UTXOs, error) { infoClient := info.NewClient(uri) xClient := avm.NewClient(uri, "X") diff --git a/avalanchego/wallet/subnet/primary/common/options.go b/avalanchego/wallet/subnet/primary/common/options.go index 1d2ba7c1..9cfaed98 100644 --- a/avalanchego/wallet/subnet/primary/common/options.go +++ b/avalanchego/wallet/subnet/primary/common/options.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package common @@ -8,6 +8,7 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -19,7 +20,7 @@ type Options struct { ctx context.Context customAddressesSet bool - customAddresses ids.ShortSet + customAddresses set.Set[ids.ShortID] minIssuanceTimeSet bool minIssuanceTime uint64 @@ -63,7 +64,7 @@ func (o *Options) Context() context.Context { return context.Background() } -func (o *Options) Addresses(defaultAddresses ids.ShortSet) ids.ShortSet { +func (o *Options) Addresses(defaultAddresses set.Set[ids.ShortID]) set.Set[ids.ShortID] { if o.customAddressesSet { return o.customAddresses } @@ -77,7 +78,9 @@ func (o *Options) MinIssuanceTime() uint64 { return uint64(time.Now().Unix()) } -func (o *Options) AllowStakeableLocked() bool { return o.allowStakeableLocked } +func (o *Options) AllowStakeableLocked() bool { + return o.allowStakeableLocked +} func (o *Options) ChangeOwner(defaultOwner *secp256k1fx.OutputOwners) *secp256k1fx.OutputOwners { if o.changeOwner != nil { @@ -86,9 +89,13 @@ func (o *Options) ChangeOwner(defaultOwner *secp256k1fx.OutputOwners) *secp256k1 return defaultOwner } -func (o *Options) Memo() []byte { return o.memo } +func (o *Options) Memo() []byte { + return o.memo +} -func (o *Options) AssumeDecided() bool { return o.assumeDecided } +func (o *Options) AssumeDecided() bool { + return o.assumeDecided +} func (o *Options) PollFrequency() time.Duration { if o.pollFrequencySet { @@ -103,7 +110,7 @@ func WithContext(ctx context.Context) Option { } } -func WithCustomAddresses(addrs ids.ShortSet) Option { +func WithCustomAddresses(addrs set.Set[ids.ShortID]) Option { return func(o *Options) { o.customAddressesSet = true o.customAddresses = addrs diff --git a/avalanchego/wallet/subnet/primary/common/spend.go b/avalanchego/wallet/subnet/primary/common/spend.go index 0f08ba41..d7511317 100644 --- a/avalanchego/wallet/subnet/primary/common/spend.go +++ 
b/avalanchego/wallet/subnet/primary/common/spend.go @@ -1,10 +1,11 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. package common import ( "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -12,7 +13,7 @@ import ( // threshold. func MatchOwners( owners *secp256k1fx.OutputOwners, - addrs ids.ShortSet, + addrs set.Set[ids.ShortID], minIssuanceTime uint64, ) ([]uint32, bool) { if owners.Locktime > minIssuanceTime { diff --git a/avalanchego/wallet/subnet/primary/example_test.go b/avalanchego/wallet/subnet/primary/example_test.go index 441a0841..3d9da8ac 100644 --- a/avalanchego/wallet/subnet/primary/example_test.go +++ b/avalanchego/wallet/subnet/primary/example_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package primary @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" - "github.com/ava-labs/avalanchego/vms/platformvm/validator" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -133,8 +133,8 @@ func ExampleWallet() { addPermissionlessValidatorStartTime := time.Now() startTime := time.Now().Add(time.Minute) addSubnetValidatorTxID, err := pWallet.IssueAddPermissionlessValidatorTx( - &validator.SubnetValidator{ - Validator: validator.Validator{ + &txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, Start: uint64(startTime.Unix()), End: uint64(startTime.Add(5 * time.Second).Unix()), @@ -156,8 +156,8 @@ func ExampleWallet() { addPermissionlessDelegatorStartTime := time.Now() addSubnetDelegatorTxID, err := pWallet.IssueAddPermissionlessDelegatorTx( - &validator.SubnetValidator{ - Validator: validator.Validator{ + &txs.SubnetValidator{ + Validator: txs.Validator{ NodeID: genesis.LocalConfig.InitialStakers[0].NodeID, Start: uint64(startTime.Unix()), End: uint64(startTime.Add(5 * time.Second).Unix()), diff --git a/avalanchego/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go b/avalanchego/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go new file mode 100644 index 00000000..c90ea345 --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/add-permissioned-subnet-validator/main.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +func main() { + key := genesis.EWOQKey + uri := primary.LocalAPIURI + kc := secp256k1fx.NewKeychain(key) + subnetIDStr := "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL" + startTime := time.Now().Add(time.Minute) + duration := 2 * 7 * 24 * time.Hour // 2 weeks + weight := units.Schmeckle + + subnetID, err := ids.FromString(subnetIDStr) + if err != nil { + log.Fatalf("failed to parse subnet ID: %s\n", err) + } + + ctx := context.Background() + infoClient := info.NewClient(uri) + + nodeInfoStartTime := time.Now() + nodeID, _, err := infoClient.GetNodeID(ctx) + if err != nil { + log.Fatalf("failed to fetch node IDs: %s\n", err) + } + log.Printf("fetched node ID %s in %s\n", nodeID, time.Since(nodeInfoStartTime)) + + // NewWalletWithTxs fetches the available UTXOs owned by [kc] on the network + // that [uri] is hosting and registers [subnetID]. 
+ walletSyncStartTime := time.Now() + wallet, err := primary.NewWalletWithTxs(ctx, uri, kc, subnetID) + if err != nil { + log.Fatalf("failed to initialize wallet: %s\n", err) + } + log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime)) + + // Get the P-chain wallet + pWallet := wallet.P() + + addValidatorStartTime := time.Now() + addValidatorTxID, err := pWallet.IssueAddSubnetValidatorTx(&txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(startTime.Add(duration).Unix()), + Wght: weight, + }, + Subnet: subnetID, + }) + if err != nil { + log.Fatalf("failed to issue add subnet validator transaction: %s\n", err) + } + log.Printf("added new subnet validator %s to %s with %s in %s\n", nodeID, subnetID, addValidatorTxID, time.Since(addValidatorStartTime)) +} diff --git a/avalanchego/wallet/subnet/primary/examples/add-primary-validator/main.go b/avalanchego/wallet/subnet/primary/examples/add-primary-validator/main.go new file mode 100644 index 00000000..d2b7d5d7 --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/add-primary-validator/main.go @@ -0,0 +1,79 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/avalanchego/api/info" + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +func main() { + key := genesis.EWOQKey + uri := primary.LocalAPIURI + kc := secp256k1fx.NewKeychain(key) + startTime := time.Now().Add(time.Minute) + duration := 3 * 7 * 24 * time.Hour // 3 weeks + weight := 2_000 * units.Avax + validatorRewardAddr := key.Address() + delegatorRewardAddr := key.Address() + delegationFee := uint32(reward.PercentDenominator / 2) // 50% + + ctx := context.Background() + infoClient := info.NewClient(uri) + + nodeInfoStartTime := time.Now() + nodeID, nodePOP, err := infoClient.GetNodeID(ctx) + if err != nil { + log.Fatalf("failed to fetch node IDs: %s\n", err) + } + log.Printf("fetched node ID %s in %s\n", nodeID, time.Since(nodeInfoStartTime)) + + // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network + // that [uri] is hosting. 
+ walletSyncStartTime := time.Now() + wallet, err := primary.NewWalletFromURI(ctx, uri, kc) + if err != nil { + log.Fatalf("failed to initialize wallet: %s\n", err) + } + log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime)) + + // Get the P-chain wallet + pWallet := wallet.P() + avaxAssetID := pWallet.AVAXAssetID() + + addValidatorStartTime := time.Now() + addValidatorTxID, err := pWallet.IssueAddPermissionlessValidatorTx( + &txs.SubnetValidator{Validator: txs.Validator{ + NodeID: nodeID, + Start: uint64(startTime.Unix()), + End: uint64(startTime.Add(duration).Unix()), + Wght: weight, + }}, + nodePOP, + avaxAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{validatorRewardAddr}, + }, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{delegatorRewardAddr}, + }, + delegationFee, + ) + if err != nil { + log.Fatalf("failed to issue add permissionless validator transaction: %s\n", err) + } + log.Printf("added new primary network validator %s with %s in %s\n", nodeID, addValidatorTxID, time.Since(addValidatorStartTime)) +} diff --git a/avalanchego/wallet/subnet/primary/examples/create-asset/main.go b/avalanchego/wallet/subnet/primary/examples/create-asset/main.go new file mode 100644 index 00000000..d1eda231 --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/create-asset/main.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +func main() { + key := genesis.EWOQKey + uri := primary.LocalAPIURI + kc := secp256k1fx.NewKeychain(key) + subnetOwner := key.Address() + + ctx := context.Background() + + // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network + // that [uri] is hosting. + walletSyncStartTime := time.Now() + wallet, err := primary.NewWalletFromURI(ctx, uri, kc) + if err != nil { + log.Fatalf("failed to initialize wallet: %s\n", err) + } + log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime)) + + // Get the X-chain wallet + xWallet := wallet.X() + + // Pull out useful constants to use when issuing transactions. + owner := &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + subnetOwner, + }, + } + + createAssetStartTime := time.Now() + createAssetTxID, err := xWallet.IssueCreateAssetTx( + "HI", + "HI", + 1, + map[uint32][]verify.State{ + 0: { + &secp256k1fx.TransferOutput{ + Amt: units.Schmeckle, + OutputOwners: *owner, + }, + }, + }, + ) + if err != nil { + log.Fatalf("failed to issue create asset transaction: %s\n", err) + } + log.Printf("created new asset %s in %s\n", createAssetTxID, time.Since(createAssetStartTime)) +} diff --git a/avalanchego/wallet/subnet/primary/examples/create-chain/main.go b/avalanchego/wallet/subnet/primary/examples/create-chain/main.go new file mode 100644 index 00000000..1b4f02b0 --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/create-chain/main.go @@ -0,0 +1,63 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "encoding/hex" + "log" + "time" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +func main() { + key := genesis.EWOQKey + uri := primary.LocalAPIURI + kc := secp256k1fx.NewKeychain(key) + subnetIDStr := "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL" + genesisHex := "00000000000000000000000000017b5490493f8a2fff444ac8b54e27b3339d7c60dcffffffffffffffff" + vmID := ids.ID{'x', 's', 'v', 'm'} + name := "let there" + + subnetID, err := ids.FromString(subnetIDStr) + if err != nil { + log.Fatalf("failed to parse subnet ID: %s\n", err) + } + + genesisBytes, err := hex.DecodeString(genesisHex) + if err != nil { + log.Fatalf("failed to parse genesis bytes: %s\n", err) + } + + ctx := context.Background() + + // NewWalletWithTxs fetches the available UTXOs owned by [kc] on the network + // that [uri] is hosting and registers [subnetID]. 
+ walletSyncStartTime := time.Now() + wallet, err := primary.NewWalletWithTxs(ctx, uri, kc, subnetID) + if err != nil { + log.Fatalf("failed to initialize wallet: %s\n", err) + } + log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime)) + + // Get the P-chain wallet + pWallet := wallet.P() + + createChainStartTime := time.Now() + createChainTxID, err := pWallet.IssueCreateChainTx( + subnetID, + genesisBytes, + vmID, + nil, + name, + ) + if err != nil { + log.Fatalf("failed to issue create chain transaction: %s\n", err) + } + log.Printf("created new chain %s in %s\n", createChainTxID, time.Since(createChainStartTime)) +} diff --git a/avalanchego/wallet/subnet/primary/examples/create-locked-stakeable/main.go b/avalanchego/wallet/subnet/primary/examples/create-locked-stakeable/main.go new file mode 100644 index 00000000..54d0eefb --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/create-locked-stakeable/main.go @@ -0,0 +1,73 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/formatting/address" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/vms/components/avax" + "github.com/ava-labs/avalanchego/vms/platformvm/stakeable" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +func main() { + key := genesis.EWOQKey + uri := primary.LocalAPIURI + kc := secp256k1fx.NewKeychain(key) + amount := 500 * units.MilliAvax + locktime := uint64(time.Date(2030, 1, 1, 0, 0, 0, 0, time.UTC).Unix()) + destAddrStr := "P-local18jma8ppw3nhx5r4ap8clazz0dps7rv5u00z96u" + + destAddr, err := address.ParseToID(destAddrStr) + if err != nil { + log.Fatalf("failed to parse address: %s\n", err) + } + + ctx := context.Background() + + // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network + // that [uri] is hosting. 
+ walletSyncStartTime := time.Now() + wallet, err := primary.NewWalletFromURI(ctx, uri, kc) + if err != nil { + log.Fatalf("failed to initialize wallet: %s\n", err) + } + log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime)) + + // Get the P-chain wallet + pWallet := wallet.P() + avaxAssetID := pWallet.AVAXAssetID() + + issueTxStartTime := time.Now() + txID, err := pWallet.IssueBaseTx([]*avax.TransferableOutput{ + { + Asset: avax.Asset{ + ID: avaxAssetID, + }, + Out: &stakeable.LockOut{ + Locktime: locktime, + TransferableOut: &secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + destAddr, + }, + }, + }, + }, + }, + }) + if err != nil { + log.Fatalf("failed to issue transaction: %s\n", err) + } + log.Printf("issued %s in %s\n", txID, time.Since(issueTxStartTime)) +} diff --git a/avalanchego/wallet/subnet/primary/examples/create-subnet/main.go b/avalanchego/wallet/subnet/primary/examples/create-subnet/main.go new file mode 100644 index 00000000..315712b3 --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/create-subnet/main.go @@ -0,0 +1,51 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +func main() { + key := genesis.EWOQKey + uri := primary.LocalAPIURI + kc := secp256k1fx.NewKeychain(key) + subnetOwner := key.Address() + + ctx := context.Background() + + // NewWalletFromURI fetches the available UTXOs owned by [kc] on the network + // that [uri] is hosting. 
+ walletSyncStartTime := time.Now() + wallet, err := primary.NewWalletFromURI(ctx, uri, kc) + if err != nil { + log.Fatalf("failed to initialize wallet: %s\n", err) + } + log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime)) + + // Get the P-chain wallet + pWallet := wallet.P() + + // Pull out useful constants to use when issuing transactions. + owner := &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + subnetOwner, + }, + } + + createSubnetStartTime := time.Now() + createSubnetTxID, err := pWallet.IssueCreateSubnetTx(owner) + if err != nil { + log.Fatalf("failed to issue create subnet transaction: %s\n", err) + } + log.Printf("created new subnet %s in %s\n", createSubnetTxID, time.Since(createSubnetStartTime)) +} diff --git a/avalanchego/wallet/subnet/primary/examples/remove-subnet-validator/main.go b/avalanchego/wallet/subnet/primary/examples/remove-subnet-validator/main.go new file mode 100644 index 00000000..05c54d27 --- /dev/null +++ b/avalanchego/wallet/subnet/primary/examples/remove-subnet-validator/main.go @@ -0,0 +1,57 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "context" + "log" + "time" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/wallet/subnet/primary" +) + +func main() { + key := genesis.EWOQKey + uri := primary.LocalAPIURI + kc := secp256k1fx.NewKeychain(key) + subnetIDStr := "29uVeLPJB1eQJkzRemU8g8wZDw5uJRqpab5U2mX9euieVwiEbL" + nodeIDStr := "NodeID-7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg" + + subnetID, err := ids.FromString(subnetIDStr) + if err != nil { + log.Fatalf("failed to parse subnet ID: %s\n", err) + } + + nodeID, err := ids.NodeIDFromString(nodeIDStr) + if err != nil { + log.Fatalf("failed to parse node ID: %s\n", err) + } + + ctx := context.Background() + + // NewWalletWithTxs fetches the available UTXOs owned by [kc] on the network + // that [uri] is hosting and registers [subnetID]. + walletSyncStartTime := time.Now() + wallet, err := primary.NewWalletWithTxs(ctx, uri, kc, subnetID) + if err != nil { + log.Fatalf("failed to initialize wallet: %s\n", err) + } + log.Printf("synced wallet in %s\n", time.Since(walletSyncStartTime)) + + // Get the P-chain wallet + pWallet := wallet.P() + + removeValidatorStartTime := time.Now() + removeValidatorTxID, err := pWallet.IssueRemoveSubnetValidatorTx( + nodeID, + subnetID, + ) + if err != nil { + log.Fatalf("failed to issue remove subnet validator transaction: %s\n", err) + } + log.Printf("removed subnet validator %s from %s with %s in %s\n", nodeID, subnetID, removeValidatorTxID, time.Since(removeValidatorStartTime)) +} diff --git a/avalanchego/wallet/subnet/primary/utxos.go b/avalanchego/wallet/subnet/primary/utxos.go index 365aae95..d0bbd2be 100644 --- a/avalanchego/wallet/subnet/primary/utxos.go +++ b/avalanchego/wallet/subnet/primary/utxos.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
// See the file LICENSE for licensing terms. package primary @@ -7,6 +7,8 @@ import ( "context" "sync" + "golang.org/x/exp/maps" + "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -15,8 +17,8 @@ import ( ) var ( - _ UTXOs = &utxos{} - _ ChainUTXOs = &chainUTXOs{} + _ UTXOs = (*utxos)(nil) + _ ChainUTXOs = (*chainUTXOs)(nil) // TODO: refactor ChainUTXOs definition to allow the client implementations // to perform their own assertions. @@ -110,11 +112,7 @@ func (u *utxos) UTXOs(_ context.Context, sourceChainID, destinationChainID ids.I destToUTXOIDToUTXO := u.sourceToDestToUTXOIDToUTXO[sourceChainID] utxoIDToUTXO := destToUTXOIDToUTXO[destinationChainID] - utxos := make([]*avax.UTXO, 0, len(utxoIDToUTXO)) - for _, utxo := range utxoIDToUTXO { - utxos = append(utxos, utxo) - } - return utxos, nil + return maps.Values(utxoIDToUTXO), nil } func (u *utxos) GetUTXO(_ context.Context, sourceChainID, destinationChainID, utxoID ids.ID) (*avax.UTXO, error) { diff --git a/avalanchego/wallet/subnet/primary/wallet.go b/avalanchego/wallet/subnet/primary/wallet.go index 323197c5..ce3bc3e4 100644 --- a/avalanchego/wallet/subnet/primary/wallet.go +++ b/avalanchego/wallet/subnet/primary/wallet.go @@ -1,4 +1,4 @@ -// Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved. +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
package primary @@ -8,16 +8,16 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/keychain" "github.com/ava-labs/avalanchego/vms/avm" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/chain/p" "github.com/ava-labs/avalanchego/wallet/chain/x" "github.com/ava-labs/avalanchego/wallet/subnet/primary/common" ) -var _ Wallet = &wallet{} +var _ Wallet = (*wallet)(nil) // Wallet provides chain wallets for the primary network. type Wallet interface { @@ -30,8 +30,13 @@ type wallet struct { x x.Wallet } -func (w *wallet) P() p.Wallet { return w.p } -func (w *wallet) X() x.Wallet { return w.x } +func (w *wallet) P() p.Wallet { + return w.p +} + +func (w *wallet) X() x.Wallet { + return w.x +} // NewWalletFromURI returns a wallet that supports issuing transactions to the // chains living in the primary network to a provided [uri]. @@ -42,8 +47,8 @@ func (w *wallet) X() x.Wallet { return w.x } // the UTXOs may become out of sync. // // The wallet manages all UTXOs locally, and performs all tx signing locally. -func NewWalletFromURI(ctx context.Context, uri string, kc *secp256k1fx.Keychain) (Wallet, error) { - pCTX, xCTX, utxos, err := FetchState(ctx, uri, kc.Addrs) +func NewWalletFromURI(ctx context.Context, uri string, kc keychain.Keychain) (Wallet, error) { + pCTX, xCTX, utxos, err := FetchState(ctx, uri, kc.Addresses()) if err != nil { return nil, err } @@ -51,8 +56,8 @@ func NewWalletFromURI(ctx context.Context, uri string, kc *secp256k1fx.Keychain) } // Creates a wallet with pre-loaded/cached P-chain transactions. 
-func NewWalletWithTxs(ctx context.Context, uri string, kc *secp256k1fx.Keychain, preloadTXs ...ids.ID) (Wallet, error) { - pCTX, xCTX, utxos, err := FetchState(ctx, uri, kc.Addrs) +func NewWalletWithTxs(ctx context.Context, uri string, kc keychain.Keychain, preloadTXs ...ids.ID) (Wallet, error) { + pCTX, xCTX, utxos, err := FetchState(ctx, uri, kc.Addresses()) if err != nil { return nil, err } @@ -78,19 +83,20 @@ func NewWalletWithTxsAndState( pCTX p.Context, xCTX x.Context, utxos UTXOs, - kc *secp256k1fx.Keychain, + kc keychain.Keychain, pTXs map[ids.ID]*txs.Tx, ) Wallet { + addrs := kc.Addresses() pUTXOs := NewChainUTXOs(constants.PlatformChainID, utxos) pBackend := p.NewBackend(pCTX, pUTXOs, pTXs) - pBuilder := p.NewBuilder(kc.Addrs, pBackend) + pBuilder := p.NewBuilder(addrs, pBackend) pSigner := p.NewSigner(kc, pBackend) pClient := platformvm.NewClient(uri) xChainID := xCTX.BlockchainID() xUTXOs := NewChainUTXOs(xChainID, utxos) xBackend := x.NewBackend(xCTX, xChainID, xUTXOs) - xBuilder := x.NewBuilder(kc.Addrs, xBackend) + xBuilder := x.NewBuilder(addrs, xBackend) xSigner := x.NewSigner(kc, xBackend) xClient := avm.NewClient(uri, "X") @@ -106,7 +112,7 @@ func NewWalletWithState( pCTX p.Context, xCTX x.Context, utxos UTXOs, - kc *secp256k1fx.Keychain, + kc keychain.Keychain, ) Wallet { pTXs := make(map[ids.ID]*txs.Tx) return NewWalletWithTxsAndState(uri, pCTX, xCTX, utxos, kc, pTXs) diff --git a/avalanchego/x/README.md b/avalanchego/x/README.md new file mode 100644 index 00000000..92d5d465 --- /dev/null +++ b/avalanchego/x/README.md @@ -0,0 +1,3 @@ +# `x` Package + +This package contains experimental code that may be moved to other packages in the future. Code in this package is not stable and may be moved, removed or modified at any time. This code should not be relied on for correctness in important applications. 
\ No newline at end of file diff --git a/avalanchego/x/merkledb/README.md b/avalanchego/x/merkledb/README.md new file mode 100644 index 00000000..0a964a2c --- /dev/null +++ b/avalanchego/x/merkledb/README.md @@ -0,0 +1,71 @@ +# Path Based Merkelized Radix Trie + +## TODOs + +- [ ] Remove special casing around the root node from the physical structure of the hashed tree. +- [ ] Analyze performance impact of needing to skip intermediate nodes when generating range and change proofs + - [ ] Consider moving nodes with values to a separate db prefix +- [ ] Analyze performance of using database snapshots rather than in-memory history +- [ ] Improve intermediate node regeneration after ungraceful shutdown by reusing successfully written subtrees + +## Introduction + +The Merkle Trie is a data structure that allows efficient and secure verification of the contents. It is a combination of a [Merkle Tree](https://en.wikipedia.org/wiki/Merkle_tree) and a [Radix Trie](https://en.wikipedia.org/wiki/Radix_tree). + +The trie contains `Merkle Nodes`, which store key/value and children information. + +Each `Merkle Node` represents a key path into the trie. It stores the key, the value (if one exists), its ID, and the IDs of its children nodes. The children have keys that contain the current node's key path as a prefix, and the index of each child indicates the next nibble in that child's key. For example, if we have two nodes, Node 1 with key path `0x91A` and Node 2 with key path `0x91A4`, Node 2 is stored in index `0x4` of Node 1's children (since 0x4 is the first value after the common prefix). + +To reduce the depth of nodes in the trie, a `Merkle Node` utilizes path compression. Instead of having a long chain of nodes each containing only a single nibble of the key, we can "compress" the path by recording additional key information with each of a node's children. 
For example, if we have three nodes, Node 1 with key path `0x91A`, Node 2 with key path `0x91A4`, and Node 3 with key path `0x91A5132`, then Node 1 has a key of `0x91A`. Node 2 is stored at index `0x4` of Node 1's children since `4` is the next nibble in Node 2's key after skipping the common nibbles from Node 1's key. Node 3 is stored at index `0x5` of Node 1's children. Rather than have extra nodes for the remainder of Node 3's key, we instead store the rest of the key (`132`) in Node 1's children info. + +``` ++-----------------------------------+ +| Merkle Node | +| | +| ID: 0x0131 | an id representing the current node, derived from the node's value and all children ids +| Key: 0x91 | prefix of the key path, representing the location of the node in the trie +| Value: 0x00 | the value, if one exists, that is stored at the key path (pathPrefix + compressedPath) +| Children: | a map of children node ids for any nodes in the trie that have this node's key path as a prefix +| 0: [:0x00542F] | child 0 represents a node with key 0x910 with ID 0x00542F +| 1: [0x432:0xA0561C] | child 1 represents a node with key 0x911432 with ID 0xA0561C +| ... | +| 15: [0x9A67B:0x02FB093] | child 15 represents a node with key 0x91F9A67B with ID 0x02FB093 ++-----------------------------------+ +``` + +## Design choices + +### []byte copying +Nodes contain a []byte which represents its value. This slice should never be edited internally. This allows usage without having to make copies of it for safety. +Anytime these values leave the library, for example in `Get`, `GetValue`, `GetProof`, `GetRangeProof`, etc, they need to be copied into a new slice to prevent +edits made outside of the library from being reflected in the DB/TrieViews. + +### Single node type + +A `Merkle Node` holds the IDs of its children, its value, as well as any path extension. This simplifies some logic and allows all of the data about a node to be loaded in a single database read. 
This trades off a small amount of storage efficiency (some fields may be `nil` but are still stored for every node). + +### Validity + +A `trieView` is built atop another trie, and that trie could change at any point. If it does, all descendants of the trie will be marked invalid before the edit of the trie occurs. If an operation is performed on an invalid trie, an ErrInvalid error will be returned instead of the expected result. When a view is committed, all of its sibling views (the views that share the same parent) are marked invalid and any child views of the view have their parent updated to exclude any committed views between them and the db. + +### Locking + +`Database` has a `RWMutex` named `lock`. Its read operations don't store data in a map, so a read lock suffices for read operations. +`Database` has a `Mutex` named `commitLock`. It enforces that only a single view/batch is attempting to commit to the database at one time. `lock` is insufficient because there is a period of view preparation where read access should still be allowed, followed by a period where a full write lock is needed. The `commitLock` ensures that only a single goroutine makes the transition from read->write. + +A `trieView` is built atop another trie, which may be the underlying `Database` or another `trieView`. +It's important to guarantee atomicity/consistency of trie operations. +That is, if a view method is executing, the views/database underneath the view shouldn't be changing. +To prevent this, we need to use locking. + +`trieView` has a `RWMutex` named `lock` that's held when methods that access the trie's structure are executing. It is responsible for ensuring that writing/reading from a `trieView` or from any *ancestor* is safe. +It also has a `RWMutex` named `validityTrackingLock` that is held during methods that change the view's validity, tracking of child views' validity, or of the `trieView` parent trie. 
This lock ensures that writing/reading from `trieView` or any of its *descendants* is safe. +The `Commit` function also grabs the `Database`'s `commitLock` lock. This is the only `trieView` method that modifies the underlying `Database`. If an ancestor is modified during this time, the commit will error with ErrInvalid. + +In some of `Database`'s methods, we create a `trieView` and call unexported methods on it without locking it. +We do so because the exported counterpart of the method read locks the `Database`, which is already locked. +This pattern is safe because the `Database` is locked, so no data under the view is changing, and nobody else has a reference to the view, so there can't be any concurrent access. + +To prevent deadlocks, `trieView` and `Database` never acquire the `lock` of any descendant views that are built atop it. +That is, locking is always done from a view down to the underlying `Database`, never the other way around. +The `validityTrackingLock` goes the opposite way. Views can validityTrackingLock their children, but not their ancestors. Because of this, any function that takes the `validityTrackingLock` should avoid taking the `lock` as this will likely trigger a deadlock. Keeping `lock` solely in the ancestor direction and `validityTrackingLock` solely in the descendant direction prevents deadlocks from occurring. diff --git a/avalanchego/x/merkledb/batch.go b/avalanchego/x/merkledb/batch.go new file mode 100644 index 00000000..353c0839 --- /dev/null +++ b/avalanchego/x/merkledb/batch.go @@ -0,0 +1,27 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "github.com/ava-labs/avalanchego/database" +) + +var _ database.Batch = &batch{} + +// batch is a write-only database that commits changes to its host database +// when Write is called. 
+type batch struct { + database.BatchOps + + db *Database +} + +// apply all operations in order to the database and write the result to disk +func (b *batch) Write() error { + return b.db.commitBatch(b.Ops) +} + +func (b *batch) Inner() database.Batch { + return b +} diff --git a/avalanchego/x/merkledb/cache.go b/avalanchego/x/merkledb/cache.go new file mode 100644 index 00000000..9aecae74 --- /dev/null +++ b/avalanchego/x/merkledb/cache.go @@ -0,0 +1,80 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "sync" + + "github.com/ava-labs/avalanchego/utils/linkedhashmap" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +// A cache that calls [onEviction] on the evicted element. +type onEvictCache[K comparable, V any] struct { + lock sync.Mutex + maxSize int + // LRU --> MRU from left to right. + lru linkedhashmap.LinkedHashmap[K, V] + onEviction func(V) error +} + +func newOnEvictCache[K comparable, V any](maxSize int, onEviction func(V) error) onEvictCache[K, V] { + return onEvictCache[K, V]{ + maxSize: maxSize, + lru: linkedhashmap.New[K, V](), + onEviction: onEviction, + } +} + +// Get an element from this cache. +func (c *onEvictCache[K, V]) Get(key K) (V, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + val, ok := c.lru.Get(key) + if ok { + // This key was touched; move it to the MRU position. + c.lru.Put(key, val) + } + return val, ok +} + +// Put an element into this cache. If this causes an element +// to be evicted, calls [c.onEviction] on the evicted element +// and returns the error from [c.onEviction]. Otherwise returns nil. +func (c *onEvictCache[K, V]) Put(key K, value V) error { + c.lock.Lock() + defer c.lock.Unlock() + + c.lru.Put(key, value) // Mark as MRU + + if c.lru.Len() > c.maxSize { + // Note that [c.cache] has already evicted the oldest + // element because its max size is [c.maxSize]. 
+ oldestKey, oldsetVal, _ := c.lru.Oldest() + c.lru.Delete(oldestKey) + return c.onEviction(oldsetVal) + } + return nil +} + +// Removes all elements from the cache. +// Returns the last non-nil error during [c.onEviction], if any. +// If [c.onEviction] errors, it will still be called for any +// subsequent elements and the cache will still be emptied. +func (c *onEvictCache[K, V]) Flush() error { + c.lock.Lock() + defer func() { + c.lru = linkedhashmap.New[K, V]() + c.lock.Unlock() + }() + + var errs wrappers.Errs + iter := c.lru.NewIterator() + for iter.Next() { + val := iter.Value() + errs.Add(c.onEviction(val)) + } + return errs.Err +} diff --git a/avalanchego/x/merkledb/cache_test.go b/avalanchego/x/merkledb/cache_test.go new file mode 100644 index 00000000..a841aef7 --- /dev/null +++ b/avalanchego/x/merkledb/cache_test.go @@ -0,0 +1,213 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +var errTest = errors.New("test error") + +func TestNewOnEvictCache(t *testing.T) { + require := require.New(t) + + called := false + onEviction := func(int) error { + called = true + return nil + } + maxSize := 10 + + cache := newOnEvictCache[int](maxSize, onEviction) + require.Equal(maxSize, cache.maxSize) + require.NotNil(cache.lru) + require.Equal(0, cache.lru.Len()) + // Can't test function equality directly so do this + // to make sure it was assigned correctly + err := cache.onEviction(0) + require.NoError(err) + require.True(called) +} + +// Test the functionality of the cache when the onEviction function +// never returns an error. +// Note this test assumes the internal cache is an LRU cache. 
+func TestOnEvictCacheNoOnEvictionError(t *testing.T) { + require := require.New(t) + + evicted := []int{} + onEviction := func(n int) error { + evicted = append(evicted, n) + return nil + } + maxSize := 3 + + cache := newOnEvictCache[int](maxSize, onEviction) + + // Get non-existent key + _, ok := cache.Get(0) + require.False(ok) + + // Put key + err := cache.Put(0, 0) + require.NoError(err) + require.Equal(1, cache.lru.Len()) + + // Get key + val, ok := cache.Get(0) + require.True(ok) + require.Equal(0, val) + + // Get non-existent key + _, ok = cache.Get(1) + require.False(ok) + + // Fill the cache + for i := 1; i < maxSize; i++ { + err := cache.Put(i, i) + require.NoError(err) + require.Equal(i+1, cache.lru.Len()) + } + require.Len(evicted, 0) + + // Cache has [0,1,2] from LRU --> MRU + + // Put another key. This should evict the LRU key (0). + err = cache.Put(maxSize, maxSize) + require.NoError(err) + require.Equal(maxSize, cache.lru.Len()) + require.Len(evicted, 1) + require.Equal(0, evicted[0]) + + // Cache has [1,2,3] from LRU --> MRU + iter := cache.lru.NewIterator() + require.True(iter.Next()) + require.Equal(1, iter.Key()) + require.Equal(1, iter.Value()) + require.True(iter.Next()) + require.Equal(2, iter.Key()) + require.Equal(2, iter.Value()) + require.True(iter.Next()) + require.Equal(3, iter.Key()) + require.Equal(3, iter.Value()) + require.False(iter.Next()) + + // 0 should no longer be in the cache + _, ok = cache.Get(0) + require.False(ok) + + // Other keys should still be in the cache + for i := maxSize; i >= 1; i-- { + val, ok := cache.Get(i) + require.True(ok) + require.Equal(i, val) + } + + // Cache has [3,2,1] from LRU --> MRU + iter = cache.lru.NewIterator() + require.True(iter.Next()) + require.Equal(3, iter.Key()) + require.Equal(3, iter.Value()) + require.True(iter.Next()) + require.Equal(2, iter.Key()) + require.Equal(2, iter.Value()) + require.True(iter.Next()) + require.Equal(1, iter.Key()) + require.Equal(1, iter.Value()) + 
require.False(iter.Next()) + + // Put another key to evict the LRU key (3). + err = cache.Put(maxSize+1, maxSize+1) + require.NoError(err) + require.Equal(maxSize, cache.lru.Len()) + require.Len(evicted, 2) + require.Equal(3, evicted[1]) + + // Cache has [2,1,4] from LRU --> MRU + iter = cache.lru.NewIterator() + require.True(iter.Next()) + require.Equal(2, iter.Key()) + require.Equal(2, iter.Value()) + require.True(iter.Next()) + require.Equal(1, iter.Key()) + require.Equal(1, iter.Value()) + require.True(iter.Next()) + require.Equal(4, iter.Key()) + require.Equal(4, iter.Value()) + require.False(iter.Next()) + + // 3 should no longer be in the cache + _, ok = cache.Get(3) + require.False(ok) + + err = cache.Flush() + require.NoError(err) + + // Cache should be empty + require.Equal(0, cache.lru.Len()) + require.Len(evicted, 5) + require.Equal(evicted, []int{0, 3, 2, 1, 4}) + require.Equal(0, cache.lru.Len()) + require.Equal(maxSize, cache.maxSize) // Should be unchanged +} + +// Test the functionality of the cache when the onEviction function +// returns an error. +// Note this test assumes the internal cache is an LRU cache. +func TestOnEvictCacheOnEvictionError(t *testing.T) { + var ( + require = require.New(t) + evicted = []int{} + onEviction = func(n int) error { + // Evicting even keys errors + evicted = append(evicted, n) + if n%2 == 0 { + return errTest + } + return nil + } + maxSize = 2 + ) + + cache := newOnEvictCache[int](maxSize, onEviction) + + // Fill the cache + for i := 0; i < maxSize; i++ { + err := cache.Put(i, i) + require.NoError(err) + require.Equal(i+1, cache.lru.Len()) + } + + // Put another key. This should evict the LRU key (0) + // and return an error since 0 is even. 
+ err := cache.Put(maxSize, maxSize) + require.ErrorIs(err, errTest) + + // Cache should still have correct state [1,2] + require.Equal(evicted, []int{0}) + require.Equal(maxSize, cache.lru.Len()) + _, ok := cache.Get(0) + require.False(ok) + _, ok = cache.Get(1) + require.True(ok) + _, ok = cache.Get(2) + require.True(ok) + + // Flush the cache. Should error on last element (2). + err = cache.Flush() + require.ErrorIs(err, errTest) + + // Should still be empty. + require.Equal(0, cache.lru.Len()) + require.Equal(evicted, []int{0, 1, 2}) + _, ok = cache.Get(0) + require.False(ok) + _, ok = cache.Get(1) + require.False(ok) + _, ok = cache.Get(2) + require.False(ok) +} diff --git a/avalanchego/x/merkledb/codec.go b/avalanchego/x/merkledb/codec.go new file mode 100644 index 00000000..7baa3715 --- /dev/null +++ b/avalanchego/x/merkledb/codec.go @@ -0,0 +1,858 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sync" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +const ( + codecVersion = 0 + trueByte = 1 + falseByte = 0 + minVarIntLen = 1 + boolLen = 1 + idLen = hashing.HashLen + minCodecVersionLen = minVarIntLen + minSerializedPathLen = minVarIntLen + minByteSliceLen = minVarIntLen + minDeletedKeyLen = minByteSliceLen + minMaybeByteSliceLen = boolLen + minProofPathLen = minVarIntLen + minKeyValueLen = 2 * minByteSliceLen + minProofNodeLen = minSerializedPathLen + minMaybeByteSliceLen + minVarIntLen + minProofLen = minCodecVersionLen + minProofPathLen + minByteSliceLen + minChangeProofLen = minCodecVersionLen + +boolLen + 2*minProofPathLen + 2*minVarIntLen + minRangeProofLen = minCodecVersionLen + +2*minProofPathLen + minVarIntLen + minDBNodeLen = minCodecVersionLen + minMaybeByteSliceLen + minVarIntLen + minHashValuesLen = minCodecVersionLen + minVarIntLen + 
minMaybeByteSliceLen + minSerializedPathLen + minProofNodeChildLen = minVarIntLen + idLen + minChildLen = minVarIntLen + minSerializedPathLen + idLen +) + +var ( + _ EncoderDecoder = (*codecImpl)(nil) + + trueBytes = []byte{trueByte} + falseBytes = []byte{falseByte} + + errUnknownVersion = errors.New("unknown codec version") + errEncodeNil = errors.New("can't encode nil pointer or interface") + errDecodeNil = errors.New("can't decode nil") + errNegativeProofPathNodes = errors.New("negative proof path length") + errNegativeNumChildren = errors.New("number of children is negative") + errTooManyChildren = fmt.Errorf("length of children list is larger than branching factor of %d", NodeBranchFactor) + errChildIndexTooLarge = fmt.Errorf("invalid child index. Must be less than branching factor of %d", NodeBranchFactor) + errNegativeNibbleLength = errors.New("nibble length is negative") + errNegativeNumKeyValues = errors.New("negative number of key values") + errIntTooLarge = errors.New("integer too large to be decoded") + errLeadingZeroes = errors.New("varint has leading zeroes") + errInvalidBool = errors.New("decoded bool is neither true nor false") + errNonZeroNibblePadding = errors.New("nibbles should be padded with 0s") + errExtraSpace = errors.New("trailing buffer space") + errNegativeSliceLength = errors.New("negative slice length") + errInvalidCodecVersion = errors.New("invalid codec version") +) + +// EncoderDecoder defines the interface needed by merkleDB to marshal +// and unmarshal relevant types. 
+type EncoderDecoder interface { + Encoder + Decoder +} + +type Encoder interface { + EncodeProof(version uint16, p *Proof) ([]byte, error) + EncodeChangeProof(version uint16, p *ChangeProof) ([]byte, error) + EncodeRangeProof(version uint16, p *RangeProof) ([]byte, error) + + encodeDBNode(version uint16, n *dbNode) ([]byte, error) + encodeHashValues(version uint16, hv *hashValues) ([]byte, error) +} + +type Decoder interface { + DecodeProof(bytes []byte, p *Proof) (uint16, error) + DecodeChangeProof(bytes []byte, p *ChangeProof) (uint16, error) + DecodeRangeProof(bytes []byte, p *RangeProof) (uint16, error) + + decodeDBNode(bytes []byte, n *dbNode) (uint16, error) +} + +func newCodec() (EncoderDecoder, uint16) { + return &codecImpl{ + varIntPool: sync.Pool{ + New: func() interface{} { + return make([]byte, binary.MaxVarintLen64) + }, + }, + }, codecVersion +} + +type codecImpl struct { + varIntPool sync.Pool +} + +func (c *codecImpl) EncodeProof(version uint16, proof *Proof) ([]byte, error) { + if proof == nil { + return nil, errEncodeNil + } + + if version != codecVersion { + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) + } + + buf := &bytes.Buffer{} + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } + if err := c.encodeProofPath(buf, proof.Path); err != nil { + return nil, err + } + if err := c.encodeByteSlice(buf, proof.Key); err != nil { + return nil, err + } + if err := c.encodeMaybeByteSlice(buf, proof.Value); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (c *codecImpl) EncodeChangeProof(version uint16, proof *ChangeProof) ([]byte, error) { + if proof == nil { + return nil, errEncodeNil + } + + if version != codecVersion { + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) + } + + buf := &bytes.Buffer{} + + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } + if err := c.encodeBool(buf, proof.HadRootsInHistory); err != nil { + return nil, err + } + if 
err := c.encodeProofPath(buf, proof.StartProof); err != nil { + return nil, err + } + if err := c.encodeProofPath(buf, proof.EndProof); err != nil { + return nil, err + } + if err := c.encodeInt(buf, len(proof.KeyValues)); err != nil { + return nil, err + } + for _, kv := range proof.KeyValues { + if err := c.encodeKeyValue(kv, buf); err != nil { + return nil, err + } + } + + if err := c.encodeInt(buf, len(proof.DeletedKeys)); err != nil { + return nil, err + } + for _, key := range proof.DeletedKeys { + if err := c.encodeByteSlice(buf, key); err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} + +func (c *codecImpl) EncodeRangeProof(version uint16, proof *RangeProof) ([]byte, error) { + if proof == nil { + return nil, errEncodeNil + } + + if version != codecVersion { + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) + } + + buf := &bytes.Buffer{} + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } + if err := c.encodeProofPath(buf, proof.StartProof); err != nil { + return nil, err + } + if err := c.encodeProofPath(buf, proof.EndProof); err != nil { + return nil, err + } + if err := c.encodeInt(buf, len(proof.KeyValues)); err != nil { + return nil, err + } + for _, kv := range proof.KeyValues { + if err := c.encodeKeyValue(kv, buf); err != nil { + return nil, err + } + } + + return buf.Bytes(), nil +} + +func (c *codecImpl) encodeDBNode(version uint16, n *dbNode) ([]byte, error) { + if n == nil { + return nil, errEncodeNil + } + + if version != codecVersion { + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) + } + + buf := &bytes.Buffer{} + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } + if err := c.encodeMaybeByteSlice(buf, n.value); err != nil { + return nil, err + } + childrenLength := len(n.children) + if err := c.encodeInt(buf, childrenLength); err != nil { + return nil, err + } + for index := byte(0); index < NodeBranchFactor; index++ { + if entry, ok := 
n.children[index]; ok { + if err := c.encodeInt(buf, int(index)); err != nil { + return nil, err + } + path := entry.compressedPath.Serialize() + if err := c.encodeSerializedPath(path, buf); err != nil { + return nil, err + } + if _, err := buf.Write(entry.id[:]); err != nil { + return nil, err + } + } + } + return buf.Bytes(), nil +} + +func (c *codecImpl) encodeHashValues(version uint16, hv *hashValues) ([]byte, error) { + if hv == nil { + return nil, errEncodeNil + } + + if version != codecVersion { + return nil, fmt.Errorf("%w: %d", errUnknownVersion, version) + } + + buf := &bytes.Buffer{} + + if err := c.encodeInt(buf, int(version)); err != nil { + return nil, err + } + + length := len(hv.Children) + if err := c.encodeInt(buf, length); err != nil { + return nil, err + } + + // ensure that the order of entries is consistent + for index := byte(0); index < NodeBranchFactor; index++ { + if entry, ok := hv.Children[index]; ok { + if err := c.encodeInt(buf, int(index)); err != nil { + return nil, err + } + if _, err := buf.Write(entry.id[:]); err != nil { + return nil, err + } + } + } + if err := c.encodeMaybeByteSlice(buf, hv.Value); err != nil { + return nil, err + } + if err := c.encodeSerializedPath(hv.Key, buf); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c *codecImpl) DecodeProof(b []byte, proof *Proof) (uint16, error) { + if proof == nil { + return 0, errDecodeNil + } + if minProofLen > len(b) { + return 0, io.ErrUnexpectedEOF + } + + var ( + err error + src = bytes.NewReader(b) + ) + gotCodecVersion, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if codecVersion != gotCodecVersion { + return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) + } + if proof.Path, err = c.decodeProofPath(src); err != nil { + return 0, err + } + if proof.Key, err = c.decodeByteSlice(src); err != nil { + return 0, err + } + if proof.Value, err = c.decodeMaybeByteSlice(src); err != nil { + return 0, err + } + if 
src.Len() != 0 { + return 0, errExtraSpace + } + return codecVersion, nil +} + +func (c *codecImpl) DecodeChangeProof(b []byte, proof *ChangeProof) (uint16, error) { + if proof == nil { + return 0, errDecodeNil + } + if minChangeProofLen > len(b) { + return 0, io.ErrUnexpectedEOF + } + + var ( + src = bytes.NewReader(b) + err error + ) + + gotCodecVersion, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if gotCodecVersion != codecVersion { + return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) + } + if proof.HadRootsInHistory, err = c.decodeBool(src); err != nil { + return 0, err + } + if proof.StartProof, err = c.decodeProofPath(src); err != nil { + return 0, err + } + if proof.EndProof, err = c.decodeProofPath(src); err != nil { + return 0, err + } + + numKeyValues, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if numKeyValues < 0 { + return 0, errNegativeNumKeyValues + } + if numKeyValues > src.Len()/minKeyValueLen { + return 0, io.ErrUnexpectedEOF + } + proof.KeyValues = make([]KeyValue, numKeyValues) + for i := range proof.KeyValues { + if proof.KeyValues[i], err = c.decodeKeyValue(src); err != nil { + return 0, err + } + } + + numDeletedKeys, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if numDeletedKeys < 0 { + return 0, errNegativeNumKeyValues + } + if numDeletedKeys > src.Len()/minDeletedKeyLen { + return 0, io.ErrUnexpectedEOF + } + proof.DeletedKeys = make([][]byte, numDeletedKeys) + for i := range proof.DeletedKeys { + if proof.DeletedKeys[i], err = c.decodeByteSlice(src); err != nil { + return 0, err + } + } + if src.Len() != 0 { + return 0, errExtraSpace + } + return codecVersion, nil +} + +func (c *codecImpl) DecodeRangeProof(b []byte, proof *RangeProof) (uint16, error) { + if proof == nil { + return 0, errDecodeNil + } + if minRangeProofLen > len(b) { + return 0, io.ErrUnexpectedEOF + } + + var ( + src = bytes.NewReader(b) + err error + ) + gotCodecVersion, err := 
c.decodeInt(src) + if err != nil { + return 0, err + } + if codecVersion != gotCodecVersion { + return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) + } + if proof.StartProof, err = c.decodeProofPath(src); err != nil { + return 0, err + } + if proof.EndProof, err = c.decodeProofPath(src); err != nil { + return 0, err + } + + numKeyValues, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if numKeyValues < 0 { + return 0, errNegativeNumKeyValues + } + if numKeyValues > src.Len()/minKeyValueLen { + return 0, io.ErrUnexpectedEOF + } + proof.KeyValues = make([]KeyValue, numKeyValues) + for i := range proof.KeyValues { + if proof.KeyValues[i], err = c.decodeKeyValue(src); err != nil { + return 0, err + } + } + if src.Len() != 0 { + return 0, errExtraSpace + } + return codecVersion, nil +} + +func (c *codecImpl) decodeDBNode(b []byte, n *dbNode) (uint16, error) { + if n == nil { + return 0, errDecodeNil + } + if minDBNodeLen > len(b) { + return 0, io.ErrUnexpectedEOF + } + + var ( + src = bytes.NewReader(b) + err error + ) + + gotCodecVersion, err := c.decodeInt(src) + if err != nil { + return 0, err + } + if codecVersion != gotCodecVersion { + return 0, fmt.Errorf("%w: %d", errInvalidCodecVersion, gotCodecVersion) + } + + if n.value, err = c.decodeMaybeByteSlice(src); err != nil { + return 0, err + } + + numChildren, err := c.decodeInt(src) + if err != nil { + return 0, err + } + switch { + case numChildren < 0: + return 0, errNegativeNumChildren + case numChildren > NodeBranchFactor: + return 0, errTooManyChildren + case numChildren > src.Len()/minChildLen: + return 0, io.ErrUnexpectedEOF + } + + n.children = make(map[byte]child, NodeBranchFactor) + previousChild := -1 + for i := 0; i < numChildren; i++ { + var index int + if index, err = c.decodeInt(src); err != nil { + return 0, err + } + if index <= previousChild || index > NodeBranchFactor-1 { + return 0, errChildIndexTooLarge + } + previousChild = index + + var compressedPath 
SerializedPath + if compressedPath, err = c.decodeSerializedPath(src); err != nil { + return 0, err + } + var childID ids.ID + if childID, err = c.decodeID(src); err != nil { + return 0, err + } + n.children[byte(index)] = child{ + compressedPath: compressedPath.deserialize(), + id: childID, + } + } + if src.Len() != 0 { + return 0, errExtraSpace + } + return codecVersion, err +} + +func (c *codecImpl) decodeKeyValue(src *bytes.Reader) (KeyValue, error) { + if minKeyValueLen > src.Len() { + return KeyValue{}, io.ErrUnexpectedEOF + } + + var ( + result KeyValue + err error + ) + if result.Key, err = c.decodeByteSlice(src); err != nil { + return result, err + } + if result.Value, err = c.decodeByteSlice(src); err != nil { + return result, err + } + return result, nil +} + +func (c *codecImpl) encodeKeyValue(kv KeyValue, dst io.Writer) error { + if err := c.encodeByteSlice(dst, kv.Key); err != nil { + return err + } + if err := c.encodeByteSlice(dst, kv.Value); err != nil { + return err + } + return nil +} + +func (*codecImpl) encodeBool(dst io.Writer, value bool) error { + bytesValue := falseBytes + if value { + bytesValue = trueBytes + } + _, err := dst.Write(bytesValue) + return err +} + +func (*codecImpl) decodeBool(src *bytes.Reader) (bool, error) { + boolByte, err := src.ReadByte() + if err == io.EOF { + return false, io.ErrUnexpectedEOF + } + if err != nil { + return false, err + } + switch boolByte { + case trueByte: + return true, nil + case falseByte: + return false, nil + default: + return false, errInvalidBool + } +} + +func (c *codecImpl) encodeInt(dst io.Writer, value int) error { + return c.encodeInt64(dst, int64(value)) +} + +func (*codecImpl) decodeInt(src *bytes.Reader) (int, error) { + // To ensure encoding/decoding is canonical, we need to check for leading + // zeroes in the varint. + // The last byte of the varint we read is the most significant byte. 
+ // If it's 0, then it's a leading zero, which is considered invalid in the + // canonical encoding. + startLen := src.Len() + val64, err := binary.ReadVarint(src) + switch { + case err == io.EOF: + return 0, io.ErrUnexpectedEOF + case err != nil: + return 0, err + case val64 > math.MaxInt: + return 0, errIntTooLarge + } + endLen := src.Len() + + // Just 0x00 is a valid value so don't check if the varint is 1 byte + if startLen-endLen > 1 { + if err := src.UnreadByte(); err != nil { + return 0, err + } + lastByte, err := src.ReadByte() + if err != nil { + return 0, err + } + if lastByte == 0x00 { + return 0, errLeadingZeroes + } + } + + return int(val64), nil +} + +func (c *codecImpl) encodeInt64(dst io.Writer, value int64) error { + buf := c.varIntPool.Get().([]byte) + size := binary.PutVarint(buf, value) + _, err := dst.Write(buf[:size]) + c.varIntPool.Put(buf) + return err +} + +func (c *codecImpl) encodeMaybeByteSlice(dst io.Writer, maybeValue Maybe[[]byte]) error { + if err := c.encodeBool(dst, !maybeValue.IsNothing()); err != nil { + return err + } + if maybeValue.IsNothing() { + return nil + } + return c.encodeByteSlice(dst, maybeValue.Value()) +} + +func (c *codecImpl) decodeMaybeByteSlice(src *bytes.Reader) (Maybe[[]byte], error) { + if minMaybeByteSliceLen > src.Len() { + return Nothing[[]byte](), io.ErrUnexpectedEOF + } + + if hasValue, err := c.decodeBool(src); err != nil || !hasValue { + return Nothing[[]byte](), err + } + + bytes, err := c.decodeByteSlice(src) + if err != nil { + return Nothing[[]byte](), err + } + + return Some(bytes), nil +} + +func (c *codecImpl) decodeByteSlice(src *bytes.Reader) ([]byte, error) { + if minByteSliceLen > src.Len() { + return nil, io.ErrUnexpectedEOF + } + + var ( + length int + err error + result []byte + ) + if length, err = c.decodeInt(src); err != nil { + if err == io.EOF { + return nil, io.ErrUnexpectedEOF + } + return nil, err + } + + switch { + case length < 0: + return nil, errNegativeSliceLength + case 
length == 0: + return nil, nil + case length > src.Len(): + return nil, io.ErrUnexpectedEOF + } + + result = make([]byte, length) + if _, err := io.ReadFull(src, result); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return result, nil +} + +func (c *codecImpl) encodeByteSlice(dst io.Writer, value []byte) error { + if err := c.encodeInt(dst, len(value)); err != nil { + return err + } + if value != nil { + if _, err := dst.Write(value); err != nil { + return err + } + } + return nil +} + +func (*codecImpl) decodeID(src *bytes.Reader) (ids.ID, error) { + if idLen > src.Len() { + return ids.ID{}, io.ErrUnexpectedEOF + } + + var id ids.ID + if _, err := io.ReadFull(src, id[:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return id, err + } + return id, nil +} + +// Assumes a proof path has > 0 nodes. +func (c *codecImpl) decodeProofPath(src *bytes.Reader) ([]ProofNode, error) { + if minProofPathLen > src.Len() { + return nil, io.ErrUnexpectedEOF + } + + numProofNodes, err := c.decodeInt(src) + if err != nil { + return nil, err + } + if numProofNodes < 0 { + return nil, errNegativeProofPathNodes + } + if numProofNodes > src.Len()/minProofNodeLen { + return nil, io.ErrUnexpectedEOF + } + result := make([]ProofNode, numProofNodes) + for i := 0; i < numProofNodes; i++ { + if result[i], err = c.decodeProofNode(src); err != nil { + return nil, err + } + } + return result, nil +} + +// Invariant: len(path) > 0. 
+func (c *codecImpl) encodeProofPath(dst io.Writer, path []ProofNode) error { + if err := c.encodeInt(dst, len(path)); err != nil { + return err + } + for _, proofNode := range path { + if err := c.encodeProofNode(proofNode, dst); err != nil { + return err + } + } + return nil +} + +func (c *codecImpl) decodeProofNode(src *bytes.Reader) (ProofNode, error) { + if minProofNodeLen > src.Len() { + return ProofNode{}, io.ErrUnexpectedEOF + } + + var ( + result ProofNode + err error + ) + if result.KeyPath, err = c.decodeSerializedPath(src); err != nil { + return result, err + } + if result.ValueOrHash, err = c.decodeMaybeByteSlice(src); err != nil { + return result, err + } + numChildren, err := c.decodeInt(src) + if err != nil { + return result, err + } + switch { + case numChildren < 0: + return result, errNegativeNumChildren + case numChildren > NodeBranchFactor: + return result, errTooManyChildren + case numChildren > src.Len()/minProofNodeChildLen: + return result, io.ErrUnexpectedEOF + } + + result.Children = make(map[byte]ids.ID, numChildren) + previousChild := -1 + for addedEntries := 0; addedEntries < numChildren; addedEntries++ { + index, err := c.decodeInt(src) + if err != nil { + return result, err + } + if index <= previousChild || index >= NodeBranchFactor { + return result, errChildIndexTooLarge + } + previousChild = index + + childID, err := c.decodeID(src) + if err != nil { + return result, err + } + result.Children[byte(index)] = childID + } + return result, nil +} + +func (c *codecImpl) encodeProofNode(pn ProofNode, dst io.Writer) error { + if err := c.encodeSerializedPath(pn.KeyPath, dst); err != nil { + return err + } + if err := c.encodeMaybeByteSlice(dst, pn.ValueOrHash); err != nil { + return err + } + if err := c.encodeInt(dst, len(pn.Children)); err != nil { + return err + } + // ensure this is in order + childrenCount := 0 + for index := byte(0); index < NodeBranchFactor; index++ { + childID, ok := pn.Children[index] + if !ok { + continue + } 
+ childrenCount++ + if err := c.encodeInt(dst, int(index)); err != nil { + return err + } + if _, err := dst.Write(childID[:]); err != nil { + return err + } + } + // there are children present with index >= NodeBranchFactor + if childrenCount != len(pn.Children) { + return errChildIndexTooLarge + } + return nil +} + +func (c *codecImpl) encodeSerializedPath(s SerializedPath, dst io.Writer) error { + if err := c.encodeInt(dst, s.NibbleLength); err != nil { + return err + } + _, err := dst.Write(s.Value) + return err +} + +func (c *codecImpl) decodeSerializedPath(src *bytes.Reader) (SerializedPath, error) { + if minSerializedPathLen > src.Len() { + return SerializedPath{}, io.ErrUnexpectedEOF + } + + var ( + result SerializedPath + err error + ) + if result.NibbleLength, err = c.decodeInt(src); err != nil { + return result, err + } + if result.NibbleLength < 0 { + return result, errNegativeNibbleLength + } + pathBytesLen := result.NibbleLength >> 1 + hasOddLen := result.hasOddLength() + if hasOddLen { + pathBytesLen++ + } + if pathBytesLen > src.Len() { + return result, io.ErrUnexpectedEOF + } + result.Value = make([]byte, pathBytesLen) + if _, err := io.ReadFull(src, result.Value); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return result, err + } + if hasOddLen { + paddedNibble := result.Value[pathBytesLen-1] & 0x0F + if paddedNibble != 0 { + return result, errNonZeroNibblePadding + } + } + return result, nil +} diff --git a/avalanchego/x/merkledb/codec_test.go b/avalanchego/x/merkledb/codec_test.go new file mode 100644 index 00000000..f3561c81 --- /dev/null +++ b/avalanchego/x/merkledb/codec_test.go @@ -0,0 +1,698 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "bytes" + "io" + "math/rand" + "reflect" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +// TODO add more codec tests + +func newRandomProofNode(r *rand.Rand) ProofNode { + key := make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(key) // #nosec G404 + val := make([]byte, r.Intn(64)) // #nosec G404 + _, _ = r.Read(val) // #nosec G404 + + children := map[byte]ids.ID{} + for j := 0; j < NodeBranchFactor; j++ { + if r.Float64() < 0.5 { + var childID ids.ID + _, _ = r.Read(childID[:]) // #nosec G404 + children[byte(j)] = childID + } + } + // use the hash instead when length is greater than the hash length + if len(val) >= HashLength { + val = hashing.ComputeHash256(val) + } else if len(val) == 0 { + // We do this because when we encode a value of []byte{} we will later + // decode it as nil. + // Doing this prevents inconsistency when comparing the encoded and + // decoded values. 
+ // Calling nilEmptySlices doesn't set this because it is a private + // variable on the struct + val = nil + } + + return ProofNode{ + KeyPath: newPath(key).Serialize(), + ValueOrHash: Some(val), + Children: children, + } +} + +func newKeyValues(r *rand.Rand, num uint) []KeyValue { + keyValues := make([]KeyValue, num) + for i := range keyValues { + key := make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(key) // #nosec G404 + val := make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(val) // #nosec G404 + keyValues[i] = KeyValue{ + Key: key, + Value: val, + } + } + return keyValues +} + +func nilEmptySlices(dest interface{}) { + if dest == nil { + return + } + + destPtr := reflect.ValueOf(dest) + if destPtr.Kind() != reflect.Ptr { + return + } + nilEmptySlicesRec(destPtr.Elem()) +} + +func nilEmptySlicesRec(value reflect.Value) { + switch value.Kind() { + case reflect.Slice: + if value.Len() == 0 { + newValue := reflect.Zero(value.Type()) + value.Set(newValue) + return + } + + for i := 0; i < value.Len(); i++ { + f := value.Index(i) + nilEmptySlicesRec(f) + } + case reflect.Array: + for i := 0; i < value.Len(); i++ { + f := value.Index(i) + nilEmptySlicesRec(f) + } + case reflect.Interface, reflect.Ptr: + if value.IsNil() { + return + } + nilEmptySlicesRec(value.Elem()) + case reflect.Struct: + t := value.Type() + numFields := value.NumField() + for i := 0; i < numFields; i++ { + tField := t.Field(i) + if tField.IsExported() { + field := value.Field(i) + nilEmptySlicesRec(field) + } + } + } +} + +func FuzzCodecBool(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + b []byte, + ) { + require := require.New(t) + + codec := Codec.(*codecImpl) + reader := bytes.NewReader(b) + startLen := reader.Len() + got, err := codec.decodeBool(reader) + if err != nil { + return + } + endLen := reader.Len() + numRead := startLen - endLen + + // Encoding [got] should be the same as [b]. 
+ var buf bytes.Buffer + err = codec.encodeBool(&buf, got) + require.NoError(err) + bufBytes := buf.Bytes() + require.Len(bufBytes, numRead) + require.Equal(b[:numRead], bufBytes) + }, + ) +} + +func FuzzCodecInt(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + b []byte, + ) { + require := require.New(t) + + codec := Codec.(*codecImpl) + reader := bytes.NewReader(b) + startLen := reader.Len() + got, err := codec.decodeInt(reader) + if err != nil { + return + } + endLen := reader.Len() + numRead := startLen - endLen + + // Encoding [got] should be the same as [b]. + var buf bytes.Buffer + err = codec.encodeInt(&buf, got) + require.NoError(err) + bufBytes := buf.Bytes() + require.Len(bufBytes, numRead) + require.Equal(b[:numRead], bufBytes) + }, + ) +} + +func FuzzCodecSerializedPath(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + b []byte, + ) { + require := require.New(t) + + codec := Codec.(*codecImpl) + reader := bytes.NewReader(b) + startLen := reader.Len() + got, err := codec.decodeSerializedPath(reader) + if err != nil { + return + } + endLen := reader.Len() + numRead := startLen - endLen + + // Encoding [got] should be the same as [b]. 
+ var buf bytes.Buffer + err = codec.encodeSerializedPath(got, &buf) + require.NoError(err) + bufBytes := buf.Bytes() + require.Len(bufBytes, numRead) + require.Equal(b[:numRead], bufBytes) + + clonedGot := got.deserialize().Serialize() + require.Equal(got, clonedGot) + }, + ) +} + +func FuzzCodecProofCanonical(f *testing.F) { + f.Add( + []byte{ + // RootID: + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + // Path: + // Num proof nodes = 1 + 0x02, + // Key Path: + // Nibble Length: + 0x00, + // Value: + // Has Value = false + 0x00, + // Num Children = 2 + 0x04, + // Child 0: + // index = 0 + 0x00, + // childID: + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + // Child 1: + // index = 0 <- should fail + 0x00, + // childID: + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + // Key: + // length = 0 + 0x00, + }, + ) + f.Fuzz( + func( + t *testing.T, + b []byte, + ) { + require := require.New(t) + + codec := Codec.(*codecImpl) + proof := &Proof{} + got, err := codec.DecodeProof(b, proof) + if err != nil { + return + } + + // Encoding [proof] should be the same as [b]. + buf, err := codec.EncodeProof(got, proof) + require.NoError(err) + require.Equal(b, buf) + }, + ) +} + +func FuzzCodecChangeProofCanonical(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + b []byte, + ) { + require := require.New(t) + + codec := Codec.(*codecImpl) + proof := &ChangeProof{} + got, err := codec.DecodeChangeProof(b, proof) + if err != nil { + return + } + + // Encoding [proof] should be the same as [b]. 
+ buf, err := codec.EncodeChangeProof(got, proof) + require.NoError(err) + require.Equal(b, buf) + }, + ) +} + +func FuzzCodecRangeProofCanonical(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + b []byte, + ) { + require := require.New(t) + + codec := Codec.(*codecImpl) + proof := &RangeProof{} + got, err := codec.DecodeRangeProof(b, proof) + if err != nil { + return + } + + // Encoding [proof] should be the same as [b]. + buf, err := codec.EncodeRangeProof(got, proof) + require.NoError(err) + require.Equal(b, buf) + }, + ) +} + +func FuzzCodecDBNodeCanonical(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + b []byte, + ) { + require := require.New(t) + + codec := Codec.(*codecImpl) + node := &dbNode{} + got, err := codec.decodeDBNode(b, node) + if err != nil { + return + } + + // Encoding [node] should be the same as [b]. + buf, err := codec.encodeDBNode(got, node) + require.NoError(err) + require.Equal(b, buf) + }, + ) +} + +func FuzzCodecProofDeterministic(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + randSeed int, + key []byte, + numProofNodes uint, + ) { + require := require.New(t) + + r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 + + proofNodes := make([]ProofNode, numProofNodes) + for i := range proofNodes { + proofNodes[i] = newRandomProofNode(r) + } + + proof := Proof{ + Path: proofNodes, + Key: key, + } + + proofBytes, err := Codec.EncodeProof(Version, &proof) + require.NoError(err) + + var gotProof Proof + gotVersion, err := Codec.DecodeProof(proofBytes, &gotProof) + require.NoError(err) + require.Equal(Version, gotVersion) + + nilEmptySlices(&proof) + nilEmptySlices(&gotProof) + require.Equal(proof, gotProof) + + proofBytes2, err := Codec.EncodeProof(Version, &gotProof) + require.NoError(err) + require.Equal(proofBytes, proofBytes2) + }, + ) +} + +func FuzzCodecChangeProofDeterministic(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + randSeed int, + hadRootsInHistory bool, + numProofNodes uint, + numDeletedKeys uint, + ) { 
+ require := require.New(t) + + r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 + + startProofNodes := make([]ProofNode, numProofNodes) + endProofNodes := make([]ProofNode, numProofNodes) + for i := range startProofNodes { + startProofNodes[i] = newRandomProofNode(r) + endProofNodes[i] = newRandomProofNode(r) + } + + deletedKeys := make([][]byte, numDeletedKeys) + for i := range deletedKeys { + deletedKeys[i] = make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(deletedKeys[i]) // #nosec G404 + } + + proof := ChangeProof{ + HadRootsInHistory: hadRootsInHistory, + StartProof: startProofNodes, + EndProof: endProofNodes, + KeyValues: newKeyValues(r, numProofNodes), + DeletedKeys: deletedKeys, + } + + proofBytes, err := Codec.EncodeChangeProof(Version, &proof) + require.NoError(err) + + var gotProof ChangeProof + gotVersion, err := Codec.DecodeChangeProof(proofBytes, &gotProof) + require.NoError(err) + require.Equal(Version, gotVersion) + + nilEmptySlices(&proof) + nilEmptySlices(&gotProof) + require.Equal(proof, gotProof) + + proofBytes2, err := Codec.EncodeChangeProof(Version, &gotProof) + require.NoError(err) + require.Equal(proofBytes, proofBytes2) + }, + ) +} + +func FuzzCodecRangeProofDeterministic(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + randSeed int, + numStartProofNodes uint, + numEndProofNodes uint, + numKeyValues uint, + ) { + r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 + + var rootID ids.ID + _, _ = r.Read(rootID[:]) // #nosec G404 + + startProofNodes := make([]ProofNode, numStartProofNodes) + for i := range startProofNodes { + startProofNodes[i] = newRandomProofNode(r) + } + + endProofNodes := make([]ProofNode, numEndProofNodes) + for i := range endProofNodes { + endProofNodes[i] = newRandomProofNode(r) + } + + keyValues := make([]KeyValue, numKeyValues) + for i := range keyValues { + key := make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(key) // #nosec G404 + val := make([]byte, r.Intn(32)) // #nosec 
G404 + _, _ = r.Read(val) // #nosec G404 + keyValues[i] = KeyValue{ + Key: key, + Value: val, + } + } + + proof := RangeProof{ + StartProof: startProofNodes, + EndProof: endProofNodes, + KeyValues: keyValues, + } + + proofBytes, err := Codec.EncodeRangeProof(Version, &proof) + require.NoError(t, err) + + var gotProof RangeProof + _, err = Codec.DecodeRangeProof(proofBytes, &gotProof) + require.NoError(t, err) + + nilEmptySlices(&proof) + nilEmptySlices(&gotProof) + require.Equal(t, proof, gotProof) + + proofBytes2, err := Codec.EncodeRangeProof(Version, &gotProof) + require.NoError(t, err) + require.Equal(t, proofBytes, proofBytes2) + }, + ) +} + +func FuzzCodecDBNodeDeterministic(f *testing.F) { + f.Fuzz( + func( + t *testing.T, + randSeed int, + hasValue bool, + valueBytes []byte, + ) { + require := require.New(t) + + r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 + + value := Nothing[[]byte]() + if hasValue { + if len(valueBytes) == 0 { + // We do this because when we encode a value of []byte{} + // we will later decode it as nil. + // Doing this prevents inconsistency when comparing the + // encoded and decoded values below. 
+ // Calling nilEmptySlices doesn't set this because it is a + // private variable on the struct + valueBytes = nil + } + value = Some(valueBytes) + } + + numChildren := r.Intn(NodeBranchFactor) // #nosec G404 + + children := map[byte]child{} + for i := 0; i < numChildren; i++ { + var childID ids.ID + _, _ = r.Read(childID[:]) // #nosec G404 + + childPathBytes := make([]byte, r.Intn(32)) // #nosec G404 + _, _ = r.Read(childPathBytes) // #nosec G404 + + children[byte(i)] = child{ + compressedPath: newPath(childPathBytes), + id: childID, + } + } + node := dbNode{ + value: value, + children: children, + } + + nodeBytes, err := Codec.encodeDBNode(Version, &node) + require.NoError(err) + + var gotNode dbNode + gotVersion, err := Codec.decodeDBNode(nodeBytes, &gotNode) + require.NoError(err) + require.Equal(Version, gotVersion) + + nilEmptySlices(&node) + nilEmptySlices(&gotNode) + require.Equal(node, gotNode) + + nodeBytes2, err := Codec.encodeDBNode(Version, &gotNode) + require.NoError(err) + require.Equal(nodeBytes, nodeBytes2) + }, + ) +} + +func TestCodec_DecodeProof(t *testing.T) { + require := require.New(t) + + _, err := Codec.DecodeProof([]byte{1}, nil) + require.ErrorIs(err, errDecodeNil) + + var ( + proof Proof + tooShortBytes = make([]byte, minProofLen-1) + ) + _, err = Codec.DecodeProof(tooShortBytes, &proof) + require.ErrorIs(err, io.ErrUnexpectedEOF) +} + +func TestCodec_DecodeChangeProof(t *testing.T) { + require := require.New(t) + + _, err := Codec.DecodeChangeProof([]byte{1}, nil) + require.ErrorIs(err, errDecodeNil) + + var ( + parsedProof ChangeProof + tooShortBytes = make([]byte, minChangeProofLen-1) + ) + _, err = Codec.DecodeChangeProof(tooShortBytes, &parsedProof) + require.ErrorIs(err, io.ErrUnexpectedEOF) + + proof := ChangeProof{ + HadRootsInHistory: true, + StartProof: nil, + EndProof: nil, + KeyValues: nil, + DeletedKeys: nil, + } + + proofBytes, err := Codec.EncodeChangeProof(Version, &proof) + require.NoError(err) + + // Remove key-values 
length and deleted keys length (both 0) from end + proofBytes = proofBytes[:len(proofBytes)-2*minVarIntLen] + + // Put key-values length of -1 and deleted keys length of 0 + proofBytesBuf := bytes.NewBuffer(proofBytes) + err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) + require.NoError(err) + err = Codec.(*codecImpl).encodeInt(proofBytesBuf, 0) + require.NoError(err) + + _, err = Codec.DecodeChangeProof(proofBytesBuf.Bytes(), &parsedProof) + require.ErrorIs(err, errNegativeNumKeyValues) + + proofBytes = proofBytesBuf.Bytes() + proofBytes = proofBytes[:len(proofBytes)-2*minVarIntLen] + proofBytesBuf = bytes.NewBuffer(proofBytes) + + // Remove key-values length and deleted keys length from end + // Put key-values length of 0 and deleted keys length of -1 + err = Codec.(*codecImpl).encodeInt(proofBytesBuf, 0) + require.NoError(err) + err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) + require.NoError(err) + + _, err = Codec.DecodeChangeProof(proofBytesBuf.Bytes(), &parsedProof) + require.ErrorIs(err, errNegativeNumKeyValues) +} + +func TestCodec_DecodeRangeProof(t *testing.T) { + require := require.New(t) + + _, err := Codec.DecodeRangeProof([]byte{1}, nil) + require.ErrorIs(err, errDecodeNil) + + var ( + parsedProof RangeProof + tooShortBytes = make([]byte, minRangeProofLen-1) + ) + _, err = Codec.DecodeRangeProof(tooShortBytes, &parsedProof) + require.ErrorIs(err, io.ErrUnexpectedEOF) + + proof := RangeProof{ + StartProof: nil, + EndProof: nil, + KeyValues: nil, + } + + proofBytes, err := Codec.EncodeRangeProof(Version, &proof) + require.NoError(err) + + // Remove key-values length (0) from end + proofBytes = proofBytes[:len(proofBytes)-minVarIntLen] + proofBytesBuf := bytes.NewBuffer(proofBytes) + // Put key-value length (-1) at end + err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) + require.NoError(err) + + _, err = Codec.DecodeRangeProof(proofBytesBuf.Bytes(), &parsedProof) + require.ErrorIs(err, errNegativeNumKeyValues) +} + +func 
TestCodec_DecodeDBNode(t *testing.T) { + require := require.New(t) + + _, err := Codec.decodeDBNode([]byte{1}, nil) + require.ErrorIs(err, errDecodeNil) + + var ( + parsedDBNode dbNode + tooShortBytes = make([]byte, minDBNodeLen-1) + ) + _, err = Codec.decodeDBNode(tooShortBytes, &parsedDBNode) + require.ErrorIs(err, io.ErrUnexpectedEOF) + + proof := dbNode{ + value: Some([]byte{1}), + children: map[byte]child{}, + } + + nodeBytes, err := Codec.encodeDBNode(Version, &proof) + require.NoError(err) + + // Remove num children (0) from end + nodeBytes = nodeBytes[:len(nodeBytes)-minVarIntLen] + proofBytesBuf := bytes.NewBuffer(nodeBytes) + // Put num children -1 at end + err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) + require.NoError(err) + + _, err = Codec.decodeDBNode(proofBytesBuf.Bytes(), &parsedDBNode) + require.ErrorIs(err, errNegativeNumChildren) + + // Remove num children from end + nodeBytes = proofBytesBuf.Bytes() + nodeBytes = nodeBytes[:len(nodeBytes)-minVarIntLen] + proofBytesBuf = bytes.NewBuffer(nodeBytes) + // Put num children NodeBranchFactor+1 at end + err = Codec.(*codecImpl).encodeInt(proofBytesBuf, NodeBranchFactor+1) + require.NoError(err) + + _, err = Codec.decodeDBNode(proofBytesBuf.Bytes(), &parsedDBNode) + require.ErrorIs(err, errTooManyChildren) +} diff --git a/avalanchego/x/merkledb/db.go b/avalanchego/x/merkledb/db.go new file mode 100644 index 00000000..88a63d23 --- /dev/null +++ b/avalanchego/x/merkledb/db.go @@ -0,0 +1,1147 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "bytes" + "context" + "errors" + "fmt" + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "golang.org/x/exp/maps" + "golang.org/x/exp/slices" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/prefixdb" + "github.com/ava-labs/avalanchego/database/versiondb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" +) + +const ( + RootPath = EmptyPath + + // TODO: name better + rebuildViewSizeFractionOfCacheSize = 50 + minRebuildViewSizePerCommit = 1000 +) + +var ( + _ Trie = &Database{} + _ database.Database = &Database{} + + Codec, Version = newCodec() + + rootKey = []byte{} + nodePrefix = []byte("node") + metadataPrefix = []byte("metadata") + cleanShutdownKey = []byte("cleanShutdown") + hadCleanShutdown = []byte{1} + didNotHaveCleanShutdown = []byte{0} + + errSameRoot = errors.New("start and end root are the same") +) + +type Config struct { + // The number of changes to the database that we store in memory in order to + // serve change proofs. + HistoryLength int + NodeCacheSize int + // If [Reg] is nil, metrics are collected locally but not exported through + // Prometheus. + // This may be useful for testing. + Reg prometheus.Registerer + Tracer trace.Tracer +} + +// Can only be edited by committing changes from a trieView. +type Database struct { + // Must be held when reading/writing fields. + lock sync.RWMutex + + // Must be held when preparing work to be committed to the DB. + // Used to prevent editing of the trie without restricting read access + // until the full set of changes is ready to be written. + // Should be held before taking [db.lock] + commitLock sync.RWMutex + + // versiondb that the other dbs are built on. 
+ // Allows the changes made to the snapshot and [nodeDB] to be atomic. + nodeDB *versiondb.Database + + // Stores data about the database's current state. + metadataDB database.Database + + // If a value is nil, the corresponding key isn't in the trie. + nodeCache onEvictCache[path, *node] + onEvictionErr utils.Atomic[error] + + // Stores change lists. Used to serve change proofs and construct + // historical views of the trie. + history *trieHistory + + // True iff the db has been closed. + closed bool + + metrics merkleMetrics + + tracer trace.Tracer + + // The root of this trie. + root *node + + // Valid children of this trie. + childViews []*trieView +} + +func newDatabase( + ctx context.Context, + db database.Database, + config Config, + metrics merkleMetrics, +) (*Database, error) { + trieDB := &Database{ + metrics: metrics, + nodeDB: versiondb.New(prefixdb.New(nodePrefix, db)), + metadataDB: prefixdb.New(metadataPrefix, db), + history: newTrieHistory(config.HistoryLength), + tracer: config.Tracer, + childViews: make([]*trieView, 0, defaultPreallocationSize), + } + + // Note: trieDB.OnEviction is responsible for writing intermediary nodes to + // disk as they are evicted from the cache. + trieDB.nodeCache = newOnEvictCache[path](config.NodeCacheSize, trieDB.onEviction) + + root, err := trieDB.initializeRootIfNeeded() + if err != nil { + return nil, err + } + + // add current root to history (has no changes) + trieDB.history.record(&changeSummary{ + rootID: root, + values: map[path]*change[Maybe[[]byte]]{}, + nodes: map[path]*change[*node]{}, + }) + + shutdownType, err := trieDB.metadataDB.Get(cleanShutdownKey) + switch err { + case nil: + if bytes.Equal(shutdownType, didNotHaveCleanShutdown) { + if err := trieDB.rebuild(ctx); err != nil { + return nil, err + } + } + case database.ErrNotFound: + // If the marker wasn't found then the DB is being created for the first + // time and there is nothing to do. 
+ default: + return nil, err + } + + // mark that the db has not yet been cleanly closed + err = trieDB.metadataDB.Put(cleanShutdownKey, didNotHaveCleanShutdown) + return trieDB, err +} + +// Deletes every intermediate node and rebuilds them by re-adding every key/value. +// TODO: make this more efficient by only clearing out the stale portions of the trie. +func (db *Database) rebuild(ctx context.Context) error { + db.root = newNode(nil, RootPath) + if err := db.nodeDB.Delete(rootKey); err != nil { + return err + } + it := db.nodeDB.NewIterator() + defer it.Release() + + currentViewSize := 0 + viewSizeLimit := math.Max( + db.nodeCache.maxSize/rebuildViewSizeFractionOfCacheSize, + minRebuildViewSizePerCommit, + ) + + currentView, err := db.newUntrackedView(viewSizeLimit) + if err != nil { + return err + } + + for it.Next() { + if currentViewSize >= viewSizeLimit { + if err := currentView.commitToDB(ctx); err != nil { + return err + } + currentView, err = db.newUntrackedView(viewSizeLimit) + if err != nil { + return err + } + currentViewSize = 0 + } + + key := it.Key() + path := path(key) + value := it.Value() + n, err := parseNode(path, value) + if err != nil { + return err + } + if n.hasValue() { + serializedPath := path.Serialize() + if err := currentView.Insert(ctx, serializedPath.Value, n.value.value); err != nil { + return err + } + currentViewSize++ + } + if err := db.nodeDB.Delete(key); err != nil { + return err + } + } + if err := it.Error(); err != nil { + return err + } + if err := currentView.commitToDB(ctx); err != nil { + return err + } + return db.nodeDB.Compact(nil, nil) +} + +// New returns a new merkle database. +func New(ctx context.Context, db database.Database, config Config) (*Database, error) { + metrics, err := newMetrics("merkleDB", config.Reg) + if err != nil { + return nil, err + } + return newDatabase(ctx, db, config, metrics) +} + +// Commits the key/value pairs within the [proof] to the db. 
func (db *Database) CommitChangeProof(ctx context.Context, proof *ChangeProof) error {
	db.commitLock.Lock()
	defer db.commitLock.Unlock()

	view, err := db.prepareChangeProofView(proof)
	if err != nil {
		return err
	}
	return view.commitToDB(ctx)
}

// Commits the key/value pairs within the [proof] to the db.
// [start] is the smallest key in the range this [proof] covers.
func (db *Database) CommitRangeProof(ctx context.Context, start []byte, proof *RangeProof) error {
	db.commitLock.Lock()
	defer db.commitLock.Unlock()

	view, err := db.prepareRangeProofView(start, proof)
	if err != nil {
		return err
	}
	return view.commitToDB(ctx)
}

// Compact the underlying node database over the given key range.
func (db *Database) Compact(start []byte, limit []byte) error {
	return db.nodeDB.Compact(start, limit)
}

// Close flushes cached intermediary nodes, marks a clean shutdown, and closes
// the underlying databases. Safe to call at most once; subsequent calls return
// database.ErrClosed.
func (db *Database) Close() error {
	// Lock ordering: [commitLock] before [lock], matching the write paths.
	db.commitLock.Lock()
	defer db.commitLock.Unlock()

	db.lock.Lock()
	defer db.lock.Unlock()

	if db.closed {
		return database.ErrClosed
	}

	db.closed = true

	// Always close the underlying databases, regardless of flush outcome.
	defer func() {
		_ = db.metadataDB.Close()
		_ = db.nodeDB.Close()
	}()

	if err := db.onEvictionErr.Get(); err != nil {
		// If there was an error during cache eviction,
		// [db.nodeCache] and [db.nodeDB] are in an inconsistent state.
		// Do not write cached nodes to disk or mark clean shutdown.
		return nil
	}

	// Flush [nodeCache] to persist intermediary nodes to disk.
	if err := db.nodeCache.Flush(); err != nil {
		// There was an error during cache eviction.
		// Don't commit to disk.
		return err
	}

	if err := db.nodeDB.Commit(); err != nil {
		return err
	}

	// Successfully wrote intermediate nodes.
	return db.metadataDB.Put(cleanShutdownKey, hadCleanShutdown)
}

func (db *Database) Delete(key []byte) error {
	// this is a duplicate because the database interface doesn't support
	// contexts, which are used for tracing
	return db.Remove(context.Background(), key)
}

func (db *Database) Get(key []byte) ([]byte, error) {
	// this is a duplicate because the database interface doesn't support
	// contexts, which are used for tracing
	return db.GetValue(context.Background(), key)
}

// GetValues returns the values associated with [keys], with a parallel error
// slice (database.ErrNotFound for absent keys).
func (db *Database) GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) {
	_, span := db.tracer.Start(ctx, "MerkleDB.GetValues", oteltrace.WithAttributes(
		attribute.Int("keyCount", len(keys)),
	))
	defer span.End()

	db.lock.RLock()
	defer db.lock.RUnlock()

	values := make([][]byte, len(keys))
	errors := make([]error, len(keys))
	for i, key := range keys {
		// lock=false: [db.lock] is already read-locked above.
		values[i], errors[i] = db.getValueCopy(newPath(key), false)
	}
	return values, errors
}

// GetValue returns the value associated with [key].
// Returns database.ErrNotFound if it doesn't exist.
func (db *Database) GetValue(ctx context.Context, key []byte) ([]byte, error) {
	_, span := db.tracer.Start(ctx, "MerkleDB.GetValue")
	defer span.End()

	return db.getValueCopy(newPath(key), true)
}

// getValueCopy returns a copy of the value for the given [key].
// Returns database.ErrNotFound if it doesn't exist.
func (db *Database) getValueCopy(key path, lock bool) ([]byte, error) {
	val, err := db.getValue(key, lock)
	if err != nil {
		return nil, err
	}
	// Clone so callers can't mutate the db's copy of the value.
	return slices.Clone(val), nil
}

// getValue returns the value for the given [key].
// Returns database.ErrNotFound if it doesn't exist.
// If [lock] is true, acquires [db.lock]'s read lock; otherwise assumes the
// caller already holds it. The returned slice is NOT a copy — see getValueCopy.
func (db *Database) getValue(key path, lock bool) ([]byte, error) {
	if lock {
		db.lock.RLock()
		defer db.lock.RUnlock()
	}

	if db.closed {
		return nil, database.ErrClosed
	}
	n, err := db.getNode(key)
	if err != nil {
		return nil, err
	}
	if n.value.IsNothing() {
		// Node exists only as an intermediary; it holds no value.
		return nil, database.ErrNotFound
	}
	return n.value.value, nil
}

// Returns the ID of the root node of the merkle trie.
func (db *Database) GetMerkleRoot(ctx context.Context) (ids.ID, error) {
	_, span := db.tracer.Start(ctx, "MerkleDB.GetMerkleRoot")
	defer span.End()

	db.lock.RLock()
	defer db.lock.RUnlock()

	return db.getMerkleRoot(), nil
}

// Returns the ID of the root node of the merkle trie.
// Assumes [db.lock] is read locked.
func (db *Database) getMerkleRoot() ids.ID {
	return db.root.id
}

// Returns a proof of the existence/non-existence of [key] in this trie.
func (db *Database) GetProof(ctx context.Context, key []byte) (*Proof, error) {
	db.commitLock.RLock()
	defer db.commitLock.RUnlock()

	return db.getProof(ctx, key)
}

// Returns a proof of the existence/non-existence of [key] in this trie.
// Assumes [db.commitLock] is read locked.
func (db *Database) getProof(ctx context.Context, key []byte) (*Proof, error) {
	view, err := db.newUntrackedView(defaultPreallocationSize)
	if err != nil {
		return nil, err
	}
	// Don't need to lock [view] because nobody else has a reference to it.
	return view.getProof(ctx, key)
}

// Returns a proof for the key/value pairs in this trie within the range
// [start, end].
func (db *Database) GetRangeProof(
	ctx context.Context,
	start,
	end []byte,
	maxLength int,
) (*RangeProof, error) {
	db.commitLock.RLock()
	defer db.commitLock.RUnlock()

	return db.getRangeProofAtRoot(ctx, db.getMerkleRoot(), start, end, maxLength)
}

// Returns a proof for the key/value pairs in this trie within the range
// [start, end] when the root of the trie was [rootID].
func (db *Database) GetRangeProofAtRoot(
	ctx context.Context,
	rootID ids.ID,
	start,
	end []byte,
	maxLength int,
) (*RangeProof, error) {
	db.commitLock.RLock()
	defer db.commitLock.RUnlock()

	return db.getRangeProofAtRoot(ctx, rootID, start, end, maxLength)
}

// Assumes [db.commitLock] is read locked.
func (db *Database) getRangeProofAtRoot(
	ctx context.Context,
	rootID ids.ID,
	start,
	end []byte,
	maxLength int,
) (*RangeProof, error) {
	if maxLength <= 0 {
		return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength)
	}

	historicalView, err := db.getHistoricalViewForRange(rootID, start, end)
	if err != nil {
		return nil, err
	}
	return historicalView.GetRangeProof(ctx, start, end, maxLength)
}

// Returns a proof for a subset of the key/value changes in key range
// [start, end] that occurred between [startRootID] and [endRootID].
// Returns at most [maxLength] key/value pairs.
func (db *Database) GetChangeProof(
	ctx context.Context,
	startRootID ids.ID,
	endRootID ids.ID,
	start []byte,
	end []byte,
	maxLength int,
) (*ChangeProof, error) {
	if len(end) > 0 && bytes.Compare(start, end) == 1 {
		return nil, ErrStartAfterEnd
	}
	if startRootID == endRootID {
		return nil, errSameRoot
	}

	db.commitLock.RLock()
	defer db.commitLock.RUnlock()

	result := &ChangeProof{
		HadRootsInHistory: true,
	}
	changes, err := db.history.getValueChanges(startRootID, endRootID, start, end, maxLength)
	if err == ErrRootIDNotPresent {
		// History no longer covers [startRootID]/[endRootID]; signal the
		// caller via the flag rather than an error.
		result.HadRootsInHistory = false
		return result, nil
	}
	if err != nil {
		return nil, err
	}

	// [changedKeys] are a subset of the keys that were added or had their
	// values modified between [startRootID] to [endRootID] sorted in increasing
	// order.
	changedKeys := maps.Keys(changes.values)
	slices.SortFunc(changedKeys, func(i, j path) bool {
		return i.Compare(j) < 0
	})

	// TODO: sync.pool these buffers
	result.KeyValues = make([]KeyValue, 0, len(changedKeys))
	result.DeletedKeys = make([][]byte, 0, len(changedKeys))

	for _, key := range changedKeys {
		change := changes.values[key]
		serializedKey := key.Serialize().Value

		if change.after.IsNothing() {
			result.DeletedKeys = append(result.DeletedKeys, serializedKey)
		} else {
			result.KeyValues = append(result.KeyValues, KeyValue{
				Key: serializedKey,
				// create a copy so edits of the []byte don't affect the db
				Value: slices.Clone(change.after.value),
			})
		}
	}
	largestKey := result.getLargestKey(end)

	// Since we hold [db.commitlock] we must still have sufficient
	// history to recreate the trie at [endRootID].
	historicalView, err := db.getHistoricalViewForRange(endRootID, start, largestKey)
	if err != nil {
		return nil, err
	}

	if len(largestKey) > 0 {
		endProof, err := historicalView.getProof(ctx, largestKey)
		if err != nil {
			return nil, err
		}
		result.EndProof = endProof.Path
	}

	if len(start) > 0 {
		startProof, err := historicalView.getProof(ctx, start)
		if err != nil {
			return nil, err
		}
		result.StartProof = startProof.Path

		// strip out any common nodes to reduce proof size
		commonNodeIndex := 0
		for ; commonNodeIndex < len(result.StartProof) &&
			commonNodeIndex < len(result.EndProof) &&
			result.StartProof[commonNodeIndex].KeyPath.Equal(result.EndProof[commonNodeIndex].KeyPath); commonNodeIndex++ {
		}
		result.StartProof = result.StartProof[commonNodeIndex:]
	}

	// Note that one of the following must be true:
	// - [result.StartProof] is non-empty.
	// - [result.EndProof] is non-empty.
	// - [result.KeyValues] is non-empty.
	// - [result.DeletedKeys] is non-empty.
	// If all of these were false, it would mean that no
	// [start] and [end] were given, and no diff between
	// the trie at [startRootID] and [endRootID] was found.
	// Since [startRootID] != [endRootID], this is impossible.
	return result, nil
}

// Returns a new view on top of this trie.
// Changes made to the view will only be reflected in the original trie if Commit is called.
// Assumes [db.lock] isn't held.
func (db *Database) NewView() (TrieView, error) {
	return db.NewPreallocatedView(defaultPreallocationSize)
}

// Returns a new view that isn't tracked in [db.childViews].
// For internal use only, namely in methods that create short-lived views.
// Assumes [db.lock] is read locked.
func (db *Database) newUntrackedView(estimatedSize int) (*trieView, error) {
	return newTrieView(db, db, db.root.clone(), estimatedSize)
}

// Returns a new view preallocated to hold at least [estimatedSize] value changes at a time.
// If more changes are made, additional memory will be allocated.
// The returned view is added to [db.childViews].
// Assumes [db.lock] isn't held.
+func (db *Database) NewPreallocatedView(estimatedSize int) (TrieView, error) { + db.lock.Lock() + defer db.lock.Unlock() + + newView, err := newTrieView(db, db, db.root.clone(), estimatedSize) + if err != nil { + return nil, err + } + db.childViews = append(db.childViews, newView) + return newView, nil +} + +func (db *Database) Has(k []byte) (bool, error) { + db.lock.RLock() + defer db.lock.RUnlock() + + if db.closed { + return false, database.ErrClosed + } + + _, err := db.getValue(newPath(k), true) + if err == database.ErrNotFound { + return false, nil + } + return err == nil, err +} + +func (db *Database) HealthCheck(ctx context.Context) (interface{}, error) { + return db.nodeDB.HealthCheck(ctx) +} + +func (db *Database) Insert(ctx context.Context, k, v []byte) error { + db.commitLock.Lock() + defer db.commitLock.Unlock() + + db.lock.RLock() + view, err := db.newUntrackedView(defaultPreallocationSize) + db.lock.RUnlock() + + if err != nil { + return err + } + // Don't need to lock [view] because nobody else has a reference to it. 
+ if err := view.insert(k, v); err != nil { + return err + } + return view.commitToDB(ctx) +} + +func (db *Database) NewBatch() database.Batch { + return &batch{ + db: db, + } +} + +func (db *Database) NewIterator() database.Iterator { + return &iterator{ + nodeIter: db.nodeDB.NewIterator(), + db: db, + } +} + +func (db *Database) NewIteratorWithStart(start []byte) database.Iterator { + return &iterator{ + nodeIter: db.nodeDB.NewIteratorWithStart(newPath(start).Bytes()), + db: db, + } +} + +func (db *Database) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return &iterator{ + nodeIter: db.nodeDB.NewIteratorWithPrefix(newPath(prefix).Bytes()), + db: db, + } +} + +func (db *Database) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + startBytes := newPath(start).Bytes() + prefixBytes := newPath(prefix).Bytes() + return &iterator{ + nodeIter: db.nodeDB.NewIteratorWithStartAndPrefix(startBytes, prefixBytes), + db: db, + } +} + +// If [node] is an intermediary node, puts it in [nodeDB]. +// Note this is called by [db.nodeCache] with its lock held, so +// the movement of [node] from [db.nodeCache] to [db.nodeDB] is atomic. +// As soon as [db.nodeCache] no longer has [node], [db.nodeDB] does. +// Non-nil error is fatal -- causes [db] to close. +func (db *Database) onEviction(node *node) error { + if node == nil || node.hasValue() { + // only persist intermediary nodes + return nil + } + + nodeBytes, err := node.marshal() + if err != nil { + db.onEvictionErr.Set(err) + // Prevent reads/writes from/to [db.nodeDB] to avoid inconsistent state. + _ = db.nodeDB.Close() + // This is a fatal error. + go db.Close() + return err + } + + if err := db.nodeDB.Put(node.key.Bytes(), nodeBytes); err != nil { + db.onEvictionErr.Set(err) + _ = db.nodeDB.Close() + go db.Close() + return err + } + return nil +} + +// Inserts the key/value pair into the db. 
// Put inserts the key/value pair into the db (database.KeyValueWriter).
func (db *Database) Put(k, v []byte) error {
	return db.Insert(context.Background(), k, v)
}

// Remove deletes [key] from the trie and commits the change.
func (db *Database) Remove(ctx context.Context, key []byte) error {
	db.commitLock.Lock()
	defer db.commitLock.Unlock()

	db.lock.RLock()
	view, err := db.newUntrackedView(defaultPreallocationSize)
	db.lock.RUnlock()
	if err != nil {
		return err
	}
	// Don't need to lock [view] because nobody else has a reference to it.
	if err = view.remove(key); err != nil {
		return err
	}
	return view.commitToDB(ctx)
}

// commitBatch applies [ops] atomically via a single view commit.
func (db *Database) commitBatch(ops []database.BatchOp) error {
	db.commitLock.Lock()
	defer db.commitLock.Unlock()

	view, err := db.prepareBatchView(ops)
	if err != nil {
		return err
	}
	return view.commitToDB(context.Background())
}

// CommitToParent is a no-op for the db because it has no parent
func (*Database) CommitToParent(context.Context) error {
	return nil
}

// commitToDB is a no-op for the db because it is the db
func (*Database) commitToDB(context.Context) error {
	return nil
}

// commitChanges commits the changes in trieToCommit to the db
func (db *Database) commitChanges(ctx context.Context, trieToCommit *trieView) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if trieToCommit == nil {
		return nil
	}
	if trieToCommit.isInvalid() {
		return ErrInvalid
	}
	changes := trieToCommit.changes
	_, span := db.tracer.Start(ctx, "MerkleDB.commitChanges", oteltrace.WithAttributes(
		attribute.Int("nodesChanged", len(changes.nodes)),
		attribute.Int("valuesChanged", len(changes.values)),
	))
	defer span.End()

	if db.closed {
		return database.ErrClosed
	}

	// invalidate all child views except for the view being committed
	db.invalidateChildrenExcept(trieToCommit)

	// move any child views of the committed trie onto the db
	db.moveChildViewsToDB(trieToCommit)

	if len(changes.nodes) == 0 {
		return nil
	}

	rootChange, ok := changes.nodes[RootPath]
	if !ok {
		return errNoNewRoot
	}

	// commit any outstanding cache evicted nodes.
	// Note that we do this here because below we may Abort
	// [db.nodeDB], which would cause us to lose these changes.
	if err := db.nodeDB.Commit(); err != nil {
		return err
	}

	_, nodesSpan := db.tracer.Start(ctx, "MerkleDB.commitChanges.writeNodes")
	for key, nodeChange := range changes.nodes {
		if nodeChange.after == nil {
			// Node was deleted.
			db.metrics.IOKeyWrite()
			if err := db.nodeDB.Delete(key.Bytes()); err != nil {
				db.nodeDB.Abort()
				nodesSpan.End()
				return err
			}
		} else if nodeChange.after.hasValue() || (nodeChange.before != nil && nodeChange.before.hasValue()) {
			// Note: If [nodeChange.after] is an intermediary node we only
			// persist [nodeChange] if [nodeChange.before] was a leaf.
			// This guarantees that the key/value pairs are correctly persisted
			// on disk, without being polluted by the previous value.
			// Otherwise, intermediary nodes are persisted on cache eviction or
			// shutdown.
			db.metrics.IOKeyWrite()
			nodeBytes, err := nodeChange.after.marshal()
			if err != nil {
				db.nodeDB.Abort()
				nodesSpan.End()
				return err
			}

			if err := db.nodeDB.Put(key.Bytes(), nodeBytes); err != nil {
				db.nodeDB.Abort()
				nodesSpan.End()
				return err
			}
		}
	}
	nodesSpan.End()

	_, commitSpan := db.tracer.Start(ctx, "MerkleDB.commitChanges.dbCommit")
	err := db.nodeDB.Commit()
	commitSpan.End()
	if err != nil {
		db.nodeDB.Abort()
		return err
	}

	// Only modify in-memory state after the commit succeeds
	// so that we don't need to clean up on error.
	db.root = rootChange.after

	for key, nodeChange := range changes.nodes {
		if err := db.putNodeInCache(key, nodeChange.after); err != nil {
			return err
		}
	}

	db.history.record(changes)
	return nil
}

// moveChildViewsToDB removes any child views from the trieToCommit and moves them to the db
// assumes [db.lock] is held
func (db *Database) moveChildViewsToDB(trieToCommit *trieView) {
	trieToCommit.validityTrackingLock.Lock()
	defer trieToCommit.validityTrackingLock.Unlock()

	for _, childView := range trieToCommit.childViews {
		// Re-parent each surviving child view onto the db.
		childView.updateParent(db)
		db.childViews = append(db.childViews, childView)
	}
	trieToCommit.childViews = make([]*trieView, 0, defaultPreallocationSize)
}

// CommitToDB is a No Op for db since it is already in sync with itself
// here to satisfy TrieView interface
func (*Database) CommitToDB(context.Context) error {
	return nil
}

// invalidate and remove any child views that aren't the exception
// Assumes [db.lock] is held.
func (db *Database) invalidateChildrenExcept(exception *trieView) {
	isTrackedView := false

	for _, childView := range db.childViews {
		if childView != exception {
			childView.invalidate()
		} else {
			isTrackedView = true
		}
	}
	// Keep only [exception] (if it was tracked) in the child-view list.
	db.childViews = make([]*trieView, 0, defaultPreallocationSize)
	if isTrackedView {
		db.childViews = append(db.childViews, exception)
	}
}

// initializeRootIfNeeded loads the persisted root into memory, creating and
// persisting a fresh empty root if none exists. Returns the root's ID.
func (db *Database) initializeRootIfNeeded() (ids.ID, error) {
	// ensure that root exists
	nodeBytes, err := db.nodeDB.Get(rootKey)
	if err == nil {
		// Root already exists, so parse it and set the in-mem copy
		db.root, err = parseNode(RootPath, nodeBytes)
		if err != nil {
			return ids.Empty, err
		}
		if err := db.root.calculateID(db.metrics); err != nil {
			return ids.Empty, err
		}
		return db.root.id, nil
	}
	if err != database.ErrNotFound {
		return ids.Empty, err
	}

	// Root doesn't exist; make a new one.
	db.root = newNode(nil, RootPath)

	// update its ID
	if err := db.root.calculateID(db.metrics); err != nil {
		return ids.Empty, err
	}

	// write the newly constructed root to the DB
	rootBytes, err := db.root.marshal()
	if err != nil {
		return ids.Empty, err
	}
	if err := db.nodeDB.Put(rootKey, rootBytes); err != nil {
		return ids.Empty, err
	}

	return db.root.id, db.nodeDB.Commit()
}

// Returns a view of the trie as it was when it had root [rootID] for keys within range [start, end].
// Assumes [db.commitLock] is read locked.
func (db *Database) getHistoricalViewForRange(
	rootID ids.ID,
	start []byte,
	end []byte,
) (*trieView, error) {
	currentRootID := db.getMerkleRoot()

	// looking for the trie's current root id, so return the trie unmodified
	if currentRootID == rootID {
		return newTrieView(db, db, db.root.clone(), 100)
	}

	changeHistory, err := db.history.getChangesToGetToRoot(rootID, start, end)
	if err != nil {
		return nil, err
	}
	return newTrieViewWithChanges(db, db, changeHistory, len(changeHistory.nodes))
}

// Returns all of the keys in range [start, end] that aren't in [keySet].
// If [start] is nil, then the range has no lower bound.
// If [end] is nil, then the range has no upper bound.
func (db *Database) getKeysNotInSet(start, end []byte, keySet set.Set[string]) ([][]byte, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	it := db.NewIteratorWithStart(start)
	defer it.Release()

	keysNotInSet := make([][]byte, 0, keySet.Len())
	for it.Next() {
		key := it.Key()
		if len(end) != 0 && bytes.Compare(key, end) > 0 {
			break
		}
		if !keySet.Contains(string(key)) {
			keysNotInSet = append(keysNotInSet, key)
		}
	}
	return keysNotInSet, it.Error()
}

// Returns a copy of the node with the given [key].
// This copy may be edited by the caller without affecting the database state.
// Returns database.ErrNotFound if the node doesn't exist.
// Assumes [db.lock] isn't held.
func (db *Database) getEditableNode(key path) (*node, error) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	n, err := db.getNode(key)
	if err != nil {
		return nil, err
	}
	return n.clone(), nil
}

// Returns the node with the given [key].
// Editing the returned node affects the database state.
// Returns database.ErrNotFound if the node doesn't exist.
// Assumes [db.lock] is read locked.
func (db *Database) getNode(key path) (*node, error) {
	if key == RootPath {
		return db.root, nil
	}

	if n, isCached := db.getNodeInCache(key); isCached {
		db.metrics.DBNodeCacheHit()
		if n == nil {
			// A cached nil entry records a previous miss.
			return nil, database.ErrNotFound
		}
		return n, nil
	}

	db.metrics.DBNodeCacheMiss()
	db.metrics.IOKeyRead()
	rawBytes, err := db.nodeDB.Get(key.Bytes())
	if err != nil {
		if err == database.ErrNotFound {
			// Cache the miss.
			if err := db.putNodeInCache(key, nil); err != nil {
				return nil, err
			}
		}
		return nil, err
	}

	node, err := parseNode(key, rawBytes)
	if err != nil {
		return nil, err
	}

	err = db.putNodeInCache(key, node)
	return node, err
}

// If [lock], grabs [db.lock]'s read lock.
// Otherwise assumes [db.lock] is already read locked.
func (db *Database) getKeyValues(
	start []byte,
	end []byte,
	maxLength int,
	keysToIgnore set.Set[string],
	lock bool,
) ([]KeyValue, error) {
	if lock {
		db.lock.RLock()
		defer db.lock.RUnlock()
	}
	if maxLength <= 0 {
		return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength)
	}

	it := db.NewIteratorWithStart(start)
	defer it.Release()

	remainingLength := maxLength
	result := make([]KeyValue, 0, maxLength)
	// Keep adding key/value pairs until one of the following:
	// * We hit a key that is lexicographically larger than the end key.
	// * [maxLength] elements are in [result].
	// * There are no more values to add.
	for remainingLength > 0 && it.Next() {
		key := it.Key()
		if len(end) != 0 && bytes.Compare(it.Key(), end) > 0 {
			break
		}
		if keysToIgnore.Contains(string(key)) {
			continue
		}
		result = append(result, KeyValue{
			Key:   key,
			Value: it.Value(),
		})
		remainingLength--
	}

	return result, it.Error()
}

// Returns a new view atop [db] with the changes in [ops] applied to it.
func (db *Database) prepareBatchView(
	ops []database.BatchOp,
) (*trieView, error) {
	db.lock.RLock()
	view, err := db.newUntrackedView(len(ops))
	db.lock.RUnlock()
	if err != nil {
		return nil, err
	}
	// Don't need to lock [view] because nobody else has a reference to it.

	// write into the trie
	for _, op := range ops {
		if op.Delete {
			if err := view.remove(op.Key); err != nil {
				return nil, err
			}
		} else if err := view.insert(op.Key, op.Value); err != nil {
			return nil, err
		}
	}

	return view, nil
}

// Returns a new view atop [db] with the key/value pairs in [proof.KeyValues]
// inserted and the key/value pairs in [proof.DeletedKeys] removed.
func (db *Database) prepareChangeProofView(proof *ChangeProof) (*trieView, error) {
	db.lock.RLock()
	view, err := db.newUntrackedView(len(proof.KeyValues))
	db.lock.RUnlock()
	if err != nil {
		return nil, err
	}
	// Don't need to lock [view] because nobody else has a reference to it.

	for _, kv := range proof.KeyValues {
		if err := view.insert(kv.Key, kv.Value); err != nil {
			return nil, err
		}
	}

	for _, keyToDelete := range proof.DeletedKeys {
		if err := view.remove(keyToDelete); err != nil {
			return nil, err
		}
	}
	return view, nil
}

// Returns a new view atop [db] with the key/value pairs in [proof.KeyValues] added and
// any existing key-value pairs in the proof's range but not in the proof removed.
// assumes [db.commitLock] is held
func (db *Database) prepareRangeProofView(start []byte, proof *RangeProof) (*trieView, error) {
	// Don't need to lock [view] because nobody else has a reference to it.
	db.lock.RLock()
	view, err := db.newUntrackedView(len(proof.KeyValues))
	db.lock.RUnlock()

	if err != nil {
		return nil, err
	}
	keys := set.NewSet[string](len(proof.KeyValues))
	for _, kv := range proof.KeyValues {
		keys.Add(string(kv.Key))
		if err := view.insert(kv.Key, kv.Value); err != nil {
			return nil, err
		}
	}

	// Any key in [start, largestKey] that the proof doesn't mention must be
	// removed so the view matches the prover's trie over that range.
	var largestKey []byte
	if len(proof.KeyValues) > 0 {
		largestKey = proof.KeyValues[len(proof.KeyValues)-1].Key
	}
	keysToDelete, err := db.getKeysNotInSet(start, largestKey, keys)
	if err != nil {
		return nil, err
	}
	for _, keyToDelete := range keysToDelete {
		if err := view.remove(keyToDelete); err != nil {
			return nil, err
		}
	}
	return view, nil
}

// Non-nil error is fatal -- [db] will close.
func (db *Database) putNodeInCache(key path, n *node) error {
	// TODO Cache metrics
	// Note that this may cause a node to be evicted from the cache,
	// which will call [OnEviction].
	return db.nodeCache.Put(key, n)
}

func (db *Database) getNodeInCache(key path) (*node, bool) {
	// TODO Cache metrics
	if node, ok := db.nodeCache.Get(key); ok {
		return node, true
	}
	return nil, false
}
diff --git a/avalanchego/x/merkledb/db_test.go b/avalanchego/x/merkledb/db_test.go
new file mode 100644
index 00000000..8a0f98d3
--- /dev/null
+++ b/avalanchego/x/merkledb/db_test.go
@@ -0,0 +1,1003 @@
// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package merkledb

import (
	"bytes"
	"context"
	"math/rand"
	"strconv"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/memdb"
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/trace"
	"github.com/ava-labs/avalanchego/utils/hashing"
)

const minCacheSize = 1000

// newNoopTracer returns a disabled tracer for tests.
func newNoopTracer() trace.Tracer {
	tracer, _ := trace.New(trace.Config{Enabled: false})
	return tracer
}

// Mutating a value returned by Get must not affect the stored node.
func Test_MerkleDB_Get_Safety(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)
	require.NoError(t, db.Put([]byte{0}, []byte{0, 1, 2}))

	val, err := db.Get([]byte{0})
	require.NoError(t, err)
	n, err := db.getNode(newPath([]byte{0}))
	require.NoError(t, err)
	val[0] = 1

	// node's value shouldn't be affected by the edit
	require.NotEqual(t, val, n.value.value)
}

// Mutating a value returned by GetValues must not affect the db.
func Test_MerkleDB_GetValues_Safety(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)
	require.NoError(t, db.Put([]byte{0}, []byte{0, 1, 2}))

	vals, errs := db.GetValues(context.Background(), [][]byte{{0}})
	require.Len(t, errs, 1)
	require.NoError(t, errs[0])
	require.Equal(t, []byte{0, 1, 2}, vals[0])
	vals[0][0] = 1

	// editing the value array shouldn't affect the db
	vals, errs = db.GetValues(context.Background(), [][]byte{{0}})
	require.Len(t, errs, 1)
	require.NoError(t, errs[0])
	require.Equal(t, []byte{0, 1, 2}, vals[0])
}

// Run the shared database-interface conformance tests against merkledb.
func Test_MerkleDB_DB_Interface(t *testing.T) {
	for _, test := range database.Tests {
		db, err := getBasicDB()
		require.NoError(t, err)
		test(t, db)
	}
}

func Benchmark_MerkleDB_DBInterface(b *testing.B) {
	for _, size := range database.BenchmarkSizes {
		keys, values := database.SetupBenchmark(b, size[0], size[1], size[2])
		for _, bench := range database.Benchmarks {
			db, err := getBasicDB()
			require.NoError(b, err)
			bench(b, db, "merkledb", keys, values)
		}
	}
}

func Test_MerkleDB_DB_Load_Root_From_DB(t *testing.T) {
	require := require.New(t)
	rdb := memdb.New()
	defer rdb.Close()

	db, err := New(
		context.Background(),
		rdb,
		Config{
			Tracer:        newNoopTracer(),
			HistoryLength: 100,
			NodeCacheSize: 100,
		},
	)
	require.NoError(err)

	// Populate initial set of keys
	view, err := db.NewView()
	require.NoError(err)
	for i := 0; i < 100; i++ {
		k := []byte(strconv.Itoa(i))
		require.NoError(view.Insert(context.Background(), k, hashing.ComputeHash256(k)))
	}
	require.NoError(view.commitToDB(context.Background()))

	root, err := db.GetMerkleRoot(context.Background())
	require.NoError(err)

	require.NoError(db.Close())

	// reloading the DB, should set the root back to the one that was saved to the memdb
	db, err = New(
		context.Background(),
		rdb,
		Config{
			Tracer:        newNoopTracer(),
			HistoryLength: 100,
			NodeCacheSize: 100,
		},
	)
	require.NoError(err)
	reloadedRoot, err := db.GetMerkleRoot(context.Background())
	require.NoError(err)
	require.Equal(root, reloadedRoot)
}

func Test_MerkleDB_DB_Rebuild(t *testing.T) {
	require := require.New(t)

	rdb := memdb.New()
	defer rdb.Close()

	initialSize := 10_000

	db, err := New(
		context.Background(),
		rdb,
		Config{
			Tracer:        newNoopTracer(),
			HistoryLength: 100,
			NodeCacheSize: initialSize,
		},
	)
	require.NoError(err)

	// Populate initial set of keys
	view, err := db.NewView()
	require.NoError(err)
	for i := 0; i < initialSize; i++ {
		k := []byte(strconv.Itoa(i))
		require.NoError(view.Insert(context.Background(), k, hashing.ComputeHash256(k)))
	}
	require.NoError(view.CommitToDB(context.Background()))

	root, err := db.GetMerkleRoot(context.Background())
	require.NoError(err)

	// rebuild must reproduce the same root
	require.NoError(db.rebuild(context.Background()))

	rebuiltRoot, err := db.GetMerkleRoot(context.Background())
	require.NoError(err)
	require.Equal(root, rebuiltRoot)
}

func Test_MerkleDB_Failed_Batch_Commit(t *testing.T) {
	memDB := memdb.New()
	db, err := New(
		context.Background(),
		memDB,
		Config{
			Tracer:        newNoopTracer(),
			HistoryLength: 300,
		},
	)
	require.NoError(t, err)

	// Close the underlying db so the batch write must fail.
	_ = memDB.Close()

	batch := db.NewBatch()
	err = batch.Put([]byte("key1"), []byte("1"))
	require.NoError(t, err)
	err = batch.Put([]byte("key2"), []byte("2"))
	require.NoError(t, err)
	err = batch.Put([]byte("key3"), []byte("3"))
	require.NoError(t, err)
	err = batch.Write()
	// batch fails
	require.ErrorIs(t, err, database.ErrClosed)
}

func Test_MerkleDB_Value_Cache(t *testing.T) {
	memDB := memdb.New()
	db, err := New(
		context.Background(),
		memDB,
		Config{
			Tracer:        newNoopTracer(),
			HistoryLength: 300,
			NodeCacheSize: minCacheSize,
		},
	)
	require.NoError(t, err)

	batch := db.NewBatch()
	err = batch.Put([]byte("key1"), []byte("1"))
	require.NoError(t, err)

	err = batch.Put([]byte("key2"), []byte("2"))
	require.NoError(t, err)

	require.NoError(t, err)
	err = batch.Write()
	require.NoError(t, err)

	batch = db.NewBatch()
	// force key2 to be inserted into the cache as not found
	err = batch.Delete([]byte("key2"))
	require.NoError(t, err)
	err = batch.Write()
	require.NoError(t, err)

	_ = memDB.Close()

	// still works because key1 is read from cache
	value, err := db.Get([]byte("key1"))
	require.NoError(t, err)
	require.Equal(t, []byte("1"), value)

	// still returns missing instead of closed because key2 is read from cache
	_, err = db.Get([]byte("key2"))
	require.ErrorIs(t, err, database.ErrNotFound)
}

func Test_MerkleDB_Invalidate_Siblings_On_Commit(t *testing.T) {
	dbTrie, err := getBasicDB()
	require.NoError(t, err)
	require.NotNil(t, dbTrie)

	viewToCommit, err := dbTrie.NewView()
	require.NoError(t, err)

	sibling1, err := dbTrie.NewView()
	require.NoError(t, err)
	sibling2, err := dbTrie.NewView()
	require.NoError(t, err)

	require.False(t, sibling1.(*trieView).isInvalid())
	require.False(t, sibling2.(*trieView).isInvalid())

	// Committing one view invalidates its siblings but not itself.
	require.NoError(t, viewToCommit.Insert(context.Background(), []byte{0}, []byte{0}))
	require.NoError(t, viewToCommit.CommitToDB(context.Background()))

	require.True(t, sibling1.(*trieView).isInvalid())
	require.True(t, sibling2.(*trieView).isInvalid())
	require.False(t, viewToCommit.(*trieView).isInvalid())
}

func Test_MerkleDB_Commit_Proof_To_Empty_Trie(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)
	batch := db.NewBatch()
	err = batch.Put([]byte("key1"), []byte("1"))
	require.NoError(t, err)
	err = batch.Put([]byte("key2"), []byte("2"))
	require.NoError(t, err)
	err = batch.Put([]byte("key3"), []byte("3"))
	require.NoError(t, err)
	err = batch.Write()
	require.NoError(t, err)

	proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 10)
	require.NoError(t, err)

	freshDB, err := getBasicDB()
	require.NoError(t, err)

	err = freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof)
	require.NoError(t, err)

	value, err := freshDB.Get([]byte("key2"))
	require.NoError(t, err)
	require.Equal(t, []byte("2"), value)

	// Committing the proof must reproduce the source trie's root.
	freshRoot, err := freshDB.GetMerkleRoot(context.Background())
	require.NoError(t, err)
	oldRoot, err := db.GetMerkleRoot(context.Background())
	require.NoError(t, err)
	require.Equal(t, oldRoot, freshRoot)
}

func Test_MerkleDB_Commit_Proof_To_Filled_Trie(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)
	batch := db.NewBatch()
	err = batch.Put([]byte("key1"), []byte("1"))
	require.NoError(t, err)
	err = batch.Put([]byte("key2"), []byte("2"))
	require.NoError(t, err)
	err = batch.Put([]byte("key3"), []byte("3"))
	require.NoError(t, err)
	err = batch.Write()
	require.NoError(t, err)

	proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 10)
	require.NoError(t, err)

	// Pre-fill the target trie with conflicting values; the proof commit
	// must overwrite/remove them.
	freshDB, err := getBasicDB()
	require.NoError(t, err)
	batch = freshDB.NewBatch()
	err = batch.Put([]byte("key1"), []byte("3"))
	require.NoError(t, err)
	err = batch.Put([]byte("key2"), []byte("4"))
	require.NoError(t, err)
	err = batch.Put([]byte("key3"), []byte("5"))
	require.NoError(t, err)
	err = batch.Put([]byte("key25"), []byte("5"))
	require.NoError(t, err)
	err = batch.Write()
	require.NoError(t, err)

	err = freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof)
	require.NoError(t, err)

	value, err := freshDB.Get([]byte("key2"))
	require.NoError(t, err)
	require.Equal(t, []byte("2"), value)

	freshRoot, err := freshDB.GetMerkleRoot(context.Background())
	require.NoError(t, err)
	oldRoot, err := db.GetMerkleRoot(context.Background())
	require.NoError(t, err)
	require.Equal(t, oldRoot, freshRoot)
}

func Test_MerkleDB_GetValues(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)

	writeBasicBatch(t, db)
	keys := [][]byte{{0}, {1}, {2}, {10}}
	values, errors := db.GetValues(context.Background(), keys)
	require.Len(t, values, len(keys))
	require.Len(t, errors, len(keys))

	// first 3 have values
	// last was not found
	require.NoError(t, errors[0])
	require.NoError(t, errors[1])
	require.NoError(t, errors[2])
	require.ErrorIs(t, errors[3], database.ErrNotFound)

	require.Equal(t, []byte{0}, values[0])
	require.Equal(t, []byte{1}, values[1])
	require.Equal(t, []byte{2}, values[2])
	require.Nil(t, values[3])
}

func Test_MerkleDB_InsertNil(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)
	batch := db.NewBatch()
	err = batch.Put([]byte("key0"), nil)
	require.NoError(t, err)
	err = batch.Write()
	require.NoError(t, err)

	value, err := db.Get([]byte("key0"))
	require.NoError(t, err)
	require.Nil(t, value)

	value, err = getNodeValue(db, "key0")
	require.NoError(t, err)
	require.Nil(t, value)
}

func Test_MerkleDB_InsertAndRetrieve(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)

	// value hasn't been inserted so shouldn't exist
	value, err := db.Get([]byte("key"))
	require.Error(t, err)
	require.Equal(t, database.ErrNotFound, err)
	require.Nil(t, value)

	err = db.Put([]byte("key"), []byte("value"))
	require.NoError(t, err)

	value, err = db.Get([]byte("key"))
	require.NoError(t, err)
	require.Equal(t, []byte("value"), value)
}

func Test_MerkleDB_HealthCheck(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)
	val, err := db.HealthCheck(context.Background())
	require.NoError(t, err)
	require.Nil(t, val)
}

func Test_MerkleDB_Overwrite(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)

	err = db.Put([]byte("key"), []byte("value0"))
	require.NoError(t, err)

	value, err := db.Get([]byte("key"))
	require.NoError(t, err)
	require.Equal(t, []byte("value0"), value)

	err = db.Put([]byte("key"), []byte("value1"))
	require.NoError(t, err)

	value, err = db.Get([]byte("key"))
	require.NoError(t, err)
	require.Equal(t, []byte("value1"), value)
}

func Test_MerkleDB_Delete(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)

	err = db.Put([]byte("key"), []byte("value0"))
	require.NoError(t, err)

	value, err := db.Get([]byte("key"))
	require.NoError(t, err)
	require.Equal(t, []byte("value0"), value)

	err = db.Delete([]byte("key"))
	require.NoError(t, err)

	value, err = db.Get([]byte("key"))
	require.ErrorIs(t, err, database.ErrNotFound)
	require.Nil(t, value)
}

func Test_MerkleDB_DeleteMissingKey(t *testing.T) {
	db, err := getBasicDB()
	require.NoError(t, err)

	err = db.Delete([]byte("key"))
	require.NoError(t, err)
}

// Test that untracked views aren't persisted to [db.childViews].
func TestDatabaseNewUntrackedView(t *testing.T) {
	require := require.New(t)

	db, err := getBasicDB()
	require.NoError(err)

	// Create a new untracked view.
+ view, err := db.newUntrackedView(defaultPreallocationSize) + require.NoError(err) + require.Empty(db.childViews) + + // Write to the untracked view. + err = view.Insert(context.Background(), []byte{1}, []byte{1}) + require.NoError(err) + + // Commit the view + err = view.CommitToDB(context.Background()) + require.NoError(err) + + // The untracked view should not be tracked by the parent database. + require.Empty(db.childViews) +} + +// Test that tracked views are persisted to [db.childViews]. +func TestDatabaseNewPreallocatedViewTracked(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + // Create a new tracked view. + view, err := db.NewPreallocatedView(10) + require.NoError(err) + require.Len(db.childViews, 1) + + // Write to the view. + err = view.Insert(context.Background(), []byte{1}, []byte{1}) + require.NoError(err) + + // Commit the view + err = view.CommitToDB(context.Background()) + require.NoError(err) + + // The view should be tracked by the parent database. + require.Contains(db.childViews, view) + require.Len(db.childViews, 1) +} + +func TestDatabaseCommitChanges(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + dbRoot := db.getMerkleRoot() + + // Committing a nil view should be a no-op. + err = db.commitToDB(context.Background()) + require.NoError(err) + require.Equal(dbRoot, db.getMerkleRoot()) // Root didn't change + + // Committing an invalid view should fail. + invalidView, err := db.NewView() + require.NoError(err) + invalidView.(*trieView).invalidate() + err = invalidView.commitToDB(context.Background()) + require.ErrorIs(err, ErrInvalid) + + // Add key-value pairs to the database + err = db.Put([]byte{1}, []byte{1}) + require.NoError(err) + err = db.Put([]byte{2}, []byte{2}) + require.NoError(err) + + // Make a view and insert/delete a key-value pair. 
+ view1Intf, err := db.NewView() + require.NoError(err) + view1, ok := view1Intf.(*trieView) + require.True(ok) + err = view1.Insert(context.Background(), []byte{3}, []byte{3}) + require.NoError(err) + err = view1.Remove(context.Background(), []byte{1}) + require.NoError(err) + view1Root, err := view1.getMerkleRoot(context.Background()) + require.NoError(err) + + // Make a second view + view2Intf, err := db.NewView() + require.NoError(err) + view2, ok := view2Intf.(*trieView) + require.True(ok) + + // Make a view atop a view + view3Intf, err := view1.NewView() + require.NoError(err) + view3, ok := view3Intf.(*trieView) + require.True(ok) + + // view3 + // | + // view1 view2 + // \ / + // db + + // Commit view1 + err = view1.commitToDB(context.Background()) + require.NoError(err) + + // Make sure the key-value pairs are correct. + _, err = db.Get([]byte{1}) + require.ErrorIs(err, database.ErrNotFound) + value, err := db.Get([]byte{2}) + require.NoError(err) + require.Equal([]byte{2}, value) + value, err = db.Get([]byte{3}) + require.NoError(err) + require.Equal([]byte{3}, value) + + // Make sure the root is right + require.Equal(view1Root, db.getMerkleRoot()) + + // Make sure view2 is invalid and view1 and view3 are valid. + require.False(view1.invalidated) + require.True(view2.invalidated) + require.False(view3.invalidated) + + // Make sure view2 isn't tracked by the database. + require.NotContains(db.childViews, view2) + + // Make sure view1 and view3 are tracked by the database. + require.Contains(db.childViews, view1) + require.Contains(db.childViews, view3) + + // Make sure view3 is now a child of db. 
+ require.Equal(db, view3.parentTrie) +} + +func TestDatabaseInvalidateChildrenExcept(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + // Create children + view1Intf, err := db.NewView() + require.NoError(err) + view1, ok := view1Intf.(*trieView) + require.True(ok) + + view2Intf, err := db.NewView() + require.NoError(err) + view2, ok := view2Intf.(*trieView) + require.True(ok) + + view3Intf, err := db.NewView() + require.NoError(err) + view3, ok := view3Intf.(*trieView) + require.True(ok) + + db.invalidateChildrenExcept(view1) + + // Make sure view1 is valid and view2 and view3 are invalid. + require.False(view1.invalidated) + require.True(view2.invalidated) + require.True(view3.invalidated) + require.Contains(db.childViews, view1) + require.Len(db.childViews, 1) + + db.invalidateChildrenExcept(nil) + + // Make sure all views are invalid. + require.True(view1.invalidated) + require.True(view2.invalidated) + require.True(view3.invalidated) + require.Empty(db.childViews) + + // Calling with an untracked view doesn't add the untracked view + db.invalidateChildrenExcept(view1) + require.Empty(db.childViews) +} + +func Test_MerkleDB_Random_Insert_Ordering(t *testing.T) { + totalState := 1000 + var ( + allKeys [][]byte + keyMap map[string]struct{} + ) + genKey := func(r *rand.Rand) []byte { + count := 0 + for { + var key []byte + if len(allKeys) > 2 && r.Intn(100) < 10 { + // new prefixed key + prefix := allKeys[r.Intn(len(allKeys))] + key = make([]byte, r.Intn(50)+len(prefix)) + copy(key, prefix) + _, err := r.Read(key[len(prefix):]) + require.NoError(t, err) + } else { + key = make([]byte, r.Intn(50)) + _, err := r.Read(key) + require.NoError(t, err) + } + if _, ok := keyMap[string(key)]; !ok { + allKeys = append(allKeys, key) + keyMap[string(key)] = struct{}{} + return key + } + count++ + } + } + + for i := 0; i < 3; i++ { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + + ops := make([]*testOperation, 0, 
totalState) + allKeys = [][]byte{} + keyMap = map[string]struct{}{} + for x := 0; x < totalState; x++ { + key := genKey(r) + value := make([]byte, r.Intn(51)) + if len(value) == 51 { + value = nil + } else { + _, err := r.Read(value) + require.NoError(t, err) + } + ops = append(ops, &testOperation{key: key, value: value}) + } + db, err := getBasicDB() + require.NoError(t, err) + result, err := applyOperations(db, ops) + require.NoError(t, err) + primaryRoot, err := result.GetMerkleRoot(context.Background()) + require.NoError(t, err) + for shuffleIndex := 0; shuffleIndex < 3; shuffleIndex++ { + r.Shuffle(totalState, func(i, j int) { + ops[i], ops[j] = ops[j], ops[i] + }) + result, err := applyOperations(db, ops) + require.NoError(t, err) + newRoot, err := result.GetMerkleRoot(context.Background()) + require.NoError(t, err) + require.Equal(t, primaryRoot, newRoot) + } + } +} + +type testOperation struct { + key []byte + value []byte + delete bool +} + +func applyOperations(t *Database, ops []*testOperation) (Trie, error) { + view, err := t.NewView() + if err != nil { + return nil, err + } + for _, op := range ops { + if op.delete { + if err := view.Remove(context.Background(), op.key); err != nil { + return nil, err + } + } else { + if err := view.Insert(context.Background(), op.key, op.value); err != nil { + return nil, err + } + } + } + return view, nil +} + +func Test_MerkleDB_RandomCases(t *testing.T) { + require := require.New(t) + + for i := 150; i < 500; i += 10 { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + runRandDBTest(require, r, generate(require, r, i, .01)) + } +} + +func Test_MerkleDB_RandomCases_InitialValues(t *testing.T) { + require := require.New(t) + + r := rand.New(rand.NewSource(int64(0))) // #nosec G404 + runRandDBTest(require, r, generateInitialValues(require, r, 2000, 3500, 0.0)) +} + +// randTest performs random trie operations. +// Instances of this test are created by Generate. 
+type randTest []randTestStep + +type randTestStep struct { + op int + key []byte // for opUpdate, opDelete, opGet + value []byte // for opUpdate +} + +const ( + opUpdate = iota + opDelete + opGet + opWriteBatch + opGenerateRangeProof + opGenerateChangeProof + opCheckhash + opMax // boundary value, not an actual op +) + +func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest) { + db, err := getBasicDB() + require.NoError(err) + + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + values := make(map[path][]byte) // tracks content of the trie + currentBatch := db.NewBatch() + currentValues := make(map[path][]byte) + deleteValues := make(map[path]struct{}) + pastRoots := []ids.ID{} + + for i, step := range rt { + require.LessOrEqual(i, len(rt)) + switch step.op { + case opUpdate: + err := currentBatch.Put(step.key, step.value) + require.NoError(err) + currentValues[newPath(step.key)] = step.value + delete(deleteValues, newPath(step.key)) + case opDelete: + err := currentBatch.Delete(step.key) + require.NoError(err) + deleteValues[newPath(step.key)] = struct{}{} + delete(currentValues, newPath(step.key)) + case opGenerateRangeProof: + root, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + if len(pastRoots) > 0 { + root = pastRoots[r.Intn(len(pastRoots))] + } + rangeProof, err := db.GetRangeProofAtRoot(context.Background(), root, step.key, step.value, 100) + require.NoError(err) + err = rangeProof.Verify( + context.Background(), + step.key, + step.value, + root, + ) + require.NoError(err) + require.LessOrEqual(len(rangeProof.KeyValues), 100) + case opGenerateChangeProof: + root, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + if len(pastRoots) > 1 { + root = pastRoots[r.Intn(len(pastRoots))] + } + changeProof, err := db.GetChangeProof(context.Background(), startRoot, root, step.key, step.value, 100) + if startRoot == root { + require.ErrorIs(err, errSameRoot) + continue + } 
+ require.NoError(err) + changeProofDB, err := getBasicDB() + require.NoError(err) + err = changeProof.Verify( + context.Background(), + changeProofDB, + step.key, + step.value, + root, + ) + require.NoError(err) + require.LessOrEqual(len(changeProof.KeyValues)+len(changeProof.DeletedKeys), 100) + case opWriteBatch: + oldRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + err = currentBatch.Write() + require.NoError(err) + for key, value := range currentValues { + values[key] = value + } + for key := range deleteValues { + delete(values, key) + } + + if len(currentValues) == 0 && len(deleteValues) == 0 { + continue + } + newRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + if oldRoot != newRoot { + pastRoots = append(pastRoots, newRoot) + if len(pastRoots) > 300 { + pastRoots = pastRoots[len(pastRoots)-300:] + } + } + currentValues = map[path][]byte{} + deleteValues = map[path]struct{}{} + currentBatch = db.NewBatch() + case opGet: + v, err := db.Get(step.key) + if err != nil { + require.ErrorIs(err, database.ErrNotFound) + } + want := values[newPath(step.key)] + require.True(bytes.Equal(want, v)) // Use bytes.Equal so nil treated equal to []byte{} + trieValue, err := getNodeValue(db, string(step.key)) + if err != nil { + require.ErrorIs(err, database.ErrNotFound) + } + require.True(bytes.Equal(want, trieValue)) // Use bytes.Equal so nil treated equal to []byte{} + case opCheckhash: + dbTrie, err := newDatabase( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: minCacheSize, + }, + &mockMetrics{}, + ) + require.NoError(err) + localTrie := Trie(dbTrie) + for key, value := range values { + err := localTrie.Insert(context.Background(), key.Serialize().Value, value) + require.NoError(err) + } + calculatedRoot, err := localTrie.GetMerkleRoot(context.Background()) + require.NoError(err) + dbRoot, err := db.GetMerkleRoot(context.Background()) + 
require.NoError(err) + require.Equal(dbRoot, calculatedRoot) + } + } +} + +func generateWithKeys(require *require.Assertions, allKeys [][]byte, r *rand.Rand, size int, percentChanceToFullHash float64) randTest { + genKey := func() []byte { + if len(allKeys) < 2 || r.Intn(100) < 10 { + // new key + key := make([]byte, r.Intn(50)) + _, err := r.Read(key) + require.NoError(err) + allKeys = append(allKeys, key) + return key + } + if len(allKeys) > 2 && r.Intn(100) < 10 { + // new prefixed key + prefix := allKeys[r.Intn(len(allKeys))] + key := make([]byte, r.Intn(50)+len(prefix)) + copy(key, prefix) + _, err := r.Read(key[len(prefix):]) + require.NoError(err) + allKeys = append(allKeys, key) + return key + } + // use existing key + return allKeys[r.Intn(len(allKeys))] + } + + genEnd := func(key []byte) []byte { + shouldBeNil := r.Intn(10) + if shouldBeNil == 0 { + return nil + } + + endKey := make([]byte, len(key)) + copy(endKey, key) + for i := 0; i < len(endKey); i += 2 { + n := r.Intn(len(endKey)) + if endKey[n] < 250 { + endKey[n] += byte(r.Intn(int(255 - endKey[n]))) + } + } + return endKey + } + + var steps randTest + for i := 0; i < size-1; { + step := randTestStep{op: r.Intn(opMax)} + switch step.op { + case opUpdate: + step.key = genKey() + step.value = make([]byte, r.Intn(50)) + if len(step.value) == 51 { + step.value = nil + } else { + _, err := r.Read(step.value) + require.NoError(err) + } + case opGet, opDelete: + step.key = genKey() + case opGenerateRangeProof, opGenerateChangeProof: + step.key = genKey() + step.value = genEnd(step.key) + case opCheckhash: + // this gets really expensive so control how often it happens + if r.Float64() >= percentChanceToFullHash { + continue + } + } + steps = append(steps, step) + i++ + } + // always end with a full hash of the trie + steps = append(steps, randTestStep{op: opCheckhash}) + return steps +} + +func generateInitialValues(require *require.Assertions, r *rand.Rand, initialValues int, size int, 
percentChanceToFullHash float64) randTest { + var allKeys [][]byte + genKey := func() []byte { + // new prefixed key + if len(allKeys) > 2 && r.Intn(100) < 10 { + prefix := allKeys[r.Intn(len(allKeys))] + key := make([]byte, r.Intn(50)+len(prefix)) + copy(key, prefix) + _, err := r.Read(key[len(prefix):]) + require.NoError(err) + allKeys = append(allKeys, key) + return key + } + + // new key + key := make([]byte, r.Intn(50)) + _, err := r.Read(key) + require.NoError(err) + allKeys = append(allKeys, key) + return key + } + + var steps randTest + for i := 0; i < initialValues; i++ { + step := randTestStep{op: opUpdate} + step.key = genKey() + step.value = make([]byte, r.Intn(51)) + if len(step.value) == 51 { + step.value = nil + } else { + _, err := r.Read(step.value) + require.NoError(err) + } + steps = append(steps, step) + } + steps = append(steps, randTestStep{op: opWriteBatch}) + steps = append(steps, generateWithKeys(require, allKeys, r, size, percentChanceToFullHash)...) + return steps +} + +func generate(require *require.Assertions, r *rand.Rand, size int, percentChanceToFullHash float64) randTest { + var allKeys [][]byte + return generateWithKeys(require, allKeys, r, size, percentChanceToFullHash) +} diff --git a/avalanchego/x/merkledb/history.go b/avalanchego/x/merkledb/history.go new file mode 100644 index 00000000..e58dedbd --- /dev/null +++ b/avalanchego/x/merkledb/history.go @@ -0,0 +1,264 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "errors" + "fmt" + + "github.com/google/btree" + + "github.com/ava-labs/avalanchego/ids" +) + +var ( + ErrStartRootNotFound = errors.New("start root is not before end root in history") + ErrRootIDNotPresent = errors.New("root id is not present in history") +) + +// stores previous trie states +type trieHistory struct { + // Root ID --> The most recent change resulting in [rootID]. 
+ lastChanges map[ids.ID]*changeSummaryAndIndex + + // Maximum number of previous roots/changes to store in [history]. + maxHistoryLen int + + // Contains the history. + // Sorted by increasing order of insertion. + // Contains at most [maxHistoryLen] values. + history *btree.BTreeG[*changeSummaryAndIndex] + + nextIndex uint64 +} + +// Tracks the beginning and ending state of a value. +type change[T any] struct { + before T + after T +} + +// Wrapper around a changeSummary that allows comparison +// of when the change was made. +type changeSummaryAndIndex struct { + *changeSummary + // Another changeSummaryAndIndex with a greater + // [index] means that change was after this one. + index uint64 +} + +// Tracks all of the node and value changes that resulted in the rootID. +type changeSummary struct { + rootID ids.ID + // key is path prefix + nodes map[path]*change[*node] + // key is full path + values map[path]*change[Maybe[[]byte]] +} + +func newChangeSummary(estimatedSize int) *changeSummary { + return &changeSummary{ + nodes: make(map[path]*change[*node], estimatedSize), + values: make(map[path]*change[Maybe[[]byte]], estimatedSize), + } +} + +func newTrieHistory(maxHistoryLookback int) *trieHistory { + return &trieHistory{ + maxHistoryLen: maxHistoryLookback, + history: btree.NewG( + 2, + func(a, b *changeSummaryAndIndex) bool { + return a.index < b.index + }, + ), + lastChanges: make(map[ids.ID]*changeSummaryAndIndex), + } +} + +// Returns up to [maxLength] key-value pair changes with keys in [start, end] that +// occurred between [startRoot] and [endRoot]. 
+func (th *trieHistory) getValueChanges(startRoot, endRoot ids.ID, start, end []byte, maxLength int) (*changeSummary, error) { + if maxLength <= 0 { + return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) + } + + if startRoot == endRoot { + return newChangeSummary(maxLength), nil + } + + // Confirm there's a change resulting in [startRoot] before + // a change resulting in [endRoot] in the history. + // [lastEndRootChange] is the last change in the history resulting in [endRoot]. + lastEndRootChange, ok := th.lastChanges[endRoot] + if !ok { + return nil, ErrRootIDNotPresent + } + + // [startRootChanges] is the last appearance of [startRoot] + startRootChanges, ok := th.lastChanges[startRoot] + if !ok { + return nil, ErrStartRootNotFound + } + + // startRootChanges is after the lastEndRootChange, but that is just the latest appearance of start root + // there may be an earlier entry, so attempt to find an entry that comes before lastEndRootChange + if startRootChanges.index > lastEndRootChange.index { + th.history.DescendLessOrEqual( + lastEndRootChange, + func(item *changeSummaryAndIndex) bool { + if item == lastEndRootChange { + return true // Skip first iteration + } + if item.rootID == startRoot { + startRootChanges = item + return false + } + return true + }, + ) + // There's no change resulting in [startRoot] before the latest change resulting in [endRoot]. + if startRootChanges.index > lastEndRootChange.index { + return nil, ErrStartRootNotFound + } + } + + // Keep changes sorted so the largest can be removed in order to stay within the maxLength limit. + sortedKeys := btree.NewG( + 2, + func(a, b path) bool { + return a.Compare(b) < 0 + }, + ) + + startPath := newPath(start) + endPath := newPath(end) + + // For each element in the history in the range between [startRoot]'s + // last appearance (exclusive) and [endRoot]'s last appearance (inclusive), + // add the changes to keys in [start, end] to [combinedChanges]. 
+ // Only the key-value pairs with the greatest [maxLength] keys will be kept. + combinedChanges := newChangeSummary(maxLength) + + // For each change after [startRootChanges] up to and including + // [lastEndRootChange], record the change in [combinedChanges]. + th.history.AscendGreaterOrEqual( + startRootChanges, + func(item *changeSummaryAndIndex) bool { + if item == startRootChanges { + // Start from the first change after [startRootChanges]. + return true + } + if item.index > lastEndRootChange.index { + // Don't go past [lastEndRootChange]. + return false + } + + for key, valueChange := range item.values { + if (len(startPath) == 0 || key.Compare(startPath) >= 0) && + (len(endPath) == 0 || key.Compare(endPath) <= 0) { + if existing, ok := combinedChanges.values[key]; ok { + existing.after = valueChange.after + } else { + combinedChanges.values[key] = &change[Maybe[[]byte]]{ + before: valueChange.before, + after: valueChange.after, + } + } + sortedKeys.ReplaceOrInsert(key) + } + } + + // Keep only the smallest [maxLength] items in [combinedChanges.values]. + for sortedKeys.Len() > maxLength { + if greatestKey, found := sortedKeys.DeleteMax(); found { + delete(combinedChanges.values, greatestKey) + } + } + + return true + }, + ) + return combinedChanges, nil +} + +// Returns the changes to go from the current trie state back to the requested [rootID] +// for the keys in [start, end]. +// If [start] is nil, all keys are considered > [start]. +// If [end] is nil, all keys are considered < [end]. +func (th *trieHistory) getChangesToGetToRoot(rootID ids.ID, start, end []byte) (*changeSummary, error) { + // [lastRootChange] is the last change in the history resulting in [rootID]. 
+ lastRootChange, ok := th.lastChanges[rootID] + if !ok { + return nil, ErrRootIDNotPresent + } + + var ( + startPath = newPath(start) + endPath = newPath(end) + combinedChanges = newChangeSummary(defaultPreallocationSize) + ) + + // Go backward from the most recent change in the history up to but + // not including the last change resulting in [rootID]. + // Record each change in [combinedChanges]. + th.history.Descend( + func(item *changeSummaryAndIndex) bool { + if item == lastRootChange { + return false + } + for key, changedNode := range item.nodes { + combinedChanges.nodes[key] = &change[*node]{ + after: changedNode.before, + } + } + + for key, valueChange := range item.values { + if (len(startPath) == 0 || key.Compare(startPath) >= 0) && + (len(endPath) == 0 || key.Compare(endPath) <= 0) { + if existing, ok := combinedChanges.values[key]; ok { + existing.after = valueChange.before + } else { + combinedChanges.values[key] = &change[Maybe[[]byte]]{ + before: valueChange.after, + after: valueChange.before, + } + } + } + } + return true + }, + ) + return combinedChanges, nil +} + +// record the provided set of changes in the history +func (th *trieHistory) record(changes *changeSummary) { + // we aren't recording history so noop + if th.maxHistoryLen == 0 { + return + } + + for th.history.Len() == th.maxHistoryLen { + // This change causes us to go over our lookback limit. + // Remove the oldest set of changes. + oldestEntry, _ := th.history.DeleteMin() + latestChange := th.lastChanges[oldestEntry.rootID] + if latestChange == oldestEntry { + // The removed change was the most recent resulting in this root ID. + delete(th.lastChanges, oldestEntry.rootID) + } + } + + changesAndIndex := &changeSummaryAndIndex{ + changeSummary: changes, + index: th.nextIndex, + } + th.nextIndex++ + + // Add [changes] to the sorted change list. + _, _ = th.history.ReplaceOrInsert(changesAndIndex) + // Mark that this is the most recent change resulting in [changes.rootID]. 
+ th.lastChanges[changes.rootID] = changesAndIndex +} diff --git a/avalanchego/x/merkledb/history_test.go b/avalanchego/x/merkledb/history_test.go new file mode 100644 index 00000000..29da9974 --- /dev/null +++ b/avalanchego/x/merkledb/history_test.go @@ -0,0 +1,891 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "context" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" +) + +func Test_History_Simple(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 300, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key"), []byte("value")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + val, err := db.Get([]byte("key")) + require.NoError(err) + require.Equal([]byte("value"), val) + + origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(origProof) + origRootID := db.root.id + err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("key"), []byte("value0")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("key1"), []byte("value1")) + require.NoError(err) + err = batch.Put([]byte("key8"), []byte("value8")) + require.NoError(err) + err = batch.Write() + 
require.NoError(err) + newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("k"), []byte("v")) + require.NoError(err) + require.NoError(err) + err = batch.Write() + require.NoError(err) + newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + batch = db.NewBatch() + err = batch.Delete([]byte("k")) + require.NoError(err) + err = batch.Delete([]byte("ke")) + require.NoError(err) + err = batch.Delete([]byte("key")) + require.NoError(err) + err = batch.Delete([]byte("key1")) + require.NoError(err) + err = batch.Put([]byte("key2"), []byte("value2")) + require.NoError(err) + err = batch.Delete([]byte("key3")) + require.NoError(err) + err = batch.Delete([]byte("key4")) + require.NoError(err) + err = batch.Delete([]byte("key5")) + require.NoError(err) + err = batch.Delete([]byte("key8")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) +} + +func Test_History_Large(t *testing.T) { + require := require.New(t) + + for i := 1; i < 10; i++ { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 1500, + NodeCacheSize: 1000, + }, + ) + require.NoError(err) + roots := []ids.ID{} + // make sure they stay in sync + for 
x := 0; x < 500; x++ { + addkey := make([]byte, r.Intn(50)) + _, err := r.Read(addkey) + require.NoError(err) + val := make([]byte, r.Intn(50)) + _, err = r.Read(val) + require.NoError(err) + + err = db.Put(addkey, val) + require.NoError(err) + + addNilkey := make([]byte, r.Intn(50)) + _, err = r.Read(addNilkey) + require.NoError(err) + err = db.Put(addNilkey, nil) + require.NoError(err) + + deleteKeyStart := make([]byte, r.Intn(50)) + _, err = r.Read(deleteKeyStart) + require.NoError(err) + + it := db.NewIteratorWithStart(deleteKeyStart) + if it.Next() { + err = db.Delete(it.Key()) + require.NoError(err) + } + require.NoError(it.Error()) + it.Release() + + root, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + roots = append(roots, root) + } + proof, err := db.GetRangeProofAtRoot(context.Background(), roots[0], nil, nil, 10) + require.NoError(err) + require.NotNil(proof) + + err = proof.Verify(context.Background(), nil, nil, roots[0]) + require.NoError(err) + } +} + +func Test_History_Bad_GetValueChanges_Input(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 5, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key"), []byte("value")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + toBeDeletedRoot := db.getMerkleRoot() + + batch = db.NewBatch() + err = batch.Put([]byte("key"), []byte("value0")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + startRoot := db.getMerkleRoot() + + batch = db.NewBatch() + err = batch.Put([]byte("key1"), []byte("value0")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("key1"), []byte("value1")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("key2"), 
[]byte("value3")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + endRoot := db.getMerkleRoot() + + // ensure these start as valid calls + _, err = db.history.getValueChanges(toBeDeletedRoot, endRoot, nil, nil, 1) + require.NoError(err) + _, err = db.history.getValueChanges(startRoot, endRoot, nil, nil, 1) + require.NoError(err) + + _, err = db.history.getValueChanges(startRoot, endRoot, nil, nil, -1) + require.Error(err, ErrInvalidMaxLength) + + _, err = db.history.getValueChanges(endRoot, startRoot, nil, nil, 1) + require.Error(err, ErrStartRootNotFound) + + // trigger the first root to be deleted by exiting the lookback window + batch = db.NewBatch() + err = batch.Put([]byte("key2"), []byte("value4")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + // now this root should no lnger be present + _, err = db.history.getValueChanges(toBeDeletedRoot, endRoot, nil, nil, 1) + require.Error(err, ErrRootIDNotPresent) + + // same start/end roots should yield an empty changelist + changes, err := db.history.getValueChanges(endRoot, endRoot, nil, nil, 10) + require.NoError(err) + require.Len(changes.values, 0) +} + +func Test_History_Trigger_History_Queue_Looping(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 2, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key"), []byte("value")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + origRootID := db.getMerkleRoot() + + origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(origProof) + err = origProof.Verify( + context.Background(), + []byte("k"), + []byte("key3"), + origRootID, + ) + require.NoError(err) + + // write a new value into the db, now there should be 2 roots in the history + batch = db.NewBatch() + 
err = batch.Put([]byte("key"), []byte("value0")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + // ensure that previous root is still present and generates a valid proof + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify( + context.Background(), + []byte("k"), + []byte("key3"), + origRootID, + ) + require.NoError(err) + + // trigger a new root to be added to the history, which should cause rollover since there can only be 2 + batch = db.NewBatch() + err = batch.Put([]byte("key1"), []byte("value1")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + // proof from first root shouldn't be generatable since it should have been removed from the history + _, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.Error(err, ErrRootIDNotPresent) +} + +func Test_History_Values_Lookup_Over_Queue_Break(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 4, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key"), []byte("value")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + // write a new value into the db + batch = db.NewBatch() + err = batch.Put([]byte("key"), []byte("value0")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + startRoot := db.getMerkleRoot() + + // write a new value into the db + batch = db.NewBatch() + err = batch.Put([]byte("key1"), []byte("value0")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + // write a new value into the db that overwrites key1 + batch = db.NewBatch() + err = batch.Put([]byte("key1"), []byte("value1")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + 
+ // trigger a new root to be added to the history, which should cause rollover since there can only be 3 + batch = db.NewBatch() + err = batch.Put([]byte("key2"), []byte("value3")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + endRoot := db.getMerkleRoot() + + // changes should still be collectable even though the history has had to loop due to hitting max size + changes, err := db.history.getValueChanges(startRoot, endRoot, nil, nil, 10) + require.NoError(err) + require.Contains(changes.values, newPath([]byte("key1"))) + require.Equal([]byte("value1"), changes.values[newPath([]byte("key1"))].after.value) + require.Contains(changes.values, newPath([]byte("key2"))) + require.Equal([]byte("value3"), changes.values[newPath([]byte("key2"))].after.value) +} + +func Test_History_RepeatedRoot(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 100, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key1"), []byte("value1")) + require.NoError(err) + err = batch.Put([]byte("key2"), []byte("value2")) + require.NoError(err) + err = batch.Put([]byte("key3"), []byte("value3")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(origProof) + origRootID := db.root.id + err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("key1"), []byte("other")) + require.NoError(err) + err = batch.Put([]byte("key2"), []byte("other")) + require.NoError(err) + err = batch.Put([]byte("key3"), []byte("other")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, 
[]byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + // revert state to be the same as in orig proof + batch = db.NewBatch() + err = batch.Put([]byte("key1"), []byte("value1")) + require.NoError(err) + err = batch.Put([]byte("key2"), []byte("value2")) + require.NoError(err) + err = batch.Put([]byte("key3"), []byte("value3")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) +} + +func Test_History_ExcessDeletes(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 100, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key"), []byte("value")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(origProof) + origRootID := db.root.id + err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + batch = db.NewBatch() + err = batch.Delete([]byte("key1")) + require.NoError(err) + err = batch.Delete([]byte("key2")) + require.NoError(err) + err = batch.Delete([]byte("key3")) + require.NoError(err) + err = batch.Delete([]byte("key4")) + require.NoError(err) + err = batch.Delete([]byte("key5")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + 
require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) +} + +func Test_History_DontIncludeAllNodes(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 100, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key"), []byte("value")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(origProof) + origRootID := db.root.id + err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("z"), []byte("z")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) +} + +func Test_History_Branching2Nodes(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 100, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key"), []byte("value")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(origProof) + origRootID := db.root.id + err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + batch = db.NewBatch() + err = 
batch.Put([]byte("k"), []byte("v")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) +} + +func Test_History_Branching3Nodes(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 100, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key123"), []byte("value123")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(origProof) + origRootID := db.root.id + err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("key321"), []byte("value321")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) + require.NoError(err) + require.NotNil(newProof) + err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) + require.NoError(err) +} + +func Test_History_MaxLength(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 2, + NodeCacheSize: 1000, + }, + ) + require.NoError(err) + + batch := db.NewBatch() + err = batch.Put([]byte("key"), []byte("value")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + oldRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + batch = db.NewBatch() + err = 
batch.Put([]byte("k"), []byte("v")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + require.Contains(db.history.lastChanges, oldRoot) + + batch = db.NewBatch() + err = batch.Put([]byte("k1"), []byte("v2")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + require.NotContains(db.history.lastChanges, oldRoot) +} + +func Test_Change_List(t *testing.T) { + require := require.New(t) + + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 100, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(err) + batch := db.NewBatch() + err = batch.Put([]byte("key20"), []byte("value20")) + require.NoError(err) + err = batch.Put([]byte("key21"), []byte("value21")) + require.NoError(err) + err = batch.Put([]byte("key22"), []byte("value22")) + require.NoError(err) + err = batch.Put([]byte("key23"), []byte("value23")) + require.NoError(err) + err = batch.Put([]byte("key24"), []byte("value24")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("key25"), []byte("value25")) + require.NoError(err) + err = batch.Put([]byte("key26"), []byte("value26")) + require.NoError(err) + err = batch.Put([]byte("key27"), []byte("value27")) + require.NoError(err) + err = batch.Put([]byte("key28"), []byte("value28")) + require.NoError(err) + err = batch.Put([]byte("key29"), []byte("value29")) + require.NoError(err) + err = batch.Write() + require.NoError(err) + + batch = db.NewBatch() + err = batch.Put([]byte("key30"), []byte("value30")) + require.NoError(err) + err = batch.Put([]byte("key31"), []byte("value31")) + require.NoError(err) + err = batch.Put([]byte("key32"), []byte("value32")) + require.NoError(err) + err = batch.Delete([]byte("key21")) + require.NoError(err) + err = batch.Delete([]byte("key22")) + require.NoError(err) + err = 
batch.Write() + require.NoError(err) + + endRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + + changes, err := db.history.getValueChanges(startRoot, endRoot, nil, nil, 8) + require.NoError(err) + require.Equal(8, len(changes.values)) +} + +func TestHistoryRecord(t *testing.T) { + require := require.New(t) + + maxHistoryLen := 3 + th := newTrieHistory(maxHistoryLen) + + changes := []*changeSummary{} + for i := 0; i < maxHistoryLen; i++ { // Fill the history + changes = append(changes, &changeSummary{rootID: ids.GenerateTestID()}) + + th.record(changes[i]) + require.Equal(uint64(i+1), th.nextIndex) + require.Equal(i+1, th.history.Len()) + require.Len(th.lastChanges, i+1) + require.Contains(th.lastChanges, changes[i].rootID) + changeAndIndex := th.lastChanges[changes[i].rootID] + require.Equal(uint64(i), changeAndIndex.index) + require.True(th.history.Has(changeAndIndex)) + } + // history is [changes[0], changes[1], changes[2]] + + // Add a new change + change3 := &changeSummary{rootID: ids.GenerateTestID()} + th.record(change3) + // history is [changes[1], changes[2], change3] + require.Equal(uint64(maxHistoryLen+1), th.nextIndex) + require.Equal(maxHistoryLen, th.history.Len()) + require.Len(th.lastChanges, maxHistoryLen) + require.Contains(th.lastChanges, change3.rootID) + changeAndIndex := th.lastChanges[change3.rootID] + require.Equal(uint64(maxHistoryLen), changeAndIndex.index) + require.True(th.history.Has(changeAndIndex)) + + // Make sure the oldest change was evicted + require.NotContains(th.lastChanges, changes[0].rootID) + minChange, _ := th.history.Min() + require.Equal(uint64(1), minChange.index) + + // Add another change which was the same root ID as changes[2] + change4 := &changeSummary{rootID: changes[2].rootID} + th.record(change4) + // history is [changes[2], change3, change4] + + change5 := &changeSummary{rootID: ids.GenerateTestID()} + th.record(change5) + // history is [change3, change4, change5] + + // Make sure that 
even though changes[2] was evicted, we still remember + // that the most recent change resulting in that change's root ID. + require.Len(th.lastChanges, maxHistoryLen) + require.Contains(th.lastChanges, changes[2].rootID) + changeAndIndex = th.lastChanges[changes[2].rootID] + require.Equal(uint64(maxHistoryLen+1), changeAndIndex.index) + + // Make sure [t.history] is right. + require.Equal(maxHistoryLen, th.history.Len()) + got, _ := th.history.DeleteMin() + require.Equal(uint64(maxHistoryLen), got.index) + require.Equal(change3.rootID, got.rootID) + got, _ = th.history.DeleteMin() + require.Equal(uint64(maxHistoryLen+1), got.index) + require.Equal(change4.rootID, got.rootID) + got, _ = th.history.DeleteMin() + require.Equal(uint64(maxHistoryLen+2), got.index) + require.Equal(change5.rootID, got.rootID) +} + +func TestHistoryGetChangesToRoot(t *testing.T) { + maxHistoryLen := 3 + history := newTrieHistory(maxHistoryLen) + + changes := []*changeSummary{} + for i := 0; i < maxHistoryLen; i++ { // Fill the history + changes = append(changes, &changeSummary{ + rootID: ids.GenerateTestID(), + nodes: map[path]*change[*node]{ + newPath([]byte{byte(i)}): { + before: &node{id: ids.GenerateTestID()}, + after: &node{id: ids.GenerateTestID()}, + }, + }, + values: map[path]*change[Maybe[[]byte]]{ + newPath([]byte{byte(i)}): { + before: Some([]byte{byte(i)}), + after: Some([]byte{byte(i + 1)}), + }, + }, + }) + history.record(changes[i]) + } + + type test struct { + name string + rootID ids.ID + start []byte + end []byte + validateFunc func(*require.Assertions, *changeSummary) + expectedErr error + } + + tests := []test{ + { + name: "unknown root ID", + rootID: ids.GenerateTestID(), + expectedErr: ErrRootIDNotPresent, + }, + { + name: "most recent change", + rootID: changes[maxHistoryLen-1].rootID, + validateFunc: func(require *require.Assertions, got *changeSummary) { + require.Equal(newChangeSummary(defaultPreallocationSize), got) + }, + }, + { + name: "second most recent 
change", + rootID: changes[maxHistoryLen-2].rootID, + validateFunc: func(require *require.Assertions, got *changeSummary) { + // Ensure this is the reverse of the most recent change + require.Len(got.nodes, 1) + require.Len(got.values, 1) + reversedChanges := changes[maxHistoryLen-1] + removedKey := newPath([]byte{byte(maxHistoryLen - 1)}) + require.Equal(reversedChanges.nodes[removedKey].before, got.nodes[removedKey].after) + require.Equal(reversedChanges.values[removedKey].before, got.values[removedKey].after) + require.Equal(reversedChanges.values[removedKey].after, got.values[removedKey].before) + }, + }, + { + name: "third most recent change", + rootID: changes[maxHistoryLen-3].rootID, + validateFunc: func(require *require.Assertions, got *changeSummary) { + require.Len(got.nodes, 2) + require.Len(got.values, 2) + reversedChanges1 := changes[maxHistoryLen-1] + removedKey1 := newPath([]byte{byte(maxHistoryLen - 1)}) + require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) + require.Equal(reversedChanges1.values[removedKey1].before, got.values[removedKey1].after) + require.Equal(reversedChanges1.values[removedKey1].after, got.values[removedKey1].before) + reversedChanges2 := changes[maxHistoryLen-2] + removedKey2 := newPath([]byte{byte(maxHistoryLen - 2)}) + require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) + require.Equal(reversedChanges2.values[removedKey2].before, got.values[removedKey2].after) + require.Equal(reversedChanges2.values[removedKey2].after, got.values[removedKey2].before) + }, + }, + { + name: "third most recent change with start filter", + rootID: changes[maxHistoryLen-3].rootID, + start: []byte{byte(maxHistoryLen - 1)}, // Omit values from second most recent change + validateFunc: func(require *require.Assertions, got *changeSummary) { + require.Len(got.nodes, 2) + require.Len(got.values, 1) + reversedChanges1 := changes[maxHistoryLen-1] + removedKey1 := 
newPath([]byte{byte(maxHistoryLen - 1)}) + require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) + require.Equal(reversedChanges1.values[removedKey1].before, got.values[removedKey1].after) + require.Equal(reversedChanges1.values[removedKey1].after, got.values[removedKey1].before) + reversedChanges2 := changes[maxHistoryLen-2] + removedKey2 := newPath([]byte{byte(maxHistoryLen - 2)}) + require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) + }, + }, + { + name: "third most recent change with end filter", + rootID: changes[maxHistoryLen-3].rootID, + end: []byte{byte(maxHistoryLen - 2)}, // Omit values from most recent change + validateFunc: func(require *require.Assertions, got *changeSummary) { + require.Len(got.nodes, 2) + require.Len(got.values, 1) + reversedChanges1 := changes[maxHistoryLen-1] + removedKey1 := newPath([]byte{byte(maxHistoryLen - 1)}) + require.Equal(reversedChanges1.nodes[removedKey1].before, got.nodes[removedKey1].after) + reversedChanges2 := changes[maxHistoryLen-2] + removedKey2 := newPath([]byte{byte(maxHistoryLen - 2)}) + require.Equal(reversedChanges2.nodes[removedKey2].before, got.nodes[removedKey2].after) + require.Equal(reversedChanges2.values[removedKey2].before, got.values[removedKey2].after) + require.Equal(reversedChanges2.values[removedKey2].after, got.values[removedKey2].before) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + got, err := history.getChangesToGetToRoot(tt.rootID, tt.start, tt.end) + require.ErrorIs(err, tt.expectedErr) + if err != nil { + return + } + tt.validateFunc(require, got) + }) + } +} diff --git a/avalanchego/x/merkledb/iterator.go b/avalanchego/x/merkledb/iterator.go new file mode 100644 index 00000000..ad235b7d --- /dev/null +++ b/avalanchego/x/merkledb/iterator.go @@ -0,0 +1,63 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package merkledb + +import "github.com/ava-labs/avalanchego/database" + +var _ database.Iterator = &iterator{} + +type iterator struct { + db *Database + nodeIter database.Iterator + current *node + err error +} + +func (i *iterator) Error() error { + if i.err != nil { + return i.err + } + return i.nodeIter.Error() +} + +func (i *iterator) Key() []byte { + if i.current == nil { + return nil + } + return i.current.key.Serialize().Value +} + +func (i *iterator) Value() []byte { + if i.current == nil { + return nil + } + return i.current.value.value +} + +func (i *iterator) Next() bool { + i.current = nil + if i.err != nil { + return false + } + for i.nodeIter.Next() { + i.db.metrics.IOKeyRead() + n, err := parseNode(path(i.nodeIter.Key()), i.nodeIter.Value()) + if err != nil { + i.err = err + return false + } + if n.hasValue() { + i.current = n + return true + } + } + if i.err == nil { + i.err = i.nodeIter.Error() + } + return false +} + +func (i *iterator) Release() { + i.nodeIter.Release() +} diff --git a/avalanchego/x/merkledb/maybe.go b/avalanchego/x/merkledb/maybe.go new file mode 100644 index 00000000..acebb47f --- /dev/null +++ b/avalanchego/x/merkledb/maybe.go @@ -0,0 +1,47 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import "golang.org/x/exp/slices" + +// Maybe T = Some T | Nothing. +// A data wrapper that allows values to be something [Some T] or nothing [Nothing]. +// Maybe is used to wrap types: +// * That can't be represented by nil. +// * That use nil as a valid value instead of an indicator of a missing value. +// For more info see https://en.wikipedia.org/wiki/Option_type +type Maybe[T any] struct { + hasValue bool + value T +} + +// Returns a new Maybe[T] with the value val. +func Some[T any](val T) Maybe[T] { + return Maybe[T]{ + value: val, + hasValue: true, + } +} + +// Returns a new Maybe[T] with no value. 
+func Nothing[T any]() Maybe[T] { + return Maybe[T]{} +} + +// Returns true iff [m] has no value. +func (m Maybe[T]) IsNothing() bool { + return !m.hasValue +} + +// Returns the value of [m]. +func (m Maybe[T]) Value() T { + return m.value +} + +func Clone(m Maybe[[]byte]) Maybe[[]byte] { + if !m.hasValue { + return Nothing[[]byte]() + } + return Some(slices.Clone(m.value)) +} diff --git a/avalanchego/x/merkledb/maybe_test.go b/avalanchego/x/merkledb/maybe_test.go new file mode 100644 index 00000000..acaf1630 --- /dev/null +++ b/avalanchego/x/merkledb/maybe_test.go @@ -0,0 +1,31 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "testing" + + "github.com/stretchr/testify/require" + "golang.org/x/exp/slices" +) + +func TestMaybeClone(t *testing.T) { + // Case: Value is maybe + { + val := []byte{1, 2, 3} + originalVal := slices.Clone(val) + m := Some(val) + mClone := Clone(m) + m.value[0] = 0 + require.NotEqual(t, mClone.value, m.value) + require.Equal(t, originalVal, mClone.value) + } + + // Case: Value is nothing + { + m := Nothing[[]byte]() + mClone := Clone(m) + require.True(t, mClone.IsNothing()) + } +} diff --git a/avalanchego/x/merkledb/metrics.go b/avalanchego/x/merkledb/metrics.go new file mode 100644 index 00000000..cc1efb08 --- /dev/null +++ b/avalanchego/x/merkledb/metrics.go @@ -0,0 +1,219 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +var ( + _ merkleMetrics = &mockMetrics{} + _ merkleMetrics = &metrics{} +) + +type merkleMetrics interface { + IOKeyRead() + IOKeyWrite() + HashCalculated() + DBNodeCacheHit() + DBNodeCacheMiss() + ViewNodeCacheHit() + ViewNodeCacheMiss() + ViewValueCacheHit() + ViewValueCacheMiss() +} + +type mockMetrics struct { + lock sync.Mutex + keyReadCount int64 + keyWriteCount int64 + hashCount int64 + dbNodeCacheHit int64 + dbNodeCacheMiss int64 + viewNodeCacheHit int64 + viewNodeCacheMiss int64 + viewValueCacheHit int64 + viewValueCacheMiss int64 +} + +func (m *mockMetrics) HashCalculated() { + m.lock.Lock() + defer m.lock.Unlock() + + m.hashCount++ +} + +func (m *mockMetrics) IOKeyRead() { + m.lock.Lock() + defer m.lock.Unlock() + + m.keyReadCount++ +} + +func (m *mockMetrics) IOKeyWrite() { + m.lock.Lock() + defer m.lock.Unlock() + + m.keyWriteCount++ +} + +func (m *mockMetrics) ViewNodeCacheHit() { + m.lock.Lock() + defer m.lock.Unlock() + + m.viewNodeCacheHit++ +} + +func (m *mockMetrics) ViewValueCacheHit() { + m.lock.Lock() + defer m.lock.Unlock() + + m.viewValueCacheHit++ +} + +func (m *mockMetrics) ViewNodeCacheMiss() { + m.lock.Lock() + defer m.lock.Unlock() + + m.viewNodeCacheMiss++ +} + +func (m *mockMetrics) ViewValueCacheMiss() { + m.lock.Lock() + defer m.lock.Unlock() + + m.viewValueCacheMiss++ +} + +func (m *mockMetrics) DBNodeCacheHit() { + m.lock.Lock() + defer m.lock.Unlock() + + m.dbNodeCacheHit++ +} + +func (m *mockMetrics) DBNodeCacheMiss() { + m.lock.Lock() + defer m.lock.Unlock() + + m.dbNodeCacheMiss++ +} + +type metrics struct { + ioKeyWrite prometheus.Counter + ioKeyRead prometheus.Counter + hashCount prometheus.Counter + dbNodeCacheHit prometheus.Counter + dbNodeCacheMiss prometheus.Counter + viewNodeCacheHit prometheus.Counter + viewNodeCacheMiss prometheus.Counter + viewValueCacheHit 
prometheus.Counter + viewValueCacheMiss prometheus.Counter +} + +func newMetrics(namespace string, reg prometheus.Registerer) (merkleMetrics, error) { + if reg == nil { + return &mockMetrics{}, nil + } + m := metrics{ + ioKeyWrite: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "io_key_write", + Help: "cumulative amount of io write to the key db", + }), + ioKeyRead: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "io_key_read", + Help: "cumulative amount of io read to the key db", + }), + hashCount: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "hashes_calculated", + Help: "cumulative number of node hashes done", + }), + dbNodeCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "db_node_cache_hit", + Help: "cumulative amount of hits on the db node cache", + }), + dbNodeCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "db_node_cache_miss", + Help: "cumulative amount of misses on the db node cache", + }), + viewNodeCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "view_node_cache_hit", + Help: "cumulative amount of hits on the view node cache", + }), + viewNodeCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "view_node_cache_miss", + Help: "cumulative amount of misses on the view node cache", + }), + viewValueCacheHit: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "view_value_cache_hit", + Help: "cumulative amount of hits on the view value cache", + }), + viewValueCacheMiss: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "view_value_cache_miss", + Help: "cumulative amount of misses on the view value cache", + }), + } + errs := wrappers.Errs{} + errs.Add( + reg.Register(m.ioKeyWrite), + reg.Register(m.ioKeyRead), + reg.Register(m.hashCount), + 
reg.Register(m.dbNodeCacheHit), + reg.Register(m.dbNodeCacheMiss), + reg.Register(m.viewNodeCacheHit), + reg.Register(m.viewNodeCacheMiss), + reg.Register(m.viewValueCacheHit), + reg.Register(m.viewValueCacheMiss), + ) + return &m, errs.Err +} + +func (m *metrics) IOKeyRead() { + m.ioKeyRead.Inc() +} + +func (m *metrics) IOKeyWrite() { + m.ioKeyWrite.Inc() +} + +func (m *metrics) HashCalculated() { + m.hashCount.Inc() +} + +func (m *metrics) ViewNodeCacheHit() { + m.viewNodeCacheHit.Inc() +} + +func (m *metrics) ViewNodeCacheMiss() { + m.viewNodeCacheMiss.Inc() +} + +func (m *metrics) ViewValueCacheHit() { + m.viewValueCacheHit.Inc() +} + +func (m *metrics) ViewValueCacheMiss() { + m.viewValueCacheMiss.Inc() +} + +func (m *metrics) DBNodeCacheHit() { + m.dbNodeCacheHit.Inc() +} + +func (m *metrics) DBNodeCacheMiss() { + m.dbNodeCacheMiss.Inc() +} diff --git a/avalanchego/x/merkledb/metrics_test.go b/avalanchego/x/merkledb/metrics_test.go new file mode 100644 index 00000000..be08d7d8 --- /dev/null +++ b/avalanchego/x/merkledb/metrics_test.go @@ -0,0 +1,74 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "context" + "testing" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" +) + +func Test_Metrics_Basic_Usage(t *testing.T) { + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 300, + NodeCacheSize: minCacheSize, + }, + ) + require.NoError(t, err) + + err = db.Put([]byte("key"), []byte("value")) + require.NoError(t, err) + + require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) + require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyWriteCount) + require.Equal(t, int64(3), db.metrics.(*mockMetrics).hashCount) + + err = db.Delete([]byte("key")) + require.NoError(t, err) + + require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) + require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyWriteCount) + require.Equal(t, int64(4), db.metrics.(*mockMetrics).hashCount) + + _, err = db.Get([]byte("key2")) + require.ErrorIs(t, err, database.ErrNotFound) + + require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyReadCount) + require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyWriteCount) + require.Equal(t, int64(4), db.metrics.(*mockMetrics).hashCount) +} + +func Test_Metrics_Initialize(t *testing.T) { + db, err := New( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 300, + Reg: prometheus.NewRegistry(), + NodeCacheSize: 1000, + }, + ) + require.NoError(t, err) + + err = db.Put([]byte("key"), []byte("value")) + require.NoError(t, err) + + val, err := db.Get([]byte("key")) + require.NoError(t, err) + require.Equal(t, []byte("value"), val) + + err = db.Delete([]byte("key")) + require.NoError(t, err) +} diff --git a/avalanchego/x/merkledb/node.go b/avalanchego/x/merkledb/node.go new file mode 100644 index 00000000..edcb78c7 --- /dev/null +++ b/avalanchego/x/merkledb/node.go 
@@ -0,0 +1,203 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +const ( + NodeBranchFactor = 16 + HashLength = 32 +) + +// the values that go into the node's id +type hashValues struct { + Children map[byte]child + Value Maybe[[]byte] + Key SerializedPath +} + +// Representation of a node stored in the database. +type dbNode struct { + value Maybe[[]byte] + children map[byte]child +} + +type child struct { + compressedPath path + id ids.ID +} + +// node holds additional information on top of the dbNode that makes calculations easier to do +type node struct { + dbNode + id ids.ID + key path + nodeBytes []byte + valueDigest Maybe[[]byte] +} + +// Returns a new node with the given [key] and no value. +// If [parent] isn't nil, the new node is added as a child of [parent]. +func newNode(parent *node, key path) *node { + newNode := &node{ + dbNode: dbNode{ + children: make(map[byte]child, NodeBranchFactor), + }, + key: key, + } + if parent != nil { + parent.addChild(newNode) + } + return newNode +} + +// Parse [nodeBytes] to a node and set its key to [key]. +func parseNode(key path, nodeBytes []byte) (*node, error) { + n := dbNode{} + if _, err := Codec.decodeDBNode(nodeBytes, &n); err != nil { + return nil, err + } + result := &node{ + dbNode: n, + key: key, + nodeBytes: nodeBytes, + } + + result.setValueDigest() + return result, nil +} + +// Returns true iff this node has a value. +func (n *node) hasValue() bool { + return !n.value.IsNothing() +} + +// Returns the byte representation of this node. 
+func (n *node) marshal() ([]byte, error) { + if n.nodeBytes != nil { + return n.nodeBytes, nil + } + + nodeBytes, err := Codec.encodeDBNode(Version, &(n.dbNode)) + if err != nil { + return nil, err + } + n.nodeBytes = nodeBytes + return n.nodeBytes, nil +} + +// clear the cached values that will need to be recalculated whenever the node changes +// for example, node ID and byte representation +func (n *node) onNodeChanged() { + n.id = ids.Empty + n.nodeBytes = nil +} + +// Returns and caches the ID of this node. +func (n *node) calculateID(metrics merkleMetrics) error { + if n.id != ids.Empty { + return nil + } + + hv := &hashValues{ + Children: n.children, + Value: n.valueDigest, + Key: n.key.Serialize(), + } + + bytes, err := Codec.encodeHashValues(Version, hv) + if err != nil { + return err + } + + metrics.HashCalculated() + n.id = hashing.ComputeHash256Array(bytes) + return nil +} + +// Set [n]'s value to [val]. +func (n *node) setValue(val Maybe[[]byte]) { + n.onNodeChanged() + n.value = val + n.setValueDigest() +} + +func (n *node) setValueDigest() { + if n.value.IsNothing() || len(n.value.value) < HashLength { + n.valueDigest = n.value + } else { + n.valueDigest = Some(hashing.ComputeHash256(n.value.value)) + } +} + +// Adds [child] as a child of [n]. +// Assumes [child]'s key is valid as a child of [n]. +// That is, [n.key] is a prefix of [child.key]. +func (n *node) addChild(child *node) { + n.addChildWithoutNode( + child.key[len(n.key)], + child.key[len(n.key)+1:], + child.id, + ) +} + +// Adds a child to [n] without a reference to the child node. +func (n *node) addChildWithoutNode(index byte, compressedPath path, childID ids.ID) { + n.onNodeChanged() + n.children[index] = child{ + compressedPath: compressedPath, + id: childID, + } +} + +// Returns the path of the only child of this node. +// Assumes this node has exactly one child. 
+func (n *node) getSingleChildPath() path { + for index, entry := range n.children { + return n.key + path(index) + entry.compressedPath + } + return "" +} + +// Removes [child] from [n]'s children. +func (n *node) removeChild(child *node) { + n.onNodeChanged() + delete(n.children, child.key[len(n.key)]) +} + +// clone Returns a copy of [n]. +// nodeBytes is intentionally not included because it can cause a race. +// nodes being evicted by the cache can write nodeBytes, +// so reading them during the cloning would be a data race. +// Note: value isn't cloned because it is never edited, only overwritten +// if this ever changes, value will need to be copied as well +func (n *node) clone() *node { + return &node{ + id: n.id, + key: n.key, + dbNode: dbNode{ + value: n.value, + children: maps.Clone(n.children), + }, + valueDigest: n.valueDigest, + } +} + +// Returns the ProofNode representation of this node. +func (n *node) asProofNode() ProofNode { + pn := ProofNode{ + KeyPath: n.key.Serialize(), + Children: make(map[byte]ids.ID, len(n.children)), + ValueOrHash: Clone(n.valueDigest), + } + for index, entry := range n.children { + pn.Children[index] = entry.id + } + return pn +} diff --git a/avalanchego/x/merkledb/node_test.go b/avalanchego/x/merkledb/node_test.go new file mode 100644 index 00000000..7c7c2578 --- /dev/null +++ b/avalanchego/x/merkledb/node_test.go @@ -0,0 +1,69 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "io" + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_Node_Marshal(t *testing.T) { + root := newNode(nil, EmptyPath) + require.NotNil(t, root) + + fullpath := newPath([]byte("key")) + childNode := newNode(root, fullpath) + childNode.setValue(Some([]byte("value"))) + require.NotNil(t, childNode) + + err := childNode.calculateID(&mockMetrics{}) + require.NoError(t, err) + root.addChild(childNode) + + data, err := root.marshal() + require.NoError(t, err) + rootParsed, err := parseNode(newPath([]byte("")), data) + require.NoError(t, err) + require.Equal(t, 1, len(rootParsed.children)) + + rootIndex := root.getSingleChildPath()[len(root.key)] + parsedIndex := rootParsed.getSingleChildPath()[len(rootParsed.key)] + rootChildEntry := root.children[rootIndex] + parseChildEntry := rootParsed.children[parsedIndex] + require.Equal(t, rootChildEntry.id, parseChildEntry.id) +} + +func Test_Node_Marshal_Errors(t *testing.T) { + root := newNode(nil, EmptyPath) + require.NotNil(t, root) + + fullpath := newPath([]byte{255}) + childNode1 := newNode(root, fullpath) + childNode1.setValue(Some([]byte("value1"))) + require.NotNil(t, childNode1) + + err := childNode1.calculateID(&mockMetrics{}) + require.NoError(t, err) + root.addChild(childNode1) + + fullpath = newPath([]byte{237}) + childNode2 := newNode(root, fullpath) + childNode2.setValue(Some([]byte("value2"))) + require.NotNil(t, childNode2) + + err = childNode2.calculateID(&mockMetrics{}) + require.NoError(t, err) + root.addChild(childNode2) + + data, err := root.marshal() + require.NoError(t, err) + + for i := 1; i < len(data); i++ { + broken := data[:i] + _, err = parseNode(newPath([]byte("")), broken) + require.ErrorIs(t, err, io.ErrUnexpectedEOF) + } +} diff --git a/avalanchego/x/merkledb/path.go b/avalanchego/x/merkledb/path.go new file mode 100644 index 00000000..7a78f2ac --- /dev/null +++ b/avalanchego/x/merkledb/path.go @@ -0,0 +1,153 @@ +// Copyright (C) 2019-2023, 
Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "bytes" + "reflect" + "strings" + "unsafe" +) + +const EmptyPath path = "" + +// SerializedPath contains a path from the trie. +// The trie branch factor is 16, so the path may contain an odd number of nibbles. +// If it did contain an odd number of nibbles, the last 4 bits of the last byte should be discarded. +type SerializedPath struct { + NibbleLength int + Value []byte +} + +func (s SerializedPath) hasOddLength() bool { + return s.NibbleLength&1 == 1 +} + +func (s SerializedPath) Equal(other SerializedPath) bool { + return s.NibbleLength == other.NibbleLength && bytes.Equal(s.Value, other.Value) +} + +func (s SerializedPath) deserialize() path { + result := newPath(s.Value) + // trim the last nibble if the path has an odd length + return result[:len(result)-s.NibbleLength&1] +} + +// Returns true iff [prefix] is a prefix of [s] or equal to it. +func (s SerializedPath) HasPrefix(prefix SerializedPath) bool { + if len(s.Value) < len(prefix.Value) { + return false + } + prefixValue := prefix.Value + if !prefix.hasOddLength() { + return bytes.HasPrefix(s.Value, prefixValue) + } + reducedSize := len(prefixValue) - 1 + + // grab the last nibble in the prefix and serialized path + prefixRemainder := prefixValue[reducedSize] >> 4 + valueRemainder := s.Value[reducedSize] >> 4 + prefixValue = prefixValue[:reducedSize] + return bytes.HasPrefix(s.Value, prefixValue) && valueRemainder == prefixRemainder +} + +// Returns true iff [prefix] is a prefix of [s] but not equal to it. 
+func (s SerializedPath) HasStrictPrefix(prefix SerializedPath) bool { + return s.HasPrefix(prefix) && !s.Equal(prefix) +} + +func (s SerializedPath) NibbleVal(nibbleIndex int) byte { + value := s.Value[nibbleIndex>>1] + isOdd := byte(nibbleIndex & 1) + isEven := (1 - isOdd) + + // return value first(even index) or last 4(odd index) bits of the corresponding byte + return isEven*value>>4 + isOdd*(value&0x0F) +} + +func (s SerializedPath) AppendNibble(nibble byte) SerializedPath { + // even is 1 if even, 0 if odd + even := 1 - s.NibbleLength&1 + value := make([]byte, len(s.Value)+even) + copy(value, s.Value) + + // shift the nibble 4 left if even, do nothing if odd + value[len(value)-1] += nibble << (4 * even) + return SerializedPath{Value: value, NibbleLength: s.NibbleLength + 1} +} + +type path string + +// Returns: +// * 0 if [p] == [other]. +// * -1 if [p] < [other]. +// * 1 if [p] > [other]. +func (p path) Compare(other path) int { + return strings.Compare(string(p), string(other)) +} + +// Invariant: The returned value must not be modified. +func (p path) Bytes() []byte { + // avoid copying during the conversion + // "safe" because we never edit the value, only used as DB key + buf := *(*[]byte)(unsafe.Pointer(&p)) + (*reflect.SliceHeader)(unsafe.Pointer(&buf)).Cap = len(p) + return buf +} + +// Returns true iff [p] begins with [prefix]. +func (p path) HasPrefix(prefix path) bool { + return strings.HasPrefix(string(p), string(prefix)) +} + +// Append [val] to [p]. +func (p path) Append(val byte) path { + return p + path(val) +} + +// Returns the serialized representation of [p]. 
+func (p path) Serialize() SerializedPath { + // need half the number of bytes as nibbles + // add one so there is a byte for the odd nibble if it exists + // the extra nibble gets rounded down if even length + byteLength := (len(p) + 1) / 2 + + result := SerializedPath{ + NibbleLength: len(p), + Value: make([]byte, byteLength), + } + + // loop over the path's bytes + // if the length is odd, subtract 1 so we don't overflow on the p[pathIndex+1] + keyIndex := 0 + lastIndex := len(p) - len(p)&1 + for pathIndex := 0; pathIndex < lastIndex; pathIndex += 2 { + result.Value[keyIndex] = p[pathIndex]<<4 + p[pathIndex+1] + keyIndex++ + } + + // if there was an odd number of nibbles, grab the last one + if result.hasOddLength() { + result.Value[keyIndex] = p[keyIndex<<1] << 4 + } + + return result +} + +func newPath(p []byte) path { + // create new buffer with double the length of the input since each byte gets split into two nibbles + buffer := make([]byte, 2*len(p)) + + // first nibble gets shifted right 4 (divided by 16) to isolate the first nibble + // second nibble gets bitwise anded with 0x0F (1111) to isolate the second nibble + bufferIndex := 0 + for _, currentByte := range p { + buffer[bufferIndex] = currentByte >> 4 + buffer[bufferIndex+1] = currentByte & 0x0F + bufferIndex += 2 + } + + // avoid copying during the conversion + return *(*path)(unsafe.Pointer(&buffer)) +} diff --git a/avalanchego/x/merkledb/path_test.go b/avalanchego/x/merkledb/path_test.go new file mode 100644 index 00000000..6dcab62a --- /dev/null +++ b/avalanchego/x/merkledb/path_test.go @@ -0,0 +1,77 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_SerializedPath_NibbleVal(t *testing.T) { + path := SerializedPath{Value: []byte{240, 237}} + require.Equal(t, byte(15), path.NibbleVal(0)) + require.Equal(t, byte(0), path.NibbleVal(1)) + require.Equal(t, byte(14), path.NibbleVal(2)) + require.Equal(t, byte(13), path.NibbleVal(3)) +} + +func Test_SerializedPath_AppendNibble(t *testing.T) { + path := SerializedPath{Value: []byte{}} + require.Equal(t, 0, path.NibbleLength) + + path = path.AppendNibble(1) + require.Equal(t, 1, path.NibbleLength) + require.Equal(t, byte(1), path.NibbleVal(0)) + + path = path.AppendNibble(2) + require.Equal(t, 2, path.NibbleLength) + require.Equal(t, byte(2), path.NibbleVal(1)) +} + +func Test_SerializedPath_Has_Prefix(t *testing.T) { + first := SerializedPath{Value: []byte("FirstKey")} + prefix := SerializedPath{Value: []byte("FirstKe")} + require.True(t, first.HasPrefix(prefix)) + require.True(t, first.HasStrictPrefix(prefix)) + + first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} + prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} + require.True(t, first.HasPrefix(prefix)) + require.True(t, first.HasStrictPrefix(prefix)) + + first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} + prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} + require.True(t, first.HasPrefix(prefix)) + require.False(t, first.HasStrictPrefix(prefix)) + + first = SerializedPath{Value: []byte{247}, NibbleLength: 2} + prefix = SerializedPath{Value: []byte{240}, NibbleLength: 2} + require.False(t, first.HasPrefix(prefix)) + require.False(t, first.HasStrictPrefix(prefix)) + + first = SerializedPath{Value: []byte{247}, NibbleLength: 2} + prefix = SerializedPath{Value: []byte{240}, NibbleLength: 1} + require.True(t, first.HasPrefix(prefix)) + require.True(t, first.HasStrictPrefix(prefix)) + + first = SerializedPath{Value: []byte{}, NibbleLength: 0} + 
prefix = SerializedPath{Value: []byte{}, NibbleLength: 0} + require.True(t, first.HasPrefix(prefix)) + require.False(t, first.HasStrictPrefix(prefix)) +} + +func Test_SerializedPath_Equal(t *testing.T) { + first := SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} + prefix := SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} + require.True(t, first.Equal(prefix)) + + first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} + prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} + require.False(t, first.Equal(prefix)) + + first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} + prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} + require.True(t, first.Equal(prefix)) +} diff --git a/avalanchego/x/merkledb/proof.go b/avalanchego/x/merkledb/proof.go new file mode 100644 index 00000000..651ae480 --- /dev/null +++ b/avalanchego/x/merkledb/proof.go @@ -0,0 +1,689 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "bytes" + "context" + "errors" + "fmt" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +const verificationCacheSize = 2_000 + +var ( + ErrInvalidProof = errors.New("proof obtained an invalid root ID") + ErrInvalidMaxLength = errors.New("expected max length to be > 0") + ErrNonIncreasingValues = errors.New("keys sent are not in increasing order") + ErrStateFromOutsideOfRange = errors.New("state key falls outside of the start->end range") + ErrNonIncreasingProofNodes = errors.New("each proof node key must be a strict prefix of the next") + ErrExtraProofNodes = errors.New("extra proof nodes in path") + ErrDataInMissingRootProof = errors.New("there should be no state or deleted keys in a change proof that had a missing root") + ErrNoMerkleProof = errors.New("empty key response must include merkle proof") + ErrShouldJustBeRoot = errors.New("end proof should only contain root") + ErrNoStartProof = errors.New("no start proof") + ErrNoEndProof = errors.New("no end proof") + ErrNoProof = errors.New("proof has no nodes") + ErrProofNodeNotForKey = errors.New("the provided node has a key that is not a prefix of the specified key") + ErrProofValueDoesntMatch = errors.New("the provided value does not match the proof node for the provided key's value") + ErrProofNodeHasUnincludedValue = errors.New("the provided proof has a value for a key within the range that is not present in the provided key/values") +) + +type ProofNode struct { + KeyPath SerializedPath + // Nothing if this is an intermediate node. + // The value in this node if its length < [HashLen]. + // The hash of the value in this node otherwise. + ValueOrHash Maybe[[]byte] + Children map[byte]ids.ID +} + +// An inclusion/exclusion proof of a key. 
+type Proof struct { + // Nodes in the proof path from root --> target key + // (or node that would be where key is if it doesn't exist). + // Must always be non-empty (i.e. have the root node). + Path []ProofNode + // This is a proof that [key] exists/doesn't exist. + Key []byte + + // Nothing if [Key] isn't in the trie. + // Otherwise the value corresponding to [Key]. + Value Maybe[[]byte] +} + +// Returns nil if the trie given in [proof] has root [expectedRootID]. +// That is, this is a valid proof that [proof.Key] exists/doesn't exist +// in the trie with root [expectedRootID]. +func (proof *Proof) Verify(ctx context.Context, expectedRootID ids.ID) error { + // Make sure the proof is well-formed. + if len(proof.Path) == 0 { + return ErrNoProof + } + if err := verifyProofPath(proof.Path, newPath(proof.Key)); err != nil { + return err + } + + // Confirm that the last proof node's value matches the claimed proof value + lastNode := proof.Path[len(proof.Path)-1] + + // If the last proof node's key is [proof.Key] (i.e. this is an inclusion proof) + // then the value of the last proof node must match [proof.Value]. + // Note odd length keys can never match the [proof.Key] since it's bytes, + // and thus an even number of nibbles. + if !lastNode.KeyPath.hasOddLength() && + bytes.Equal(proof.Key, lastNode.KeyPath.Value) && + !valueOrHashMatches(proof.Value, lastNode.ValueOrHash) { + return ErrProofValueDoesntMatch + } + + // If the last proof node has an odd length or a different key than [proof.Key] + // then this is an exclusion proof and should prove that [proof.Key] isn't in the trie. + // Note odd length keys can never match the [proof.Key] since it's bytes, + // and thus an even number of nibbles. 
+ if (lastNode.KeyPath.hasOddLength() || !bytes.Equal(proof.Key, lastNode.KeyPath.Value)) && + !proof.Value.IsNothing() { + return ErrProofValueDoesntMatch + } + + view, err := getEmptyTrieView(ctx) + if err != nil { + return err + } + + // Insert all of the proof nodes. + // [provenPath] is the path that we are proving exists, or the path + // that is where the path we are proving doesn't exist should be. + provenPath := proof.Path[len(proof.Path)-1].KeyPath.deserialize() + + // Don't bother locking [db] and [view] -- nobody else has a reference to them. + if err = addPathInfo(view, proof.Path, provenPath, provenPath); err != nil { + return err + } + + gotRootID, err := view.GetMerkleRoot(ctx) + if err != nil { + return err + } + if expectedRootID != gotRootID { + return fmt.Errorf("%w:[%s], expected:[%s]", ErrInvalidProof, gotRootID, expectedRootID) + } + return nil +} + +type KeyValue struct { + Key []byte + Value []byte +} + +// A proof that a given set of key-value pairs are in a trie. +type RangeProof struct { + // A proof that the smallest key in the requested range does/doesn't exist. + // Note that this may not be an entire proof -- nodes are omitted if + // they are also in [EndProof]. + StartProof []ProofNode + + // A proof of the greatest key in [KeyValues], or, if this proof contains + // no [KeyValues], just the root. + // Empty if the request for this range proof gave no upper bound + // on the range to fetch, unless this proof contains no [KeyValues] + // and [StartProof] is empty. + EndProof []ProofNode + + // This proof proves that the key-value pairs in [KeyValues] are in the trie. + // Sorted by increasing key. + KeyValues []KeyValue +} + +// Returns nil iff all the following hold: +// - [start] <= [end]. +// - [proof] is non-empty. +// - All keys in [proof.KeyValues] are in the range [start, end]. +// If [start] is empty, all keys are considered > [start]. +// If [end] is empty, all keys are considered < [end]. 
+// - [proof.KeyValues] is sorted by increasing key. +// - [proof.StartProof] and [proof.EndProof] are well-formed. +// - One of the following holds: +// [end] and [proof.EndProof] are empty. +// [proof.StartProof], [start], [end], and [proof.KeyValues] are empty and +// [proof.EndProof] is just the root. +// [end] is non-empty and [proof.EndProof] is a valid proof of a key <= [end]. +// - [expectedRootID] is the root of the trie containing the given key-value +// pairs and start/end proofs. +func (proof *RangeProof) Verify( + ctx context.Context, + start []byte, + end []byte, + expectedRootID ids.ID, +) error { + switch { + case len(end) > 0 && bytes.Compare(start, end) > 0: + return ErrStartAfterEnd + case len(proof.KeyValues) == 0 && len(proof.StartProof) == 0 && len(proof.EndProof) == 0: + return ErrNoMerkleProof + case len(start) == 0 && len(end) == 0 && len(proof.KeyValues) == 0 && len(proof.EndProof) != 1: + return ErrShouldJustBeRoot + case len(proof.EndProof) == 0 && len(end) > 0: + return ErrNoEndProof + } + + // Make sure the key-value pairs are sorted and in [start, end]. + if err := verifyKeyValues(proof.KeyValues, start, end); err != nil { + return err + } + + largestkey := end + if len(proof.KeyValues) > 0 { + // If [proof] has key-value pairs, we should insert children + // greater than [end] to ancestors of the node containing [end] + // so that we get the expected root ID. + largestkey = proof.KeyValues[len(proof.KeyValues)-1].Key + } + + // The key-value pairs (allegedly) proven by [proof]. + keyValues := make(map[path][]byte, len(proof.KeyValues)) + for _, keyValue := range proof.KeyValues { + keyValues[newPath(keyValue.Key)] = keyValue.Value + } + + smallestPath := newPath(start) + largestPath := newPath(largestkey) + + // Ensure that the start proof is valid and contains values that + // match the key/values that were sent. 
+ if err := verifyProofPath(proof.StartProof, smallestPath); err != nil { + return err + } + if err := verifyAllRangeProofKeyValuesPresent(proof.StartProof, smallestPath, largestPath, keyValues); err != nil { + return err + } + + // Ensure that the end proof is valid and contains values that + // match the key/values that were sent. + if err := verifyProofPath(proof.EndProof, largestPath); err != nil { + return err + } + if err := verifyAllRangeProofKeyValuesPresent(proof.EndProof, smallestPath, largestPath, keyValues); err != nil { + return err + } + + // Don't need to lock [view] because nobody else has a reference to it. + view, err := getEmptyTrieView(ctx) + if err != nil { + return err + } + + // Insert all key-value pairs into the trie. + for _, kv := range proof.KeyValues { + if _, err := view.insertIntoTrie(newPath(kv.Key), Some(kv.Value)); err != nil { + return err + } + } + + // For all the nodes along the edges of the proof, insert children < [start] and > [end] + // into the trie so that we get the expected root ID (if this proof is valid). + // By inserting all children < [start], we prove that there are no keys + // > [start] but less than the first key given. That is, the peer who + // gave us this proof is not omitting nodes. + if err := addPathInfo(view, proof.StartProof, smallestPath, largestPath); err != nil { + return err + } + if err := addPathInfo(view, proof.EndProof, smallestPath, largestPath); err != nil { + return err + } + + calculatedRoot, err := view.GetMerkleRoot(ctx) + if err != nil { + return err + } + if expectedRootID != calculatedRoot { + return fmt.Errorf("%w:[%s], expected:[%s]", ErrInvalidProof, calculatedRoot, expectedRootID) + } + return nil +} + +// Verify that all non-intermediate nodes in [proof] which have keys +// in [[start], [end]] have the value given for that key in [keysValues]. 
+func verifyAllRangeProofKeyValuesPresent(proof []ProofNode, start, end path, keysValues map[path][]byte) error { + for i := 0; i < len(proof); i++ { + var ( + node = proof[i] + nodeKey = node.KeyPath + nodePath = nodeKey.deserialize() + ) + + // Skip odd length keys since they cannot have a value (enforced by [verifyProofPath]). + if !nodeKey.hasOddLength() && nodePath.Compare(start) >= 0 && nodePath.Compare(end) <= 0 { + value, ok := keysValues[nodePath] + if !ok && !node.ValueOrHash.IsNothing() { + // We didn't get a key-value pair for this key, but the proof node has a value. + return ErrProofNodeHasUnincludedValue + } + if ok && !valueOrHashMatches(Some(value), node.ValueOrHash) { + // We got a key-value pair for this key, but the value in the proof + // node doesn't match the value we got for this key. + return ErrProofValueDoesntMatch + } + } + } + return nil +} + +type ChangeProof struct { + // If false, the node that created this doesn't have + // sufficient history to generate a change proof and + // all other fields must be empty. + // Otherwise at least one other field is non-empty. + HadRootsInHistory bool + // A proof that the smallest key in the requested range does/doesn't + // exist in the trie with the requested start root. + // Empty if no lower bound on the requested range was given. + // Note that this may not be an entire proof -- nodes are omitted if + // they are also in [EndProof]. + StartProof []ProofNode + // A proof that the largest key in [KeyValues] and [DeletedKeys] + // does/doesn't exist in the trie with the requested start root. + // Empty iff no upper bound on the requested range was given + // and [KeyValues] and [DeletedKeys] are empty. + EndProof []ProofNode + // A subset of key-values that were added or had their values modified + // between the requested start root (exclusive) and the requested + // end root (inclusive). + // Sorted by increasing key. 
+ KeyValues []KeyValue + // A subset of keys that were removed from the trie between the requested + // start root (exclusive) and the requested end root (inclusive). + // Sorted by increasing key. + DeletedKeys [][]byte +} + +// Returns nil iff all of the following hold: +// - [start] <= [end]. +// - [proof] is non-empty iff [proof.HadRootsInHistory]. +// - All keys in [proof.KeyValues] and [proof.DeletedKeys] are in [start, end]. +// If [start] is empty, all keys are considered > [start]. +// If [end] is empty, all keys are considered < [end]. +// - [proof.KeyValues] and [proof.DeletedKeys] are sorted in order of increasing key. +// - [proof.StartProof] and [proof.EndProof] are well-formed. +// - When the keys in [proof.KeyValues] are added to [db] and the keys in [proof.DeletedKeys] +// are removed from [db], the root ID of [db] is [expectedEndRootID]. +// +// Assumes [db.lock] isn't held. +func (proof *ChangeProof) Verify( + ctx context.Context, + db *Database, + start []byte, + end []byte, + expectedEndRootID ids.ID, +) error { + if len(end) > 0 && bytes.Compare(start, end) > 0 { + return ErrStartAfterEnd + } + + if !proof.HadRootsInHistory { + // The node we requested the proof from didn't have sufficient + // history to fulfill this request. + if !proof.Empty() { + // cannot have any changes if the root was missing + return ErrDataInMissingRootProof + } + return nil + } + + switch { + case proof.Empty(): + return ErrNoMerkleProof + case len(end) > 0 && len(proof.EndProof) == 0: + // We requested an end proof but didn't get one. + return ErrNoEndProof + case len(start) > 0 && len(proof.StartProof) == 0 && len(proof.EndProof) == 0: + // We requested a start proof but didn't get one. + // Note that we also have to check that [proof.EndProof] is empty + // to handle the case that the start proof is empty because all + // its nodes are also in the end proof, and those nodes are omitted. 
+ return ErrNoStartProof + } + + // Make sure the key-value pairs are sorted and in [start, end]. + if err := verifyKeyValues(proof.KeyValues, start, end); err != nil { + return err + } + + // Make sure the deleted keys are sorted and in [start, end]. + deletedKeys := make([]KeyValue, len(proof.DeletedKeys)) + for i, key := range proof.DeletedKeys { + deletedKeys[i] = KeyValue{Key: key, Value: nil} + } + if err := verifyKeyValues(deletedKeys, start, end); err != nil { + return err + } + + smallestPath := newPath(start) + + // Make sure the start proof, if given, is well-formed. + if err := verifyProofPath(proof.StartProof, smallestPath); err != nil { + return err + } + + // Find the greatest key in [proof.KeyValues] and [proof.DeletedKeys]. + // Note that [proof.EndProof] is a proof for this key. + // [largestPath] is also used when we add children of proof nodes to [trie] below. + largestPath := newPath(proof.getLargestKey(end)) + + // Make sure the end proof, if given, is well-formed. + if err := verifyProofPath(proof.EndProof, largestPath); err != nil { + return err + } + + // gather all key/values in the proof + keyValues := make(map[path]Maybe[[]byte], len(proof.KeyValues)+len(proof.DeletedKeys)) + for _, keyValue := range proof.KeyValues { + keyValues[newPath(keyValue.Key)] = Some(keyValue.Value) + } + for _, key := range proof.DeletedKeys { + keyValues[newPath(key)] = Nothing[[]byte]() + } + + // want to prevent commit writes to DB, but not prevent db reads + db.commitLock.RLock() + defer db.commitLock.RUnlock() + + if err := verifyAllChangeProofKeyValuesPresent( + ctx, + db, + proof.StartProof, + smallestPath, + largestPath, + keyValues, + ); err != nil { + return err + } + + if err := verifyAllChangeProofKeyValuesPresent( + ctx, + db, + proof.EndProof, + smallestPath, + largestPath, + keyValues, + ); err != nil { + return err + } + + // Don't need to lock [view] because nobody else has a reference to it. 
+ view, err := db.newUntrackedView(len(proof.KeyValues)) + if err != nil { + return err + } + + // Insert the key-value pairs into the trie. + for _, kv := range proof.KeyValues { + if _, err := view.insertIntoTrie(newPath(kv.Key), Some(kv.Value)); err != nil { + return err + } + } + + // Remove the deleted keys from the trie. + for _, key := range proof.DeletedKeys { + if err := view.removeFromTrie(newPath(key)); err != nil { + return err + } + } + + // For all the nodes along the edges of the proof, insert children < [start] and > [largestKey] + // into the trie so that we get the expected root ID (if this proof is valid). + if err := addPathInfo(view, proof.StartProof, smallestPath, largestPath); err != nil { + return err + } + if err := addPathInfo(view, proof.EndProof, smallestPath, largestPath); err != nil { + return err + } + + // Make sure we get the expected root. + calculatedRoot, err := view.getMerkleRoot(ctx) + if err != nil { + return err + } + if expectedEndRootID != calculatedRoot { + return fmt.Errorf("%w:[%s], expected:[%s]", ErrInvalidProof, calculatedRoot, expectedEndRootID) + } + + return nil +} + +// Verifies that all values present in the [proof]: +// - Are nothing when deleted, not in the db, or the node has an odd path length. +// - if the node's path is within the key range, that has a value that matches the value passed in the change list or in the db +func verifyAllChangeProofKeyValuesPresent( + ctx context.Context, + db *Database, + proof []ProofNode, + start path, + end path, + keysValues map[path]Maybe[[]byte], +) error { + for i := 0; i < len(proof); i++ { + var ( + node = proof[i] + nodeKey = node.KeyPath + nodePath = nodeKey.deserialize() + ) + + // Check the value of any node with a key that is within the range. + // Skip odd length keys since they cannot have a value (enforced by [verifyProofPath]). 
+ if !nodeKey.hasOddLength() && nodePath.Compare(start) >= 0 && nodePath.Compare(end) <= 0 { + value, ok := keysValues[nodePath] + if !ok { + // This value isn't in the list of key-value pairs we got. + dbValue, err := db.GetValue(ctx, nodeKey.Value) + if err != nil { + if err != database.ErrNotFound { + return err + } + // This key isn't in the database so proof node should have Nothing. + value = Nothing[[]byte]() + } else { + // This key is in the database so proof node should have matching value. + value = Some(dbValue) + } + } + if !valueOrHashMatches(value, node.ValueOrHash) { + return ErrProofValueDoesntMatch + } + } + } + return nil +} + +func (proof *ChangeProof) Empty() bool { + return len(proof.KeyValues) == 0 && len(proof.DeletedKeys) == 0 && + len(proof.StartProof) == 0 && len(proof.EndProof) == 0 +} + +// Returns the largest key in [proof.KeyValues] and [proof.DeletedKeys]. +// If there are no keys in the proof, returns [end]. +func (proof *ChangeProof) getLargestKey(end []byte) []byte { + largestKey := end + if len(proof.KeyValues) > 0 { + largestKey = proof.KeyValues[len(proof.KeyValues)-1].Key + } + if len(proof.DeletedKeys) > 0 { + lastDeleted := proof.DeletedKeys[len(proof.DeletedKeys)-1] + if bytes.Compare(lastDeleted, largestKey) > 0 || len(proof.KeyValues) == 0 { + largestKey = lastDeleted + } + } + return largestKey +} + +// Returns nil iff both hold: +// 1. [kvs] is sorted by key in increasing order. +// 2. All keys in [kvs] are in the range [start, end]. +// If [start] is nil, there is no lower bound on acceptable keys. +// If [end] is nil, there is no upper bound on acceptable keys. +// If [kvs] is empty, returns nil. 
func verifyKeyValues(kvs []KeyValue, start, end []byte) error {
	// A nil/empty bound means "unbounded" on that side.
	hasLowerBound := len(start) > 0
	hasUpperBound := len(end) > 0
	for i := 0; i < len(kvs); i++ {
		// Adjacent keys must be strictly increasing (also rejects duplicates).
		if i < len(kvs)-1 && bytes.Compare(kvs[i].Key, kvs[i+1].Key) >= 0 {
			return ErrNonIncreasingValues
		}
		// Every key must lie inside [start, end] when the bound exists.
		if (hasLowerBound && bytes.Compare(kvs[i].Key, start) < 0) ||
			(hasUpperBound && bytes.Compare(kvs[i].Key, end) > 0) {
			return ErrStateFromOutsideOfRange
		}
	}
	return nil
}

// Returns nil iff all the following hold:
//   - Any node with an odd nibble length, should not have a value associated with it
//     since all keys with values are written in bytes, so have even nibble length.
//   - Each key in [proof] is a strict prefix of the following key.
//   - Each key in [proof] is a strict prefix of [keyBytes], except possibly the last.
//   - If the last element in [proof] is [keyBytes], this is an inclusion proof.
//     Otherwise, this is an exclusion proof and [keyBytes] must not be in [proof].
func verifyProofPath(proof []ProofNode, keyPath path) error {
	provenKey := keyPath.Serialize()

	// loop over all but the last node since it will not have the prefix in exclusion proofs
	for i := 0; i < len(proof)-1; i++ {
		nodeKey := proof[i].KeyPath

		// intermediate nodes (nodes with odd nibble length) should never have a value associated with them
		if nodeKey.hasOddLength() && !proof[i].ValueOrHash.IsNothing() {
			return ErrOddLengthWithValue
		}

		// each node's key must be a strict prefix of the proven key
		// (a.HasStrictPrefix(b) == "b is a strict prefix of a")
		if !provenKey.HasStrictPrefix(nodeKey) {
			return ErrProofNodeNotForKey
		}

		// each node should have a key that is a prefix of the next node's key
		nextKey := proof[i+1].KeyPath
		if !nextKey.HasStrictPrefix(nodeKey) {
			return ErrNonIncreasingProofNodes
		}
	}

	// check the last node for a value since the above loop doesn't check the last node
	if len(proof) > 0 {
		lastNode := proof[len(proof)-1]
		if lastNode.KeyPath.hasOddLength() &&
			!lastNode.ValueOrHash.IsNothing() {
			return ErrOddLengthWithValue
		}
	}

	return nil
}

// Returns true if [value] and [valueDigest] match.
// [valueOrHash] should be the [ValueOrHash] field of a [ProofNode].
// Values shorter than [HashLength] are stored inline in proof nodes;
// longer values are represented by their sha256 hash.
func valueOrHashMatches(value Maybe[[]byte], valueOrHash Maybe[[]byte]) bool {
	var (
		valueIsNothing  = value.IsNothing()
		digestIsNothing = valueOrHash.IsNothing()
	)

	switch {
	case valueIsNothing != digestIsNothing:
		// One is nothing and the other isn't -- no match.
		return false
	case valueIsNothing:
		// Both are nothing -- match.
		return true
	case len(value.value) < HashLength:
		// Short values are compared directly, byte-for-byte.
		return bytes.Equal(value.value, valueOrHash.value)
	default:
		// Long values are compared by hash.
		valueHash := hashing.ComputeHash256(value.value)
		return bytes.Equal(valueHash, valueOrHash.value)
	}
}

// Adds each key/value pair in [proofPath] to [t].
// For each proof node, adds the children that are < [start] or > [end].
// If [start] is empty, no children are < [start].
// If [end] is empty, no children are > [end].
// Assumes [t.lock] is held.
func addPathInfo(
	t *trieView,
	proofPath []ProofNode,
	startPath path,
	endPath path,
) error {
	var (
		hasLowerBound = len(startPath) > 0
		hasUpperBound = len(endPath) > 0
	)

	// Iterate deepest-first so parents are inserted after their descendants.
	for i := len(proofPath) - 1; i >= 0; i-- {
		proofNode := proofPath[i]
		keyPath := proofNode.KeyPath.deserialize()

		if len(keyPath)&1 == 1 && !proofNode.ValueOrHash.IsNothing() {
			// a value cannot have an odd number of nibbles in its key
			return ErrOddLengthWithValue
		}

		// load the node associated with the key or create a new one
		// pass nothing because we are going to overwrite the value digest below
		n, err := t.insertIntoTrie(keyPath, Nothing[[]byte]())
		if err != nil {
			return err
		}
		// We overwrite the valueDigest to be the hash provided in the proof
		// node because we may not know the pre-image of the valueDigest.
		n.valueDigest = proofNode.ValueOrHash

		if !hasLowerBound && !hasUpperBound {
			// No children of proof nodes are outside the range.
			// No need to add any children to [n].
			continue
		}

		// Add [proofNode]'s children which are outside the range [start, end].
		compressedPath := EmptyPath
		for index, childID := range proofNode.Children {
			if existingChild, ok := n.children[index]; ok {
				compressedPath = existingChild.compressedPath
			}
			// NOTE(review): when [n] has no existing child at [index],
			// [compressedPath] retains the previous iteration's value rather
			// than resetting to EmptyPath — TODO confirm this is intended.
			childPath := keyPath.Append(index) + compressedPath
			if (hasLowerBound && childPath.Compare(startPath) < 0) ||
				(hasUpperBound && childPath.Compare(endPath) > 0) {
				n.addChildWithoutNode(index, compressedPath, childID)
			}
		}
	}

	return nil
}

// getEmptyTrieView returns an untracked view over a fresh, empty in-memory
// database, with tracing disabled. Used for verification scratch work.
func getEmptyTrieView(ctx context.Context) (*trieView, error) {
	tracer, err := trace.New(trace.Config{Enabled: false})
	if err != nil {
		return nil, err
	}
	db, err := newDatabase(
		ctx,
		memdb.New(),
		Config{
			Tracer:        tracer,
			NodeCacheSize: verificationCacheSize,
		},
		&mockMetrics{},
	)
	if err != nil {
		return nil, err
	}

	return db.newUntrackedView(defaultPreallocationSize)
}
diff --git a/avalanchego/x/merkledb/proof_test.go b/avalanchego/x/merkledb/proof_test.go
new file mode 100644
index 00000000..c4d1eb86
--- /dev/null
+++ b/avalanchego/x/merkledb/proof_test.go
@@ -0,0 +1,1660 @@
+// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package merkledb + +import ( + "bytes" + "context" + "io" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +func getBasicDB() (*Database, error) { + return newDatabase( + context.Background(), + memdb.New(), + Config{ + Tracer: newNoopTracer(), + HistoryLength: 1000, + NodeCacheSize: 1000, + }, + &mockMetrics{}, + ) +} + +func writeBasicBatch(t *testing.T, db *Database) { + batch := db.NewBatch() + require.NoError(t, batch.Put([]byte{0}, []byte{0})) + require.NoError(t, batch.Put([]byte{1}, []byte{1})) + require.NoError(t, batch.Put([]byte{2}, []byte{2})) + require.NoError(t, batch.Put([]byte{3}, []byte{3})) + require.NoError(t, batch.Put([]byte{4}, []byte{4})) + require.NoError(t, batch.Write()) +} + +func Test_Proof_Marshal(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() + require.NoError(err) + require.NotNil(dbTrie) + writeBasicBatch(t, dbTrie) + + proof, err := dbTrie.GetProof(context.Background(), []byte{1}) + require.NoError(err) + require.NotNil(proof) + + proofBytes, err := Codec.EncodeProof(Version, proof) + require.NoError(err) + + parsedProof := &Proof{} + _, err = Codec.DecodeProof(proofBytes, parsedProof) + require.NoError(err) + + verifyPath(t, proof.Path, parsedProof.Path) + require.Equal([]byte{1}, proof.Value.value) +} + +func Test_Proof_Empty(t *testing.T) { + proof := &Proof{} + err := proof.Verify(context.Background(), ids.Empty) + require.ErrorIs(t, err, ErrNoProof) +} + +func Test_Proof_MissingValue(t *testing.T) { + trie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, trie) + + require.NoError(t, trie.Insert(context.Background(), []byte{1}, []byte{0})) + require.NoError(t, trie.Insert(context.Background(), []byte{1, 2}, []byte{0})) + require.NoError(t, trie.Insert(context.Background(), []byte{1, 2, 4}, []byte{0})) + require.NoError(t, 
trie.Insert(context.Background(), []byte{1, 3}, []byte{0})) + + // get a proof for a value not in the db + proof, err := trie.GetProof(context.Background(), []byte{1, 2, 3}) + require.NoError(t, err) + require.NotNil(t, proof) + + require.True(t, proof.Value.IsNothing()) + + proofBytes, err := Codec.EncodeProof(Version, proof) + require.NoError(t, err) + + parsedProof := &Proof{} + _, err = Codec.DecodeProof(proofBytes, parsedProof) + require.NoError(t, err) + + verifyPath(t, proof.Path, parsedProof.Path) +} + +func Test_Proof_Marshal_Errors(t *testing.T) { + trie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, trie) + + writeBasicBatch(t, trie) + + proof, err := trie.GetProof(context.Background(), []byte{1}) + require.NoError(t, err) + require.NotNil(t, proof) + + proofBytes, err := Codec.EncodeProof(Version, proof) + require.NoError(t, err) + + for i := 1; i < len(proofBytes); i++ { + broken := proofBytes[:i] + parsed := &Proof{} + _, err = Codec.DecodeProof(broken, parsed) + require.ErrorIs(t, err, io.ErrUnexpectedEOF) + } + + // add a child at an invalid index + proof.Path[0].Children[255] = ids.Empty + _, err = Codec.EncodeProof(Version, proof) + require.ErrorIs(t, err, errChildIndexTooLarge) +} + +func verifyPath(t *testing.T, path1, path2 []ProofNode) { + require.Equal(t, len(path1), len(path2)) + for i := range path1 { + require.True(t, bytes.Equal(path1[i].KeyPath.Value, path2[i].KeyPath.Value)) + require.Equal(t, path1[i].KeyPath.hasOddLength(), path2[i].KeyPath.hasOddLength()) + require.True(t, bytes.Equal(path1[i].ValueOrHash.value, path2[i].ValueOrHash.value)) + for childIndex := range path1[i].Children { + require.Equal(t, path1[i].Children[childIndex], path2[i].Children[childIndex]) + } + } +} + +func Test_Proof_Verify_Bad_Data(t *testing.T) { + type test struct { + name string + malform func(proof *Proof) + expectedErr error + } + + tests := []test{ + { + name: "happyPath", + malform: func(proof *Proof) {}, + expectedErr: nil, + 
}, + { + name: "odd length key path with value", + malform: func(proof *Proof) { + proof.Path[1].ValueOrHash = Some([]byte{1, 2}) + }, + expectedErr: ErrOddLengthWithValue, + }, + { + name: "last proof node has missing value", + malform: func(proof *Proof) { + proof.Path[len(proof.Path)-1].ValueOrHash = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "missing value on proof", + malform: func(proof *Proof) { + proof.Value = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "mismatched value on proof", + malform: func(proof *Proof) { + proof.Value = Some([]byte{10}) + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "value of exclusion proof", + malform: func(proof *Proof) { + // remove the value node to make it look like it is an exclusion proof + proof.Path = proof.Path[:len(proof.Path)-1] + }, + expectedErr: ErrProofValueDoesntMatch, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + + writeBasicBatch(t, db) + + proof, err := db.GetProof(context.Background(), []byte{2}) + require.NoError(t, err) + require.NotNil(t, proof) + + tt.malform(proof) + + err = proof.Verify(context.Background(), db.getMerkleRoot()) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} + +func Test_Proof_ValueOrHashMatches(t *testing.T) { + require.True(t, valueOrHashMatches(Some([]byte{0}), Some([]byte{0}))) + require.False(t, valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{0})))) + require.True(t, valueOrHashMatches(Nothing[[]byte](), Nothing[[]byte]())) + + require.False(t, valueOrHashMatches(Some([]byte{0}), Nothing[[]byte]())) + require.False(t, valueOrHashMatches(Nothing[[]byte](), Some([]byte{0}))) + require.False(t, valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{1})))) + require.False(t, valueOrHashMatches(Some(hashing.ComputeHash256([]byte{0})), Nothing[[]byte]())) +} + +func 
Test_RangeProof_Extra_Value(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + writeBasicBatch(t, db) + + val, err := db.Get([]byte{2}) + require.NoError(t, err) + require.Equal(t, []byte{2}, val) + + proof, err := db.GetRangeProof(context.Background(), []byte{1}, []byte{5, 5}, 10) + require.NoError(t, err) + require.NotNil(t, proof) + + err = proof.Verify( + context.Background(), + []byte{1}, + []byte{5, 5}, + db.root.id, + ) + require.NoError(t, err) + + proof.KeyValues = append(proof.KeyValues, KeyValue{Key: []byte{5}, Value: []byte{5}}) + + err = proof.Verify( + context.Background(), + []byte{1}, + []byte{5, 5}, + db.root.id, + ) + require.ErrorIs(t, err, ErrInvalidProof) +} + +func Test_RangeProof_Verify_Bad_Data(t *testing.T) { + type test struct { + name string + malform func(proof *RangeProof) + expectedErr error + } + + tests := []test{ + { + name: "happyPath", + malform: func(proof *RangeProof) {}, + expectedErr: nil, + }, + { + name: "StartProof: last proof node has missing value", + malform: func(proof *RangeProof) { + proof.StartProof[len(proof.StartProof)-1].ValueOrHash = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "EndProof: odd length key path with value", + malform: func(proof *RangeProof) { + proof.EndProof[1].ValueOrHash = Some([]byte{1, 2}) + }, + expectedErr: ErrOddLengthWithValue, + }, + { + name: "EndProof: last proof node has missing value", + malform: func(proof *RangeProof) { + proof.EndProof[len(proof.EndProof)-1].ValueOrHash = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "missing key/value", + malform: func(proof *RangeProof) { + proof.KeyValues = proof.KeyValues[1:] + }, + expectedErr: ErrProofNodeHasUnincludedValue, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + writeBasicBatch(t, db) + + proof, err := db.GetRangeProof(context.Background(), []byte{2}, []byte{3, 
0}, 50) + require.NoError(t, err) + require.NotNil(t, proof) + + tt.malform(proof) + + err = proof.Verify(context.Background(), []byte{2}, []byte{3, 0}, db.getMerkleRoot()) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} + +func Test_RangeProof_MaxLength(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie, err := dbTrie.NewView() + require.NoError(t, err) + + _, err = trie.GetRangeProof(context.Background(), nil, nil, -1) + require.ErrorIs(t, err, ErrInvalidMaxLength) + + _, err = trie.GetRangeProof(context.Background(), nil, nil, 0) + require.ErrorIs(t, err, ErrInvalidMaxLength) +} + +func Test_Proof(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie, err := dbTrie.NewView() + require.NoError(t, err) + + err = trie.Insert(context.Background(), []byte("key0"), []byte("value0")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key2"), []byte("value2")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key3"), []byte("value3")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key4"), []byte("value4")) + require.NoError(t, err) + + _, err = trie.GetMerkleRoot(context.Background()) + require.NoError(t, err) + proof, err := trie.GetProof(context.Background(), []byte("key1")) + require.NoError(t, err) + require.NotNil(t, proof) + + require.Len(t, proof.Path, 3) + + require.Equal(t, newPath([]byte("key1")).Serialize(), proof.Path[2].KeyPath) + require.Equal(t, Some([]byte("value1")), proof.Path[2].ValueOrHash) + + require.Equal(t, newPath([]byte{}).Serialize(), proof.Path[0].KeyPath) + require.True(t, proof.Path[0].ValueOrHash.IsNothing()) + + expectedRootID, err := trie.GetMerkleRoot(context.Background()) + require.NoError(t, err) + err = proof.Verify(context.Background(), 
expectedRootID) + require.NoError(t, err) + + proof.Path[0].ValueOrHash = Some([]byte("value2")) + + err = proof.Verify(context.Background(), expectedRootID) + require.ErrorIs(t, err, ErrInvalidProof) +} + +func Test_RangeProof_Syntactic_Verify(t *testing.T) { + type test struct { + name string + start []byte + end []byte + proof *RangeProof + expectedErr error + } + + tests := []test{ + { + name: "start > end", + start: []byte{1}, + end: []byte{0}, + proof: &RangeProof{}, + expectedErr: ErrStartAfterEnd, + }, + { + name: "empty", // Also tests start can be > end if end is nil + start: []byte{1}, + end: nil, + proof: &RangeProof{}, + expectedErr: ErrNoMerkleProof, + }, + { + name: "should just be root", + start: nil, + end: nil, + proof: &RangeProof{ + EndProof: []ProofNode{{}, {}}, + }, + expectedErr: ErrShouldJustBeRoot, + }, + { + name: "no end proof", + start: []byte{1}, + end: []byte{1}, + proof: &RangeProof{ + KeyValues: []KeyValue{{Key: []byte{1}, Value: []byte{1}}}, + }, + expectedErr: ErrNoEndProof, + }, + { + name: "unsorted key values", + start: []byte{1}, + end: nil, + proof: &RangeProof{ + KeyValues: []KeyValue{ + {Key: []byte{1}, Value: []byte{1}}, + {Key: []byte{0}, Value: []byte{0}}, + }, + }, + expectedErr: ErrNonIncreasingValues, + }, + { + name: "key lower than start", + start: []byte{1}, + end: nil, + proof: &RangeProof{ + KeyValues: []KeyValue{ + {Key: []byte{0}, Value: []byte{0}}, + }, + }, + expectedErr: ErrStateFromOutsideOfRange, + }, + { + name: "key greater than end", + start: []byte{1}, + end: []byte{1}, + proof: &RangeProof{ + KeyValues: []KeyValue{ + {Key: []byte{2}, Value: []byte{0}}, + }, + EndProof: []ProofNode{{}}, + }, + expectedErr: ErrStateFromOutsideOfRange, + }, + { + name: "start proof nodes in wrong order", + start: []byte{1, 2}, + end: nil, + proof: &RangeProof{ + KeyValues: []KeyValue{ + {Key: []byte{1, 2}, Value: []byte{1}}, + }, + StartProof: []ProofNode{ + { + KeyPath: newPath([]byte{2}).Serialize(), + }, + { + KeyPath: 
newPath([]byte{1}).Serialize(), + }, + }, + }, + expectedErr: ErrProofNodeNotForKey, + }, + { + name: "start proof has node for wrong key", + start: []byte{1, 2}, + end: nil, + proof: &RangeProof{ + KeyValues: []KeyValue{ + {Key: []byte{1, 2}, Value: []byte{1}}, + }, + StartProof: []ProofNode{ + { + KeyPath: newPath([]byte{1}).Serialize(), + }, + { + KeyPath: newPath([]byte{1, 2, 3}).Serialize(), // Not a prefix of [1, 2] + }, + { + KeyPath: newPath([]byte{1, 2, 3, 4}).Serialize(), + }, + }, + }, + expectedErr: ErrProofNodeNotForKey, + }, + { + name: "end proof nodes in wrong order", + start: nil, + end: []byte{1, 2}, + proof: &RangeProof{ + KeyValues: []KeyValue{ + {Key: []byte{1, 2}, Value: []byte{1}}, + }, + EndProof: []ProofNode{ + { + KeyPath: newPath([]byte{2}).Serialize(), + }, + { + KeyPath: newPath([]byte{1}).Serialize(), + }, + }, + }, + expectedErr: ErrProofNodeNotForKey, + }, + { + name: "end proof has node for wrong key", + start: nil, + end: []byte{1, 2}, + proof: &RangeProof{ + KeyValues: []KeyValue{ + {Key: []byte{1, 2}, Value: []byte{1}}, + }, + EndProof: []ProofNode{ + { + KeyPath: newPath([]byte{1}).Serialize(), + }, + { + KeyPath: newPath([]byte{1, 2, 3}).Serialize(), // Not a prefix of [1, 2] + }, + { + KeyPath: newPath([]byte{1, 2, 3, 4}).Serialize(), + }, + }, + }, + expectedErr: ErrProofNodeNotForKey, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty) + require.ErrorIs(err, tt.expectedErr) + }) + } +} + +func Test_RangeProof(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + writeBasicBatch(t, db) + + proof, err := db.GetRangeProof(context.Background(), []byte{1}, []byte{3, 5}, 10) + require.NoError(err) + require.NotNil(proof) + require.Len(proof.KeyValues, 3) + + require.Equal([]byte{1}, proof.KeyValues[0].Key) + require.Equal([]byte{2}, proof.KeyValues[1].Key) + 
require.Equal([]byte{3}, proof.KeyValues[2].Key) + + require.Equal([]byte{1}, proof.KeyValues[0].Value) + require.Equal([]byte{2}, proof.KeyValues[1].Value) + require.Equal([]byte{3}, proof.KeyValues[2].Value) + + require.Equal([]byte{}, proof.EndProof[0].KeyPath.Value) + require.Equal([]byte{0}, proof.EndProof[1].KeyPath.Value) + require.Equal([]byte{3}, proof.EndProof[2].KeyPath.Value) + + // only a single node here since others are duplicates in endproof + require.Equal([]byte{1}, proof.StartProof[0].KeyPath.Value) + + err = proof.Verify( + context.Background(), + []byte{1}, + []byte{3, 5}, + db.root.id, + ) + require.NoError(err) +} + +func Test_RangeProof_BadBounds(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + + // non-nil start/end + proof, err := db.GetRangeProof(context.Background(), []byte{4}, []byte{3}, 50) + require.ErrorIs(t, err, ErrStartAfterEnd) + require.Nil(t, proof) +} + +func Test_RangeProof_NilStart(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + batch := db.NewBatch() + err = batch.Put([]byte("key1"), []byte("value1")) + require.NoError(t, err) + err = batch.Put([]byte("key2"), []byte("value2")) + require.NoError(t, err) + err = batch.Put([]byte("key3"), []byte("value3")) + require.NoError(t, err) + err = batch.Put([]byte("key4"), []byte("value4")) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + + val, err := db.Get([]byte("key1")) + require.NoError(t, err) + require.Equal(t, []byte("value1"), val) + + proof, err := db.GetRangeProof(context.Background(), nil, []byte("key35"), 2) + require.NoError(t, err) + require.NotNil(t, proof) + + require.Len(t, proof.KeyValues, 2) + + require.Equal(t, []byte("key1"), proof.KeyValues[0].Key) + require.Equal(t, []byte("key2"), proof.KeyValues[1].Key) + + require.Equal(t, []byte("value1"), proof.KeyValues[0].Value) + require.Equal(t, []byte("value2"), proof.KeyValues[1].Value) + + require.Equal(t, newPath([]byte("key2")).Serialize(), 
proof.EndProof[2].KeyPath) + require.Equal(t, SerializedPath{Value: []uint8{0x6b, 0x65, 0x79, 0x30}, NibbleLength: 7}, proof.EndProof[1].KeyPath) + require.Equal(t, newPath([]byte("")).Serialize(), proof.EndProof[0].KeyPath) + + err = proof.Verify( + context.Background(), + nil, + []byte("key35"), + db.root.id, + ) + require.NoError(t, err) +} + +func Test_RangeProof_NilEnd(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + writeBasicBatch(t, db) + require.NoError(t, err) + + proof, err := db.GetRangeProof(context.Background(), []byte{1}, nil, 2) + require.NoError(t, err) + require.NotNil(t, proof) + + require.Len(t, proof.KeyValues, 2) + + require.Equal(t, []byte{1}, proof.KeyValues[0].Key) + require.Equal(t, []byte{2}, proof.KeyValues[1].Key) + + require.Equal(t, []byte{1}, proof.KeyValues[0].Value) + require.Equal(t, []byte{2}, proof.KeyValues[1].Value) + + require.Equal(t, []byte{1}, proof.StartProof[0].KeyPath.Value) + + require.Equal(t, []byte{}, proof.EndProof[0].KeyPath.Value) + require.Equal(t, []byte{0}, proof.EndProof[1].KeyPath.Value) + require.Equal(t, []byte{2}, proof.EndProof[2].KeyPath.Value) + + err = proof.Verify( + context.Background(), + []byte{1}, + nil, + db.root.id, + ) + require.NoError(t, err) +} + +func Test_RangeProof_EmptyValues(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + batch := db.NewBatch() + err = batch.Put([]byte("key1"), nil) + require.NoError(t, err) + err = batch.Put([]byte("key12"), []byte("value1")) + require.NoError(t, err) + err = batch.Put([]byte("key2"), []byte{}) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + + val, err := db.Get([]byte("key12")) + require.NoError(t, err) + require.Equal(t, []byte("value1"), val) + + proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key2"), 10) + require.NoError(t, err) + require.NotNil(t, proof) + + require.Len(t, proof.KeyValues, 3) + require.Equal(t, []byte("key1"), 
proof.KeyValues[0].Key) + require.Empty(t, proof.KeyValues[0].Value) + require.Equal(t, []byte("key12"), proof.KeyValues[1].Key) + require.Equal(t, []byte("value1"), proof.KeyValues[1].Value) + require.Equal(t, []byte("key2"), proof.KeyValues[2].Key) + require.Empty(t, proof.KeyValues[2].Value) + + require.Len(t, proof.StartProof, 1) + require.Equal(t, newPath([]byte("key1")).Serialize(), proof.StartProof[0].KeyPath) + + require.Len(t, proof.EndProof, 3) + require.Equal(t, newPath([]byte("key2")).Serialize(), proof.EndProof[2].KeyPath) + require.Equal(t, newPath([]byte{}).Serialize(), proof.EndProof[0].KeyPath) + + err = proof.Verify( + context.Background(), + []byte("key1"), + []byte("key2"), + db.root.id, + ) + require.NoError(t, err) +} + +func Test_RangeProof_Marshal_Nil(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + writeBasicBatch(t, db) + + val, err := db.Get([]byte{1}) + require.NoError(t, err) + require.Equal(t, []byte{1}, val) + + proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key35"), 10) + require.NoError(t, err) + require.NotNil(t, proof) + + proofBytes, err := Codec.EncodeRangeProof(Version, proof) + require.NoError(t, err) + + parsedProof := &RangeProof{} + _, err = Codec.DecodeRangeProof(proofBytes, parsedProof) + require.NoError(t, err) + + verifyPath(t, proof.StartProof, parsedProof.StartProof) + verifyPath(t, proof.EndProof, parsedProof.EndProof) + + for index, kv := range proof.KeyValues { + require.True(t, bytes.Equal(kv.Key, parsedProof.KeyValues[index].Key)) + require.True(t, bytes.Equal(kv.Value, parsedProof.KeyValues[index].Value)) + } +} + +func Test_RangeProof_Marshal(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + + writeBasicBatch(t, db) + + val, err := db.Get([]byte{1}) + require.NoError(t, err) + require.Equal(t, []byte{1}, val) + + proof, err := db.GetRangeProof(context.Background(), nil, nil, 10) + require.NoError(t, err) + require.NotNil(t, proof) + + 
proofBytes, err := Codec.EncodeRangeProof(Version, proof) + require.NoError(t, err) + + parsedProof := &RangeProof{} + _, err = Codec.DecodeRangeProof(proofBytes, parsedProof) + require.NoError(t, err) + + verifyPath(t, proof.StartProof, parsedProof.StartProof) + verifyPath(t, proof.EndProof, parsedProof.EndProof) + + for index, state := range proof.KeyValues { + require.True(t, bytes.Equal(state.Key, parsedProof.KeyValues[index].Key)) + require.True(t, bytes.Equal(state.Value, parsedProof.KeyValues[index].Value)) + } +} + +func Test_RangeProof_Marshal_Errors(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + writeBasicBatch(t, db) + + proof, err := db.GetRangeProof(context.Background(), nil, nil, 10) + require.NoError(t, err) + require.NotNil(t, proof) + + proofBytes, err := Codec.EncodeRangeProof(Version, proof) + require.NoError(t, err) + + for i := 1; i < len(proofBytes); i++ { + broken := proofBytes[:i] + parsedProof := &RangeProof{} + _, err = Codec.DecodeRangeProof(broken, parsedProof) + require.ErrorIs(t, err, io.ErrUnexpectedEOF) + } +} + +func TestChangeProofGetLargestKey(t *testing.T) { + type test struct { + name string + proof ChangeProof + end []byte + expected []byte + } + + tests := []test{ + { + name: "empty proof", + proof: ChangeProof{}, + end: []byte{0}, + expected: []byte{0}, + }, + { + name: "1 KV no deleted keys", + proof: ChangeProof{ + KeyValues: []KeyValue{ + { + Key: []byte{1}, + }, + }, + }, + end: []byte{0}, + expected: []byte{1}, + }, + { + name: "2 KV no deleted keys", + proof: ChangeProof{ + KeyValues: []KeyValue{ + { + Key: []byte{1}, + }, + { + Key: []byte{2}, + }, + }, + }, + end: []byte{0}, + expected: []byte{2}, + }, + { + name: "no KVs 1 deleted key", + proof: ChangeProof{ + DeletedKeys: [][]byte{{1}}, + }, + end: []byte{0}, + expected: []byte{1}, + }, + { + name: "no KVs 2 deleted keys", + proof: ChangeProof{ + DeletedKeys: [][]byte{{1}, {2}}, + }, + end: []byte{0}, + expected: []byte{2}, + }, + { + name: 
"KV and deleted keys; KV larger", + proof: ChangeProof{ + KeyValues: []KeyValue{ + { + Key: []byte{1}, + }, + { + Key: []byte{3}, + }, + }, + DeletedKeys: [][]byte{{0}, {2}}, + }, + end: []byte{5}, + expected: []byte{3}, + }, + { + name: "KV and deleted keys; deleted key larger", + proof: ChangeProof{ + KeyValues: []KeyValue{ + { + Key: []byte{0}, + }, + { + Key: []byte{2}, + }, + }, + DeletedKeys: [][]byte{{1}, {3}}, + }, + end: []byte{5}, + expected: []byte{3}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.expected, tt.proof.getLargestKey(tt.end)) + }) + } +} + +func Test_ChangeProof_Marshal(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + batch := db.NewBatch() + err = batch.Put([]byte("key0"), []byte("value0")) + require.NoError(t, err) + err = batch.Put([]byte("key1"), []byte("value1")) + require.NoError(t, err) + err = batch.Put([]byte("key2"), []byte("value2")) + require.NoError(t, err) + err = batch.Put([]byte("key3"), []byte("value3")) + require.NoError(t, err) + err = batch.Put([]byte("key4"), []byte("value4")) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + batch = db.NewBatch() + err = batch.Put([]byte("key4"), []byte("value0")) + require.NoError(t, err) + err = batch.Put([]byte("key5"), []byte("value1")) + require.NoError(t, err) + err = batch.Put([]byte("key6"), []byte("value2")) + require.NoError(t, err) + err = batch.Put([]byte("key7"), []byte("value3")) + require.NoError(t, err) + err = batch.Put([]byte("key8"), []byte("value4")) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + + batch = db.NewBatch() + err = batch.Put([]byte("key9"), []byte("value0")) + require.NoError(t, err) + err = batch.Put([]byte("key10"), []byte("value1")) + require.NoError(t, err) + err = batch.Put([]byte("key11"), []byte("value2")) + require.NoError(t, err) + 
err = batch.Put([]byte("key12"), []byte("value3")) + require.NoError(t, err) + err = batch.Put([]byte("key13"), []byte("value4")) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + endroot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + proof, err := db.GetChangeProof(context.Background(), startRoot, endroot, nil, nil, 50) + require.NoError(t, err) + require.NotNil(t, proof) + require.True(t, proof.HadRootsInHistory) + + proofBytes, err := Codec.EncodeChangeProof(Version, proof) + require.NoError(t, err) + + parsedProof := &ChangeProof{} + _, err = Codec.DecodeChangeProof(proofBytes, parsedProof) + require.NoError(t, err) + + verifyPath(t, proof.StartProof, parsedProof.StartProof) + verifyPath(t, proof.EndProof, parsedProof.EndProof) + + for index, kv := range proof.KeyValues { + require.True(t, bytes.Equal(kv.Key, parsedProof.KeyValues[index].Key)) + require.True(t, bytes.Equal(kv.Value, parsedProof.KeyValues[index].Value)) + } +} + +func Test_ChangeProof_Marshal_Errors(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + writeBasicBatch(t, db) + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + batch := db.NewBatch() + require.NoError(t, batch.Put([]byte{5}, []byte{5})) + require.NoError(t, batch.Put([]byte{6}, []byte{6})) + require.NoError(t, batch.Put([]byte{7}, []byte{7})) + require.NoError(t, batch.Put([]byte{8}, []byte{8})) + require.NoError(t, batch.Delete([]byte{0})) + require.NoError(t, batch.Write()) + + batch = db.NewBatch() + require.NoError(t, batch.Put([]byte{9}, []byte{9})) + require.NoError(t, batch.Put([]byte{10}, []byte{10})) + require.NoError(t, batch.Put([]byte{11}, []byte{11})) + require.NoError(t, batch.Put([]byte{12}, []byte{12})) + require.NoError(t, batch.Delete([]byte{1})) + require.NoError(t, batch.Write()) + endroot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + proof, err := 
db.GetChangeProof(context.Background(), startRoot, endroot, nil, nil, 50) + require.NoError(t, err) + require.NotNil(t, proof) + require.True(t, proof.HadRootsInHistory) + require.Len(t, proof.KeyValues, 8) + require.Len(t, proof.DeletedKeys, 2) + + proofBytes, err := Codec.EncodeChangeProof(Version, proof) + require.NoError(t, err) + + for i := 1; i < len(proofBytes); i++ { + broken := proofBytes[:i] + parsedProof := &ChangeProof{} + _, err = Codec.DecodeChangeProof(broken, parsedProof) + require.ErrorIs(t, err, io.ErrUnexpectedEOF) + } +} + +func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + proof, err := db.GetChangeProof(context.Background(), startRoot, ids.Empty, nil, nil, 50) + require.NoError(t, err) + require.NotNil(t, proof) + require.False(t, proof.HadRootsInHistory) + + require.NoError(t, proof.Verify(context.Background(), db, nil, nil, db.getMerkleRoot())) +} + +func Test_ChangeProof_BadBounds(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + require.NoError(t, db.Insert(context.Background(), []byte{0}, []byte{0})) + + endRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + // non-nil start/end + proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key4"), []byte("key3"), 50) + require.ErrorIs(t, err, ErrStartAfterEnd) + require.Nil(t, proof) +} + +func Test_ChangeProof_Verify(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + batch := db.NewBatch() + err = batch.Put([]byte("key20"), []byte("value0")) + require.NoError(t, err) + err = batch.Put([]byte("key21"), []byte("value1")) + require.NoError(t, err) + err = batch.Put([]byte("key22"), []byte("value2")) + require.NoError(t, err) + err = batch.Put([]byte("key23"), 
[]byte("value3")) + require.NoError(t, err) + err = batch.Put([]byte("key24"), []byte("value4")) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + // create a second db that has "synced" to the start root + dbClone, err := getBasicDB() + require.NoError(t, err) + batch = dbClone.NewBatch() + err = batch.Put([]byte("key20"), []byte("value0")) + require.NoError(t, err) + err = batch.Put([]byte("key21"), []byte("value1")) + require.NoError(t, err) + err = batch.Put([]byte("key22"), []byte("value2")) + require.NoError(t, err) + err = batch.Put([]byte("key23"), []byte("value3")) + require.NoError(t, err) + err = batch.Put([]byte("key24"), []byte("value4")) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + + // the second db has started to sync some of the range outside of the range proof + batch = dbClone.NewBatch() + err = batch.Put([]byte("key31"), []byte("value1")) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + + batch = db.NewBatch() + err = batch.Put([]byte("key25"), []byte("value0")) + require.NoError(t, err) + err = batch.Put([]byte("key26"), []byte("value1")) + require.NoError(t, err) + err = batch.Put([]byte("key27"), []byte("value2")) + require.NoError(t, err) + err = batch.Put([]byte("key28"), []byte("value3")) + require.NoError(t, err) + err = batch.Put([]byte("key29"), []byte("value4")) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + + batch = db.NewBatch() + err = batch.Put([]byte("key30"), []byte("value0")) + require.NoError(t, err) + err = batch.Put([]byte("key31"), []byte("value1")) + require.NoError(t, err) + err = batch.Put([]byte("key32"), []byte("value2")) + require.NoError(t, err) + err = batch.Delete([]byte("key21")) + require.NoError(t, err) + err = batch.Delete([]byte("key22")) + require.NoError(t, err) + err = batch.Write() + require.NoError(t, err) + 
+ endRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + // non-nil start/end + proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key21"), []byte("key30"), 50) + require.NoError(t, err) + require.NotNil(t, proof) + + err = proof.Verify(context.Background(), dbClone, []byte("key21"), []byte("key30"), db.getMerkleRoot()) + require.NoError(t, err) + + // low maxLength + proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, nil, nil, 5) + require.NoError(t, err) + require.NotNil(t, proof) + + err = proof.Verify(context.Background(), dbClone, nil, nil, db.getMerkleRoot()) + require.NoError(t, err) + + // nil start/end + proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, nil, nil, 50) + require.NoError(t, err) + require.NotNil(t, proof) + + err = proof.Verify(context.Background(), dbClone, nil, nil, endRoot) + require.NoError(t, err) + + err = dbClone.CommitChangeProof(context.Background(), proof) + require.NoError(t, err) + + newRoot, err := dbClone.GetMerkleRoot(context.Background()) + require.NoError(t, err) + require.Equal(t, endRoot, newRoot) + + proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key20"), []byte("key30"), 50) + require.NoError(t, err) + require.NotNil(t, proof) + + err = proof.Verify(context.Background(), dbClone, []byte("key20"), []byte("key30"), db.getMerkleRoot()) + require.NoError(t, err) +} + +func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { + type test struct { + name string + malform func(proof *ChangeProof) + expectedErr error + } + + tests := []test{ + { + name: "happyPath", + malform: func(proof *ChangeProof) {}, + expectedErr: nil, + }, + { + name: "odd length key path with value", + malform: func(proof *ChangeProof) { + proof.EndProof[1].ValueOrHash = Some([]byte{1, 2}) + }, + expectedErr: ErrOddLengthWithValue, + }, + { + name: "last proof node has missing value", + malform: func(proof *ChangeProof) { 
+ proof.EndProof[len(proof.EndProof)-1].ValueOrHash = Nothing[[]byte]() + }, + expectedErr: ErrProofValueDoesntMatch, + }, + { + name: "missing key/value", + malform: func(proof *ChangeProof) { + proof.KeyValues = proof.KeyValues[1:] + }, + expectedErr: ErrProofValueDoesntMatch, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + + startRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + writeBasicBatch(t, db) + + endRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + // create a second db that will be synced to the first db + dbClone, err := getBasicDB() + require.NoError(t, err) + + proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte{2}, []byte{3, 0}, 50) + require.NoError(t, err) + require.NotNil(t, proof) + + tt.malform(proof) + + err = proof.Verify(context.Background(), dbClone, []byte{2}, []byte{3, 0}, db.getMerkleRoot()) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} + +func Test_ChangeProof_Syntactic_Verify(t *testing.T) { + type test struct { + name string + proof *ChangeProof + start []byte + end []byte + expectedErr error + } + + tests := []test{ + { + name: "start after end", + proof: nil, + start: []byte{1}, + end: []byte{0}, + expectedErr: ErrStartAfterEnd, + }, + { + name: "no roots in history and non-empty key-values", + proof: &ChangeProof{ + HadRootsInHistory: false, + KeyValues: []KeyValue{{Key: []byte{1}, Value: []byte{1}}}, + }, + start: []byte{0}, + end: nil, // Also tests start can be after end if end is nil + expectedErr: ErrDataInMissingRootProof, + }, + { + name: "no roots in history and non-empty deleted keys", + proof: &ChangeProof{ + HadRootsInHistory: false, + DeletedKeys: [][]byte{{1}}, + }, + start: nil, + end: nil, + expectedErr: ErrDataInMissingRootProof, + }, + { + name: "no roots in history and non-empty start proof", + proof: &ChangeProof{ + 
HadRootsInHistory: false, + StartProof: []ProofNode{{}}, + }, + start: nil, + end: nil, + expectedErr: ErrDataInMissingRootProof, + }, + { + name: "no roots in history and non-empty end proof", + proof: &ChangeProof{ + HadRootsInHistory: false, + EndProof: []ProofNode{{}}, + }, + start: nil, + end: nil, + expectedErr: ErrDataInMissingRootProof, + }, + { + name: "no roots in history; empty", + proof: &ChangeProof{ + HadRootsInHistory: false, + }, + start: nil, + end: nil, + expectedErr: nil, + }, + { + name: "root in history; empty", + proof: &ChangeProof{ + HadRootsInHistory: true, + }, + start: nil, + end: nil, + expectedErr: ErrNoMerkleProof, + }, + { + name: "no end proof", + proof: &ChangeProof{ + HadRootsInHistory: true, + StartProof: []ProofNode{{}}, + }, + start: nil, + end: []byte{1}, + expectedErr: ErrNoEndProof, + }, + { + name: "no start proof", + proof: &ChangeProof{ + HadRootsInHistory: true, + DeletedKeys: [][]byte{{1}}, + }, + start: []byte{1}, + end: nil, + expectedErr: ErrNoStartProof, + }, + { + name: "non-increasing key-values", + proof: &ChangeProof{ + HadRootsInHistory: true, + KeyValues: []KeyValue{ + {Key: []byte{1}}, + {Key: []byte{0}}, + }, + }, + start: nil, + end: nil, + expectedErr: ErrNonIncreasingValues, + }, + { + name: "key-value too low", + proof: &ChangeProof{ + HadRootsInHistory: true, + StartProof: []ProofNode{{}}, + KeyValues: []KeyValue{ + {Key: []byte{0}}, + }, + }, + start: []byte{1}, + end: nil, + expectedErr: ErrStateFromOutsideOfRange, + }, + { + name: "key-value too great", + proof: &ChangeProof{ + HadRootsInHistory: true, + EndProof: []ProofNode{{}}, + KeyValues: []KeyValue{ + {Key: []byte{2}}, + }, + }, + start: nil, + end: []byte{1}, + expectedErr: ErrStateFromOutsideOfRange, + }, + { + name: "non-increasing deleted keys", + proof: &ChangeProof{ + HadRootsInHistory: true, + DeletedKeys: [][]byte{ + {1}, + {1}, + }, + }, + start: nil, + end: nil, + expectedErr: ErrNonIncreasingValues, + }, + { + name: "deleted key too 
low", + proof: &ChangeProof{ + HadRootsInHistory: true, + StartProof: []ProofNode{{}}, + DeletedKeys: [][]byte{ + {0}, + }, + }, + start: []byte{1}, + end: nil, + expectedErr: ErrStateFromOutsideOfRange, + }, + { + name: "deleted key too great", + proof: &ChangeProof{ + HadRootsInHistory: true, + EndProof: []ProofNode{{}}, + DeletedKeys: [][]byte{ + {1}, + }, + }, + start: nil, + end: []byte{0}, + expectedErr: ErrStateFromOutsideOfRange, + }, + { + name: "start proof node has wrong prefix", + proof: &ChangeProof{ + HadRootsInHistory: true, + StartProof: []ProofNode{ + {KeyPath: newPath([]byte{2}).Serialize()}, + {KeyPath: newPath([]byte{2, 3}).Serialize()}, + }, + }, + start: []byte{1, 2, 3}, + end: nil, + expectedErr: ErrProofNodeNotForKey, + }, + { + name: "start proof non-increasing", + proof: &ChangeProof{ + HadRootsInHistory: true, + StartProof: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{2, 3}).Serialize()}, + }, + }, + start: []byte{1, 2, 3}, + end: nil, + expectedErr: ErrNonIncreasingProofNodes, + }, + { + name: "end proof node has wrong prefix", + proof: &ChangeProof{ + HadRootsInHistory: true, + KeyValues: []KeyValue{ + {Key: []byte{1, 2}}, // Also tests [end] set to greatest key-value/deleted key + }, + EndProof: []ProofNode{ + {KeyPath: newPath([]byte{2}).Serialize()}, + {KeyPath: newPath([]byte{2, 3}).Serialize()}, + }, + }, + start: nil, + end: nil, + expectedErr: ErrProofNodeNotForKey, + }, + { + name: "end proof non-increasing", + proof: &ChangeProof{ + HadRootsInHistory: true, + DeletedKeys: [][]byte{ + {1, 2, 3}, // Also tests [end] set to greatest key-value/deleted key + }, + EndProof: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{2, 3}).Serialize()}, + }, + }, + start: nil, + end: nil, + expectedErr: ErrNonIncreasingProofNodes, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + db, err := getBasicDB() + require.NoError(t, err) + err = 
tt.proof.Verify(context.Background(), db, tt.start, tt.end, ids.Empty) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} + +func TestVerifyKeyValues(t *testing.T) { + type test struct { + name string + start []byte + end []byte + kvs []KeyValue + expectedErr error + } + + tests := []test{ + { + name: "empty", + start: nil, + end: nil, + kvs: nil, + expectedErr: nil, + }, + { + name: "1 key", + start: nil, + end: nil, + kvs: []KeyValue{ + {Key: []byte{0}}, + }, + expectedErr: nil, + }, + { + name: "non-increasing keys", + start: nil, + end: nil, + kvs: []KeyValue{ + {Key: []byte{0}}, + {Key: []byte{0}}, + }, + expectedErr: ErrNonIncreasingValues, + }, + { + name: "key before start", + start: []byte{1, 2}, + end: nil, + kvs: []KeyValue{ + {Key: []byte{1}}, + {Key: []byte{1, 2}}, + }, + expectedErr: ErrStateFromOutsideOfRange, + }, + { + name: "key after end", + start: nil, + end: []byte{1, 2}, + kvs: []KeyValue{ + {Key: []byte{1}}, + {Key: []byte{1, 2}}, + {Key: []byte{1, 2, 3}}, + }, + expectedErr: ErrStateFromOutsideOfRange, + }, + { + name: "happy path", + start: nil, + end: []byte{1, 2, 3}, + kvs: []KeyValue{ + {Key: []byte{1}}, + {Key: []byte{1, 2}}, + }, + expectedErr: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := verifyKeyValues(tt.kvs, tt.start, tt.end) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} + +func TestVerifyProofPath(t *testing.T) { + type test struct { + name string + path []ProofNode + proofKey []byte + expectedErr error + } + + tests := []test{ + { + name: "empty", + path: nil, + proofKey: nil, + expectedErr: nil, + }, + { + name: "1 element", + path: []ProofNode{{}}, + proofKey: nil, + expectedErr: nil, + }, + { + name: "non-increasing keys", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: newPath([]byte{1, 3}).Serialize()}, + }, + proofKey: []byte{1, 2, 3}, + expectedErr: ErrNonIncreasingProofNodes, + }, + { + 
name: "invalid key", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: newPath([]byte{1, 2, 4}).Serialize()}, + {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + }, + proofKey: []byte{1, 2, 3}, + expectedErr: ErrProofNodeNotForKey, + }, + { + name: "extra node inclusion proof", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + }, + proofKey: []byte{1, 2}, + expectedErr: ErrProofNodeNotForKey, + }, + { + name: "extra node exclusion proof", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 3}).Serialize()}, + {KeyPath: newPath([]byte{1, 3, 4}).Serialize()}, + }, + proofKey: []byte{1, 2}, + expectedErr: ErrProofNodeNotForKey, + }, + { + name: "happy path exclusion proof", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: newPath([]byte{1, 2, 4}).Serialize()}, + }, + proofKey: []byte{1, 2, 3}, + expectedErr: nil, + }, + { + name: "happy path inclusion proof", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + }, + proofKey: []byte{1, 2, 3}, + expectedErr: nil, + }, + { + name: "repeat nodes", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + }, + proofKey: []byte{1, 2, 3}, + expectedErr: ErrNonIncreasingProofNodes, + }, + { + name: "repeat nodes 2", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + }, + proofKey: []byte{1, 
2, 3}, + expectedErr: ErrNonIncreasingProofNodes, + }, + { + name: "repeat nodes 3", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + {KeyPath: newPath([]byte{1, 2, 3}).Serialize()}, + }, + proofKey: []byte{1, 2, 3}, + expectedErr: ErrProofNodeNotForKey, + }, + { + name: "oddLength key with value", + path: []ProofNode{ + {KeyPath: newPath([]byte{1}).Serialize()}, + {KeyPath: newPath([]byte{1, 2}).Serialize()}, + {KeyPath: SerializedPath{Value: []byte{1, 2, 240}, NibbleLength: 5}, ValueOrHash: Some([]byte{1})}, + }, + proofKey: []byte{1, 2, 3}, + expectedErr: ErrOddLengthWithValue, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := verifyProofPath(tt.path, newPath(tt.proofKey)) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} diff --git a/avalanchego/x/merkledb/trie.go b/avalanchego/x/merkledb/trie.go new file mode 100644 index 00000000..47f0860b --- /dev/null +++ b/avalanchego/x/merkledb/trie.go @@ -0,0 +1,84 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "context" + "errors" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +var errNoNewRoot = errors.New("there was no updated root in change list") + +type ReadOnlyTrie interface { + // GetValue gets the value associated with the specified key + // database.ErrNotFound if the key is not present + GetValue(ctx context.Context, key []byte) ([]byte, error) + + // GetValues gets the values associated with the specified keys + // database.ErrNotFound if the key is not present + GetValues(ctx context.Context, keys [][]byte) ([][]byte, []error) + + // get the value associated with the key in path form + // database.ErrNotFound if the key is not present + getValue(key path, lock bool) ([]byte, error) + + // GetMerkleRoot returns the merkle root of the Trie + GetMerkleRoot(ctx context.Context) (ids.ID, error) + + // get an editable copy of the node with the given key path + getEditableNode(key path) (*node, error) + + // GetProof generates a proof of the value associated with a particular key, or a proof of its absence from the trie + GetProof(ctx context.Context, bytesPath []byte) (*Proof, error) + + // GetRangeProof generates a proof of up to maxLength smallest key/values with keys between start and end + GetRangeProof(ctx context.Context, start, end []byte, maxLength int) (*RangeProof, error) + + getKeyValues( + start []byte, + end []byte, + maxLength int, + keysToIgnore set.Set[string], + lock bool, + ) ([]KeyValue, error) +} + +type Trie interface { + ReadOnlyTrie + + // Remove will delete a key from the Trie + Remove(ctx context.Context, key []byte) error + + // NewPreallocatedView returns a new view on top of this Trie with space allocated for changes + NewPreallocatedView(estimatedChanges int) (TrieView, error) + + // NewView returns a new view on top of this Trie + NewView() (TrieView, error) + + // Insert a key/value pair into the Trie + Insert(ctx context.Context, key, value []byte) error 
+} + +type TrieView interface { + Trie + + // CommitToDB takes the changes of this trie and commits them down the view stack + // until all changes in the stack commit to the database + // Takes the DB commit lock + CommitToDB(ctx context.Context) error + + // CommitToParent takes changes of this TrieView and commits them to its parent Trie + // Takes the DB commit lock + CommitToParent(ctx context.Context) error + + // commits changes in the trie to its parent + // then commits the combined changes down the stack until all changes in the stack commit to the database + commitToDB(ctx context.Context) error + + // commits changes in the trieToCommit into the current trie + commitChanges(ctx context.Context, trieToCommit *trieView) error +} diff --git a/avalanchego/x/merkledb/trie_test.go b/avalanchego/x/merkledb/trie_test.go new file mode 100644 index 00000000..530a2db6 --- /dev/null +++ b/avalanchego/x/merkledb/trie_test.go @@ -0,0 +1,1465 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package merkledb + +import ( + "context" + "math/rand" + "strconv" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/hashing" +) + +func getNodeValue(t ReadOnlyTrie, key string) ([]byte, error) { + if asTrieView, ok := t.(*trieView); ok { + if err := asTrieView.calculateNodeIDs(context.Background()); err != nil { + return nil, err + } + path := newPath([]byte(key)) + nodePath, err := asTrieView.getPathTo(path) + if err != nil { + return nil, err + } + closestNode := nodePath[len(nodePath)-1] + if closestNode.key.Compare(path) != 0 || closestNode == nil { + return nil, database.ErrNotFound + } + + return closestNode.value.value, nil + } + if asDatabases, ok := t.(*Database); ok { + view, err := asDatabases.NewView() + if err != nil { + return nil, err + } + path := newPath([]byte(key)) + nodePath, err := view.(*trieView).getPathTo(path) + if err != nil { + return nil, err + } + closestNode := nodePath[len(nodePath)-1] + if closestNode.key.Compare(path) != 0 || closestNode == nil { + return nil, database.ErrNotFound + } + + return closestNode.value.value, nil + } + return nil, nil +} + +func Test_GetValue_Safety(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + trieView, err := db.NewView() + require.NoError(err) + + require.NoError(trieView.Insert(context.Background(), []byte{0}, []byte{0})) + trieVal, err := trieView.GetValue(context.Background(), []byte{0}) + require.NoError(err) + require.Equal([]byte{0}, trieVal) + trieVal[0] = 1 + + // should still be []byte{0} after edit + trieVal, err = trieView.GetValue(context.Background(), []byte{0}) + require.NoError(err) + require.Equal([]byte{0}, trieVal) +} + +func Test_GetValues_Safety(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + 
require.NoError(err) + + trieView, err := db.NewView() + require.NoError(err) + + require.NoError(trieView.Insert(context.Background(), []byte{0}, []byte{0})) + trieVals, errs := trieView.GetValues(context.Background(), [][]byte{{0}}) + require.Len(errs, 1) + require.NoError(errs[0]) + require.Equal([]byte{0}, trieVals[0]) + trieVals[0][0] = 1 + require.Equal([]byte{1}, trieVals[0]) + + // should still be []byte{0} after edit + trieVals, errs = trieView.GetValues(context.Background(), [][]byte{{0}}) + require.Len(errs, 1) + require.NoError(errs[0]) + require.Equal([]byte{0}, trieVals[0]) +} + +func TestTrieViewGetPathTo(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + trieIntf, err := db.NewView() + require.NoError(err) + trie, ok := trieIntf.(*trieView) + require.True(ok) + + path, err := trie.getPathTo(newPath(nil)) + require.NoError(err) + + // Just the root + require.Len(path, 1) + require.Equal(trie.root, path[0]) + + // Insert a key + key1 := []byte{0} + err = trie.Insert(context.Background(), key1, []byte("value")) + require.NoError(err) + err = trie.calculateNodeIDs(context.Background()) + require.NoError(err) + + path, err = trie.getPathTo(newPath(key1)) + require.NoError(err) + + // Root and 1 value + require.Len(path, 2) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key1), path[1].key) + + // Insert another key which is a child of the first + key2 := []byte{0, 1} + err = trie.Insert(context.Background(), key2, []byte("value")) + require.NoError(err) + err = trie.calculateNodeIDs(context.Background()) + require.NoError(err) + + path, err = trie.getPathTo(newPath(key2)) + require.NoError(err) + require.Len(path, 3) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key1), path[1].key) + require.Equal(newPath(key2), path[2].key) + + // Insert a key which shares no prefix with the others + key3 := []byte{255} + err = trie.Insert(context.Background(), key3, []byte("value")) + 
require.NoError(err) + err = trie.calculateNodeIDs(context.Background()) + require.NoError(err) + + path, err = trie.getPathTo(newPath(key3)) + require.NoError(err) + require.Len(path, 2) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key3), path[1].key) + + // Other key paths not affected + path, err = trie.getPathTo(newPath(key2)) + require.NoError(err) + require.Len(path, 3) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key1), path[1].key) + require.Equal(newPath(key2), path[2].key) + + // Gets closest node when key doesn't exist + key4 := []byte{0, 1, 2} + path, err = trie.getPathTo(newPath(key4)) + require.NoError(err) + require.Len(path, 3) + require.Equal(trie.root, path[0]) + require.Equal(newPath(key1), path[1].key) + require.Equal(newPath(key2), path[2].key) + + // Gets just root when key doesn't exist and no key shares a prefix + key5 := []byte{128} + path, err = trie.getPathTo(newPath(key5)) + require.NoError(err) + require.Len(path, 1) + require.Equal(trie.root, path[0]) +} + +func Test_Trie_ViewOnCommitedView(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + + committedTrie, err := dbTrie.NewView() + require.NoError(t, err) + err = committedTrie.Insert(context.Background(), []byte{0}, []byte{0}) + require.NoError(t, err) + + require.NoError(t, committedTrie.CommitToDB(context.Background())) + + newView, err := committedTrie.NewView() + require.NoError(t, err) + + err = newView.Insert(context.Background(), []byte{1}, []byte{1}) + require.NoError(t, err) + require.NoError(t, newView.CommitToDB(context.Background())) + + val0, err := dbTrie.GetValue(context.Background(), []byte{0}) + require.NoError(t, err) + require.Equal(t, []byte{0}, val0) + val1, err := dbTrie.GetValue(context.Background(), []byte{1}) + require.NoError(t, err) + require.Equal(t, []byte{1}, val1) +} + +func Test_Trie_Partial_Commit_Leaves_Valid_Tries(t *testing.T) { + dbTrie, err := getBasicDB() + 
require.NoError(t, err) + require.NotNil(t, dbTrie) + + trie2, err := dbTrie.NewView() + require.NoError(t, err) + err = trie2.Insert(context.Background(), []byte("key"), []byte("value")) + require.NoError(t, err) + + trie3, err := trie2.NewView() + require.NoError(t, err) + err = trie3.Insert(context.Background(), []byte("key1"), []byte("value1")) + require.NoError(t, err) + + trie4, err := trie3.NewView() + require.NoError(t, err) + err = trie4.Insert(context.Background(), []byte("key2"), []byte("value2")) + require.NoError(t, err) + + trie5, err := trie4.NewView() + require.NoError(t, err) + err = trie5.Insert(context.Background(), []byte("key3"), []byte("value3")) + require.NoError(t, err) + + err = trie3.CommitToDB(context.Background()) + require.NoError(t, err) + + root, err := trie3.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + dbRoot, err := dbTrie.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + require.Equal(t, root, dbRoot) +} + +func Test_Trie_WriteToDB(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie, err := dbTrie.NewView() + require.NoError(t, err) + + // value hasn't been inserted so shouldn't exist + value, err := trie.GetValue(context.Background(), []byte("key")) + require.Error(t, err) + require.Equal(t, database.ErrNotFound, err) + require.Nil(t, value) + + err = trie.Insert(context.Background(), []byte("key"), []byte("value")) + require.NoError(t, err) + + value, err = getNodeValue(trie, "key") + require.NoError(t, err) + require.Equal(t, []byte("value"), value) + + err = trie.CommitToDB(context.Background()) + require.NoError(t, err) + p := newPath([]byte("key")) + rawBytes, err := dbTrie.nodeDB.Get(p.Bytes()) + require.NoError(t, err) + node, err := parseNode(p, rawBytes) + require.NoError(t, err) + require.Equal(t, []byte("value"), node.value.value) +} + +func Test_Trie_InsertAndRetrieve(t *testing.T) { + dbTrie, err := getBasicDB() + 
require.NoError(t, err) + require.NotNil(t, dbTrie) + trie := Trie(dbTrie) + + // value hasn't been inserted so shouldn't exist + value, err := dbTrie.Get([]byte("key")) + require.Error(t, err) + require.Equal(t, database.ErrNotFound, err) + require.Nil(t, value) + + err = trie.Insert(context.Background(), []byte("key"), []byte("value")) + require.NoError(t, err) + + value, err = getNodeValue(trie, "key") + require.NoError(t, err) + require.Equal(t, []byte("value"), value) +} + +func Test_Trie_Overwrite(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie := Trie(dbTrie) + + err = trie.Insert(context.Background(), []byte("key"), []byte("value0")) + require.NoError(t, err) + + value, err := getNodeValue(trie, "key") + require.NoError(t, err) + require.Equal(t, []byte("value0"), value) + + err = trie.Insert(context.Background(), []byte("key"), []byte("value1")) + require.NoError(t, err) + + value, err = getNodeValue(trie, "key") + require.NoError(t, err) + require.Equal(t, []byte("value1"), value) +} + +func Test_Trie_Delete(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie := Trie(dbTrie) + + err = trie.Insert(context.Background(), []byte("key"), []byte("value0")) + require.NoError(t, err) + + value, err := getNodeValue(trie, "key") + require.NoError(t, err) + require.Equal(t, []byte("value0"), value) + + err = trie.Remove(context.Background(), []byte("key")) + require.NoError(t, err) + + value, err = getNodeValue(trie, "key") + require.ErrorIs(t, err, database.ErrNotFound) + require.Nil(t, value) +} + +func Test_Trie_DeleteMissingKey(t *testing.T) { + trie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, trie) + + err = trie.Remove(context.Background(), []byte("key")) + require.NoError(t, err) +} + +func Test_Trie_ExpandOnKeyPath(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie := 
Trie(dbTrie) + + err = trie.Insert(context.Background(), []byte("key"), []byte("value0")) + require.NoError(t, err) + + value, err := getNodeValue(trie, "key") + require.NoError(t, err) + require.Equal(t, []byte("value0"), value) + + err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) + require.NoError(t, err) + + value, err = getNodeValue(trie, "key") + require.NoError(t, err) + require.Equal(t, []byte("value0"), value) + + value, err = getNodeValue(trie, "key1") + require.NoError(t, err) + require.Equal(t, []byte("value1"), value) + + err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) + require.NoError(t, err) + + value, err = getNodeValue(trie, "key") + require.NoError(t, err) + require.Equal(t, []byte("value0"), value) + + value, err = getNodeValue(trie, "key1") + require.NoError(t, err) + require.Equal(t, []byte("value1"), value) + + value, err = getNodeValue(trie, "key12") + require.NoError(t, err) + require.Equal(t, []byte("value12"), value) +} + +func Test_Trie_CompressedPaths(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie := Trie(dbTrie) + + err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) + require.NoError(t, err) + + value, err := getNodeValue(trie, "key12") + require.NoError(t, err) + require.Equal(t, []byte("value12"), value) + + err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) + require.NoError(t, err) + + value, err = getNodeValue(trie, "key12") + require.NoError(t, err) + require.Equal(t, []byte("value12"), value) + + value, err = getNodeValue(trie, "key1") + require.NoError(t, err) + require.Equal(t, []byte("value1"), value) + + err = trie.Insert(context.Background(), []byte("key"), []byte("value")) + require.NoError(t, err) + + value, err = getNodeValue(trie, "key12") + require.NoError(t, err) + require.Equal(t, []byte("value12"), value) + + value, err = getNodeValue(trie, "key1") + 
require.NoError(t, err) + require.Equal(t, []byte("value1"), value) + + value, err = getNodeValue(trie, "key") + require.NoError(t, err) + require.Equal(t, []byte("value"), value) +} + +func Test_Trie_SplitBranch(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie := Trie(dbTrie) + + // force a new node to generate with common prefix "key1" and have these two nodes as children + err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key134"), []byte("value134")) + require.NoError(t, err) + + value, err := getNodeValue(trie, "key12") + require.NoError(t, err) + require.Equal(t, []byte("value12"), value) + + value, err = getNodeValue(trie, "key134") + require.NoError(t, err) + require.Equal(t, []byte("value134"), value) +} + +func Test_Trie_HashCountOnBranch(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie := Trie(dbTrie) + + // force a new node to generate with common prefix "key1" and have these two nodes as children + err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) + require.NoError(t, err) + oldCount := dbTrie.metrics.(*mockMetrics).hashCount + err = trie.Insert(context.Background(), []byte("key134"), []byte("value134")) + require.NoError(t, err) + // only hashes the new branch node, the new child node, and root + // shouldn't hash the existing node + require.Equal(t, oldCount+3, dbTrie.metrics.(*mockMetrics).hashCount) +} + +func Test_Trie_HashCountOnDelete(t *testing.T) { + trie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, trie) + + err = trie.Insert(context.Background(), []byte("k"), []byte("value0")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("ke"), []byte("value1")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key"), []byte("value2")) + 
require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key1"), []byte("value3")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key2"), []byte("value4")) + require.NoError(t, err) + + oldCount := trie.metrics.(*mockMetrics).hashCount + + // delete the middle values + view, err := trie.NewView() + require.NoError(t, err) + err = view.Remove(context.Background(), []byte("k")) + require.NoError(t, err) + err = view.Remove(context.Background(), []byte("ke")) + require.NoError(t, err) + err = view.Remove(context.Background(), []byte("key")) + require.NoError(t, err) + err = view.CommitToDB(context.Background()) + require.NoError(t, err) + + // the root is the only updated node so only one new hash + require.Equal(t, oldCount+1, trie.metrics.(*mockMetrics).hashCount) +} + +func Test_Trie_NoExistingResidual(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie := Trie(dbTrie) + + err = trie.Insert(context.Background(), []byte("k"), []byte("1")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("ke"), []byte("2")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key1"), []byte("3")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key123"), []byte("4")) + require.NoError(t, err) + + value, err := getNodeValue(trie, "k") + require.NoError(t, err) + require.Equal(t, []byte("1"), value) + + value, err = getNodeValue(trie, "ke") + require.NoError(t, err) + require.Equal(t, []byte("2"), value) + + value, err = getNodeValue(trie, "key1") + require.NoError(t, err) + require.Equal(t, []byte("3"), value) + + value, err = getNodeValue(trie, "key123") + require.NoError(t, err) + require.Equal(t, []byte("4"), value) +} + +func Test_Trie_CommitChanges(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + view1Intf, err := db.NewView() + require.NoError(err) + 
view1, ok := view1Intf.(*trieView) + require.True(ok) + + err = view1.Insert(context.Background(), []byte{1}, []byte{1}) + require.NoError(err) + + // view1 + // | + // db + + // Case: Committing to an invalid view + view1.invalidated = true + err = view1.commitChanges(context.Background(), &trieView{}) + require.ErrorIs(err, ErrInvalid) + view1.invalidated = false // Reset + + // Case: Committing a nil view is a no-op + oldRoot, err := view1.getMerkleRoot(context.Background()) + require.NoError(err) + err = view1.commitChanges(context.Background(), nil) + require.NoError(err) + newRoot, err := view1.getMerkleRoot(context.Background()) + require.NoError(err) + require.Equal(oldRoot, newRoot) + + // Case: Committing a view with the wrong parent. + err = view1.commitChanges(context.Background(), &trieView{}) + require.ErrorIs(err, ErrViewIsNotAChild) + + // Case: Committing a view which is invalid + err = view1.commitChanges(context.Background(), &trieView{ + parentTrie: view1, + invalidated: true, + }) + require.ErrorIs(err, ErrInvalid) + + // Make more views atop the existing one + view2Intf, err := view1.NewView() + require.NoError(err) + view2, ok := view2Intf.(*trieView) + require.True(ok) + + err = view2.Insert(context.Background(), []byte{2}, []byte{2}) + require.NoError(err) + err = view2.Remove(context.Background(), []byte{1}) + require.NoError(err) + + view2Root, err := view2.getMerkleRoot(context.Background()) + require.NoError(err) + + // view1 has 1 --> 1 + // view2 has 2 --> 2 + + view3Intf, err := view1.NewView() + require.NoError(err) + view3, ok := view3Intf.(*trieView) + require.True(ok) + + view4Intf, err := view2.NewView() + require.NoError(err) + view4, ok := view4Intf.(*trieView) + require.True(ok) + + // view4 + // | + // view2 view3 + // | / + // view1 + // | + // db + + // Commit view2 to view1 + err = view1.commitChanges(context.Background(), view2) + require.NoError(err) + + // All siblings of view2 should be invalidated + 
require.True(view3.invalidated) + + // Children of view2 are now children of view1 + require.Equal(view1, view4.parentTrie) + require.Contains(view1.childViews, view4) + + // Value changes from view2 are reflected in view1 + newView1Root, err := view1.getMerkleRoot(context.Background()) + require.NoError(err) + require.Equal(view2Root, newView1Root) + _, err = view1.GetValue(context.Background(), []byte{1}) + require.ErrorIs(err, database.ErrNotFound) + got, err := view1.GetValue(context.Background(), []byte{2}) + require.NoError(err) + require.Equal([]byte{2}, got) +} + +func Test_Trie_BatchApply(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie, err := dbTrie.NewView() + require.NoError(t, err) + + err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key134"), []byte("value134")) + require.NoError(t, err) + err = trie.Remove(context.Background(), []byte("key1")) + require.NoError(t, err) + + value, err := getNodeValue(trie, "key12") + require.NoError(t, err) + require.Equal(t, []byte("value12"), value) + + value, err = getNodeValue(trie, "key134") + require.NoError(t, err) + require.Equal(t, []byte("value134"), value) + + _, err = getNodeValue(trie, "key1") + require.Error(t, err) + require.Equal(t, database.ErrNotFound, err) +} + +func Test_Trie_ChainDeletion(t *testing.T) { + trie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, trie) + newTrie, err := trie.NewView() + require.NoError(t, err) + + err = newTrie.Insert(context.Background(), []byte("k"), []byte("value0")) + require.NoError(t, err) + err = newTrie.Insert(context.Background(), []byte("ke"), []byte("value1")) + require.NoError(t, err) + err = newTrie.Insert(context.Background(), []byte("key"), []byte("value2")) + 
require.NoError(t, err) + err = newTrie.Insert(context.Background(), []byte("key1"), []byte("value3")) + require.NoError(t, err) + err = newTrie.(*trieView).calculateNodeIDs(context.Background()) + require.NoError(t, err) + root, err := newTrie.getEditableNode(EmptyPath) + require.NoError(t, err) + require.Equal(t, 1, len(root.children)) + + err = newTrie.Remove(context.Background(), []byte("k")) + require.NoError(t, err) + err = newTrie.Remove(context.Background(), []byte("ke")) + require.NoError(t, err) + err = newTrie.Remove(context.Background(), []byte("key")) + require.NoError(t, err) + err = newTrie.Remove(context.Background(), []byte("key1")) + require.NoError(t, err) + err = newTrie.(*trieView).calculateNodeIDs(context.Background()) + require.NoError(t, err) + root, err = newTrie.getEditableNode(EmptyPath) + require.NoError(t, err) + // since all values have been deleted, the nodes should have been cleaned up + require.Equal(t, 0, len(root.children)) +} + +func Test_Trie_Invalidate_Children_On_Edits(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + + trie, err := dbTrie.NewView() + require.NoError(t, err) + + childTrie1, err := trie.NewView() + require.NoError(t, err) + childTrie2, err := trie.NewView() + require.NoError(t, err) + childTrie3, err := trie.NewView() + require.NoError(t, err) + + require.False(t, childTrie1.(*trieView).isInvalid()) + require.False(t, childTrie2.(*trieView).isInvalid()) + require.False(t, childTrie3.(*trieView).isInvalid()) + + err = trie.Insert(context.Background(), []byte{0}, []byte{0}) + require.NoError(t, err) + + require.True(t, childTrie1.(*trieView).isInvalid()) + require.True(t, childTrie2.(*trieView).isInvalid()) + require.True(t, childTrie3.(*trieView).isInvalid()) +} + +func Test_Trie_Invalidate_Siblings_On_Commit(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + + baseView, err := dbTrie.NewView() + 
require.NoError(t, err) + + viewToCommit, err := baseView.NewView() + require.NoError(t, err) + + sibling1, err := baseView.NewView() + require.NoError(t, err) + sibling2, err := baseView.NewView() + require.NoError(t, err) + + require.False(t, sibling1.(*trieView).isInvalid()) + require.False(t, sibling2.(*trieView).isInvalid()) + + require.NoError(t, viewToCommit.Insert(context.Background(), []byte{0}, []byte{0})) + require.NoError(t, viewToCommit.CommitToDB(context.Background())) + + require.True(t, sibling1.(*trieView).isInvalid()) + require.True(t, sibling2.(*trieView).isInvalid()) + require.False(t, viewToCommit.(*trieView).isInvalid()) +} + +func Test_Trie_NodeCollapse(t *testing.T) { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + trie, err := dbTrie.NewView() + require.NoError(t, err) + + err = trie.Insert(context.Background(), []byte("k"), []byte("value0")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("ke"), []byte("value1")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key"), []byte("value2")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key1"), []byte("value3")) + require.NoError(t, err) + err = trie.Insert(context.Background(), []byte("key2"), []byte("value4")) + require.NoError(t, err) + + err = trie.(*trieView).calculateNodeIDs(context.Background()) + require.NoError(t, err) + root, err := trie.getEditableNode(EmptyPath) + require.NoError(t, err) + require.Equal(t, 1, len(root.children)) + + root, err = trie.getEditableNode(EmptyPath) + require.NoError(t, err) + require.Equal(t, 1, len(root.children)) + + firstNode, err := trie.getEditableNode(root.getSingleChildPath()) + require.NoError(t, err) + require.Equal(t, 1, len(firstNode.children)) + + // delete the middle values + err = trie.Remove(context.Background(), []byte("k")) + require.NoError(t, err) + err = trie.Remove(context.Background(), []byte("ke")) + 
require.NoError(t, err) + err = trie.Remove(context.Background(), []byte("key")) + require.NoError(t, err) + + err = trie.(*trieView).calculateNodeIDs(context.Background()) + require.NoError(t, err) + + root, err = trie.getEditableNode(EmptyPath) + require.NoError(t, err) + require.Equal(t, 1, len(root.children)) + + firstNode, err = trie.getEditableNode(root.getSingleChildPath()) + require.NoError(t, err) + require.Equal(t, 2, len(firstNode.children)) +} + +func Test_Trie_MultipleStates(t *testing.T) { + randCount := int64(0) + for _, commitApproach := range []string{"never", "before", "after"} { + t.Run(commitApproach, func(t *testing.T) { + r := rand.New(rand.NewSource(randCount)) // #nosec G404 + randCount++ + rdb := memdb.New() + defer rdb.Close() + db, err := New( + context.Background(), + rdb, + Config{ + Tracer: newNoopTracer(), + HistoryLength: 100, + NodeCacheSize: 100, + }, + ) + require.NoError(t, err) + defer db.Close() + + initialSet := 1000 + // Populate initial set of keys + root, err := db.NewView() + require.NoError(t, err) + kv := [][]byte{} + for i := 0; i < initialSet; i++ { + k := []byte(strconv.Itoa(i)) + kv = append(kv, k) + require.NoError(t, root.Insert(context.Background(), k, hashing.ComputeHash256(k))) + } + + // Get initial root + _, err = root.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + if commitApproach == "before" { + require.NoError(t, root.CommitToDB(context.Background())) + } + + // Populate additional states + concurrentStates := []Trie{} + for i := 0; i < 5; i++ { + newState, err := root.NewView() + require.NoError(t, err) + concurrentStates = append(concurrentStates, newState) + } + + if commitApproach == "after" { + require.NoError(t, root.CommitToDB(context.Background())) + } + + // Process ops + newStart := initialSet + for i := 0; i < 100; i++ { + if r.Intn(100) < 20 { + // New Key + for _, state := range concurrentStates { + k := []byte(strconv.Itoa(newStart)) + require.NoError(t, 
state.Insert(context.Background(), k, hashing.ComputeHash256(k))) + } + newStart++ + } else { + // Fetch and update old + selectedKey := kv[r.Intn(len(kv))] + var pastV []byte + for _, state := range concurrentStates { + v, err := state.GetValue(context.Background(), selectedKey) + require.NoError(t, err) + if pastV == nil { + pastV = v + } else { + require.Equal(t, pastV, v, "lookup mismatch") + } + require.NoError(t, state.Insert(context.Background(), selectedKey, hashing.ComputeHash256(v))) + } + } + } + + // Generate roots + var pastRoot ids.ID + for _, state := range concurrentStates { + mroot, err := state.GetMerkleRoot(context.Background()) + require.NoError(t, err) + if pastRoot == ids.Empty { + pastRoot = mroot + } else { + require.Equal(t, pastRoot, mroot, "root mismatch") + } + } + }) + } +} + +func TestNewViewOnCommittedView(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + // Create a view + view1Intf, err := db.NewView() + require.NoError(err) + view1, ok := view1Intf.(*trieView) + require.True(ok) + + // view1 + // | + // db + + require.Len(db.childViews, 1) + require.Contains(db.childViews, view1) + require.Equal(db, view1.parentTrie) + + err = view1.Insert(context.Background(), []byte{1}, []byte{1}) + require.NoError(err) + + // Commit the view + err = view1.CommitToDB(context.Background()) + require.NoError(err) + + // view1 (committed) + // | + // db + + require.Len(db.childViews, 1) + require.Contains(db.childViews, view1) + require.Equal(db, view1.parentTrie) + + // Create a new view on the committed view + view2Intf, err := view1.NewView() + require.NoError(err) + view2, ok := view2Intf.(*trieView) + require.True(ok) + + // view2 + // | + // view1 (committed) + // | + // db + + require.Equal(db, view2.parentTrie) + require.Contains(db.childViews, view1) + require.Contains(db.childViews, view2) + require.Len(db.childViews, 2) + + // Make sure the new view has the right value + got, err := 
view2.GetValue(context.Background(), []byte{1}) + require.NoError(err) + require.Equal([]byte{1}, got) + + // Make another view + view3Intf, err := view2.NewView() + require.NoError(err) + view3, ok := view3Intf.(*trieView) + require.True(ok) + + // view3 + // | + // view2 + // | + // view1 (committed) + // | + // db + + require.Equal(view2, view3.parentTrie) + require.Contains(view2.childViews, view3) + require.Len(view2.childViews, 1) + require.Contains(db.childViews, view1) + require.Contains(db.childViews, view2) + require.Len(db.childViews, 2) + + // Commit view2 + err = view2.CommitToDB(context.Background()) + require.NoError(err) + + // view3 + // | + // view2 (committed) + // | + // view1 (committed) + // | + // db + + // Note that view2 being committed invalidates view1 + require.True(view1.invalidated) + require.Contains(db.childViews, view2) + require.Contains(db.childViews, view3) + require.Len(db.childViews, 2) + require.Equal(db, view3.parentTrie) + + // Commit view3 + err = view3.CommitToDB(context.Background()) + require.NoError(err) + + // view3 being committed invalidates view2 + require.True(view2.invalidated) + require.Contains(db.childViews, view3) + require.Len(db.childViews, 1) + require.Equal(db, view3.parentTrie) +} + +func Test_TrieView_NewView(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + // Create a view + view1Intf, err := db.NewView() + require.NoError(err) + view1, ok := view1Intf.(*trieView) + require.True(ok) + + // Create a view atop view1 + view2Intf, err := view1.NewView() + require.NoError(err) + view2, ok := view2Intf.(*trieView) + require.True(ok) + + // view2 + // | + // view1 + // | + // db + + // Assert view2's parent is view1 + require.Equal(view1, view2.parentTrie) + require.Contains(view1.childViews, view2) + require.Len(view1.childViews, 1) + + // Commit view1 + err = view1.CommitToDB(context.Background()) + require.NoError(err) + + // Make another view atop view1 + 
view3Intf, err := view1.NewView() + require.NoError(err) + view3, ok := view3Intf.(*trieView) + require.True(ok) + + // view3 + // | + // view2 + // | + // view1 + // | + // db + + // Assert view3's parent is db + require.Equal(db, view3.parentTrie) + require.Contains(db.childViews, view3) + require.NotContains(view1.childViews, view3) + + // Assert that NewPreallocatedView on an invalid view fails + invalidView := &trieView{invalidated: true} + _, err = invalidView.NewView() + require.ErrorIs(err, ErrInvalid) +} + +func TestTrieViewInvalidate(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + // Create a view + view1Intf, err := db.NewView() + require.NoError(err) + view1, ok := view1Intf.(*trieView) + require.True(ok) + + // Create 2 views atop view1 + view2Intf, err := view1.NewView() + require.NoError(err) + view2, ok := view2Intf.(*trieView) + require.True(ok) + + view3Intf, err := view1.NewView() + require.NoError(err) + view3, ok := view3Intf.(*trieView) + require.True(ok) + + // view2 view3 + // | / + // view1 + // | + // db + + // Invalidate view1 + view1.invalidate() + + require.Empty(view1.childViews) + require.True(view1.invalidated) + require.True(view2.invalidated) + require.True(view3.invalidated) +} + +func TestTrieViewMoveChildViewsToView(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + // Create a view + view1Intf, err := db.NewView() + require.NoError(err) + view1, ok := view1Intf.(*trieView) + require.True(ok) + + // Create a view atop view1 + view2Intf, err := view1.NewView() + require.NoError(err) + view2, ok := view2Intf.(*trieView) + require.True(ok) + + // Create a view atop view2 + view3Intf, err := view1.NewView() + require.NoError(err) + view3, ok := view3Intf.(*trieView) + require.True(ok) + + // view3 + // | + // view2 + // | + // view1 + // | + // db + + view1.moveChildViewsToView(view2) + + require.Equal(view1, view3.parentTrie) + 
require.Contains(view1.childViews, view3) + require.Contains(view1.childViews, view2) + require.Len(view1.childViews, 2) +} + +func TestTrieViewInvalidChildrenExcept(t *testing.T) { + require := require.New(t) + + db, err := getBasicDB() + require.NoError(err) + + // Create a view + view1Intf, err := db.NewView() + require.NoError(err) + view1, ok := view1Intf.(*trieView) + require.True(ok) + + // Create 2 views atop view1 + view2Intf, err := view1.NewView() + require.NoError(err) + view2, ok := view2Intf.(*trieView) + require.True(ok) + + view3Intf, err := view1.NewView() + require.NoError(err) + view3, ok := view3Intf.(*trieView) + require.True(ok) + + view1.invalidateChildrenExcept(view2) + + require.False(view2.invalidated) + require.True(view3.invalidated) + require.Contains(view1.childViews, view2) + require.Len(view1.childViews, 1) + + view1.invalidateChildrenExcept(nil) + require.True(view2.invalidated) + require.True(view3.invalidated) + require.Empty(view1.childViews) +} + +func Test_Trie_CommitToParentView_Concurrent(t *testing.T) { + for i := 0; i < 5000; i++ { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + + baseView, err := dbTrie.NewView() + require.NoError(t, err) + + parentView, err := baseView.NewView() + require.NoError(t, err) + err = parentView.Insert(context.Background(), []byte{0}, []byte{0}) + require.NoError(t, err) + + childView1, err := parentView.NewView() + require.NoError(t, err) + err = childView1.Insert(context.Background(), []byte{1}, []byte{1}) + require.NoError(t, err) + + childView2, err := childView1.NewView() + require.NoError(t, err) + err = childView2.Insert(context.Background(), []byte{2}, []byte{2}) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(3) + go func() { + defer wg.Done() + require.NoError(t, parentView.CommitToParent(context.Background())) + }() + go func() { + defer wg.Done() + require.NoError(t, childView1.CommitToParent(context.Background())) + }() + go func() 
{ + defer wg.Done() + require.NoError(t, childView2.CommitToParent(context.Background())) + }() + + wg.Wait() + + val0, err := baseView.GetValue(context.Background(), []byte{0}) + require.NoError(t, err) + require.Equal(t, []byte{0}, val0) + + val1, err := baseView.GetValue(context.Background(), []byte{1}) + require.NoError(t, err) + require.Equal(t, []byte{1}, val1) + + val2, err := baseView.GetValue(context.Background(), []byte{2}) + require.NoError(t, err) + require.Equal(t, []byte{2}, val2) + } +} + +func Test_Trie_CommitToParentDB_Concurrent(t *testing.T) { + for i := 0; i < 5000; i++ { + dbTrie, err := getBasicDB() + require.NoError(t, err) + require.NotNil(t, dbTrie) + + parentView, err := dbTrie.NewView() + require.NoError(t, err) + err = parentView.Insert(context.Background(), []byte{0}, []byte{0}) + require.NoError(t, err) + + childView1, err := parentView.NewView() + require.NoError(t, err) + err = childView1.Insert(context.Background(), []byte{1}, []byte{1}) + require.NoError(t, err) + + childView2, err := childView1.NewView() + require.NoError(t, err) + err = childView2.Insert(context.Background(), []byte{2}, []byte{2}) + require.NoError(t, err) + + var wg sync.WaitGroup + wg.Add(3) + go func() { + defer wg.Done() + require.NoError(t, parentView.CommitToParent(context.Background())) + }() + go func() { + defer wg.Done() + require.NoError(t, childView1.CommitToParent(context.Background())) + }() + go func() { + defer wg.Done() + require.NoError(t, childView2.CommitToParent(context.Background())) + }() + + wg.Wait() + + val0, err := dbTrie.GetValue(context.Background(), []byte{0}) + require.NoError(t, err) + require.Equal(t, []byte{0}, val0) + + val1, err := dbTrie.GetValue(context.Background(), []byte{1}) + require.NoError(t, err) + require.Equal(t, []byte{1}, val1) + + val2, err := dbTrie.GetValue(context.Background(), []byte{2}) + require.NoError(t, err) + require.Equal(t, []byte{2}, val2) + } +} + +func Test_Trie_ConcurrentReadWrite(t *testing.T) { + 
require := require.New(t) + + trie, err := getBasicDB() + require.NoError(err) + require.NotNil(trie) + newTrie, err := trie.NewView() + require.NoError(err) + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + err := newTrie.Insert(context.Background(), []byte("key"), []byte("value")) + require.NoError(err) + }() + + require.Eventually( + func() bool { + value, err := newTrie.GetValue(context.Background(), []byte("key")) + + if err == database.ErrNotFound { + return false + } + + require.NoError(err) + require.Equal([]byte("value"), value) + return true + }, + time.Second, + time.Millisecond, + ) +} + +func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { + require := require.New(t) + + trie, err := getBasicDB() + require.NoError(err) + require.NotNil(trie) + + newTrie, err := trie.NewView() + require.NoError(err) + err = newTrie.Insert(context.Background(), []byte("key"), []byte("value0")) + require.NoError(err) + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + err := newTrie.CommitToDB(context.Background()) + require.NoError(err) + }() + + newView, err := newTrie.NewView() + require.NoError(err) + require.NotNil(newView) +} + +func Test_Trie_ConcurrentDeleteAndMerkleRoot(t *testing.T) { + require := require.New(t) + + trie, err := getBasicDB() + require.NoError(err) + require.NotNil(trie) + + newTrie, err := trie.NewView() + require.NoError(err) + err = newTrie.Insert(context.Background(), []byte("key"), []byte("value0")) + require.NoError(err) + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + err := newTrie.Remove(context.Background(), []byte("key")) + require.NoError(err) + }() + + rootID, err := newTrie.GetMerkleRoot(context.Background()) + require.NoError(err) + require.NotZero(rootID) +} + +func Test_Trie_ConcurrentInsertProveCommit(t *testing.T) { + require := require.New(t) + + trie, err := getBasicDB() + require.NoError(err) + 
require.NotNil(trie) + + newTrie, err := trie.NewView() + require.NoError(err) + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + err := newTrie.Insert(context.Background(), []byte("key2"), []byte("value2")) + require.NoError(err) + }() + + require.Eventually( + func() bool { + proof, err := newTrie.GetProof(context.Background(), []byte("key2")) + require.NoError(err) + require.NotNil(proof) + + if proof.Value.value == nil { + // this is an exclusion proof since the value is nil + // return false to keep waiting for Insert to complete. + return false + } + require.Equal([]byte("value2"), proof.Value.value) + + err = newTrie.CommitToDB(context.Background()) + require.NoError(err) + return true + }, + time.Second, + time.Millisecond, + ) +} + +func Test_Trie_ConcurrentInsertAndRangeProof(t *testing.T) { + require := require.New(t) + + trie, err := getBasicDB() + require.NoError(err) + require.NotNil(trie) + + newTrie, err := trie.NewView() + require.NoError(err) + err = newTrie.Insert(context.Background(), []byte("key1"), []byte("value1")) + require.NoError(err) + + var wg sync.WaitGroup + defer wg.Wait() + + wg.Add(1) + go func() { + defer wg.Done() + err := newTrie.Insert(context.Background(), []byte("key2"), []byte("value2")) + require.NoError(err) + err = newTrie.Insert(context.Background(), []byte("key3"), []byte("value3")) + require.NoError(err) + }() + + require.Eventually( + func() bool { + rangeProof, err := newTrie.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 3) + require.NoError(err) + require.NotNil(rangeProof) + + if len(rangeProof.KeyValues) < 3 { + // Wait for the other goroutine to finish inserting + return false + } + + // Make sure we have exactly 3 KeyValues + require.Len(rangeProof.KeyValues, 3) + return true + }, + time.Second, + time.Millisecond, + ) +} diff --git a/avalanchego/x/merkledb/trieview.go b/avalanchego/x/merkledb/trieview.go new file mode 100644 index 00000000..a1fc676d 
--- /dev/null +++ b/avalanchego/x/merkledb/trieview.go @@ -0,0 +1,1413 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package merkledb + +import ( + "bytes" + "context" + "errors" + "fmt" + "runtime" + "sync" + + "go.opentelemetry.io/otel/attribute" + + oteltrace "go.opentelemetry.io/otel/trace" + + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" +) + +const defaultPreallocationSize = 100 + +var ( + ErrCommitted = errors.New("view has been committed") + ErrInvalid = errors.New("the trie this view was based on has changed, rendering this view invalid") + ErrOddLengthWithValue = errors.New( + "the underlying db only supports whole number of byte keys, so cannot record changes with odd nibble length", + ) + ErrGetPathToFailure = errors.New("GetPathTo failed to return the closest node") + ErrStartAfterEnd = errors.New("start key > end key") + ErrViewIsNotAChild = errors.New("passed in view is required to be a child of the current view") + ErrNoValidRoot = errors.New("a valid root was not provided to the trieView constructor") + + _ TrieView = &trieView{} + + numCPU = runtime.NumCPU() +) + +// Editable view of a trie, collects changes on top of a parent trie. +// Delays adding key/value pairs to the trie. +type trieView struct { + // Must be held when reading/writing fields except validity tracking fields: + // [childViews], [parentTrie], and [invalidated]. + // Only use to lock current trieView or ancestors of the current trieView + lock sync.RWMutex + + // Controls the trie's validity related fields. + // Must be held while reading/writing [childViews], [invalidated], and [parentTrie]. 
+ // Only use to lock current trieView or descendants of the current trieView + // DO NOT grab the [lock] or [validityTrackingLock] of this trie or any ancestor trie while this is held. + validityTrackingLock sync.RWMutex + + // If true, this view has been invalidated and can't be used. + // + // Invariant: This view is marked as invalid before any of its ancestors change. + // Since we ensure that all subviews are marked invalid before making an invalidating change + // then if we are still valid at the end of the function, then no corrupting changes could have + // occurred during execution. + // Namely, if we have a method with: + // + // *Code Accessing Ancestor State* + // + // if t.isInvalid() { + // return ErrInvalid + // } + // return [result] + // + // If the invalidated check passes, then we're guaranteed that no ancestor changes occurred + // during the code that accessed ancestor state and the result of that work is still valid + // + // [validityTrackingLock] must be held when reading/writing this field. + invalidated bool + + // the uncommitted parent trie of this view + // [validityTrackingLock] must be held when reading/writing this field. + parentTrie TrieView + + // The valid children of this trie. + // [validityTrackingLock] must be held when reading/writing this field. + childViews []*trieView + + // Changes made to this view. + // May include nodes that haven't been updated + // but will when their ID is recalculated. + changes *changeSummary + + // Key/value pairs that have been inserted/removed but not + // yet reflected in the trie's structure. This allows us to + // defer the cost of updating the trie until we calculate node IDs. + // A Nothing value indicates that the key has been removed. + unappliedValueChanges map[path]Maybe[[]byte] + + db *Database + + // The root of the trie represented by this view. + root *node + + // True if the IDs of nodes in this view need to be recalculated. 
+ needsRecalculation bool + + // If true, this view has been committed and cannot be edited. + // Calls to Insert and Remove will return ErrCommitted. + committed bool + + estimatedSize int +} + +// NewView returns a new view on top of this one. +// Adds the new view to [t.childViews]. +// Assumes [t.lock] is not held. +func (t *trieView) NewView() (TrieView, error) { + return t.NewPreallocatedView(defaultPreallocationSize) +} + +// NewPreallocatedView returns a new view on top of this one with memory allocated to store the +// [estimatedChanges] number of key/value changes. +// If this view is already committed, the new view's parent will +// be set to the parent of the current view. +// Otherwise, adds the new view to [t.childViews]. +// Assumes [t.lock] is not held. +func (t *trieView) NewPreallocatedView( + estimatedChanges int, +) (TrieView, error) { + t.lock.RLock() + defer t.lock.RUnlock() + + if t.isInvalid() { + return nil, ErrInvalid + } + + if t.committed { + return t.getParentTrie().NewPreallocatedView(estimatedChanges) + } + + newView, err := newTrieView(t.db, t, t.root.clone(), estimatedChanges) + if err != nil { + return nil, err + } + + t.validityTrackingLock.Lock() + defer t.validityTrackingLock.Unlock() + + if t.invalidated { + return nil, ErrInvalid + } + t.childViews = append(t.childViews, newView) + + return newView, nil +} + +// Creates a new view with the given [parentTrie]. +func newTrieView( + db *Database, + parentTrie TrieView, + root *node, + estimatedSize int, +) (*trieView, error) { + if root == nil { + return nil, ErrNoValidRoot + } + + return &trieView{ + root: root, + db: db, + parentTrie: parentTrie, + changes: newChangeSummary(estimatedSize), + estimatedSize: estimatedSize, + unappliedValueChanges: make(map[path]Maybe[[]byte], estimatedSize), + }, nil +} + +// Creates a new view with the given [parentTrie]. 
+func newTrieViewWithChanges( + db *Database, + parentTrie TrieView, + changes *changeSummary, + estimatedSize int, +) (*trieView, error) { + if changes == nil { + return nil, ErrNoValidRoot + } + + passedRootChange, ok := changes.nodes[RootPath] + if !ok { + return nil, ErrNoValidRoot + } + + return &trieView{ + root: passedRootChange.after, + db: db, + parentTrie: parentTrie, + changes: changes, + estimatedSize: estimatedSize, + unappliedValueChanges: make(map[path]Maybe[[]byte], estimatedSize), + }, nil +} + +// Recalculates the node IDs for all changed nodes in the trie. +// Assumes [t.lock] is held. +func (t *trieView) calculateNodeIDs(ctx context.Context) error { + switch { + case t.isInvalid(): + return ErrInvalid + case !t.needsRecalculation: + return nil + case t.committed: + // Note that this should never happen. If a view is committed, it should + // never be edited, so [t.needsRecalculation] should always be false. + return ErrCommitted + } + + // We wait to create the span until after checking that we need to actually + // calculateNodeIDs to make traces more useful (otherwise there may be a span + // per key modified even though IDs are not re-calculated). + ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.calculateNodeIDs") + defer span.End() + + // ensure that the view under this one is up-to-date before potentially pulling in nodes from it + // getting the Merkle root forces any unupdated nodes to recalculate their ids + if _, err := t.getParentTrie().GetMerkleRoot(ctx); err != nil { + return err + } + + if err := t.applyChangedValuesToTrie(ctx); err != nil { + return err + } + + _, helperSpan := t.db.tracer.Start(ctx, "MerkleDB.trieview.calculateNodeIDsHelper") + defer helperSpan.End() + + // [eg] limits the number of goroutines we start. 
+ var eg errgroup.Group + eg.SetLimit(numCPU) + if err := t.calculateNodeIDsHelper(ctx, t.root, &eg); err != nil { + return err + } + if err := eg.Wait(); err != nil { + return err + } + t.needsRecalculation = false + t.changes.rootID = t.root.id + + // ensure no ancestor changes occurred during execution + if t.isInvalid() { + return ErrInvalid + } + + return nil +} + +// Calculates the ID of all descendants of [n] which need to be recalculated, +// and then calculates the ID of [n] itself. +func (t *trieView) calculateNodeIDsHelper(ctx context.Context, n *node, eg *errgroup.Group) error { + var ( + // We use [wg] to wait until all descendants of [n] have been updated. + // Note we can't wait on [eg] because [eg] may have started goroutines + // that aren't calculating IDs for descendants of [n]. + wg sync.WaitGroup + updatedChildren = make(chan *node, len(n.children)) + ) + + for childIndex, child := range n.children { + childIndex, child := childIndex, child + + childPath := n.key + path(childIndex) + child.compressedPath + childNodeChange, ok := t.changes.nodes[childPath] + if !ok { + // This child wasn't changed. + continue + } + + wg.Add(1) + updateChild := func() error { + defer wg.Done() + + if err := t.calculateNodeIDsHelper(ctx, childNodeChange.after, eg); err != nil { + return err + } + + // Note that this will never block + updatedChildren <- childNodeChange.after + return nil + } + + // Try updating the child and its descendants in a goroutine. + if ok := eg.TryGo(updateChild); !ok { + // We're at the goroutine limit; do the work in this goroutine. + if err := updateChild(); err != nil { + return err + } + } + } + + // Wait until all descendants of [n] have been updated. + wg.Wait() + close(updatedChildren) + + for child := range updatedChildren { + n.addChild(child) + } + + // The IDs [n]'s descendants are up to date so we can calculate [n]'s ID. 
+ return n.calculateID(t.db.metrics) +} + +// GetProof returns a proof that [bytesPath] is in or not in trie [t]. +func (t *trieView) GetProof(ctx context.Context, key []byte) (*Proof, error) { + _, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.GetProof") + defer span.End() + + t.lock.RLock() + defer t.lock.RUnlock() + + // only need full lock if nodes ids need to be calculated + // looped to ensure that the value didn't change after the lock was released + for t.needsRecalculation { + t.lock.RUnlock() + t.lock.Lock() + if err := t.calculateNodeIDs(ctx); err != nil { + return nil, err + } + t.lock.Unlock() + t.lock.RLock() + } + + return t.getProof(ctx, key) +} + +// Returns a proof that [bytesPath] is in or not in trie [t]. +// Assumes [t.lock] is held. +func (t *trieView) getProof(ctx context.Context, key []byte) (*Proof, error) { + _, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.getProof") + defer span.End() + + proof := &Proof{ + Key: key, + } + + // Get the node at the given path, or the node closest to it. + keyPath := newPath(key) + + proofPath, err := t.getPathTo(keyPath) + if err != nil { + return nil, err + } + + // From root --> node from left --> right. + proof.Path = make([]ProofNode, len(proofPath), len(proofPath)+1) + for i, node := range proofPath { + proof.Path[i] = node.asProofNode() + } + + closestNode := proofPath[len(proofPath)-1] + + if closestNode.key.Compare(keyPath) == 0 { + // There is a node with the given [key]. + proof.Value = Clone(closestNode.value) + return proof, nil + } + + // There is no node with the given [key]. + // If there is a child at the index where the node would be + // if it existed, include that child in the proof. 
+ nextIndex := keyPath[len(closestNode.key)] + child, ok := closestNode.children[nextIndex] + if !ok { + return proof, nil + } + + childPath := closestNode.key + path(nextIndex) + child.compressedPath + childNode, err := t.getNodeFromParent(closestNode, childPath) + if err != nil { + return nil, err + } + proof.Path = append(proof.Path, childNode.asProofNode()) + if t.isInvalid() { + return nil, ErrInvalid + } + return proof, nil +} + +// GetRangeProof returns a range proof for (at least part of) the key range [start, end]. +// The returned proof's [KeyValues] has at most [maxLength] values. +// [maxLength] must be > 0. +func (t *trieView) GetRangeProof( + ctx context.Context, + start, end []byte, + maxLength int, +) (*RangeProof, error) { + ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.GetRangeProof") + defer span.End() + + if len(end) > 0 && bytes.Compare(start, end) == 1 { + return nil, ErrStartAfterEnd + } + + if maxLength <= 0 { + return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength) + } + + t.lock.RLock() + defer t.lock.RUnlock() + + // only need full lock if nodes ids need to be calculated + // looped to ensure that the value didn't change after the lock was released + for t.needsRecalculation { + t.lock.RUnlock() + t.lock.Lock() + if err := t.calculateNodeIDs(ctx); err != nil { + return nil, err + } + t.lock.Unlock() + t.lock.RLock() + } + + var ( + result RangeProof + err error + ) + + result.KeyValues, err = t.getKeyValues( + start, + end, + maxLength, + set.Set[string]{}, + false, /*lock*/ + ) + if err != nil { + return nil, err + } + + // copy values, so edits won't affect the underlying arrays + for i, kv := range result.KeyValues { + result.KeyValues[i] = KeyValue{Key: kv.Key, Value: slices.Clone(kv.Value)} + } + + // This proof may not contain all key-value pairs in [start, end] due to size limitations. 
+ // The end proof we provide should be for the last key-value pair in the proof, not for + // the last key-value pair requested, which may not be in this proof. + if len(result.KeyValues) > 0 { + end = result.KeyValues[len(result.KeyValues)-1].Key + } + + if len(end) > 0 { + endProof, err := t.getProof(ctx, end) + if err != nil { + return nil, err + } + result.EndProof = endProof.Path + } + + if len(start) > 0 { + startProof, err := t.getProof(ctx, start) + if err != nil { + return nil, err + } + result.StartProof = startProof.Path + + // strip out any common nodes to reduce proof size + i := 0 + for ; i < len(result.StartProof) && + i < len(result.EndProof) && + result.StartProof[i].KeyPath.Equal(result.EndProof[i].KeyPath); i++ { + } + result.StartProof = result.StartProof[i:] + } + + if len(result.StartProof) == 0 && len(result.EndProof) == 0 && len(result.KeyValues) == 0 { + // If the range is empty, return the root proof. + rootProof, err := t.getProof(ctx, rootKey) + if err != nil { + return nil, err + } + result.EndProof = rootProof.Path + } + if t.isInvalid() { + return nil, ErrInvalid + } + return &result, nil +} + +// CommitToDB commits changes from this trie to the underlying DB. +func (t *trieView) CommitToDB(ctx context.Context) error { + ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.CommitToDB") + defer span.End() + + t.db.commitLock.Lock() + defer t.db.commitLock.Unlock() + + return t.commitToDB(ctx) +} + +// Adds the changes from [trieToCommit] to this trie. +// Assumes [trieToCommit.lock] is held if trieToCommit is not nil. 
func (t *trieView) commitChanges(ctx context.Context, trieToCommit *trieView) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	_, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.commitChanges", oteltrace.WithAttributes(
		attribute.Int("changeCount", len(t.changes.values)),
	))
	defer span.End()

	switch {
	case t.isInvalid():
		// don't apply changes to an invalid view
		return ErrInvalid
	case trieToCommit == nil:
		// no changes to apply
		return nil
	case trieToCommit.getParentTrie() != t:
		// trieToCommit needs to be a child of t, otherwise the changes merge would not work
		return ErrViewIsNotAChild
	case trieToCommit.isInvalid():
		// don't apply changes from an invalid view
		return ErrInvalid
	}

	// Invalidate all child views except the view being committed.
	// Note that we invalidate children before modifying their ancestor [t]
	// to uphold the invariant on [t.invalidated].
	t.invalidateChildrenExcept(trieToCommit)

	if err := trieToCommit.calculateNodeIDs(ctx); err != nil {
		return err
	}

	// Merge the child's node changes into this view. If a key was already
	// changed here, keep this view's "before" state and only adopt the
	// child's "after" state.
	for key, nodeChange := range trieToCommit.changes.nodes {
		if existing, ok := t.changes.nodes[key]; ok {
			existing.after = nodeChange.after
		} else {
			t.changes.nodes[key] = &change[*node]{
				before: nodeChange.before,
				after:  nodeChange.after,
			}
		}
	}

	// Merge the child's value changes the same way.
	for key, valueChange := range trieToCommit.changes.values {
		if existing, ok := t.changes.values[key]; ok {
			existing.after = valueChange.after
		} else {
			t.changes.values[key] = &change[Maybe[[]byte]]{
				before: valueChange.before,
				after:  valueChange.after,
			}
		}
	}
	// update this view's root info to match the newly committed root
	t.root = trieToCommit.root
	t.changes.rootID = trieToCommit.changes.rootID

	// move the children from the incoming trieview to the current trieview
	// do this after the current view has been updated
	// this allows child views calls to their parent to remain consistent during the move
	t.moveChildViewsToView(trieToCommit)

	return nil
}

// CommitToParent commits the changes from this view to its parent Trie.
func (t *trieView) CommitToParent(ctx context.Context) error {
	// TODO: Only lock the commitlock when the parent is the DB
	// TODO: fix concurrency bugs with CommitToParent
	t.db.commitLock.Lock()
	defer t.db.commitLock.Unlock()

	t.lock.Lock()
	defer t.lock.Unlock()

	return t.commitToParent(ctx)
}

// commitToParent commits the changes from this view to its parent Trie.
// Marks this view as committed on success.
// assumes [t.lock] is held
func (t *trieView) commitToParent(ctx context.Context) error {
	ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.commitToParent")
	defer span.End()

	if t.isInvalid() {
		return ErrInvalid
	}
	if t.committed {
		return ErrCommitted
	}

	// ensure all of this view's changes have been calculated
	if err := t.calculateNodeIDs(ctx); err != nil {
		return err
	}

	// write this view's changes into its parent
	if err := t.getParentTrie().commitChanges(ctx, t); err != nil {
		return err
	}
	// ensure no ancestor changes occurred during execution
	if t.isInvalid() {
		return ErrInvalid
	}

	t.committed = true

	return nil
}

// Commits the changes from [trieToCommit] to this view,
// this view to its parent, and so on until committing to the db.
// Assumes [t.db.commitLock] is held.
func (t *trieView) commitToDB(ctx context.Context) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	ctx, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.commitToDB", oteltrace.WithAttributes(
		attribute.Int("changeCount", len(t.changes.values)),
	))
	defer span.End()

	// first merge changes into the parent trie
	if err := t.commitToParent(ctx); err != nil {
		return err
	}

	// now commit the parent trie to the db
	return t.getParentTrie().commitToDB(ctx)
}

// Returns true iff this view has been invalidated by a change to an
// ancestor.
// Assumes [t.validityTrackingLock] isn't held.
func (t *trieView) isInvalid() bool {
	t.validityTrackingLock.RLock()
	defer t.validityTrackingLock.RUnlock()

	return t.invalidated
}

// Invalidates this view and all descendants.
// Assumes [t.validityTrackingLock] isn't held.
func (t *trieView) invalidate() {
	t.validityTrackingLock.Lock()
	defer t.validityTrackingLock.Unlock()

	t.invalidated = true

	for _, childView := range t.childViews {
		childView.invalidate()
	}

	// after invalidating the children, they no longer need to be tracked
	t.childViews = make([]*trieView, 0, defaultPreallocationSize)
}

// Invalidates all children of this view.
// Assumes [t.validityTrackingLock] isn't held.
func (t *trieView) invalidateChildren() {
	t.invalidateChildrenExcept(nil)
}

// moveChildViewsToView removes any child views from the trieToCommit and moves them to the current trie view.
// Assumes neither [t.validityTrackingLock] nor [trieToCommit.validityTrackingLock] is held.
func (t *trieView) moveChildViewsToView(trieToCommit *trieView) {
	t.validityTrackingLock.Lock()
	defer t.validityTrackingLock.Unlock()

	trieToCommit.validityTrackingLock.Lock()
	defer trieToCommit.validityTrackingLock.Unlock()

	for _, childView := range trieToCommit.childViews {
		childView.updateParent(t)
		t.childViews = append(t.childViews, childView)
	}
	trieToCommit.childViews = make([]*trieView, 0, defaultPreallocationSize)
}

// updateParent sets this view's parent trie to [newParent].
// Assumes [t.validityTrackingLock] isn't held.
func (t *trieView) updateParent(newParent TrieView) {
	t.validityTrackingLock.Lock()
	defer t.validityTrackingLock.Unlock()

	t.parentTrie = newParent
}

// Invalidates all children of this view except [exception].
// [t.childViews] will only contain the exception after invalidation is complete.
// Assumes [t.validityTrackingLock] isn't held.
func (t *trieView) invalidateChildrenExcept(exception *trieView) {
	t.validityTrackingLock.Lock()
	defer t.validityTrackingLock.Unlock()

	for _, childView := range t.childViews {
		if childView != exception {
			childView.invalidate()
		}
	}

	// after invalidating the children, they no longer need to be tracked
	t.childViews = make([]*trieView, 0, defaultPreallocationSize)

	// add back in the exception view since it is still valid
	if exception != nil {
		t.childViews = append(t.childViews, exception)
	}
}

// GetMerkleRoot returns the ID of the root of this trie.
func (t *trieView) GetMerkleRoot(ctx context.Context) (ids.ID, error) {
	t.lock.Lock()
	defer t.lock.Unlock()

	return t.getMerkleRoot(ctx)
}

// Returns the ID of the root node of this trie.
// Assumes [t.lock] is held.
func (t *trieView) getMerkleRoot(ctx context.Context) (ids.ID, error) {
	if err := t.calculateNodeIDs(ctx); err != nil {
		return ids.Empty, err
	}
	return t.root.id, nil
}

// Returns up to [maxLength] key/values from keys in closed range [start, end].
// Acts similarly to the merge step of a merge sort to combine state from the view
// with state from the parent trie.
// Keys deleted in this view are added to [keysToIgnore] so ancestors skip them.
// If [lock], grabs [t.lock]'s read lock.
// Otherwise assumes [t.lock]'s read lock is held.
func (t *trieView) getKeyValues(
	start []byte,
	end []byte,
	maxLength int,
	keysToIgnore set.Set[string],
	lock bool,
) ([]KeyValue, error) {
	if lock {
		t.lock.RLock()
		defer t.lock.RUnlock()
	}

	if maxLength <= 0 {
		return nil, fmt.Errorf("%w but was %d", ErrInvalidMaxLength, maxLength)
	}

	if t.isInvalid() {
		return nil, ErrInvalid
	}

	// collect all values that have changed or been deleted
	// NOTE(review): [changes] is not filtered against [start] here; the merge
	// loop below only enforces the [end] bound. Presumably callers pass a
	// [start] at or below every changed key — confirm against callers.
	changes := make([]KeyValue, 0, len(t.changes.values))
	for key, change := range t.changes.values {
		if change.after.IsNothing() {
			// This was deleted
			keysToIgnore.Add(string(key.Serialize().Value))
		} else {
			changes = append(changes, KeyValue{
				Key:   key.Serialize().Value,
				Value: change.after.value,
			})
		}
	}
	// sort [changes] so they can be merged with the parent trie's state
	slices.SortFunc(changes, func(a, b KeyValue) bool {
		return bytes.Compare(a.Key, b.Key) == -1
	})

	baseKeyValues, err := t.getParentTrie().getKeyValues(
		start,
		end,
		maxLength,
		keysToIgnore,
		true, /*lock*/
	)
	if err != nil {
		return nil, err
	}

	var (
		// True if there are no more key/value pairs from [baseKeyValues] to add to the result
		baseKeyValuesFinished = false
		// True if there are no more key/value pairs from [changes] to add to the result
		changesFinished = false
		// The index of the next key/value pair to add from [baseKeyValues].
		baseKeyValuesIndex = 0
		// The index of the next key/value pair to add from [changes].
		changesIndex    = 0
		remainingLength = maxLength
		hasUpperBound   = len(end) > 0
		result          = make([]KeyValue, 0, len(baseKeyValues))
	)

	// keep adding key/value pairs until one of the following:
	// * a key that is lexicographically larger than the end key is hit
	// * the maxLength is hit
	// * no more values are available to add
	for remainingLength > 0 {
		// the baseKeyValues iterator is finished when we have run out of keys or hit a key greater than the end key
		baseKeyValuesFinished = baseKeyValuesFinished ||
			(baseKeyValuesIndex >= len(baseKeyValues) || (hasUpperBound && bytes.Compare(baseKeyValues[baseKeyValuesIndex].Key, end) == 1))

		// the changes iterator is finished when we have run out of keys or hit a key greater than the end key
		changesFinished = changesFinished ||
			(changesIndex >= len(changes) || (hasUpperBound && bytes.Compare(changes[changesIndex].Key, end) == 1))

		// if both the base state and changes are finished, return the result of the merge
		if baseKeyValuesFinished && changesFinished {
			return result, nil
		}

		// one or both iterators still have values, so one will be added to the result
		remainingLength--

		// both still have key/values available, so add the smallest key
		if !changesFinished && !baseKeyValuesFinished {
			currentChangeState := changes[changesIndex]
			currentKeyValues := baseKeyValues[baseKeyValuesIndex]

			switch bytes.Compare(currentChangeState.Key, currentKeyValues.Key) {
			case -1:
				result = append(result, currentChangeState)
				changesIndex++
			case 0:
				// the keys are the same, so override the base value with the changed value
				result = append(result, currentChangeState)
				changesIndex++
				baseKeyValuesIndex++
			case 1:
				result = append(result, currentKeyValues)
				baseKeyValuesIndex++
			}
			continue
		}

		// the base state is not finished, but the changes is finished.
		// add the next base state value.
		if !baseKeyValuesFinished {
			currentBaseState := baseKeyValues[baseKeyValuesIndex]
			result = append(result, currentBaseState)
			baseKeyValuesIndex++
			continue
		}

		// the base state is finished, but the changes is not finished.
		// add the next changes value.
		currentChangeState := changes[changesIndex]
		result = append(result, currentChangeState)
		changesIndex++
	}

	// ensure no ancestor changes occurred during execution
	if t.isInvalid() {
		return nil, ErrInvalid
	}

	return result, nil
}

// GetValues returns the values for the given [keys], one result (value or
// error) per key.
func (t *trieView) GetValues(_ context.Context, keys [][]byte) ([][]byte, []error) {
	t.lock.RLock()
	defer t.lock.RUnlock()

	results := make([][]byte, len(keys))
	valueErrors := make([]error, len(keys))

	for i, key := range keys {
		results[i], valueErrors[i] = t.getValueCopy(newPath(key), false)
	}
	return results, valueErrors
}

// GetValue returns the value for the given [key].
// Returns database.ErrNotFound if it doesn't exist.
func (t *trieView) GetValue(_ context.Context, key []byte) ([]byte, error) {
	return t.getValueCopy(newPath(key), true)
}

// getValueCopy returns a copy of the value for the given [key].
// Returns database.ErrNotFound if it doesn't exist.
func (t *trieView) getValueCopy(key path, lock bool) ([]byte, error) {
	val, err := t.getValue(key, lock)
	if err != nil {
		return nil, err
	}
	return slices.Clone(val), nil
}

// getValue returns the value for [key] without copying it.
// If [lock], grabs [t.lock]'s read lock; otherwise assumes it is held.
// Returns database.ErrNotFound if the key has no value.
func (t *trieView) getValue(key path, lock bool) ([]byte, error) {
	if lock {
		t.lock.RLock()
		defer t.lock.RUnlock()
	}

	if t.isInvalid() {
		return nil, ErrInvalid
	}

	if change, ok := t.changes.values[key]; ok {
		t.db.metrics.ViewValueCacheHit()
		if change.after.IsNothing() {
			// The key was deleted in this view.
			return nil, database.ErrNotFound
		}
		return change.after.value, nil
	}
	t.db.metrics.ViewValueCacheMiss()

	// if we don't have local copy of the key, then grab a copy from the parent trie
	value, err := t.getParentTrie().getValue(key, true)
	if err != nil {
		return nil, err
	}

	// ensure no ancestor changes occurred during execution
	if t.isInvalid() {
		return nil, ErrInvalid
	}

	return value, nil
}

// Insert will upsert the key/value pair into the trie.
func (t *trieView) Insert(_ context.Context, key []byte, value []byte) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	return t.insert(key, value)
}

// Assumes [t.lock] is held.
// Assumes [t.validityTrackingLock] isn't held.
func (t *trieView) insert(key []byte, value []byte) error {
	if t.committed {
		return ErrCommitted
	}
	if t.isInvalid() {
		return ErrInvalid
	}

	// the trie has been changed, so invalidate all children and remove them from tracking
	t.invalidateChildren()

	// Copy [value] so later mutations by the caller can't affect the trie.
	valCopy := slices.Clone(value)

	if err := t.recordValueChange(newPath(key), Some(valCopy)); err != nil {
		return err
	}

	// ensure no ancestor changes occurred during execution
	if t.isInvalid() {
		return ErrInvalid
	}

	return nil
}

// Remove will delete the value associated with [key] from this trie.
func (t *trieView) Remove(_ context.Context, key []byte) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	return t.remove(key)
}

// Assumes [t.lock] is held.
// Assumes [t.validityTrackingLock] isn't held.
func (t *trieView) remove(key []byte) error {
	if t.committed {
		return ErrCommitted
	}

	if t.isInvalid() {
		return ErrInvalid
	}

	// the trie has been changed, so invalidate all children and remove them from tracking
	t.invalidateChildren()

	// A Nothing value records a deletion.
	if err := t.recordValueChange(newPath(key), Nothing[[]byte]()); err != nil {
		return err
	}

	// ensure no ancestor changes occurred during execution
	if t.isInvalid() {
		return ErrInvalid
	}

	return nil
}

// Applies all deferred key/value changes to the trie's node structure.
// Assumes [t.lock] is held.
func (t *trieView) applyChangedValuesToTrie(ctx context.Context) error {
	_, span := t.db.tracer.Start(ctx, "MerkleDB.trieview.applyChangedValuesToTrie")
	defer span.End()

	// Swap out the pending map first so the set being iterated is stable.
	unappliedValues := t.unappliedValueChanges
	t.unappliedValueChanges = make(map[path]Maybe[[]byte], t.estimatedSize)

	for key, change := range unappliedValues {
		if change.IsNothing() {
			if err := t.removeFromTrie(key); err != nil {
				return err
			}
		} else {
			if _, err := t.insertIntoTrie(key, change); err != nil {
				return err
			}
		}
	}
	return nil
}

// Merges together nodes in the inclusive descendants of [node] that
// have no value and a single child into one node with a compressed
// path until a node that doesn't meet those criteria is reached.
// [parent] is [node]'s parent.
// Assumes at least one of the following is true:
// * [node] has a value.
// * [node] has children.
// Assumes [t.lock] is held.
func (t *trieView) compressNodePath(parent, node *node) error {
	// don't collapse into this node if it's the root, doesn't have 1 child, or has a value
	if len(node.children) != 1 || node.hasValue() {
		return nil
	}

	// delete all empty nodes with a single child under [node]
	for len(node.children) == 1 && !node.hasValue() {
		if err := t.recordNodeDeleted(node); err != nil {
			return err
		}

		nextNode, err := t.getNodeFromParent(node, node.getSingleChildPath())
		if err != nil {
			return err
		}
		node = nextNode
	}

	// [node] is the first node with multiple children.
	// combine it with the [node] passed in.
	parent.addChild(node)
	return t.recordNodeChange(parent)
}

// Starting from the last node in [nodePath], traverses toward the root
// and deletes each node that has no value and no children.
// Stops when a node with a value or children is reached.
// Assumes [nodePath] is a path from the root to a node.
// Assumes [t.lock] is held.
func (t *trieView) deleteEmptyNodes(nodePath []*node) error {
	node := nodePath[len(nodePath)-1]
	nextParentIndex := len(nodePath) - 2

	for ; nextParentIndex >= 0 && len(node.children) == 0 && !node.hasValue(); nextParentIndex-- {
		if err := t.recordNodeDeleted(node); err != nil {
			return err
		}

		parent := nodePath[nextParentIndex]

		parent.removeChild(node)
		if err := t.recordNodeChange(parent); err != nil {
			return err
		}

		node = parent
	}

	// nextParentIndex < 0 means every node up to and including the root's
	// child was deleted; nothing is left to compress.
	if nextParentIndex < 0 {
		return nil
	}
	parent := nodePath[nextParentIndex]

	return t.compressNodePath(parent, node)
}

// Returns the nodes along the path to [key].
// The first node is the root, and the last node is either the node with the
// given [key], if it's in the trie, or the node with the largest prefix of
// the [key] if it isn't in the trie.
// Always returns at least the root node.
+func (t *trieView) getPathTo(key path) ([]*node, error) { + var ( + // all paths start at the root + currentNode = t.root + matchedKeyIndex = 0 + nodes = []*node{t.root} + ) + + // while the entire path hasn't been matched + for matchedKeyIndex < len(key) { + // confirm that a child exists and grab its ID before attempting to load it + nextChildEntry, hasChild := currentNode.children[key[matchedKeyIndex]] + + // the nibble for the child entry has now been handled, so increment the matchedPathIndex + matchedKeyIndex += 1 + + if !hasChild || !key[matchedKeyIndex:].HasPrefix(nextChildEntry.compressedPath) { + // there was no child along the path or the child that was there doesn't match the remaining path + return nodes, nil + } + + // the compressed path of the entry there matched the path, so increment the matched index + matchedKeyIndex += len(nextChildEntry.compressedPath) + + // grab the next node along the path + var err error + currentNode, err = t.getNodeWithID(nextChildEntry.id, key[:matchedKeyIndex]) + if err != nil { + return nil, err + } + + // add node to path + nodes = append(nodes, currentNode) + } + return nodes, nil +} + +func getLengthOfCommonPrefix(first, second path) int { + commonIndex := 0 + for len(first) > commonIndex && len(second) > commonIndex && first[commonIndex] == second[commonIndex] { + commonIndex++ + } + return commonIndex +} + +// Get a copy of the node matching the passed key from the trie +// Used by views to get nodes from their ancestors +// assumes that [t.needsRecalculation] is false +func (t *trieView) getEditableNode(key path) (*node, error) { + t.lock.RLock() + defer t.lock.RUnlock() + + if t.isInvalid() { + return nil, ErrInvalid + } + + // grab the node in question + n, err := t.getNodeWithID(ids.Empty, key) + if err != nil { + return nil, err + } + + // ensure no ancestor changes occurred during execution + if t.isInvalid() { + return nil, ErrInvalid + } + + // return a clone of the node, so it can be edited without 
affecting this trie + return n.clone(), nil +} + +// Inserts a key/value pair into the trie. +// Assumes [t.lock] is held. +func (t *trieView) insertIntoTrie( + key path, + value Maybe[[]byte], +) (*node, error) { + // find the node that most closely matches [key] + pathToNode, err := t.getPathTo(key) + if err != nil { + return nil, err + } + + // We're inserting a node whose ancestry is [pathToNode] + // so we'll need to recalculate their IDs. + for _, node := range pathToNode { + if err := t.recordNodeChange(node); err != nil { + return nil, err + } + } + + closestNode := pathToNode[len(pathToNode)-1] + + // a node with that exact path already exists so update its value + if closestNode.key.Compare(key) == 0 { + closestNode.setValue(value) + return closestNode, nil + } + + closestNodeKeyLength := len(closestNode.key) + // A node with the exact key doesn't exist so determine the portion of the + // key that hasn't been matched yet + // Note that [key] has prefix [closestNodeFullPath] but exactMatch was false, + // so [key] must be longer than [closestNodeFullPath] and the following slice won't OOB. + remainingKey := key[closestNodeKeyLength+1:] + + existingChildEntry, hasChild := closestNode.children[key[closestNodeKeyLength]] + // there are no existing nodes along the path [fullPath], so create a new node to insert [value] + if !hasChild { + newNode := newNode( + closestNode, + key, + ) + newNode.setValue(value) + return newNode, t.recordNodeChange(newNode) + } else if err != nil { + return nil, err + } + + // if we have reached this point, then the [fullpath] we are trying to insert and + // the existing path node have some common prefix. + // a new branching node will be created that will represent this common prefix and + // have the existing path node and the value being inserted as children. 
+ + // generate the new branch node + branchNode := newNode( + closestNode, + key[:closestNodeKeyLength+1+getLengthOfCommonPrefix(existingChildEntry.compressedPath, remainingKey)], + ) + if err := t.recordNodeChange(closestNode); err != nil { + return nil, err + } + nodeWithValue := branchNode + + if len(key)-len(branchNode.key) == 0 { + // there was no residual path for the inserted key, so the value goes directly into the new branch node + branchNode.setValue(value) + } else { + // generate a new node and add it as a child of the branch node + newNode := newNode( + branchNode, + key, + ) + newNode.setValue(value) + if err := t.recordNodeChange(newNode); err != nil { + return nil, err + } + nodeWithValue = newNode + } + + existingChildKey := key[:closestNodeKeyLength+1] + existingChildEntry.compressedPath + + // the existing child's key is of length: len(closestNodekey) + 1 for the child index + len(existing child's compressed key) + // if that length is less than or equal to the branch node's key that implies that the existing child's key matched the key to be inserted + // since it matched the key to be inserted, it should have been returned by GetPathTo + if len(existingChildKey) <= len(branchNode.key) { + return nil, ErrGetPathToFailure + } + + branchNode.addChildWithoutNode( + existingChildKey[len(branchNode.key)], + existingChildKey[len(branchNode.key)+1:], + existingChildEntry.id, + ) + + return nodeWithValue, t.recordNodeChange(branchNode) +} + +// Records that a node has been changed. +// Assumes [t.lock] is held. +func (t *trieView) recordNodeChange(after *node) error { + return t.recordKeyChange(after.key, after) +} + +// Records that the node associated with the given key has been deleted. +// Assumes [t.lock] is held. +func (t *trieView) recordNodeDeleted(after *node) error { + // don't delete the root. 
+ if len(after.key) == 0 { + return t.recordKeyChange(after.key, after) + } + return t.recordKeyChange(after.key, nil) +} + +// Records that the node associated with the given key has been changed. +// Assumes [t.lock] is held. +func (t *trieView) recordKeyChange(key path, after *node) error { + t.needsRecalculation = true + + if existing, ok := t.changes.nodes[key]; ok { + existing.after = after + return nil + } + + before, err := t.getParentTrie().getEditableNode(key) + if err != nil { + if err != database.ErrNotFound { + return err + } + before = nil + } + + t.changes.nodes[key] = &change[*node]{ + before: before, + after: after, + } + return nil +} + +// Records that a key's value has been added or updated. +// Doesn't actually change the trie data structure. +// That's deferred until we calculate node IDs. +// Assumes [t.lock] is held. +func (t *trieView) recordValueChange(key path, value Maybe[[]byte]) error { + t.needsRecalculation = true + + // record the value change so that it can be inserted + // into a trie nodes later + t.unappliedValueChanges[key] = value + + // update the existing change if it exists + if existing, ok := t.changes.values[key]; ok { + existing.after = value + return nil + } + + // grab the before value + var beforeMaybe Maybe[[]byte] + before, err := t.getParentTrie().getValue(key, true) + switch err { + case nil: + beforeMaybe = Some(before) + case database.ErrNotFound: + beforeMaybe = Nothing[[]byte]() + default: + return err + } + + t.changes.values[key] = &change[Maybe[[]byte]]{ + before: beforeMaybe, + after: value, + } + return nil +} + +// Removes the provided [key] from the trie. +// Assumes [t.lock] write lock is held. 
+func (t *trieView) removeFromTrie(key path) error { + nodePath, err := t.getPathTo(key) + if err != nil { + return err + } + + nodeToDelete := nodePath[len(nodePath)-1] + + if nodeToDelete.key.Compare(key) != 0 || !nodeToDelete.hasValue() { + // the key wasn't in the trie or doesn't have a value so there's nothing to do + return nil + } + + // A node with ancestry [nodePath] is being deleted, so we need to recalculate + // all the nodes in this path. + for _, node := range nodePath { + if err := t.recordNodeChange(node); err != nil { + return err + } + } + + nodeToDelete.setValue(Nothing[[]byte]()) + if err := t.recordNodeChange(nodeToDelete); err != nil { + return err + } + + // if the removed node has no children, the node can be removed from the trie + if len(nodeToDelete.children) == 0 { + return t.deleteEmptyNodes(nodePath) + } + + if len(nodePath) == 1 { + return nil + } + parent := nodePath[len(nodePath)-2] + + // merge this node and its descendants into a single node if possible + return t.compressNodePath(parent, nodeToDelete) +} + +// Retrieves the node with the given [key], which is a child of [parent], and +// uses the [parent] node to initialize the child node's ID. +// Returns database.ErrNotFound if the child doesn't exist. +// Assumes [t.lock] write or read lock is held. +func (t *trieView) getNodeFromParent(parent *node, key path) (*node, error) { + // confirm the child exists and get its ID before attempting to load it + if child, exists := parent.children[key[len(parent.key)]]; exists { + return t.getNodeWithID(child.id, key) + } + + return nil, database.ErrNotFound +} + +// Retrieves a node with the given [key]. +// If the node is fetched from [t.parentTrie] and [id] isn't empty, +// sets the node's ID to [id]. +// Returns database.ErrNotFound if the node doesn't exist. +// Assumes [t.lock] write or read lock is held. 
+func (t *trieView) getNodeWithID(id ids.ID, key path) (*node, error) { + // check for the key within the changed nodes + if nodeChange, isChanged := t.changes.nodes[key]; isChanged { + t.db.metrics.ViewNodeCacheHit() + if nodeChange.after == nil { + return nil, database.ErrNotFound + } + return nodeChange.after, nil + } + + // get the node from the parent trie and store a local copy + parentTrieNode, err := t.getParentTrie().getEditableNode(key) + if err != nil { + return nil, err + } + + // only need to initialize the id if it's from the parent trie. + // nodes in the current view change list have already been initialized. + if id != ids.Empty { + parentTrieNode.id = id + } + return parentTrieNode, nil +} + +// Get the parent trie of the view +func (t *trieView) getParentTrie() TrieView { + t.validityTrackingLock.RLock() + defer t.validityTrackingLock.RUnlock() + return t.parentTrie +} diff --git a/avalanchego/x/sync/client.go b/avalanchego/x/sync/client.go new file mode 100644 index 00000000..99416f0d --- /dev/null +++ b/avalanchego/x/sync/client.go @@ -0,0 +1,201 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/x/merkledb" +) + +const ( + failedRequestSleepInterval = 10 * time.Millisecond + + epsilon = 1e-6 // small amount to add to time to avoid division by 0 +) + +var ( + _ Client = &client{} + + errInvalidRangeProof = errors.New("failed to verify range proof") + errTooManyLeaves = errors.New("response contains more than requested leaves") +) + +// Client synchronously fetches data from the network to fulfill state sync requests. +// Repeatedly retries failed requests until the context is canceled. 
+type Client interface { + // GetRangeProof synchronously sends the given request, returning a parsed StateResponse or error + // Note: this verifies the response including the range proof. + GetRangeProof(ctx context.Context, request *RangeProofRequest) (*merkledb.RangeProof, error) + // GetChangeProof synchronously sends the given request, returning a parsed ChangesResponse or error + // [verificationDB] is the local db that has all key/values in it for the proof's startroot within the proof's key range + // Note: this verifies the response including the change proof. + GetChangeProof(ctx context.Context, request *ChangeProofRequest, verificationDB *merkledb.Database) (*merkledb.ChangeProof, error) +} + +type client struct { + networkClient NetworkClient + stateSyncNodes []ids.NodeID + stateSyncNodeIdx uint32 + stateSyncMinVersion *version.Application + log logging.Logger + metrics SyncMetrics +} + +type ClientConfig struct { + NetworkClient NetworkClient + StateSyncNodeIDs []ids.NodeID + StateSyncMinVersion *version.Application + Log logging.Logger + Metrics SyncMetrics +} + +func NewClient(config *ClientConfig) Client { + c := &client{ + networkClient: config.NetworkClient, + stateSyncNodes: config.StateSyncNodeIDs, + stateSyncMinVersion: config.StateSyncMinVersion, + log: config.Log, + metrics: config.Metrics, + } + return c +} + +// GetChangeProof synchronously retrieves the change proof given by [req]. +// Upon failure, retries until the context is expired. +// The returned change proof is verified. 
+func (c *client) GetChangeProof(ctx context.Context, req *ChangeProofRequest, db *merkledb.Database) (*merkledb.ChangeProof, error) { + parseFn := func(ctx context.Context, responseBytes []byte) (*merkledb.ChangeProof, error) { + changeProof := &merkledb.ChangeProof{} + if _, err := merkledb.Codec.DecodeChangeProof(responseBytes, changeProof); err != nil { + return nil, err + } + + // Ensure the response does not contain more than the requested number of leaves + // and the start and end roots match the requested roots. + if len(changeProof.KeyValues)+len(changeProof.DeletedKeys) > int(req.Limit) { + return nil, fmt.Errorf("%w: (%d) > %d)", errTooManyLeaves, len(changeProof.KeyValues), req.Limit) + } + + if err := changeProof.Verify(ctx, db, req.Start, req.End, req.EndingRoot); err != nil { + return nil, fmt.Errorf("%s due to %w", errInvalidRangeProof, err) + } + return changeProof, nil + } + return getAndParse(ctx, c, req, parseFn) +} + +// GetRangeProof synchronously retrieves the range proof given by [req]. +// Upon failure, retries until the context is expired. +// The returned range proof is verified. +func (c *client) GetRangeProof(ctx context.Context, req *RangeProofRequest) (*merkledb.RangeProof, error) { + parseFn := func(ctx context.Context, responseBytes []byte) (*merkledb.RangeProof, error) { + rangeProof := &merkledb.RangeProof{} + if _, err := merkledb.Codec.DecodeRangeProof(responseBytes, rangeProof); err != nil { + return nil, err + } + + // Ensure the response does not contain more than the maximum requested number of leaves. 
+ if len(rangeProof.KeyValues) > int(req.Limit) { + return nil, fmt.Errorf("%w: (%d) > %d)", errTooManyLeaves, len(rangeProof.KeyValues), req.Limit) + } + + if err := rangeProof.Verify( + ctx, + req.Start, + req.End, + req.Root, + ); err != nil { + return nil, fmt.Errorf("%s due to %w", errInvalidRangeProof, err) + } + return rangeProof, nil + } + return getAndParse(ctx, c, req, parseFn) +} + +// getAndParse uses [client] to send [request] to an arbitrary peer. If the peer responds, +// [parseFn] is called with the raw response. If [parseFn] returns an error or the request +// times out, this function will retry the request to a different peer until [ctx] expires. +// If [parseFn] returns a nil error, the result is returned from getAndParse. +func getAndParse[T any](ctx context.Context, client *client, request Request, parseFn func(context.Context, []byte) (*T, error)) (*T, error) { + // marshal the request into requestBytes + requestBytes, err := syncCodec.Marshal(Version, &request) + if err != nil { + return nil, err + } + + var ( + lastErr error + response *T + ) + // Loop until the context is cancelled or we get a valid response. + for attempt := 0; ; attempt++ { + // If the context has finished, return the context error early. + if err := ctx.Err(); err != nil { + if lastErr != nil { + return nil, fmt.Errorf("request failed after %d attempts with last error %w and ctx error %s", attempt, lastErr, err) + } + return nil, err + } + responseBytes, nodeID, err := client.get(ctx, requestBytes) + if err == nil { + if response, err = parseFn(ctx, responseBytes); err == nil { + return response, nil + } + } + + client.log.Debug("request failed, retrying", + zap.Stringer("nodeID", nodeID), + zap.Int("attempt", attempt), + zap.Stringer("request", request), + zap.Error(err)) + + if err != ctx.Err() { + // if [err] is being propagated from [ctx], avoid overwriting [lastErr]. 
+ lastErr = err + time.Sleep(failedRequestSleepInterval) + } + } +} + +// get sends [request] to an arbitrary peer and blocks until the node receives a response +// or [ctx] expires. Returns the raw response from the peer, the peer's NodeID, and an +// error if the request timed out. Thread safe. +func (c *client) get(ctx context.Context, requestBytes []byte) ([]byte, ids.NodeID, error) { + c.metrics.RequestMade() + var ( + response []byte + nodeID ids.NodeID + err error + startTime = time.Now() + ) + if len(c.stateSyncNodes) == 0 { + response, nodeID, err = c.networkClient.RequestAny(ctx, c.stateSyncMinVersion, requestBytes) + } else { + // get the next nodeID using the nodeIdx offset. If we're out of nodes, loop back to 0 + // we do this every attempt to ensure we get a different node each time if possible. + nodeIdx := atomic.AddUint32(&c.stateSyncNodeIdx, 1) + nodeID = c.stateSyncNodes[nodeIdx%uint32(len(c.stateSyncNodes))] + response, err = c.networkClient.Request(ctx, nodeID, requestBytes) + } + if err != nil { + c.metrics.RequestFailed() + c.networkClient.TrackBandwidth(nodeID, 0) + return response, nodeID, err + } + + bandwidth := float64(len(response)) / (time.Since(startTime).Seconds() + epsilon) + c.networkClient.TrackBandwidth(nodeID, bandwidth) + c.metrics.RequestSucceeded() + return response, nodeID, nil +} diff --git a/avalanchego/x/sync/client_test.go b/avalanchego/x/sync/client_test.go new file mode 100644 index 00000000..ac72e312 --- /dev/null +++ b/avalanchego/x/sync/client_test.go @@ -0,0 +1,286 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package sync + +import ( + "context" + "math/rand" + "sync" + "testing" + "time" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" + "github.com/ava-labs/avalanchego/x/merkledb" +) + +func sendRequest( + t *testing.T, + db *merkledb.Database, + request *RangeProofRequest, + maxAttempts uint32, + modifyResponse func(*merkledb.RangeProof), +) (*merkledb.RangeProof, error) { + t.Helper() + + var wg sync.WaitGroup + defer wg.Wait() // wait for goroutines spawned + + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + sender := common.NewMockSender(ctrl) + handler := NewNetworkServer(sender, db, logging.NoLog{}) + clientNodeID, serverNodeID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() + networkClient := NewNetworkClient(sender, clientNodeID, 1, logging.NoLog{}) + err := networkClient.Connected(context.Background(), serverNodeID, version.CurrentApp) + require.NoError(err) + client := NewClient(&ClientConfig{ + NetworkClient: networkClient, + Metrics: &mockMetrics{}, + Log: logging.NoLog{}, + }) + + ctx, cancel := context.WithCancel(context.Background()) + deadline := time.Now().Add(1 * time.Hour) // enough time to complete a request + defer cancel() // avoid leaking a goroutine + + expectedSendNodeIDs := set.NewSet[ids.NodeID](1) + expectedSendNodeIDs.Add(serverNodeID) + sender.EXPECT().SendAppRequest( + gomock.Any(), // ctx + expectedSendNodeIDs, // {serverNodeID} + gomock.Any(), // requestID + gomock.Any(), // requestBytes + ).DoAndReturn( + func(ctx context.Context, _ set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { + // limit the number of attempts to [maxAttempts] by cancelling the context if needed. 
+ if requestID >= maxAttempts { + cancel() + return ctx.Err() + } + + wg.Add(1) + go func() { + defer wg.Done() + err := handler.AppRequest(ctx, clientNodeID, requestID, deadline, requestBytes) + require.NoError(err) + }() // should be on a goroutine so the test can make progress. + return nil + }, + ).AnyTimes() + sender.EXPECT().SendAppResponse( + gomock.Any(), // ctx + clientNodeID, + gomock.Any(), // requestID + gomock.Any(), // responseBytes + ).DoAndReturn( + func(_ context.Context, _ ids.NodeID, requestID uint32, responseBytes []byte) error { + // deserialize the response so we can modify it if needed. + response := &merkledb.RangeProof{} + _, err := merkledb.Codec.DecodeRangeProof(responseBytes, response) + require.NoError(err) + + // modify if needed + if modifyResponse != nil { + modifyResponse(response) + } + + // reserialize the response and pass it to the client to complete the handling. + responseBytes, err = merkledb.Codec.EncodeRangeProof(merkledb.Version, response) + require.NoError(err) + err = networkClient.AppResponse(context.Background(), serverNodeID, requestID, responseBytes) + require.NoError(err) + return nil + }, + ).AnyTimes() + + return client.GetRangeProof(ctx, request) +} + +func TestGetRangeProof(t *testing.T) { + r := rand.New(rand.NewSource(1)) // #nosec G404 + + smallTrieKeyCount := defaultLeafRequestLimit + smallTrieDB, _, err := generateTrieWithMinKeyLen(t, r, smallTrieKeyCount, 1) + require.NoError(t, err) + smallTrieRoot, err := smallTrieDB.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + largeTrieKeyCount := 10_000 + largeTrieDB, largeTrieKeys, err := generateTrieWithMinKeyLen(t, r, largeTrieKeyCount, 1) + require.NoError(t, err) + largeTrieRoot, err := largeTrieDB.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + tests := map[string]struct { + db *merkledb.Database + request *RangeProofRequest + modifyResponse func(*merkledb.RangeProof) + expectedErr error + expectedResponseLen int + }{ + 
"full response for small (single request) trie": { + db: smallTrieDB, + request: &RangeProofRequest{ + Root: smallTrieRoot, + Limit: defaultLeafRequestLimit, + }, + expectedResponseLen: defaultLeafRequestLimit, + }, + "too many leaves in response": { + db: smallTrieDB, + request: &RangeProofRequest{ + Root: smallTrieRoot, + Limit: defaultLeafRequestLimit, + }, + modifyResponse: func(response *merkledb.RangeProof) { + response.KeyValues = append(response.KeyValues, merkledb.KeyValue{}) + }, + expectedErr: errTooManyLeaves, + }, + "partial response to request for entire trie (full leaf limit)": { + db: largeTrieDB, + request: &RangeProofRequest{ + Root: largeTrieRoot, + Limit: defaultLeafRequestLimit, + }, + expectedResponseLen: defaultLeafRequestLimit, + }, + "full response from near end of trie to end of trie (less than leaf limit)": { + db: largeTrieDB, + request: &RangeProofRequest{ + Root: largeTrieRoot, + Start: largeTrieKeys[len(largeTrieKeys)-30], // Set start 30 keys from the end of the large trie + Limit: defaultLeafRequestLimit, + }, + expectedResponseLen: 30, + }, + "full response for intermediate range of trie (less than leaf limit)": { + db: largeTrieDB, + request: &RangeProofRequest{ + Root: largeTrieRoot, + Start: largeTrieKeys[1000], // Set the range for 1000 leafs in an intermediate range of the trie + End: largeTrieKeys[1099], // (inclusive range) + Limit: defaultLeafRequestLimit, + }, + expectedResponseLen: 100, + }, + "removed first key in response": { + db: largeTrieDB, + request: &RangeProofRequest{ + Root: largeTrieRoot, + Limit: defaultLeafRequestLimit, + }, + modifyResponse: func(response *merkledb.RangeProof) { + response.KeyValues = response.KeyValues[1:] + }, + expectedErr: merkledb.ErrInvalidProof, + }, + "removed first key in response and replaced proof": { + db: largeTrieDB, + request: &RangeProofRequest{ + Root: largeTrieRoot, + Limit: defaultLeafRequestLimit, + }, + modifyResponse: func(response *merkledb.RangeProof) { + start := 
response.KeyValues[1].Key + proof, err := largeTrieDB.GetRangeProof(context.Background(), start, nil, defaultLeafRequestLimit) + if err != nil { + panic(err) + } + response.KeyValues = proof.KeyValues + response.StartProof = proof.StartProof + response.EndProof = proof.EndProof + }, + expectedErr: merkledb.ErrProofNodeNotForKey, + }, + "removed last key in response": { + db: largeTrieDB, + request: &RangeProofRequest{ + Root: largeTrieRoot, + Limit: defaultLeafRequestLimit, + }, + modifyResponse: func(response *merkledb.RangeProof) { + response.KeyValues = response.KeyValues[:len(response.KeyValues)-2] + }, + expectedErr: merkledb.ErrInvalidProof, + }, + "removed key from middle of response": { + db: largeTrieDB, + request: &RangeProofRequest{ + Root: largeTrieRoot, + Limit: defaultLeafRequestLimit, + }, + modifyResponse: func(response *merkledb.RangeProof) { + response.KeyValues = append(response.KeyValues[:100], response.KeyValues[101:]...) + }, + expectedErr: merkledb.ErrInvalidProof, + }, + "all proof keys removed from response": { + db: largeTrieDB, + request: &RangeProofRequest{ + Root: largeTrieRoot, + Limit: defaultLeafRequestLimit, + }, + modifyResponse: func(response *merkledb.RangeProof) { + response.StartProof = nil + response.EndProof = nil + }, + expectedErr: merkledb.ErrInvalidProof, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + require := require.New(t) + proof, err := sendRequest(t, test.db, test.request, 1, test.modifyResponse) + if test.expectedErr != nil { + require.ErrorIs(err, test.expectedErr) + return + } + require.NoError(err) + require.Len(proof.KeyValues, test.expectedResponseLen) + }) + } +} + +func TestRetries(t *testing.T) { + r := rand.New(rand.NewSource(1)) // #nosec G404 + require := require.New(t) + + keyCount := defaultLeafRequestLimit + db, _, err := generateTrieWithMinKeyLen(t, r, keyCount, 1) + require.NoError(err) + root, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + 
+ maxRequests := 4 + request := &RangeProofRequest{ + Root: root, + Limit: uint16(keyCount), + } + + responseCount := 0 + modifyResponse := func(response *merkledb.RangeProof) { + responseCount++ + if responseCount < maxRequests { + // corrupt the first [maxRequests] responses, to force the client to retry. + response.KeyValues = nil + } + } + proof, err := sendRequest(t, db, request, uint32(maxRequests), modifyResponse) + require.NoError(err) + require.Len(proof.KeyValues, keyCount) + + require.Equal(responseCount, maxRequests) // check the client performed retries. +} diff --git a/avalanchego/x/sync/codec.go b/avalanchego/x/sync/codec.go new file mode 100644 index 00000000..68aada26 --- /dev/null +++ b/avalanchego/x/sync/codec.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/utils/units" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +const ( + Version = 0 // TODO danlaine unexport this + maxMessageSize = 1 * units.MiB +) + +var syncCodec codec.Manager + +func init() { + syncCodec = codec.NewManager(maxMessageSize) + c := linearcodec.NewDefault() + + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&ChangeProofRequest{}), + c.RegisterType(&RangeProofRequest{}), + syncCodec.RegisterCodec(Version, c), + ) + + if errs.Errored() { + panic(errs.Err) + } +} diff --git a/avalanchego/x/sync/metrics.go b/avalanchego/x/sync/metrics.go new file mode 100644 index 00000000..d67d61f6 --- /dev/null +++ b/avalanchego/x/sync/metrics.go @@ -0,0 +1,96 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package sync + +import ( + "sync" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +var ( + _ SyncMetrics = &mockMetrics{} + _ SyncMetrics = &metrics{} +) + +type SyncMetrics interface { + RequestFailed() + RequestMade() + RequestSucceeded() +} + +type mockMetrics struct { + lock sync.Mutex + requestsFailed int + requestsMade int + requestsSucceeded int +} + +func (m *mockMetrics) RequestFailed() { + m.lock.Lock() + defer m.lock.Unlock() + + m.requestsFailed++ +} + +func (m *mockMetrics) RequestMade() { + m.lock.Lock() + defer m.lock.Unlock() + + m.requestsMade++ +} + +func (m *mockMetrics) RequestSucceeded() { + m.lock.Lock() + defer m.lock.Unlock() + + m.requestsSucceeded++ +} + +type metrics struct { + requestsFailed prometheus.Counter + requestsMade prometheus.Counter + requestsSucceeded prometheus.Counter +} + +func NewMetrics(namespace string, reg prometheus.Registerer) (SyncMetrics, error) { + m := metrics{ + requestsFailed: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "requests_failed", + Help: "cumulative amount of failed proof requests", + }), + requestsMade: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "requests_made", + Help: "cumulative amount of proof requests made", + }), + requestsSucceeded: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Name: "requests_succeeded", + Help: "cumulative amount of proof requests that were successful", + }), + } + errs := wrappers.Errs{} + errs.Add( + reg.Register(m.requestsFailed), + reg.Register(m.requestsMade), + reg.Register(m.requestsSucceeded), + ) + return &m, errs.Err +} + +func (m *metrics) RequestFailed() { + m.requestsFailed.Inc() +} + +func (m *metrics) RequestMade() { + m.requestsMade.Inc() +} + +func (m *metrics) RequestSucceeded() { + m.requestsSucceeded.Inc() +} diff --git a/avalanchego/x/sync/mock_client.go b/avalanchego/x/sync/mock_client.go new 
file mode 100644 index 00000000..08f02c31 --- /dev/null +++ b/avalanchego/x/sync/mock_client.go @@ -0,0 +1,69 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/x/sync (interfaces: Client) + +// Package sync is a generated GoMock package. +package sync + +import ( + context "context" + reflect "reflect" + + merkledb "github.com/ava-labs/avalanchego/x/merkledb" + gomock "github.com/golang/mock/gomock" +) + +// MockClient is a mock of Client interface. +type MockClient struct { + ctrl *gomock.Controller + recorder *MockClientMockRecorder +} + +// MockClientMockRecorder is the mock recorder for MockClient. +type MockClientMockRecorder struct { + mock *MockClient +} + +// NewMockClient creates a new mock instance. +func NewMockClient(ctrl *gomock.Controller) *MockClient { + mock := &MockClient{ctrl: ctrl} + mock.recorder = &MockClientMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClient) EXPECT() *MockClientMockRecorder { + return m.recorder +} + +// GetChangeProof mocks base method. +func (m *MockClient) GetChangeProof(arg0 context.Context, arg1 *ChangeProofRequest, arg2 *merkledb.Database) (*merkledb.ChangeProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChangeProof", arg0, arg1, arg2) + ret0, _ := ret[0].(*merkledb.ChangeProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChangeProof indicates an expected call of GetChangeProof. +func (mr *MockClientMockRecorder) GetChangeProof(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChangeProof", reflect.TypeOf((*MockClient)(nil).GetChangeProof), arg0, arg1, arg2) +} + +// GetRangeProof mocks base method. 
+func (m *MockClient) GetRangeProof(arg0 context.Context, arg1 *RangeProofRequest) (*merkledb.RangeProof, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetRangeProof", arg0, arg1) + ret0, _ := ret[0].(*merkledb.RangeProof) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetRangeProof indicates an expected call of GetRangeProof. +func (mr *MockClientMockRecorder) GetRangeProof(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRangeProof", reflect.TypeOf((*MockClient)(nil).GetRangeProof), arg0, arg1) +} diff --git a/avalanchego/x/sync/network_client.go b/avalanchego/x/sync/network_client.go new file mode 100644 index 00000000..7496d131 --- /dev/null +++ b/avalanchego/x/sync/network_client.go @@ -0,0 +1,294 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "go.uber.org/zap" + + "golang.org/x/sync/semaphore" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +// Minimum amount of time to handle a request +const minRequestHandlingDuration = 100 * time.Millisecond + +var ( + _ NetworkClient = &networkClient{} + + ErrAcquiringSemaphore = errors.New("error acquiring semaphore") + ErrRequestFailed = errors.New("request failed") +) + +// NetworkClient defines ability to send request / response through the Network +type NetworkClient interface { + // RequestAny synchronously sends request to an arbitrary peer with a + // node version greater than or equal to minVersion. + // Returns response bytes, the ID of the chosen peer, and ErrRequestFailed if + // the request should be retried. 
+ RequestAny(ctx context.Context, minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) + + // Request synchronously sends request to the selected nodeID. + // Returns response bytes, and ErrRequestFailed if the request should be retried. + Request(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) + + // TrackBandwidth should be called for each valid response with the bandwidth + // (length of response divided by request time), and with 0 if the response is invalid. + TrackBandwidth(nodeID ids.NodeID, bandwidth float64) + + // The following declarations allow this interface to be embedded in the VM + // to handle incoming responses from peers. + AppResponse(context.Context, ids.NodeID, uint32, []byte) error + AppRequestFailed(context.Context, ids.NodeID, uint32) error + Connected(context.Context, ids.NodeID, *version.Application) error + Disconnected(context.Context, ids.NodeID) error +} + +type networkClient struct { + lock sync.Mutex // lock for mutating state of this Network struct + myNodeID ids.NodeID // NodeID of this node + requestID uint32 // requestID counter used to track outbound requests + outstandingRequestHandlers map[uint32]ResponseHandler // requestID => handler for the response/failure + activeRequests *semaphore.Weighted // controls maximum number of active outbound requests + peers *peerTracker // tracking of peers & bandwidth + appSender common.AppSender // AppSender for sending messages + log logging.Logger +} + +func NewNetworkClient( + appSender common.AppSender, + myNodeID ids.NodeID, + maxActiveRequests int64, + log logging.Logger, +) NetworkClient { + return &networkClient{ + appSender: appSender, + myNodeID: myNodeID, + outstandingRequestHandlers: make(map[uint32]ResponseHandler), + activeRequests: semaphore.NewWeighted(maxActiveRequests), + peers: newPeerTracker(log), + log: log, + } +} + +// AppResponse is called when this node receives a response from a peer. 
+// As the engine considers errors returned from this function as fatal, +// this function always returns nil. +func (c *networkClient) AppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + c.lock.Lock() + defer c.lock.Unlock() + + c.log.Info( + "received AppResponse from peer", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Int("responseLen", len(response)), + ) + + handler, exists := c.getRequestHandler(requestID) + if !exists { + // Should never happen since the engine should be managing outstanding requests + c.log.Error( + "received response to unknown request", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Int("responseLen", len(response)), + ) + return nil + } + handler.OnResponse(response) + return nil +} + +// AppRequestFailed can be called by the avalanchego -> VM in following cases: +// - node is benched +// - failed to send message to [nodeID] due to a network issue +// - timeout +// As the engine considers errors returned from this function as fatal, +// this function always returns nil. +func (c *networkClient) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { + c.lock.Lock() + defer c.lock.Unlock() + + c.log.Info( + "received AppRequestFailed from peer", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + + handler, exists := c.getRequestHandler(requestID) + if !exists { + // Should never happen since the engine should be managing outstanding requests + c.log.Error( + "received request failed to unknown request", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + ) + return nil + } + handler.OnFailure() + return nil +} + +// Returns the handler for [requestID] and marks the request as fulfilled. +// This is called by either [AppResponse] or [AppRequestFailed]. +// Assumes [c.lock] is held. 
+func (c *networkClient) getRequestHandler(requestID uint32) (ResponseHandler, bool) { + handler, exists := c.outstandingRequestHandlers[requestID] + if !exists { + return nil, false + } + // mark message as processed, release activeRequests slot + delete(c.outstandingRequestHandlers, requestID) + c.activeRequests.Release(1) + return handler, true +} + +// RequestAny synchronously sends [request] to a randomly chosen peer with a +// node version greater than or equal to [minVersion]. If [minVersion] is nil, +// the request is sent to any peer regardless of their version. +// If the limit on active requests is reached, this function blocks until +// a slot becomes available. +// Returns the node's response and the ID of the node. +func (c *networkClient) RequestAny( + ctx context.Context, + minVersion *version.Application, + request []byte, +) ([]byte, ids.NodeID, error) { + // Take a slot from total [activeRequests] and block until a slot becomes available. + if err := c.activeRequests.Acquire(ctx, 1); err != nil { + return nil, ids.EmptyNodeID, ErrAcquiringSemaphore + } + + c.lock.Lock() + if nodeID, ok := c.peers.GetAnyPeer(minVersion); ok { + response, err := c.request(ctx, nodeID, request) + return response, nodeID, err + } + + c.lock.Unlock() + c.activeRequests.Release(1) + return nil, ids.EmptyNodeID, fmt.Errorf("no peers found matching version %s out of %d peers", minVersion, c.peers.Size()) +} + +// Sends [request] to [nodeID] and registers a handler for the response/failure. +// If the limit on active requests is reached, this function blocks until +// a slot becomes available. +func (c *networkClient) Request(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) { + // TODO danlaine: is it possible for this condition to occur? 
+ if nodeID == ids.EmptyNodeID { + return nil, fmt.Errorf("cannot send request to empty nodeID, nodeID=%s, requestLen=%d", nodeID, len(request)) + } + + // Take a slot from total [activeRequests] and block until a slot becomes available. + if err := c.activeRequests.Acquire(ctx, 1); err != nil { + return nil, ErrAcquiringSemaphore + } + + c.lock.Lock() + return c.request(ctx, nodeID, request) +} + +// Sends [request] to [nodeID] and adds the response handler to [c.outstandingRequestHandlers] +// so that it can be invoked upon response/failure. +// Blocks until a response is received or the request fails. +// Assumes [nodeID] is never [c.myNodeID] since we guarantee [c.myNodeID] will not be added to [c.peers]. +// Releases active requests semaphore if there was an error in sending the request. +// Returns an error if [appSender] is unable to make the request. +// Assumes [c.lock] is held and unlocks [c.lock] before returning. +func (c *networkClient) request(ctx context.Context, nodeID ids.NodeID, request []byte) ([]byte, error) { + c.log.Debug("sending request to peer", zap.Stringer("nodeID", nodeID), zap.Int("requestLen", len(request))) + c.peers.TrackPeer(nodeID) + + // generate requestID + requestID := c.requestID + c.requestID++ + + handler := newResponseHandler() + c.outstandingRequestHandlers[requestID] = handler + + nodeIDs := set.NewSet[ids.NodeID](1) + nodeIDs.Add(nodeID) + + // Send an app request to the peer. + if err := c.appSender.SendAppRequest(ctx, nodeIDs, requestID, request); err != nil { + // On failure, release the activeRequests slot and mark the message as processed. 
+ c.activeRequests.Release(1) + delete(c.outstandingRequestHandlers, requestID) + c.lock.Unlock() + return nil, err + } + + c.lock.Unlock() // unlock so response can be received + + var response []byte + select { + case <-ctx.Done(): + return nil, ctx.Err() + case response = <-handler.responseChan: + } + if handler.failed { + return nil, ErrRequestFailed + } + + c.log.Debug("received response from peer", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Int("responseLen", len(response))) + return response, nil +} + +// Connected adds the given nodeID to the peer list so that it can receive messages. +// If [nodeID] is [c.myNodeID], this is a no-op. +func (c *networkClient) Connected(_ context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { + c.lock.Lock() + defer c.lock.Unlock() + + if nodeID == c.myNodeID { + c.log.Debug("skipping registering self as peer") + return nil + } + + c.log.Debug("adding new peer", zap.Stringer("nodeID", nodeID)) + c.peers.Connected(nodeID, nodeVersion) + return nil +} + +// Disconnected removes given [nodeID] from the peer list. +// TODO danlaine: should this be a no-op if [nodeID] is [c.myNodeID]? +func (c *networkClient) Disconnected(_ context.Context, nodeID ids.NodeID) error { + c.lock.Lock() + defer c.lock.Unlock() + + c.log.Debug("disconnecting peer", zap.Stringer("nodeID", nodeID)) + c.peers.Disconnected(nodeID) + return nil +} + +// Shutdown disconnects all peers +func (c *networkClient) Shutdown() { + c.lock.Lock() + defer c.lock.Unlock() + + // reset peers + // TODO danlaine: should we call [Disconnected] on each peer? 
+ c.peers = newPeerTracker(c.log) +} + +func (c *networkClient) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { + c.lock.Lock() + defer c.lock.Unlock() + + c.peers.TrackBandwidth(nodeID, bandwidth) +} diff --git a/avalanchego/x/sync/network_server.go b/avalanchego/x/sync/network_server.go new file mode 100644 index 00000000..9109a3bb --- /dev/null +++ b/avalanchego/x/sync/network_server.go @@ -0,0 +1,212 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "bytes" + "context" + "errors" + "time" + + "go.uber.org/zap" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/x/merkledb" +) + +// Maximum number of key-value pairs to return in a proof. +// This overrides any other Limit specified in a RangeProofRequest +// or ChangeProofRequest if the given Limit is greater. +const maxKeyValuesLimit = 1024 + +var _ Handler = (*NetworkServer)(nil) + +type NetworkServer struct { + appSender common.AppSender // Used to respond to peer requests via AppResponse. + db *merkledb.Database + log logging.Logger +} + +func NewNetworkServer(appSender common.AppSender, db *merkledb.Database, log logging.Logger) *NetworkServer { + return &NetworkServer{ + appSender: appSender, + db: db, + log: log, + } +} + +// AppRequest is called by avalanchego -> VM when there is an incoming AppRequest from a peer. +// Never returns errors as they are considered fatal. +// Sends a response back to the sender if length of response returned by the handler > 0. 
+func (s *NetworkServer) AppRequest( + _ context.Context, + nodeID ids.NodeID, + requestID uint32, + deadline time.Time, + request []byte, +) error { + var req Request + if _, err := syncCodec.Unmarshal(request, &req); err != nil { + s.log.Debug( + "failed to unmarshal app request", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Int("requestLen", len(request)), + zap.Error(err), + ) + return nil + } + s.log.Debug( + "processing AppRequest from node", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("request", req), + ) + + // bufferedDeadline is half the time till actual deadline so that the message has a + // reasonable chance of completing its processing and sending the response to the peer. + timeTillDeadline := time.Until(deadline) + bufferedDeadline := time.Now().Add(timeTillDeadline / 2) + + // check if we have enough time to handle this request. + // TODO danlaine: Do we need this? Why? + if time.Until(bufferedDeadline) < minRequestHandlingDuration { + // Drop the request if we already missed the deadline to respond. + s.log.Info( + "deadline to process AppRequest has expired, skipping", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("req", req), + ) + return nil + } + + // TODO danlaine: Why don't we use the passed in context instead of [context.Background()]? + handleCtx, cancel := context.WithDeadline(context.Background(), bufferedDeadline) + defer cancel() + + err := req.Handle(handleCtx, nodeID, requestID, s) + if err != nil && !isTimeout(err) { + // log unexpected errors instead of returning them, since they are fatal. + s.log.Warn( + "unexpected error handling AppRequest", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("req", req), + zap.Error(err), + ) + } + return nil +} + +// isTimeout returns true if err is a timeout from a context cancellation +// or a context cancellation over grpc. 
+func isTimeout(err error) bool { + // handle grpc wrapped DeadlineExceeded + if e, ok := status.FromError(err); ok { + if e.Code() == codes.DeadlineExceeded { + return true + } + } + // otherwise, check for context.DeadlineExceeded directly + return errors.Is(err, context.DeadlineExceeded) +} + +// Generates a change proof and sends it to [nodeID]. +func (s *NetworkServer) HandleChangeProofRequest( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + req *ChangeProofRequest, +) error { + if req.Limit == 0 || req.EndingRoot == ids.Empty || (len(req.End) > 0 && bytes.Compare(req.Start, req.End) > 0) { + s.log.Debug( + "dropping invalid change proof request", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("req", req), + ) + return nil // dropping request + } + + // override limit if it is greater than maxKeyValuesLimit + limit := req.Limit + if limit > maxKeyValuesLimit { + limit = maxKeyValuesLimit + } + + changeProof, err := s.db.GetChangeProof(ctx, req.StartingRoot, req.EndingRoot, req.Start, req.End, int(limit)) + if err != nil { + // handle expected errors so clients cannot cause servers to spam warning logs. + if errors.Is(err, merkledb.ErrRootIDNotPresent) || errors.Is(err, merkledb.ErrStartRootNotFound) { + s.log.Debug( + "dropping invalid change proof request", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("req", req), + zap.Error(err), + ) + return nil // dropping request + } + return err + } + + proofBytes, err := merkledb.Codec.EncodeChangeProof(Version, changeProof) + if err != nil { + return err + } + return s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes) +} + +// Generates a range proof and sends it to [nodeID]. +// TODO danlaine how should we handle context cancellation? 
+func (s *NetworkServer) HandleRangeProofRequest( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + req *RangeProofRequest, +) error { + if req.Limit == 0 || req.Root == ids.Empty || (len(req.End) > 0 && bytes.Compare(req.Start, req.End) > 0) { + s.log.Debug( + "dropping invalid range proof request", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("req", req), + ) + return nil // dropping request + } + + // override limit if it is greater than maxKeyValuesLimit + limit := req.Limit + if limit > maxKeyValuesLimit { + limit = maxKeyValuesLimit + } + + rangeProof, err := s.db.GetRangeProofAtRoot(ctx, req.Root, req.Start, req.End, int(limit)) + if err != nil { + // handle expected errors so clients cannot cause servers to spam warning logs. + if errors.Is(err, merkledb.ErrRootIDNotPresent) { + s.log.Debug( + "dropping invalid range proof request", + zap.Stringer("nodeID", nodeID), + zap.Uint32("requestID", requestID), + zap.Stringer("req", req), + zap.Error(err), + ) + return nil // dropping request + } + return err + } + + proofBytes, err := merkledb.Codec.EncodeRangeProof(Version, rangeProof) + if err != nil { + return err + } + return s.appSender.SendAppResponse(ctx, nodeID, requestID, proofBytes) +} diff --git a/avalanchego/x/sync/peer_tracker.go b/avalanchego/x/sync/peer_tracker.go new file mode 100644 index 00000000..d045ace8 --- /dev/null +++ b/avalanchego/x/sync/peer_tracker.go @@ -0,0 +1,230 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package sync + +import ( + "math/rand" + "time" + + stdmath "math" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +const ( + bandwidthHalflife = 5 * time.Minute + + // controls how eagerly we connect to new peers vs. using + // peers with known good response bandwidth. + desiredMinResponsivePeers = 20 + newPeerConnectFactor = 0.1 + + // The probability that, when we select a peer, we select randomly rather + // than based on their performance. + randomPeerProbability = 0.2 +) + +// information we track on a given peer +type peerInfo struct { + version *version.Application + bandwidth math.Averager +} + +// Tracks the bandwidth of responses coming from peers, +// preferring to contact peers with known good bandwidth, connecting +// to new peers with an exponentially decaying probability. +// Note: not thread safe. Caller must handle synchronization. +type peerTracker struct { + // All peers we are connected to + peers map[ids.NodeID]*peerInfo + // Peers that we're connected to that we've sent a request to + // since we most recently connected to them. + trackedPeers set.Set[ids.NodeID] + // Peers that we're connected to that responded to the last request they were sent. + responsivePeers set.Set[ids.NodeID] + // Max heap that contains the average bandwidth of peers. 
+ bandwidthHeap math.AveragerHeap + averageBandwidth math.Averager + log logging.Logger + // numTrackedPeers prometheus.Gauge + // numResponsivePeers prometheus.Gauge + // averageBandwidthMetric prometheus.Gauge +} + +func newPeerTracker(log logging.Logger) *peerTracker { + // TODO: initialize metrics + return &peerTracker{ + peers: make(map[ids.NodeID]*peerInfo), + trackedPeers: make(set.Set[ids.NodeID]), + responsivePeers: make(set.Set[ids.NodeID]), + bandwidthHeap: math.NewMaxAveragerHeap(), + averageBandwidth: math.NewAverager(0, bandwidthHalflife, time.Now()), + log: log, + // numTrackedPeers: metrics.GetOrRegisterGauge("net_tracked_peers", nil), + // numResponsivePeers: metrics.GetOrRegisterGauge("net_responsive_peers", nil), + // averageBandwidthMetric: metrics.GetOrRegisterGaugeFloat64("net_average_bandwidth", nil), + } +} + +// Returns true if we're not connected to enough peers. +// Otherwise returns true probabilistically based on the number of tracked peers. +func (p *peerTracker) shouldTrackNewPeer() bool { + numResponsivePeers := p.responsivePeers.Len() + if numResponsivePeers < desiredMinResponsivePeers { + return true + } + if len(p.trackedPeers) >= len(p.peers) { + // already tracking all the peers + return false + } + // TODO danlaine: we should consider tuning this probability function. + // With [newPeerConnectFactor] as 0.1 the probabilities are: + // + // numResponsivePeers | probability + // 100 | 4.5399929762484854e-05 + // 200 | 2.061153622438558e-09 + // 500 | 1.9287498479639178e-22 + // 1000 | 3.720075976020836e-44 + // 2000 | 1.3838965267367376e-87 + // 5000 | 7.124576406741286e-218 + // + // In other words, the probability drops off extremely quickly. + newPeerProbability := stdmath.Exp(-float64(numResponsivePeers) * newPeerConnectFactor) + return rand.Float64() < newPeerProbability // #nosec G404 +} + +// Returns a peer that we're connected to. 
+// If we should track more peers, returns a random peer with version >= [minVersion], if any exist. +// Otherwise, with probability [randomPeerProbability] returns a random peer from [p.responsivePeers]. +// With probability [1-randomPeerProbability] returns the peer in [p.bandwidthHeap] with the highest bandwidth. +func (p *peerTracker) GetAnyPeer(minVersion *version.Application) (ids.NodeID, bool) { + if p.shouldTrackNewPeer() { + for nodeID := range p.peers { + // if minVersion is specified and peer's version is less, skip + if minVersion != nil && p.peers[nodeID].version.Compare(minVersion) < 0 { + continue + } + // skip peers already tracked + if p.trackedPeers.Contains(nodeID) { + continue + } + p.log.Debug( + "tracking peer", + zap.Int("trackedPeers", len(p.trackedPeers)), + zap.Stringer("nodeID", nodeID), + ) + return nodeID, true + } + } + + var ( + nodeID ids.NodeID + ok bool + ) + useRand := rand.Float64() < randomPeerProbability // #nosec G404 + if useRand { + nodeID, ok = p.responsivePeers.Peek() + } else { + nodeID, _, ok = p.bandwidthHeap.Pop() + } + if !ok { + // if no nodes found in the bandwidth heap, return a tracked node at random + return p.trackedPeers.Peek() + } + p.log.Debug( + "peer tracking: popping peer", + zap.Stringer("nodeID", nodeID), + zap.Bool("random", useRand), + ) + return nodeID, true +} + +// Record that we sent a request to [nodeID]. +func (p *peerTracker) TrackPeer(nodeID ids.NodeID) { + p.trackedPeers.Add(nodeID) + // p.numTrackedPeers.Set(float64(p.trackedPeers.Len())) +} + +// Record that we observed that [nodeID]'s bandwidth is [bandwidth]. +// Adds the peer's bandwidth averager to the bandwidth heap. 
+func (p *peerTracker) TrackBandwidth(nodeID ids.NodeID, bandwidth float64) { + peer := p.peers[nodeID] + if peer == nil { + // we're not connected to this peer, nothing to do here + p.log.Debug("tracking bandwidth for untracked peer", zap.Stringer("nodeID", nodeID)) + return + } + + now := time.Now() + if peer.bandwidth == nil { + peer.bandwidth = math.NewAverager(bandwidth, bandwidthHalflife, now) + } else { + peer.bandwidth.Observe(bandwidth, now) + } + p.bandwidthHeap.Add(nodeID, peer.bandwidth) + + if bandwidth == 0 { + p.responsivePeers.Remove(nodeID) + } else { + p.responsivePeers.Add(nodeID) + // TODO danlaine: shouldn't we add the observation of 0 + // to the average bandwidth in the if statement? + p.averageBandwidth.Observe(bandwidth, now) + // p.averageBandwidthMetric.Set(p.averageBandwidth.Read()) + } + // p.numResponsivePeers.Set(float64(p.responsivePeers.Len())) +} + +// Connected should be called when [nodeID] connects to this node +func (p *peerTracker) Connected(nodeID ids.NodeID, nodeVersion *version.Application) { + peer := p.peers[nodeID] + if peer == nil { + p.peers[nodeID] = &peerInfo{ + version: nodeVersion, + } + return + } + + // Peer is already connected, update the version if it has changed. + // Log a warning message since the consensus engine should never call Connected on a peer + // that we have already marked as Connected. 
+ if nodeVersion.Compare(peer.version) != 0 { + p.peers[nodeID] = &peerInfo{ + version: nodeVersion, + bandwidth: peer.bandwidth, + } + p.log.Warn( + "updating node version of already connected peer", + zap.Stringer("nodeID", nodeID), + zap.Stringer("storedVersion", peer.version), + zap.Stringer("nodeVersion", nodeVersion), + ) + } else { + p.log.Warn( + "ignoring peer connected event for already connected peer with identical version", + zap.Stringer("nodeID", nodeID), + ) + } +} + +// Disconnected should be called when [nodeID] disconnects from this node +func (p *peerTracker) Disconnected(nodeID ids.NodeID) { + p.bandwidthHeap.Remove(nodeID) + p.trackedPeers.Remove(nodeID) + // p.numTrackedPeers.Set(float64(p.trackedPeers.Len())) + p.responsivePeers.Remove(nodeID) + // p.numResponsivePeers.Set(float64(p.responsivePeers.Len())) + delete(p.peers, nodeID) +} + +// Returns the number of peers the node is connected to. +func (p *peerTracker) Size() int { + return len(p.peers) +} diff --git a/avalanchego/x/sync/request.go b/avalanchego/x/sync/request.go new file mode 100644 index 00000000..ca516888 --- /dev/null +++ b/avalanchego/x/sync/request.go @@ -0,0 +1,96 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "context" + "encoding/hex" + "fmt" + + "github.com/ava-labs/avalanchego/ids" +) + +var ( + _ Request = (*RangeProofRequest)(nil) + _ Request = (*ChangeProofRequest)(nil) +) + +// A request to this node for a proof. +type Request interface { + fmt.Stringer + Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, h Handler) error +} + +type rangeProofHandler interface { + // Generates a range proof and sends it to [nodeID]. + // TODO danlaine how should we handle context cancellation? 
+ HandleRangeProofRequest( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + request *RangeProofRequest, + ) error +} + +type changeProofHandler interface { + // Generates a change proof and sends it to [nodeID]. + // TODO danlaine how should we handle context cancellation? + HandleChangeProofRequest( + ctx context.Context, + nodeID ids.NodeID, + requestID uint32, + request *ChangeProofRequest, + ) error +} + +type Handler interface { + rangeProofHandler + changeProofHandler +} + +type RangeProofRequest struct { + Root ids.ID `serialize:"true"` + Start []byte `serialize:"true"` + End []byte `serialize:"true"` + Limit uint16 `serialize:"true"` +} + +func (r *RangeProofRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, h Handler) error { + return h.HandleRangeProofRequest(ctx, nodeID, requestID, r) +} + +func (r RangeProofRequest) String() string { + return fmt.Sprintf( + "RangeProofRequest(Root=%s, Start=%s, End=%s, Limit=%d)", + r.Root, + hex.EncodeToString(r.Start), + hex.EncodeToString(r.End), + r.Limit, + ) +} + +// ChangeProofRequest is a request to receive trie leaves at specified Root within Start and End byte range +// Limit outlines maximum number of leaves to returns starting at Start +type ChangeProofRequest struct { + StartingRoot ids.ID `serialize:"true"` + EndingRoot ids.ID `serialize:"true"` + Start []byte `serialize:"true"` + End []byte `serialize:"true"` + Limit uint16 `serialize:"true"` +} + +func (r *ChangeProofRequest) Handle(ctx context.Context, nodeID ids.NodeID, requestID uint32, h Handler) error { + return h.HandleChangeProofRequest(ctx, nodeID, requestID, r) +} + +func (r ChangeProofRequest) String() string { + return fmt.Sprintf( + "ChangeProofRequest(StartRoot=%s, EndRoot=%s, Start=%s, End=%s, Limit=%d)", + r.StartingRoot, + r.EndingRoot, + hex.EncodeToString(r.Start), + hex.EncodeToString(r.End), + r.Limit, + ) +} diff --git a/avalanchego/x/sync/response_handler.go 
b/avalanchego/x/sync/response_handler.go new file mode 100644 index 00000000..c35c0a01 --- /dev/null +++ b/avalanchego/x/sync/response_handler.go @@ -0,0 +1,46 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +// TODO danlaine: We create a new response handler for every request. +// Look into making a struct to handle requests/responses that uses a sync pool +// to avoid allocations. + +var _ ResponseHandler = &responseHandler{} + +// Handles responses/failure notifications for a sent request. +// Exactly one of OnResponse or OnFailure is eventually called. +type ResponseHandler interface { + // Called when [response] is received. + OnResponse(response []byte) + // Called when the request failed or timed out. + OnFailure() +} + +func newResponseHandler() *responseHandler { + return &responseHandler{responseChan: make(chan []byte)} +} + +// Implements [ResponseHandler]. +// Used to wait for a response after making a synchronous request. +// responseChan may contain response bytes if the original request has not failed. +// responseChan is closed in either fail or success scenario. +type responseHandler struct { + // If [OnResponse] is called, the response bytes are sent on this channel. + responseChan chan []byte + // Set to true in [OnFailure]. + failed bool +} + +// OnResponse passes the response bytes to the responseChan and closes the channel +func (h *responseHandler) OnResponse(response []byte) { + h.responseChan <- response + close(h.responseChan) +} + +// OnFailure sets the failed flag to true and closes the channel +func (h *responseHandler) OnFailure() { + h.failed = true + close(h.responseChan) +} diff --git a/avalanchego/x/sync/sync_test.go b/avalanchego/x/sync/sync_test.go new file mode 100644 index 00000000..b130416c --- /dev/null +++ b/avalanchego/x/sync/sync_test.go @@ -0,0 +1,804 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package sync + +import ( + "bytes" + "context" + "math/rand" + "testing" + "time" + + "github.com/golang/mock/gomock" + + "github.com/stretchr/testify/require" + + "golang.org/x/exp/slices" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/trace" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/x/merkledb" +) + +var _ Client = &mockClient{} + +func newNoopTracer() trace.Tracer { + tracer, _ := trace.New(trace.Config{Enabled: false}) + return tracer +} + +type mockClient struct { + db *merkledb.Database +} + +func (client *mockClient) GetChangeProof(ctx context.Context, request *ChangeProofRequest, _ *merkledb.Database) (*merkledb.ChangeProof, error) { + return client.db.GetChangeProof(ctx, request.StartingRoot, request.EndingRoot, request.Start, request.End, int(request.Limit)) +} + +func (client *mockClient) GetRangeProof(ctx context.Context, request *RangeProofRequest) (*merkledb.RangeProof, error) { + return client.db.GetRangeProofAtRoot(ctx, request.Root, request.Start, request.End, int(request.Limit)) +} + +func Test_Creation(t *testing.T) { + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(t, err) + + syncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: &mockClient{}, + TargetRoot: ids.Empty, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NotNil(t, syncer) + require.NoError(t, err) +} + +func Test_Completion(t *testing.T) { + for i := 0; i < 10; i++ { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + emptyDB, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(t, err) + emptyRoot, err := 
emptyDB.GetMerkleRoot(context.Background()) + require.NoError(t, err) + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(t, err) + syncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: &mockClient{db: emptyDB}, + TargetRoot: emptyRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NoError(t, err) + require.NotNil(t, syncer) + err = syncer.StartSyncing(context.Background()) + require.NoError(t, err) + err = syncer.Wait(context.Background()) + require.NoError(t, err) + syncer.workLock.Lock() + require.Equal(t, 0, syncer.unprocessedWork.Len()) + require.Equal(t, 1, syncer.processedWork.Len()) + syncer.workLock.Unlock() + } +} + +func Test_Midpoint(t *testing.T) { + mid := midPoint([]byte{1, 255}, []byte{2, 1}) + require.Equal(t, []byte{2, 0}, mid) + + mid = midPoint(nil, []byte{255, 255, 0}) + require.Equal(t, []byte{127, 255, 128}, mid) + + mid = midPoint([]byte{255, 255, 255}, []byte{255, 255}) + require.Equal(t, []byte{255, 255, 127, 128}, mid) + + mid = midPoint(nil, []byte{255}) + require.Equal(t, []byte{127, 127}, mid) + + mid = midPoint([]byte{1, 255}, []byte{255, 1}) + require.Equal(t, []byte{128, 128}, mid) + + mid = midPoint([]byte{140, 255}, []byte{141, 0}) + require.Equal(t, []byte{140, 255, 127}, mid) + + mid = midPoint([]byte{126, 255}, []byte{127}) + require.Equal(t, []byte{126, 255, 127}, mid) + + mid = midPoint(nil, nil) + require.Equal(t, []byte{127}, mid) + + low := midPoint(nil, mid) + require.Equal(t, []byte{63, 127}, low) + + high := midPoint(mid, nil) + require.Equal(t, []byte{191}, high) + + mid = midPoint([]byte{255, 255}, nil) + require.Equal(t, []byte{255, 255, 127, 127}, mid) + + mid = midPoint([]byte{255}, nil) + require.Equal(t, []byte{255, 127, 127}, mid) + + for i := 0; i < 5000; i++ { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + + start := 
make([]byte, r.Intn(99)+1) + _, err := r.Read(start) + require.NoError(t, err) + + end := make([]byte, r.Intn(99)+1) + _, err = r.Read(end) + require.NoError(t, err) + + for bytes.Equal(start, end) { + _, err = r.Read(end) + require.NoError(t, err) + } + + if bytes.Compare(start, end) == 1 { + start, end = end, start + } + + mid = midPoint(start, end) + require.Equal(t, -1, bytes.Compare(start, mid)) + require.Equal(t, -1, bytes.Compare(mid, end)) + } +} + +func Test_Sync_FindNextKey_InSync(t *testing.T) { + for i := 0; i < 3; i++ { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + dbToSync, err := generateTrie(t, r, 1000) + require.NoError(t, err) + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(t, err) + syncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: &mockClient{db: dbToSync}, + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NoError(t, err) + require.NotNil(t, syncer) + + err = syncer.StartSyncing(context.Background()) + require.NoError(t, err) + err = syncer.Wait(context.Background()) + require.NoError(t, err) + + proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 500) + require.NoError(t, err) + + // the two dbs should be in sync, so next key should be nil + lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key + nextKey, err := syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) + require.NoError(t, err) + require.Nil(t, nextKey) + + // add an extra value to sync db past the last key returned + newKey := midPoint(lastKey, nil) + err = db.Put(newKey, []byte{1}) + require.NoError(t, err) + + // create a range endpoint that is before the newly added key, but after the last key + endPointBeforeNewKey := make([]byte, 0, 2) + for 
i := 0; i < len(newKey); i++ { + endPointBeforeNewKey = append(endPointBeforeNewKey, newKey[i]) + + // we need the new key to be after the last key + // don't subtract anything from the current byte if newkey and lastkey are equal + if lastKey[i] == newKey[i] { + continue + } + + // if the first nibble is > 0, subtract "1" from it + if endPointBeforeNewKey[i] >= 16 { + endPointBeforeNewKey[i] -= 16 + break + } + // if the second nibble > 0, subtract 1 from it + if endPointBeforeNewKey[i] > 0 { + endPointBeforeNewKey[i] -= 1 + break + } + // both nibbles were 0, so move onto the next byte + } + + nextKey, err = syncer.findNextKey(context.Background(), lastKey, endPointBeforeNewKey, proof.EndProof) + require.NoError(t, err) + + // next key would be after the end of the range, so it returns nil instead + require.Nil(t, nextKey) + } +} + +func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { + for i := 0; i < 10; i++ { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + dbToSync, err := generateTrie(t, r, 1000) + require.NoError(t, err) + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(t, err) + syncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: &mockClient{db: dbToSync}, + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NoError(t, err) + require.NotNil(t, syncer) + + err = syncer.StartSyncing(context.Background()) + require.NoError(t, err) + err = syncer.Wait(context.Background()) + require.NoError(t, err) + + proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 500) + require.NoError(t, err) + + // add an extra value to local db + lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key + midpoint := midPoint(lastKey, nil) + + err = db.Put(midpoint, []byte{1}) 
+ require.NoError(t, err) + + // next key at prefix of newly added point + nextKey, err := syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) + require.NoError(t, err) + require.NotNil(t, nextKey) + + require.True(t, isPrefix(midpoint, nextKey)) + + err = db.Delete(midpoint) + require.NoError(t, err) + + err = dbToSync.Put(midpoint, []byte{1}) + require.NoError(t, err) + + proof, err = dbToSync.GetRangeProof(context.Background(), nil, lastKey, 500) + require.NoError(t, err) + + // next key at prefix of newly added point + nextKey, err = syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) + require.NoError(t, err) + require.NotNil(t, nextKey) + + // deal with odd length key + require.True(t, isPrefix(midpoint, nextKey)) + } +} + +func isPrefix(data []byte, prefix []byte) bool { + if prefix[len(prefix)-1]%16 == 0 { + index := 0 + for ; index < len(prefix)-1; index++ { + if data[index] != prefix[index] { + return false + } + } + return data[index]>>4 == prefix[index]>>4 + } + return bytes.HasPrefix(data, prefix) +} + +func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { + for i := 0; i < 10; i++ { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + dbToSync, err := generateTrie(t, r, 500) + require.NoError(t, err) + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(t, err) + syncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: &mockClient{db: dbToSync}, + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NoError(t, err) + require.NotNil(t, syncer) + err = syncer.StartSyncing(context.Background()) + require.NoError(t, err) + err = syncer.Wait(context.Background()) + require.NoError(t, err) + + proof, err := 
dbToSync.GetRangeProof(context.Background(), nil, nil, 100) + require.NoError(t, err) + lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key + + // local db has a different child than remote db + lastKey = append(lastKey, 16) + err = db.Put(lastKey, []byte{1}) + require.NoError(t, err) + + err = dbToSync.Put(lastKey, []byte{2}) + require.NoError(t, err) + + proof, err = dbToSync.GetRangeProof(context.Background(), nil, proof.KeyValues[len(proof.KeyValues)-1].Key, 100) + require.NoError(t, err) + + nextKey, err := syncer.findNextKey(context.Background(), proof.KeyValues[len(proof.KeyValues)-1].Key, nil, proof.EndProof) + require.NoError(t, err) + require.Equal(t, nextKey, lastKey) + } +} + +func Test_Sync_Result_Correct_Root(t *testing.T) { + for i := 0; i < 3; i++ { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + dbToSync, err := generateTrie(t, r, 5000) + require.NoError(t, err) + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(t, err) + syncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: &mockClient{db: dbToSync}, + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NoError(t, err) + require.NotNil(t, syncer) + err = syncer.StartSyncing(context.Background()) + require.NoError(t, err) + + err = syncer.Wait(context.Background()) + require.NoError(t, err) + require.NoError(t, syncer.Error()) + + // new db has fully sync'ed and should be at the same root as the original db + newRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + require.Equal(t, syncRoot, newRoot) + + // make sure they stay in sync + for x := 0; x < 50; x++ { + addkey := make([]byte, r.Intn(50)) + _, err = r.Read(addkey) + require.NoError(t, err) + val := make([]byte, r.Intn(50)) 
+ _, err = r.Read(val) + require.NoError(t, err) + + err = db.Put(addkey, val) + require.NoError(t, err) + + err = dbToSync.Put(addkey, val) + require.NoError(t, err) + + addNilkey := make([]byte, r.Intn(50)) + _, err = r.Read(addNilkey) + require.NoError(t, err) + err = db.Put(addNilkey, nil) + require.NoError(t, err) + + err = dbToSync.Put(addNilkey, nil) + require.NoError(t, err) + + deleteKeyStart := make([]byte, r.Intn(50)) + _, err = r.Read(deleteKeyStart) + require.NoError(t, err) + + it := dbToSync.NewIteratorWithStart(deleteKeyStart) + if it.Next() { + err = dbToSync.Delete(it.Key()) + require.NoError(t, err) + err = db.Delete(it.Key()) + require.NoError(t, err) + } + require.NoError(t, it.Error()) + it.Release() + + syncRoot, err = dbToSync.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + newRoot, err = db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + require.Equal(t, syncRoot, newRoot) + } + } +} + +func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { + for i := 0; i < 5; i++ { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + dbToSync, err := generateTrie(t, r, 5000) + require.NoError(t, err) + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(t, err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(t, err) + + syncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: &mockClient{db: dbToSync}, + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NoError(t, err) + require.NotNil(t, syncer) + err = syncer.StartSyncing(context.Background()) + require.NoError(t, err) + + time.Sleep(15 * time.Millisecond) + syncer.Close() + + newSyncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: &mockClient{db: dbToSync}, + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, 
+ Log: logging.NoLog{}, + }) + require.NoError(t, err) + require.NotNil(t, newSyncer) + err = newSyncer.StartSyncing(context.Background()) + require.NoError(t, err) + require.NoError(t, newSyncer.Error()) + err = newSyncer.Wait(context.Background()) + require.NoError(t, err) + newRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(t, err) + require.Equal(t, syncRoot, newRoot) + } +} + +func Test_Sync_Error_During_Sync(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + r := rand.New(rand.NewSource(int64(0))) // #nosec G404 + + dbToSync, err := generateTrie(t, r, 100) + require.NoError(err) + + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(err) + + client := NewMockClient(ctrl) + client.EXPECT().GetRangeProof(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, request *RangeProofRequest) (*merkledb.RangeProof, error) { + return nil, errInvalidRangeProof + }, + ).AnyTimes() + client.EXPECT().GetChangeProof(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, request *ChangeProofRequest, _ *merkledb.Database) (*merkledb.ChangeProof, error) { + return dbToSync.GetChangeProof(ctx, request.StartingRoot, request.EndingRoot, request.Start, request.End, int(request.Limit)) + }, + ).AnyTimes() + + syncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: client, + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NoError(err) + require.NotNil(t, syncer) + + err = syncer.StartSyncing(context.Background()) + require.NoError(err) + + err = syncer.Wait(context.Background()) + require.ErrorIs(err, errInvalidRangeProof) +} + +func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) 
{ + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + for i := 0; i < 5; i++ { + r := rand.New(rand.NewSource(int64(i))) // #nosec G404 + + dbToSync, err := generateTrie(t, r, 10000) + require.NoError(err) + + syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) + + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 0, + NodeCacheSize: 1000, + }, + ) + require.NoError(err) + + // Only let one response go through until we update the root. + updatedRootChan := make(chan struct{}, 1) + updatedRootChan <- struct{}{} + client := NewMockClient(ctrl) + client.EXPECT().GetRangeProof(gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, request *RangeProofRequest) (*merkledb.RangeProof, error) { + <-updatedRootChan + return dbToSync.GetRangeProofAtRoot(ctx, request.Root, request.Start, request.End, int(request.Limit)) + }, + ).AnyTimes() + client.EXPECT().GetChangeProof(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn( + func(ctx context.Context, request *ChangeProofRequest, _ *merkledb.Database) (*merkledb.ChangeProof, error) { + <-updatedRootChan + return dbToSync.GetChangeProof(ctx, request.StartingRoot, request.EndingRoot, request.Start, request.End, int(request.Limit)) + }, + ).AnyTimes() + + syncer, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: db, + Client: client, + TargetRoot: syncRoot, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NoError(err) + require.NotNil(t, syncer) + for x := 0; x < 50; x++ { + key := make([]byte, r.Intn(50)) + _, err = r.Read(key) + require.NoError(err) + + val := make([]byte, r.Intn(50)) + _, err = r.Read(val) + require.NoError(err) + + err = dbToSync.Put(key, val) + require.NoError(err) + + deleteKeyStart := make([]byte, r.Intn(50)) + _, err = r.Read(deleteKeyStart) + require.NoError(err) + + it := dbToSync.NewIteratorWithStart(deleteKeyStart) + if 
it.Next() { + err = dbToSync.Delete(it.Key()) + require.NoError(err) + } + require.NoError(it.Error()) + it.Release() + } + + syncRoot, err = dbToSync.GetMerkleRoot(context.Background()) + require.NoError(err) + + err = syncer.StartSyncing(context.Background()) + require.NoError(err) + + // Wait until we've processed some work + // before updating the sync target. + require.Eventually( + func() bool { + syncer.workLock.Lock() + defer syncer.workLock.Unlock() + + return syncer.processedWork.Len() > 0 + }, + 3*time.Second, + 10*time.Millisecond, + ) + err = syncer.UpdateSyncTarget(syncRoot) + require.NoError(err) + close(updatedRootChan) + + err = syncer.Wait(context.Background()) + require.NoError(err) + require.NoError(syncer.Error()) + + newRoot, err := db.GetMerkleRoot(context.Background()) + require.NoError(err) + require.Equal(syncRoot, newRoot) + } +} + +func Test_Sync_UpdateSyncTarget(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + m, err := NewStateSyncManager(StateSyncConfig{ + SyncDB: &merkledb.Database{}, // Not used + Client: NewMockClient(ctrl), // Not used + TargetRoot: ids.Empty, + SimultaneousWorkLimit: 5, + Log: logging.NoLog{}, + }) + require.NoError(err) + + // Populate [m.processWork] to ensure that UpdateSyncTarget + // moves the work to [m.unprocessedWork]. + item := &syncWorkItem{ + start: []byte{1}, + end: []byte{2}, + LocalRootID: ids.GenerateTestID(), + } + m.processedWork.Insert(item) + + // Make sure that [m.unprocessedWorkCond] is signaled. + gotSignalChan := make(chan struct{}) + // Don't UpdateSyncTarget until we're waiting for the signal. 
+ startedWaiting := make(chan struct{}) + go func() { + m.workLock.Lock() + defer m.workLock.Unlock() + + close(startedWaiting) + m.unprocessedWorkCond.Wait() + close(gotSignalChan) + }() + + <-startedWaiting + newSyncRoot := ids.GenerateTestID() + err = m.UpdateSyncTarget(newSyncRoot) + require.NoError(err) + <-gotSignalChan + + require.Equal(newSyncRoot, m.config.TargetRoot) + require.Equal(0, m.processedWork.Len()) + require.Equal(1, m.unprocessedWork.Len()) +} + +func generateTrie(t *testing.T, r *rand.Rand, count int) (*merkledb.Database, error) { + db, _, err := generateTrieWithMinKeyLen(t, r, count, 0) + return db, err +} + +func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen int) (*merkledb.Database, [][]byte, error) { + db, err := merkledb.New( + context.Background(), + memdb.New(), + merkledb.Config{ + Tracer: newNoopTracer(), + HistoryLength: 1000, + NodeCacheSize: 1000, + }, + ) + if err != nil { + return nil, nil, err + } + var ( + allKeys [][]byte + seenKeys = make(map[string]struct{}) + batch = db.NewBatch() + ) + genKey := func() []byte { + // new prefixed key + if len(allKeys) > 2 && r.Intn(25) < 10 { + prefix := allKeys[r.Intn(len(allKeys))] + key := make([]byte, r.Intn(50)+len(prefix)) + copy(key, prefix) + _, err := r.Read(key[len(prefix):]) + require.NoError(t, err) + return key + } + + // new key + key := make([]byte, r.Intn(50)+minKeyLen) + _, err = r.Read(key) + require.NoError(t, err) + return key + } + + for i := 0; i < count; { + value := make([]byte, r.Intn(51)) + if len(value) == 0 { + value = nil + } else { + _, err = r.Read(value) + require.NoError(t, err) + } + key := genKey() + if _, seen := seenKeys[string(key)]; seen { + continue // avoid duplicate keys so we always get the count + } + allKeys = append(allKeys, key) + seenKeys[string(key)] = struct{}{} + if err = batch.Put(key, value); err != nil { + return db, nil, err + } + i++ + } + slices.SortFunc(allKeys, func(a, b []byte) bool { + return 
bytes.Compare(a, b) < 0 + }) + return db, allKeys, batch.Write() +} diff --git a/avalanchego/x/sync/syncmanager.go b/avalanchego/x/sync/syncmanager.go new file mode 100644 index 00000000..deb6870a --- /dev/null +++ b/avalanchego/x/sync/syncmanager.go @@ -0,0 +1,679 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "bytes" + "context" + "errors" + "fmt" + "sync" + "time" + + "go.uber.org/zap" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/x/merkledb" +) + +const ( + defaultLeafRequestLimit = 1024 + maxTokenWaitTime = 5 * time.Second +) + +var ( + token = struct{}{} + ErrAlreadyStarted = errors.New("cannot start a StateSyncManager that has already been started") + ErrAlreadyClosed = errors.New("StateSyncManager is closed") + ErrNotEnoughBytes = errors.New("less bytes read than the specified length") + ErrNoClientProvided = errors.New("client is a required field of the sync config") + ErrNoDatabaseProvided = errors.New("sync database is a required field of the sync config") + ErrNoLogProvided = errors.New("log is a required field of the sync config") + ErrZeroWorkLimit = errors.New("simultaneous work limit must be greater than 0") + ErrFinishedWithUnexpectedRoot = errors.New("finished syncing with an unexpected root") +) + +type priority byte + +// Note that [highPriority] > [medPriority] > [lowPriority]. +const ( + lowPriority priority = iota + 1 + medPriority + highPriority +) + +// Signifies that we should sync the range [start, end]. +// nil [start] means there is no lower bound. +// nil [end] means there is no upper bound. +// [LocalRootID] is the ID of the root of this range in our database. +// If we have no local root for this range, [LocalRootID] is ids.Empty. 
+type syncWorkItem struct { + start []byte + end []byte + priority priority + LocalRootID ids.ID +} + +// TODO danlaine look into using a sync.Pool for syncWorkItems +func newWorkItem(localRootID ids.ID, start, end []byte, priority priority) *syncWorkItem { + return &syncWorkItem{ + LocalRootID: localRootID, + start: start, + end: end, + priority: priority, + } +} + +type StateSyncManager struct { + // Must be held when accessing [config.TargetRoot]. + syncTargetLock sync.RWMutex + config StateSyncConfig + + workLock sync.Mutex + // The number of work items currently being processed. + // Namely, the number of goroutines executing [doWork]. + // [workLock] must be held when accessing [processingWorkItems]. + processingWorkItems int + // [workLock] must be held while accessing [unprocessedWork]. + unprocessedWork *syncWorkHeap + // Signalled when: + // - An item is added to [unprocessedWork]. + // - There are no more items in [unprocessedWork] and [processingWorkItems] is 0. + // - Close() is called. + // [workLock] is its inner lock. + unprocessedWorkCond sync.Cond + // [workLock] must be held while accessing [processedWork]. + processedWork *syncWorkHeap + + // When this is closed: + // - [closed] is true. + // - [cancelCtx] was called. + // - [workToBeDone] and [completedWork] are closed. + syncDoneChan chan struct{} + + // Rate-limits the number of concurrently processing work items. + workTokens chan struct{} + + errLock sync.Mutex + // If non-nil, there was a fatal error. + // [errLock] must be held when accessing [fatalError]. + fatalError error + + // Cancels all currently processing work items. + cancelCtx context.CancelFunc + + // Set to true when StartSyncing is called. 
+ syncing bool + closeOnce sync.Once +} + +type StateSyncConfig struct { + SyncDB *merkledb.Database + Client Client + SimultaneousWorkLimit int + Log logging.Logger + TargetRoot ids.ID +} + +func NewStateSyncManager(config StateSyncConfig) (*StateSyncManager, error) { + switch { + case config.Client == nil: + return nil, ErrNoClientProvided + case config.SyncDB == nil: + return nil, ErrNoDatabaseProvided + case config.Log == nil: + return nil, ErrNoLogProvided + case config.SimultaneousWorkLimit == 0: + return nil, ErrZeroWorkLimit + } + + m := &StateSyncManager{ + config: config, + syncDoneChan: make(chan struct{}), + unprocessedWork: newSyncWorkHeap(2 * config.SimultaneousWorkLimit), + processedWork: newSyncWorkHeap(2 * config.SimultaneousWorkLimit), + workTokens: make(chan struct{}, config.SimultaneousWorkLimit), + } + m.unprocessedWorkCond.L = &m.workLock + + // fill the work tokens channel with work tokens + for i := 0; i < config.SimultaneousWorkLimit; i++ { + m.workTokens <- token + } + return m, nil +} + +func (m *StateSyncManager) StartSyncing(ctx context.Context) error { + m.workLock.Lock() + defer m.workLock.Unlock() + + if m.syncing { + return ErrAlreadyStarted + } + + // Add work item to fetch the entire key range. + // Note that this will be the first work item to be processed. + m.unprocessedWork.Insert(newWorkItem(ids.Empty, nil, nil, lowPriority)) + + m.syncing = true + ctx, m.cancelCtx = context.WithCancel(ctx) + + go m.sync(ctx) + return nil +} + +// Repeatedly awaits signal on [m.unprocessedWorkCond] that there +// is work to do or we're done, and dispatches a goroutine to do +// the work. +func (m *StateSyncManager) sync(ctx context.Context) { + defer func() { + // Note we release [m.workLock] before calling Close() + // because Close() will try to acquire [m.workLock]. + // Invariant: [m.workLock] is held when we return from this goroutine. 
+ m.workLock.Unlock() + m.Close() + }() + + // Keep doing work until we're closed, done or [ctx] is canceled. + m.workLock.Lock() + for { + // Invariant: [m.workLock] is held here. + if ctx.Err() != nil { // [m] is closed. + return // [m.workLock] released by defer. + } + if m.unprocessedWork.Len() == 0 { + if m.processingWorkItems == 0 { + // There's no work to do, and there are no work items being processed + // which could cause work to be added, so we're done. + return // [m.workLock] released by defer. + } + // There's no work to do. + // Note that if [m].Close() is called, or [ctx] is canceled, + // Close() will be called, which will broadcast on [m.unprocessedWorkCond], + // which will cause Wait() to return, and this goroutine to exit. + m.unprocessedWorkCond.Wait() + continue + } + m.processingWorkItems++ + workItem := m.unprocessedWork.GetWork() + // TODO danlaine: We won't release [m.workLock] until + // we've started a goroutine for each available work item. + // We can't apply proofs we receive until we release [m.workLock]. + // Is this OK? Is it possible we end up with too many goroutines? + go m.doWork(ctx, workItem) + } +} + +// Called when there is a fatal error or sync is complete. +func (m *StateSyncManager) Close() { + m.closeOnce.Do(func() { + m.workLock.Lock() + defer m.workLock.Unlock() + + // Don't process any more work items. + // Drop currently processing work items. + if m.cancelCtx != nil { + m.cancelCtx() + } + + // ensure any goroutines waiting for work from the heaps gets released + m.unprocessedWork.Close() + m.unprocessedWorkCond.Signal() + m.processedWork.Close() + + // signal all code waiting on the sync to complete + close(m.syncDoneChan) + }) +} + +// Processes [item] by fetching and applying a change or range proof. +// Assumes [m.workLock] is not held. +func (m *StateSyncManager) doWork(ctx context.Context, item *syncWorkItem) { + // Wait until we get a work token or we close. 
+ select { + case <-m.workTokens: + case <-ctx.Done(): + // [m] is closed and sync() is returning so don't care about cleanup. + return + } + + defer func() { + m.workTokens <- token + m.workLock.Lock() + m.processingWorkItems-- + if m.processingWorkItems == 0 && m.unprocessedWork.Len() == 0 { + // There are no processing or unprocessed work items so we're done. + m.unprocessedWorkCond.Signal() + } + m.workLock.Unlock() + }() + + if item.LocalRootID == ids.Empty { + // the keys in this range have not been downloaded, so get all key/values + m.getAndApplyRangeProof(ctx, item) + } else { + // the keys in this range have already been downloaded, but the root changed, so get all changes + m.getAndApplyChangeProof(ctx, item) + } +} + +// Fetch and apply the change proof given by [workItem]. +// Assumes [m.workLock] is not held. +func (m *StateSyncManager) getAndApplyChangeProof(ctx context.Context, workItem *syncWorkItem) { + rootID := m.getTargetRoot() + + if workItem.LocalRootID == rootID { + // Start root is the same as the end root, so we're done. + m.completeWorkItem(ctx, workItem, workItem.end, rootID, nil) + return + } + + changeproof, err := m.config.Client.GetChangeProof(ctx, + &ChangeProofRequest{ + StartingRoot: workItem.LocalRootID, + EndingRoot: rootID, + Start: workItem.start, + End: workItem.end, + Limit: defaultLeafRequestLimit, + }, + m.config.SyncDB, + ) + if err != nil { + m.setError(err) + return + } + + select { + case <-m.syncDoneChan: + // If we're closed, don't apply the proof. + return + default: + } + + // The start or end root IDs are not present in other nodes' history. + // Add this range as a fresh uncompleted work item to the work heap. 
+ // TODO danlaine send range proof instead of failure notification + if !changeproof.HadRootsInHistory { + workItem.LocalRootID = ids.Empty + m.enqueueWork(workItem) + return + } + + largestHandledKey := workItem.end + // if the proof wasn't empty, apply changes to the sync DB + if len(changeproof.KeyValues)+len(changeproof.DeletedKeys) > 0 { + if err := m.config.SyncDB.CommitChangeProof(ctx, changeproof); err != nil { + m.setError(err) + return + } + + if len(changeproof.KeyValues) > 0 { + largestHandledKey = changeproof.KeyValues[len(changeproof.KeyValues)-1].Key + } + if len(changeproof.DeletedKeys) > 0 { + lastDeletedKey := changeproof.DeletedKeys[len(changeproof.DeletedKeys)-1] + if bytes.Compare(lastDeletedKey, largestHandledKey) == 1 { + largestHandledKey = lastDeletedKey + } + } + } + + m.completeWorkItem(ctx, workItem, largestHandledKey, rootID, changeproof.EndProof) +} + +// Fetch and apply the range proof given by [workItem]. +// Assumes [m.workLock] is not held. +func (m *StateSyncManager) getAndApplyRangeProof(ctx context.Context, workItem *syncWorkItem) { + rootID := m.getTargetRoot() + proof, err := m.config.Client.GetRangeProof(ctx, + &RangeProofRequest{ + Root: rootID, + Start: workItem.start, + End: workItem.end, + Limit: defaultLeafRequestLimit, + }, + ) + if err != nil { + m.setError(err) + return + } + + select { + case <-m.syncDoneChan: + // If we're closed, don't apply the proof. + return + default: + } + + largestHandledKey := workItem.end + if len(proof.KeyValues) > 0 { + // Add all the key-value pairs we got to the database. + if err := m.config.SyncDB.CommitRangeProof(ctx, workItem.start, proof); err != nil { + m.setError(err) + return + } + + largestHandledKey = proof.KeyValues[len(proof.KeyValues)-1].Key + } + + m.completeWorkItem(ctx, workItem, largestHandledKey, rootID, proof.EndProof) +} + +// Attempt to find what key to query next based on the differences between +// the local trie path to a node and the path recently received. 
+func (m *StateSyncManager) findNextKey( + ctx context.Context, + start []byte, + end []byte, + receivedProofNodes []merkledb.ProofNode, +) ([]byte, error) { + proofOfStart, err := m.config.SyncDB.GetProof(ctx, start) + if err != nil { + return nil, err + } + localProofNodes := proofOfStart.Path + + var result []byte + localIndex := len(localProofNodes) - 1 + receivedIndex := len(receivedProofNodes) - 1 + startKeyPath := merkledb.SerializedPath{Value: start, NibbleLength: 2 * len(start)} + + // Just return the start key when the proof nodes contain keys that are not prefixes of the start key + // this occurs mostly in change proofs where the largest returned key was a deleted key. + // Since the key was deleted, it no longer shows up in the proof nodes + // for now, just fallback to using the start key, which is always correct. + // TODO: determine a more accurate nextKey in this scenario + if !startKeyPath.HasPrefix(localProofNodes[localIndex].KeyPath) || !startKeyPath.HasPrefix(receivedProofNodes[receivedIndex].KeyPath) { + return start, nil + } + + // walk up the node paths until a difference is found + for receivedIndex >= 0 && result == nil { + localNode := localProofNodes[localIndex] + receivedNode := receivedProofNodes[receivedIndex] + // the two nodes have the same key + if localNode.KeyPath.Equal(receivedNode.KeyPath) { + startingChildIndex := byte(0) + if localNode.KeyPath.NibbleLength < startKeyPath.NibbleLength { + startingChildIndex = startKeyPath.NibbleVal(localNode.KeyPath.NibbleLength) + 1 + } + // the two nodes have the same path, so ensure that all children have matching ids + for childIndex := startingChildIndex; childIndex < 16; childIndex++ { + receivedChildID, receiveOk := receivedNode.Children[childIndex] + localChildID, localOk := localNode.Children[childIndex] + // if they both don't have a child or have matching children, continue + if (receiveOk || localOk) && receivedChildID != localChildID { + result = 
localNode.KeyPath.AppendNibble(childIndex).Value + break + } + } + if result != nil { + break + } + // only want to move both indexes when they have equal keys + localIndex-- + receivedIndex-- + continue + } + + var branchNode merkledb.ProofNode + + if receivedNode.KeyPath.NibbleLength > localNode.KeyPath.NibbleLength { + // the received proof has an extra node due to a branch that is not present locally + branchNode = receivedNode + receivedIndex-- + } else { + // the local proof has an extra node due to a branch that was not present in the received proof + branchNode = localNode + localIndex-- + } + + // the two nodes have different paths, so find where they branched + for nextKeyNibble := startKeyPath.NibbleVal(branchNode.KeyPath.NibbleLength) + 1; nextKeyNibble < 16; nextKeyNibble++ { + if _, ok := branchNode.Children[nextKeyNibble]; ok { + result = branchNode.KeyPath.AppendNibble(nextKeyNibble).Value + break + } + } + } + + if result == nil || (len(end) > 0 && bytes.Compare(result, end) >= 0) { + return nil, nil + } + + return result, nil +} + +func (m *StateSyncManager) Error() error { + m.errLock.Lock() + defer m.errLock.Unlock() + + return m.fatalError +} + +// Blocks until either: +// - sync is complete. +// - sync fatally errored. +// - [ctx] is canceled. +// If [ctx] is canceled, returns [ctx].Err(). +func (m *StateSyncManager) Wait(ctx context.Context) error { + select { + case <-m.syncDoneChan: + case <-ctx.Done(): + return ctx.Err() + } + + // There was a fatal error. + if err := m.Error(); err != nil { + return err + } + + root, err := m.config.SyncDB.GetMerkleRoot(ctx) + if err != nil { + m.config.Log.Info("completed with error", zap.Error(err)) + return err + } + if m.getTargetRoot() != root { + // This should never happen. 
+ return fmt.Errorf("%w: expected %s, got %s", ErrFinishedWithUnexpectedRoot, m.getTargetRoot(), root) + } + m.config.Log.Info("completed", zap.String("new root", root.String())) + return nil +} + +func (m *StateSyncManager) UpdateSyncTarget(syncTargetRoot ids.ID) error { + m.workLock.Lock() + defer m.workLock.Unlock() + + select { + case <-m.syncDoneChan: + return ErrAlreadyClosed + default: + } + + m.syncTargetLock.Lock() + defer m.syncTargetLock.Unlock() + + if m.config.TargetRoot == syncTargetRoot { + // the target hasn't changed, so there is nothing to do + return nil + } + + m.config.TargetRoot = syncTargetRoot + + // move all completed ranges into the work heap with high priority + shouldSignal := m.processedWork.Len() > 0 + for m.processedWork.Len() > 0 { + // Note that [m.processedWork].Close() hasn't + // been called because we have [m.workLock] + // and we checked that [m.closed] is false. + currentItem := m.processedWork.GetWork() + currentItem.priority = highPriority + m.unprocessedWork.Insert(currentItem) + } + if shouldSignal { + // Only signal once because we only have 1 goroutine + // waiting on [m.unprocessedWorkCond]. + m.unprocessedWorkCond.Signal() + } + return nil +} + +func (m *StateSyncManager) getTargetRoot() ids.ID { + m.syncTargetLock.RLock() + defer m.syncTargetLock.RUnlock() + + return m.config.TargetRoot +} + +// Record that there was a fatal error and begin shutting down. +func (m *StateSyncManager) setError(err error) { + m.errLock.Lock() + defer m.errLock.Unlock() + + m.config.Log.Error("syncing failed", zap.Error(err)) + m.fatalError = err + // Call in goroutine because we might be holding [m.workLock] + // which [m.Close] will try to acquire. + go m.Close() +} + +// Mark the range [start, end] as synced up to [rootID]. +// Assumes [m.workLock] is not held. 
+func (m *StateSyncManager) completeWorkItem(ctx context.Context, workItem *syncWorkItem, largestHandledKey []byte, rootID ids.ID, proofOfLargestKey []merkledb.ProofNode) { + // if the last key is equal to the end, then the full range is completed + if !bytes.Equal(largestHandledKey, workItem.end) { + // find the next key to start querying by comparing the proofs for the last completed key + nextStartKey, err := m.findNextKey(ctx, largestHandledKey, workItem.end, proofOfLargestKey) + if err != nil { + m.setError(err) + return + } + + largestHandledKey = workItem.end + + // nextStartKey being nil indicates that the entire range has been completed + if nextStartKey != nil { + // the full range wasn't completed, so enqueue a new work item for the range [nextStartKey, workItem.end] + m.enqueueWork(newWorkItem(workItem.LocalRootID, nextStartKey, workItem.end, workItem.priority)) + largestHandledKey = nextStartKey + } + } + + // completed the range [workItem.start, lastKey], log and record in the completed work heap + m.config.Log.Info("completed range", + zap.Binary("start", workItem.start), + zap.Binary("end", largestHandledKey), + ) + if m.getTargetRoot() == rootID { + m.workLock.Lock() + defer m.workLock.Unlock() + + m.processedWork.MergeInsert(newWorkItem(rootID, workItem.start, largestHandledKey, workItem.priority)) + } else { + // the root has changed, so reinsert with high priority + m.enqueueWork(newWorkItem(rootID, workItem.start, largestHandledKey, highPriority)) + } +} + +// Queue the given key range to be fetched and applied. +// If there are sufficiently few unprocessed/processing work items, +// splits the range into two items and queues them both. +// Assumes [m.workLock] is not held. 
+func (m *StateSyncManager) enqueueWork(item *syncWorkItem) { + m.workLock.Lock() + defer func() { + m.workLock.Unlock() + m.unprocessedWorkCond.Signal() + }() + + if m.processingWorkItems+m.unprocessedWork.Len() > 2*m.config.SimultaneousWorkLimit { + // There are too many work items already, don't split the range + m.unprocessedWork.Insert(item) + return + } + + // Split the remaining range into to 2. + // Find the middle point. + mid := midPoint(item.start, item.end) + + // first item gets higher priority than the second to encourage finished ranges to grow + // rather than start a new range that is not contiguous with existing completed ranges + first := newWorkItem(item.LocalRootID, item.start, mid, medPriority) + second := newWorkItem(item.LocalRootID, mid, item.end, lowPriority) + + m.unprocessedWork.Insert(first) + m.unprocessedWork.Insert(second) +} + +// find the midpoint between two keys +// nil on start is treated as all 0's +// nil on end is treated as all 255's +func midPoint(start, end []byte) []byte { + length := len(start) + if len(end) > length { + length = len(end) + } + if length == 0 { + return []byte{127} + } + + // This check deals with cases where the end has a 255(or is nil which is treated as all 255s) and the start key ends 255. + // For example, midPoint([255], nil) should be [255, 127], not [255]. 
+ // The result needs the extra byte added on to the end to deal with the fact that the naive midpoint between 255 and 255 would be 255 + if (len(start) > 0 && start[len(start)-1] == 255) && (len(end) == 0 || end[len(end)-1] == 255) { + length++ + } + + leftover := 0 + midpoint := make([]byte, length+1) + for i := 0; i < length; i++ { + startVal := 0 + if i < len(start) { + startVal = int(start[i]) + } + + endVal := 0 + if len(end) == 0 { + endVal = 255 + } + if i < len(end) { + endVal = int(end[i]) + } + + total := startVal + endVal + leftover + leftover = 0 + // if total is odd, when we divide, we will lose the .5, + // record that in the leftover for the next digits + if total%2 == 1 { + leftover = 256 + } + + // find the midpoint between the start and the end + total /= 2 + + // larger than byte can hold, so carry over to previous byte + if total >= 256 { + total -= 256 + index := i - 1 + for index >= 0 { + if midpoint[index] != 255 { + midpoint[index]++ + break + } + + midpoint[index] = 0 + index-- + } + } + midpoint[i] = byte(total) + } + if leftover > 0 { + midpoint[length] = 127 + } else { + midpoint = midpoint[0:length] + } + return midpoint +} diff --git a/avalanchego/x/sync/syncworkheap.go b/avalanchego/x/sync/syncworkheap.go new file mode 100644 index 00000000..b015b023 --- /dev/null +++ b/avalanchego/x/sync/syncworkheap.go @@ -0,0 +1,198 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package sync + +import ( + "bytes" + "container/heap" + + "github.com/google/btree" +) + +var _ heap.Interface = &syncWorkHeap{} + +type heapItem struct { + workItem *syncWorkItem + heapIndex int +} + +// A priority queue of syncWorkItems. +// Note that work item ranges never overlap. +// Supports range merging and priority updating. +// Not safe for concurrent use. +type syncWorkHeap struct { + // Max heap of items by priority. + // i.e. heap.Pop returns highest priority item. 
+ priorityHeap []*heapItem + // The heap items sorted by range start. + // A nil start is considered to be the smallest. + sortedItems *btree.BTreeG[*heapItem] + closed bool +} + +func newSyncWorkHeap(maxSize int) *syncWorkHeap { + return &syncWorkHeap{ + priorityHeap: make([]*heapItem, 0, maxSize), + sortedItems: btree.NewG( + 2, + func(a, b *heapItem) bool { + if a.workItem == nil { + return b.workItem != nil + } + if b.workItem == nil { + return false + } + return bytes.Compare(a.workItem.start, b.workItem.start) < 0 + }, + ), + } +} + +// Marks the heap as closed. +func (wh *syncWorkHeap) Close() { + wh.closed = true +} + +// Adds a new [item] into the heap. Will not merge items, unlike MergeInsert. +func (wh *syncWorkHeap) Insert(item *syncWorkItem) { + if wh.closed { + return + } + + heap.Push(wh, &heapItem{workItem: item}) +} + +// Pops and returns a work item from the heap. +// Returns nil if no work is available or the heap is closed. +func (wh *syncWorkHeap) GetWork() *syncWorkItem { + if wh.closed || wh.Len() == 0 { + return nil + } + return heap.Pop(wh).(*heapItem).workItem +} + +// Insert the item into the heap, merging it with existing items +// that share a boundary and root ID. +// e.g. if the heap contains a work item with range +// [0,10] and then [10,20] is inserted, we will merge the two +// into a single work item with range [0,20]. +// e.g. if the heap contains work items [0,10] and [20,30], +// and we add [10,20], we will merge them into [0,30]. +func (wh *syncWorkHeap) MergeInsert(item *syncWorkItem) { + if wh.closed { + return + } + + var mergedRange *heapItem + + // Find the item with the greatest start range which is less than [item.start]. + // Note that the iterator function will run at most once, since it always returns false. 
+ wh.sortedItems.DescendLessOrEqual( + &heapItem{ + workItem: &syncWorkItem{ + start: item.start, + }, + }, + func(beforeItem *heapItem) bool { + if item.LocalRootID == beforeItem.workItem.LocalRootID && bytes.Equal(beforeItem.workItem.end, item.start) { + // [beforeItem.start, beforeItem.end] and [item.start, item.end] are + // merged into [beforeItem.start, item.end] + beforeItem.workItem.end = item.end + mergedRange = beforeItem + } + return false + }) + + // Find the item with the smallest start range which is greater than [item.start]. + // Note that the iterator function will run at most once, since it always returns false. + wh.sortedItems.AscendGreaterOrEqual( + &heapItem{ + workItem: &syncWorkItem{ + start: item.start, + }, + }, + func(afterItem *heapItem) bool { + if item.LocalRootID == afterItem.workItem.LocalRootID && bytes.Equal(afterItem.workItem.start, item.end) { + if mergedRange != nil { + // [beforeItem.start, item.end] and [afterItem.start, afterItem.end] are merged + // into [beforeItem.start, afterItem.end]. + // Modify [mergedRange] and remove [afterItem] since [mergedRange] now contains the entire + // range that was covered by [afterItem]. + wh.remove(afterItem) + mergedRange.workItem.end = afterItem.workItem.end + if afterItem.workItem.priority > mergedRange.workItem.priority { + mergedRange.workItem.priority = afterItem.workItem.priority + heap.Fix(wh, mergedRange.heapIndex) + } + } else { + // [item.start, item.end] and [afterItem.start, afterItem.end] are merged into + // [item.start, afterItem.end]. + afterItem.workItem.start = item.start + mergedRange = afterItem + } + } + return false + }) + + if mergedRange != nil { + // We merged [item] with at least one existing item. + if item.priority > mergedRange.workItem.priority { + mergedRange.workItem.priority = item.priority + // Priority was updated; fix position in the heap. 
+ heap.Fix(wh, mergedRange.heapIndex) + } + } else { + // We didn't merge [item] with an existing one; put it in the heap. + heap.Push(wh, &heapItem{workItem: item}) + } +} + +// Deletes [item] from the heap. +func (wh *syncWorkHeap) remove(item *heapItem) { + oldIndex := item.heapIndex + newLength := len(wh.priorityHeap) - 1 + + // swap with last item, delete item, then fix heap if required + wh.Swap(newLength, item.heapIndex) + wh.priorityHeap[newLength] = nil + wh.priorityHeap = wh.priorityHeap[:newLength] + + // the item was already the last item, so nothing needs to be fixed + if oldIndex != newLength { + heap.Fix(wh, oldIndex) + } + wh.sortedItems.Delete(item) +} + +// below this line are the implementations required for heap.Interface + +func (wh *syncWorkHeap) Len() int { + return len(wh.priorityHeap) +} + +func (wh *syncWorkHeap) Less(i int, j int) bool { + return wh.priorityHeap[i].workItem.priority > wh.priorityHeap[j].workItem.priority +} + +func (wh *syncWorkHeap) Swap(i int, j int) { + wh.priorityHeap[i], wh.priorityHeap[j] = wh.priorityHeap[j], wh.priorityHeap[i] + wh.priorityHeap[i].heapIndex = i + wh.priorityHeap[j].heapIndex = j +} + +func (wh *syncWorkHeap) Pop() interface{} { + newLength := len(wh.priorityHeap) - 1 + value := wh.priorityHeap[newLength] + wh.priorityHeap[newLength] = nil + wh.priorityHeap = wh.priorityHeap[:newLength] + wh.sortedItems.Delete(value) + return value +} + +func (wh *syncWorkHeap) Push(x interface{}) { + item := x.(*heapItem) + item.heapIndex = len(wh.priorityHeap) + wh.priorityHeap = append(wh.priorityHeap, item) + wh.sortedItems.ReplaceOrInsert(item) +} diff --git a/avalanchego/x/sync/syncworkheap_test.go b/avalanchego/x/sync/syncworkheap_test.go new file mode 100644 index 00000000..67582901 --- /dev/null +++ b/avalanchego/x/sync/syncworkheap_test.go @@ -0,0 +1,233 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package sync + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/ids" +) + +// Tests heap.Interface methods Push, Pop, Swap, Len, Less. +func Test_SyncWorkHeap_Heap_Methods(t *testing.T) { + require := require.New(t) + + h := newSyncWorkHeap(1) + require.Equal(0, h.Len()) + + item1 := &heapItem{ + workItem: &syncWorkItem{ + start: nil, + end: nil, + priority: highPriority, + LocalRootID: ids.GenerateTestID(), + }, + } + h.Push(item1) + require.Equal(1, h.Len()) + require.Len(h.priorityHeap, 1) + require.Equal(item1, h.priorityHeap[0]) + require.Equal(0, h.priorityHeap[0].heapIndex) + require.Equal(1, h.sortedItems.Len()) + gotItem, ok := h.sortedItems.Get(item1) + require.True(ok) + require.Equal(item1, gotItem) + + h.Pop() + require.Equal(0, h.Len()) + require.Len(h.priorityHeap, 0) + require.Equal(0, h.sortedItems.Len()) + + item2 := &heapItem{ + workItem: &syncWorkItem{ + start: []byte{0}, + end: []byte{1}, + priority: highPriority, + LocalRootID: ids.GenerateTestID(), + }, + } + h.Push(item1) + h.Push(item2) + require.Equal(2, h.Len()) + require.Len(h.priorityHeap, 2) + require.Equal(item1, h.priorityHeap[0]) + require.Equal(item2, h.priorityHeap[1]) + require.Equal(0, item1.heapIndex) + require.Equal(1, item2.heapIndex) + require.Equal(2, h.sortedItems.Len()) + gotItem, ok = h.sortedItems.Get(item1) + require.True(ok) + require.Equal(item1, gotItem) + gotItem, ok = h.sortedItems.Get(item2) + require.True(ok) + require.Equal(item2, gotItem) + + require.False(h.Less(0, 1)) + + h.Swap(0, 1) + require.Equal(item2, h.priorityHeap[0]) + require.Equal(item1, h.priorityHeap[1]) + require.Equal(1, item1.heapIndex) + require.Equal(0, item2.heapIndex) + + require.False(h.Less(0, 1)) + + item1.workItem.priority = lowPriority + require.True(h.Less(0, 1)) + + gotItem = h.Pop().(*heapItem) + require.Equal(item1, gotItem) + + gotItem = h.Pop().(*heapItem) + require.Equal(item2, gotItem) + + require.Equal(0, h.Len()) + 
require.Len(h.priorityHeap, 0) + require.Equal(0, h.sortedItems.Len()) +} + +// Tests Insert and GetWork +func Test_SyncWorkHeap_Insert_GetWork(t *testing.T) { + require := require.New(t) + h := newSyncWorkHeap(1) + + item1 := &syncWorkItem{ + start: []byte{0}, + end: []byte{1}, + priority: lowPriority, + LocalRootID: ids.GenerateTestID(), + } + item2 := &syncWorkItem{ + start: []byte{2}, + end: []byte{3}, + priority: medPriority, + LocalRootID: ids.GenerateTestID(), + } + item3 := &syncWorkItem{ + start: []byte{4}, + end: []byte{5}, + priority: highPriority, + LocalRootID: ids.GenerateTestID(), + } + h.Insert(item3) + h.Insert(item2) + h.Insert(item1) + require.Equal(3, h.Len()) + + // Ensure [sortedItems] is in right order. + got := []*syncWorkItem{} + h.sortedItems.Ascend( + func(i *heapItem) bool { + got = append(got, i.workItem) + return true + }, + ) + require.Equal([]*syncWorkItem{item1, item2, item3}, got) + + // Ensure priorities are in right order. + gotItem := h.GetWork() + require.Equal(item3, gotItem) + gotItem = h.GetWork() + require.Equal(item2, gotItem) + gotItem = h.GetWork() + require.Equal(item1, gotItem) + gotItem = h.GetWork() + require.Nil(gotItem) + + require.Equal(0, h.Len()) +} + +func Test_SyncWorkHeap_remove(t *testing.T) { + require := require.New(t) + + h := newSyncWorkHeap(1) + + item1 := &syncWorkItem{ + start: []byte{0}, + end: []byte{1}, + priority: lowPriority, + LocalRootID: ids.GenerateTestID(), + } + + h.Insert(item1) + + heapItem1 := h.priorityHeap[0] + h.remove(heapItem1) + + require.Equal(0, h.Len()) + require.Len(h.priorityHeap, 0) + require.Equal(0, h.sortedItems.Len()) + + item2 := &syncWorkItem{ + start: []byte{2}, + end: []byte{3}, + priority: medPriority, + LocalRootID: ids.GenerateTestID(), + } + + h.Insert(item1) + h.Insert(item2) + + heapItem2 := h.priorityHeap[0] + require.Equal(item2, heapItem2.workItem) + h.remove(heapItem2) + require.Equal(1, h.Len()) + require.Len(h.priorityHeap, 1) + require.Equal(1, 
h.sortedItems.Len()) + require.Equal(0, h.priorityHeap[0].heapIndex) + require.Equal(item1, h.priorityHeap[0].workItem) + + heapItem1 = h.priorityHeap[0] + require.Equal(item1, heapItem1.workItem) + h.remove(heapItem1) + require.Equal(0, h.Len()) + require.Len(h.priorityHeap, 0) + require.Equal(0, h.sortedItems.Len()) +} + +func Test_SyncWorkHeap_Merge_Insert(t *testing.T) { + // merge with range before + syncHeap := newSyncWorkHeap(1000) + + syncHeap.MergeInsert(&syncWorkItem{start: nil, end: []byte{63}}) + require.Equal(t, 1, syncHeap.Len()) + + syncHeap.MergeInsert(&syncWorkItem{start: []byte{127}, end: []byte{192}}) + require.Equal(t, 2, syncHeap.Len()) + + syncHeap.MergeInsert(&syncWorkItem{start: []byte{193}, end: nil}) + require.Equal(t, 3, syncHeap.Len()) + + syncHeap.MergeInsert(&syncWorkItem{start: []byte{63}, end: []byte{126}, priority: lowPriority}) + require.Equal(t, 3, syncHeap.Len()) + + // merge with range after + syncHeap = newSyncWorkHeap(1000) + + syncHeap.MergeInsert(&syncWorkItem{start: nil, end: []byte{63}}) + require.Equal(t, 1, syncHeap.Len()) + + syncHeap.MergeInsert(&syncWorkItem{start: []byte{127}, end: []byte{192}}) + require.Equal(t, 2, syncHeap.Len()) + + syncHeap.MergeInsert(&syncWorkItem{start: []byte{193}, end: nil}) + require.Equal(t, 3, syncHeap.Len()) + + syncHeap.MergeInsert(&syncWorkItem{start: []byte{64}, end: []byte{127}, priority: lowPriority}) + require.Equal(t, 3, syncHeap.Len()) + + // merge both sides at the same time + syncHeap = newSyncWorkHeap(1000) + + syncHeap.MergeInsert(&syncWorkItem{start: nil, end: []byte{63}}) + require.Equal(t, 1, syncHeap.Len()) + + syncHeap.MergeInsert(&syncWorkItem{start: []byte{127}, end: nil}) + require.Equal(t, 2, syncHeap.Len()) + + syncHeap.MergeInsert(&syncWorkItem{start: []byte{63}, end: []byte{127}, priority: lowPriority}) + require.Equal(t, 1, syncHeap.Len()) +} diff --git a/coreth/.golangci.yml b/coreth/.golangci.yml index 726822ac..a0dd1c43 100644 --- a/coreth/.golangci.yml +++ 
b/coreth/.golangci.yml @@ -8,11 +8,11 @@ run: skip-dirs-use-default: true skip-files: - core/genesis_alloc.go + - precompile/ linters: disable-all: true enable: - - deadcode - goconst - goimports - gosimple @@ -20,7 +20,7 @@ linters: - ineffassign - misspell - unconvert - - varcheck + - unused - whitespace linters-settings: @@ -29,21 +29,3 @@ linters-settings: goconst: min-len: 3 # minimum length of string constant min-occurrences: 6 # minimum number of occurrences - -issues: - exclude-rules: - - path: crypto/blake2b/ - linters: - - deadcode - - path: crypto/bn256/cloudflare - linters: - - deadcode - - path: p2p/discv5/ - linters: - - deadcode - - path: core/vm/instructions_test.go - linters: - - goconst - - path: cmd/faucet/ - linters: - - deadcode diff --git a/coreth/Dockerfile b/coreth/Dockerfile index 26abe9f7..d6b8578f 100644 --- a/coreth/Dockerfile +++ b/coreth/Dockerfile @@ -1,7 +1,7 @@ # ============= Compilation Stage ================ -FROM golang:1.18.5-buster AS builder +FROM golang:1.20.1-buster AS builder -RUN apt-get update && apt-get install -y --no-install-recommends bash=5.0-4 git=1:2.20.1-2+deb10u3 make=4.2.1-1.2 gcc=4:8.3.0-1 musl-dev=1.1.21-2 ca-certificates=20200601~deb10u2 linux-headers-amd64 +RUN apt-get update && apt-get install -y --no-install-recommends bash=5.0-4 make=4.2.1-1.2 gcc=4:8.3.0-1 musl-dev=1.1.21-2 ca-certificates=20200601~deb10u2 linux-headers-amd64 ARG AVALANCHE_VERSION @@ -19,7 +19,7 @@ WORKDIR $GOPATH/src/github.com/ava-labs/avalanchego RUN go mod download # Replace the coreth dependency RUN go mod edit -replace github.com/ava-labs/coreth=../coreth -RUN go mod download && go mod tidy -compat=1.18 +RUN go mod download && go mod tidy -compat=1.19 # Build the AvalancheGo binary with local version of coreth. 
RUN ./scripts/build_avalanche.sh diff --git a/coreth/README.md b/coreth/README.md index 713d5db7..5915fea7 100644 --- a/coreth/README.md +++ b/coreth/README.md @@ -15,6 +15,13 @@ cd $GOPATH/src/github.com/ava-labs/avalanchego go mod edit -replace github.com/ava-labs/coreth=../coreth ``` +Now that AvalancheGo depends on the local version of Coreth, we can build with the normal build script: + +```bash +./scripts/build.sh +./build/avalanchego +``` + Note: the C-Chain originally ran in a separate process from the main AvalancheGo process and communicated with it over a local gRPC connection. When this was the case, AvalancheGo's build script would download Coreth, compile it, and place the binary into the `avalanchego/build/plugins` directory. ## API diff --git a/coreth/RELEASES.md b/coreth/RELEASES.md index d004d833..cf484b38 100644 --- a/coreth/RELEASES.md +++ b/coreth/RELEASES.md @@ -1,5 +1,76 @@ # Release Notes +## [v0.11.9](https://github.com/ava-labs/coreth/releases/tag/v0.11.9) + +- Downgrade SetPreference log from warn to debug + +## [v0.11.8](https://github.com/ava-labs/coreth/releases/tag/v0.11.8) + +- Fix shutdown hanging during state sync +- Add pre-check for imported UTXOs +- Fix bug in `BadBlockReason` output to display error string correctly +- Update golangci-lint version to v1.51.2 + +## [v0.11.7](https://github.com/ava-labs/coreth/releases/tag/v0.11.7) + +- Enable state sync by default when syncing from an empty database +- Increase block gas limit to 15M for Cortina Network Upgrade +- Add back file tracer endpoint +- Add back JS tracer + +## [v0.11.6](https://github.com/ava-labs/coreth/releases/tag/v0.11.6) + +- Bump AvalancheGo to v1.9.6 + +## [v0.11.5](https://github.com/ava-labs/coreth/releases/tag/v0.11.5) + +- Add support for eth_call over VM2VM messaging +- Add config flags for tx pool behavior + +## [v0.11.4](https://github.com/ava-labs/coreth/releases/tag/v0.11.4) + +- Add config option to perform database inspection on startup +- Add 
configurable transaction indexing to reduce disk usage +- Add special case to allow transactions using Nick's Method to bypass API level replay protection +- Add counter metrics for number of accepted/processed logs +- Improve header and logs caching using maximum accepted depth cache + +## [v0.11.3](https://github.com/ava-labs/coreth/releases/tag/v0.11.3) + +- Add counter for number of processed and accepted transactions +- Wait for state sync goroutines to complete on shutdown +- Bump go-ethereum dependency to v1.10.26 +- Increase soft cap on transaction size limits +- Add back isForkIncompatible checks for all existing forks +- Clean up Apricot Phase 6 code + +## [v0.11.2](https://github.com/ava-labs/coreth/releases/tag/v0.11.2) + +- Add trie clean cache journaling to disk to improve processing time on restart +- Fix regression where snapshot could be marked as stale by async acceptor during block processing +- Add fine-grained block processing metrics + +## [v0.11.1](https://github.com/ava-labs/coreth/releases/tag/v0.11.1) + +- Add cache size config parameters for `trie-clean-cache`, `trie-dirty-cache`, `trie-dirty-commit-target`, and `snapshot-cache` +- Increase default `trie-clean-cache` size from 256 MB to 512 MB +- Increase default `snapshot-cache` size from 128 MB to 256 MB +- Add optional flag to skip chain config upgrade check on startup (allows VM to start after missing a network upgrade) +- Make Avalanche blockchainID (separate from EVM ChainID) available within the EVM +- Record block height when performing state sync +- Add support for VM-to-VM messaging +- Move `eth_getChainConfig` under the `BlockChainAPI` +- Simplify block builder timer logic to a simple retry delay +- Add Opentelemetry support +- Simplify caching logic for gas price estimation + +## [v0.11.0](https://github.com/ava-labs/coreth/releases/tag/v0.11.0) + +- Update Chain Config compatibility check to compare against last accepted block timestamp +- Bump go-ethereum dependency to 
v1.10.25 +- Add Banff activation times for Mainnet and Fuji for October 18 4pm UTC and October 3 2pm UTC respectively +- Banff cleanup + ## [v0.10.0](https://github.com/ava-labs/coreth/releases/tag/v0.10.0) - Deprecate Native Asset Call and Native Asset Balance diff --git a/coreth/accounts/abi/abi.go b/coreth/accounts/abi/abi.go index 66dadac2..4c70970d 100644 --- a/coreth/accounts/abi/abi.go +++ b/coreth/accounts/abi/abi.go @@ -91,6 +91,42 @@ func (abi ABI) Pack(name string, args ...interface{}) ([]byte, error) { return append(method.ID, arguments...), nil } +// PackOutput packs the given [args] as the output of given method [name] to conform the ABI. +// This does not include method ID. +func (abi ABI) PackOutput(name string, args ...interface{}) ([]byte, error) { + // Fetch the ABI of the requested method + method, exist := abi.Methods[name] + if !exist { + return nil, fmt.Errorf("method '%s' not found", name) + } + arguments, err := method.Outputs.Pack(args...) + if err != nil { + return nil, err + } + return arguments, nil +} + +// getInputs gets input arguments of the given [name] method. +func (abi ABI) getInputs(name string, data []byte) (Arguments, error) { + // since there can't be naming collisions with contracts and events, + // we need to decide whether we're calling a method or an event + var args Arguments + if method, ok := abi.Methods[name]; ok { + if len(data)%32 != 0 { + return nil, fmt.Errorf("abi: improperly formatted input: %s - Bytes: [%+v]", string(data), data) + } + args = method.Inputs + } + if event, ok := abi.Events[name]; ok { + args = event.Inputs + } + if args == nil { + return nil, fmt.Errorf("abi: could not locate named method or event: %s", name) + } + return args, nil +} + +// getArguments gets output arguments of the given [name] method. 
func (abi ABI) getArguments(name string, data []byte) (Arguments, error) { // since there can't be naming collisions with contracts and events, // we need to decide whether we're calling a method or an event @@ -110,6 +146,15 @@ func (abi ABI) getArguments(name string, data []byte) (Arguments, error) { return args, nil } +// UnpackInput unpacks the input according to the ABI specification. +func (abi ABI) UnpackInput(name string, data []byte) ([]interface{}, error) { + args, err := abi.getInputs(name, data) + if err != nil { + return nil, err + } + return args.Unpack(data) +} + // Unpack unpacks the output according to the abi specification. func (abi ABI) Unpack(name string, data []byte) ([]interface{}, error) { args, err := abi.getArguments(name, data) @@ -119,6 +164,21 @@ func (abi ABI) Unpack(name string, data []byte) ([]interface{}, error) { return args.Unpack(data) } +// UnpackInputIntoInterface unpacks the input in v according to the ABI specification. +// It performs an additional copy. Please only use, if you want to unpack into a +// structure that does not strictly conform to the ABI structure (e.g. has additional arguments) +func (abi ABI) UnpackInputIntoInterface(v interface{}, name string, data []byte) error { + args, err := abi.getInputs(name, data) + if err != nil { + return err + } + unpacked, err := args.Unpack(data) + if err != nil { + return err + } + return args.Copy(v, unpacked) +} + // UnpackIntoInterface unpacks the output in v according to the abi specification. // It performs an additional copy. Please only use, if you want to unpack into a // structure that does not strictly conform to the abi structure (e.g. has additional arguments) diff --git a/coreth/accounts/abi/abi_test.go b/coreth/accounts/abi/abi_test.go index e2687086..d156d82c 100644 --- a/coreth/accounts/abi/abi_test.go +++ b/coreth/accounts/abi/abi_test.go @@ -175,8 +175,9 @@ func TestInvalidABI(t *testing.T) { // TestConstructor tests a constructor function. 
// The test is based on the following contract: -// contract TestConstructor { -// constructor(uint256 a, uint256 b) public{} +// +// contract TestConstructor { +// constructor(uint256 a, uint256 b) public{} // } func TestConstructor(t *testing.T) { json := `[{ "inputs": [{"internalType": "uint256","name": "a","type": "uint256" },{ "internalType": "uint256","name": "b","type": "uint256"}],"stateMutability": "nonpayable","type": "constructor"}]` @@ -734,16 +735,19 @@ func TestBareEvents(t *testing.T) { } // TestUnpackEvent is based on this contract: -// contract T { -// event received(address sender, uint amount, bytes memo); -// event receivedAddr(address sender); -// function receive(bytes memo) external payable { -// received(msg.sender, msg.value, memo); -// receivedAddr(msg.sender); -// } -// } +// +// contract T { +// event received(address sender, uint amount, bytes memo); +// event receivedAddr(address sender); +// function receive(bytes memo) external payable { +// received(msg.sender, msg.value, memo); +// receivedAddr(msg.sender); +// } +// } +// // When receive("X") is called with sender 0x00... 
and value 1, it produces this tx receipt: -// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} +// +// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 
000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} func TestUnpackEvent(t *testing.T) { const abiJSON = `[{"constant":false,"inputs":[{"name":"memo","type":"bytes"}],"name":"receive","outputs":[],"payable":true,"stateMutability":"payable","type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"},{"indexed":false,"name":"amount","type":"uint256"},{"indexed":false,"name":"memo","type":"bytes"}],"name":"received","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"sender","type":"address"}],"name":"receivedAddr","type":"event"}]` abi, err := JSON(strings.NewReader(abiJSON)) @@ -1088,8 +1092,9 @@ func TestDoubleDuplicateMethodNames(t *testing.T) { // TestDoubleDuplicateEventNames checks that if send0 already exists, there won't be a name // conflict and that the second send event will be renamed send1. // The test runs the abi of the following contract. -// contract DuplicateEvent { -// event send(uint256 a); +// +// contract DuplicateEvent { +// event send(uint256 a); // event send0(); // event send(); // } @@ -1116,7 +1121,8 @@ func TestDoubleDuplicateEventNames(t *testing.T) { // TestUnnamedEventParam checks that an event with unnamed parameters is // correctly handled. // The test runs the abi of the following contract. 
-// contract TestEvent { +// +// contract TestEvent { // event send(uint256, uint256); // } func TestUnnamedEventParam(t *testing.T) { diff --git a/coreth/accounts/abi/bind/backends/simulated.go b/coreth/accounts/abi/bind/backends/simulated.go index 38aa5ad2..cc1e2f1b 100644 --- a/coreth/accounts/abi/bind/backends/simulated.go +++ b/coreth/accounts/abi/bind/backends/simulated.go @@ -109,10 +109,13 @@ type SimulatedBackend struct { func NewSimulatedBackendWithDatabase(database ethdb.Database, alloc core.GenesisAlloc, gasLimit uint64) *SimulatedBackend { cpcfg := params.TestChainConfig cpcfg.ChainID = big.NewInt(1337) - genesis := core.Genesis{Config: cpcfg, GasLimit: gasLimit, Alloc: alloc} - genesis.MustCommit(database) + genesis := core.Genesis{ + Config: cpcfg, + GasLimit: gasLimit, + Alloc: alloc, + } cacheConfig := &core.CacheConfig{} - blockchain, _ := core.NewBlockChain(database, cacheConfig, genesis.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ := core.NewBlockChain(database, cacheConfig, &genesis, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) backend := &SimulatedBackend{ database: database, diff --git a/coreth/accounts/abi/bind/backends/simulated_test.go b/coreth/accounts/abi/bind/backends/simulated_test.go index 037549ab..9c44b249 100644 --- a/coreth/accounts/abi/bind/backends/simulated_test.go +++ b/coreth/accounts/abi/bind/backends/simulated_test.go @@ -104,17 +104,18 @@ func TestSimulatedBackend(t *testing.T) { var testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") -// the following is based on this contract: -// contract T { -// event received(address sender, uint amount, bytes memo); -// event receivedAddr(address sender); +// the following is based on this contract: // -// function receive(bytes calldata memo) external payable returns (string memory res) { -// emit received(msg.sender, msg.value, memo); -// emit receivedAddr(msg.sender); -// return "hello world"; -// } 
-// } +// contract T { +// event received(address sender, uint amount, bytes memo); +// event receivedAddr(address sender); +// +// function receive(bytes calldata memo) external payable returns (string memory res) { +// emit received(msg.sender, msg.value, memo); +// emit receivedAddr(msg.sender); +// return "hello world"; +// } +// } const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": "receivedAddr", "type": "event" } ]` const abiBin = `0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058
aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` const deployedCode = `60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` @@ -433,12 +434,13 @@ func TestEstimateGas(t *testing.T) { /* pragma solidity ^0.6.4; contract GasEstimation { - function PureRevert() public { revert(); } - function Revert() public { revert("revert reason");} - function OOG() public { for (uint 
i = 0; ; i++) {}} - function Assert() public { assert(false);} - function Valid() public {} - }*/ + function PureRevert() public { revert(); } + function Revert() public { revert("revert reason");} + function OOG() public { for (uint i = 0; ; i++) {}} + function Assert() public { assert(false);} + function Valid() public {} + } + */ const contractAbi = "[{\"inputs\":[],\"name\":\"Assert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"OOG\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"PureRevert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Revert\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"Valid\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]" const contractBin = "0x60806040523480156100115760006000fd5b50610017565b61016e806100266000396000f3fe60806040523480156100115760006000fd5b506004361061005c5760003560e01c806350f6fe3414610062578063aa8b1d301461006c578063b9b046f914610076578063d8b9839114610080578063e09fface1461008a5761005c565b60006000fd5b61006a610094565b005b6100746100ad565b005b61007e6100b5565b005b6100886100c2565b005b610092610135565b005b6000600090505b5b808060010191505061009b565b505b565b60006000fd5b565b600015156100bf57fe5b5b565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600d8152602001807f72657665727420726561736f6e0000000000000000000000000000000000000081526020015060200191505060405180910390fd5b565b5b56fea2646970667358221220345bbcbb1a5ecf22b53a78eaebf95f8ee0eceff6d10d4b9643495084d2ec934a64736f6c63430006040033" @@ -1015,7 +1017,8 @@ func TestCodeAt(t *testing.T) { } // When receive("X") is called with sender 0x00... 
and value 1, it produces this tx receipt: -// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} +// +// receipt{status=1 cgas=23949 bloom=00000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000040200000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 logs=[log: b6818c8064f645cd82d99b59a1a267d6d61117ef [75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed] 
000000000000000000000000376c47978271565f56deb45495afa69e59c16ab200000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000158 9ae378b6d4409eada347a5dc0c180f186cb62dc68fcc0f043425eb917335aa28 0 95d429d309bb9d753954195fe2d69bd140b4ae731b9b5b605c34323de162cf00 0]} func TestPendingAndCallContract(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) sim := simTestBackend(testAddr) @@ -1078,27 +1081,27 @@ func TestPendingAndCallContract(t *testing.T) { // This test is based on the following contract: /* contract Reverter { - function revertString() public pure{ - require(false, "some error"); - } - function revertNoString() public pure { - require(false, ""); - } - function revertASM() public pure { - assembly { - revert(0x0, 0x0) - } - } - function noRevert() public pure { - assembly { - // Assembles something that looks like require(false, "some error") but is not reverted - mstore(0x0, 0x08c379a000000000000000000000000000000000000000000000000000000000) - mstore(0x4, 0x0000000000000000000000000000000000000000000000000000000000000020) - mstore(0x24, 0x000000000000000000000000000000000000000000000000000000000000000a) - mstore(0x44, 0x736f6d65206572726f7200000000000000000000000000000000000000000000) - return(0x0, 0x64) - } - } + function revertString() public pure{ + require(false, "some error"); + } + function revertNoString() public pure { + require(false, ""); + } + function revertASM() public pure { + assembly { + revert(0x0, 0x0) + } + } + function noRevert() public pure { + assembly { + // Assembles something that looks like require(false, "some error") but is not reverted + mstore(0x0, 0x08c379a000000000000000000000000000000000000000000000000000000000) + mstore(0x4, 0x0000000000000000000000000000000000000000000000000000000000000020) + mstore(0x24, 0x000000000000000000000000000000000000000000000000000000000000000a) + 
mstore(0x44, 0x736f6d65206572726f7200000000000000000000000000000000000000000000) + return(0x0, 0x64) + } + } }*/ func TestCallContractRevert(t *testing.T) { testAddr := crypto.PubkeyToAddress(testKey.PublicKey) @@ -1225,11 +1228,11 @@ func TestFork(t *testing.T) { /* Example contract to test event emission: -pragma solidity >=0.7.0 <0.9.0; -contract Callable { - event Called(); - function Call() public { emit Called(); } -} + pragma solidity >=0.7.0 <0.9.0; + contract Callable { + event Called(); + function Call() public { emit Called(); } + } */ // The fork tests are commented out because transactions are not indexed in coreth until they are marked // as accepted, which breaks the logic of these tests. @@ -1249,7 +1252,7 @@ contract Callable { // // 7. Mine two blocks to trigger a reorg. // // 8. Check that the event was removed. // // 9. Re-send the transaction and mine a block. -// // 10. Check that the event was reborn. +// // 10. Check that the event was reborn. // func TestForkLogsReborn(t *testing.T) { // testAddr := crypto.PubkeyToAddress(testKey.PublicKey) // sim := simTestBackend(testAddr) diff --git a/coreth/accounts/abi/bind/base.go b/coreth/accounts/abi/bind/base.go index 91eb43ec..7c6df083 100644 --- a/coreth/accounts/abi/bind/base.go +++ b/coreth/accounts/abi/bind/base.go @@ -43,6 +43,8 @@ import ( "github.com/ethereum/go-ethereum/event" ) +const basefeeWiggleMultiplier = 2 + var ( ErrNilAssetAmount = errors.New("cannot specify nil asset amount for native asset call") errNativeAssetDeployContract = errors.New("cannot specify native asset params while deploying a contract") @@ -316,7 +318,7 @@ func (c *BoundContract) createDynamicTx(opts *TransactOpts, contract *common.Add if gasFeeCap == nil { gasFeeCap = new(big.Int).Add( gasTipCap, - new(big.Int).Mul(head.BaseFee, big.NewInt(2)), + new(big.Int).Mul(head.BaseFee, big.NewInt(basefeeWiggleMultiplier)), ) } if gasFeeCap.Cmp(gasTipCap) < 0 { diff --git a/coreth/accounts/abi/bind/bind_test.go 
b/coreth/accounts/abi/bind/bind_test.go index 8cf1d37f..b06e6bba 100644 --- a/coreth/accounts/abi/bind/bind_test.go +++ b/coreth/accounts/abi/bind/bind_test.go @@ -2151,7 +2151,7 @@ func golangBindings(t *testing.T, overload bool) { if out, err := replacer.CombinedOutput(); err != nil { t.Fatalf("failed to replace binding test dependency to current source tree: %v\n%s", err, out) } - tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.18") + tidier := exec.Command(gocmd, "mod", "tidy", "-compat=1.19") tidier.Dir = pkg if out, err := tidier.CombinedOutput(); err != nil { t.Fatalf("failed to tidy Go module file: %v\n%s", err, out) diff --git a/coreth/accounts/abi/reflect.go b/coreth/accounts/abi/reflect.go index 07bada39..9f7a07a0 100644 --- a/coreth/accounts/abi/reflect.go +++ b/coreth/accounts/abi/reflect.go @@ -35,16 +35,19 @@ import ( ) // ConvertType converts an interface of a runtime type into a interface of the -// given type -// e.g. turn -// var fields []reflect.StructField -// fields = append(fields, reflect.StructField{ -// Name: "X", -// Type: reflect.TypeOf(new(big.Int)), -// Tag: reflect.StructTag("json:\"" + "x" + "\""), -// } -// into -// type TupleT struct { X *big.Int } +// given type, e.g. turn this code: +// +// var fields []reflect.StructField +// +// fields = append(fields, reflect.StructField{ +// Name: "X", +// Type: reflect.TypeOf(new(big.Int)), +// Tag: reflect.StructTag("json:\"" + "x" + "\""), +// } +// +// into: +// +// type TupleT struct { X *big.Int } func ConvertType(in interface{}, proto interface{}) interface{} { protoType := reflect.TypeOf(proto) if reflect.TypeOf(in).ConvertibleTo(protoType) { @@ -180,11 +183,13 @@ func setStruct(dst, src reflect.Value) error { } // mapArgNamesToStructFields maps a slice of argument names to struct fields. -// first round: for each Exportable field that contains a `abi:""` tag -// and this field name exists in the given argument name list, pair them together. 
-// second round: for each argument name that has not been already linked, -// find what variable is expected to be mapped into, if it exists and has not been -// used, pair them. +// +// first round: for each Exportable field that contains a `abi:""` tag and this field name +// exists in the given argument name list, pair them together. +// +// second round: for each argument name that has not been already linked, find what +// variable is expected to be mapped into, if it exists and has not been used, pair them. +// // Note this function assumes the given value is a struct value. func mapArgNamesToStructFields(argNames []string, value reflect.Value) (map[string]string, error) { typ := value.Type() diff --git a/coreth/accounts/abi/utils.go b/coreth/accounts/abi/utils.go index 45b27ab4..62b5fcd4 100644 --- a/coreth/accounts/abi/utils.go +++ b/coreth/accounts/abi/utils.go @@ -31,15 +31,14 @@ import "fmt" // ResolveNameConflict returns the next available name for a given thing. // This helper can be used for lots of purposes: // -// - In solidity function overloading is supported, this function can fix -// the name conflicts of overloaded functions. -// - In golang binding generation, the parameter(in function, event, error, -// and struct definition) name will be converted to camelcase style which -// may eventually lead to name conflicts. -// -// Name conflicts are mostly resolved by adding number suffix. -// e.g. if the abi contains Methods send, send1 -// ResolveNameConflict would return send2 for input send. +// - In solidity function overloading is supported, this function can fix +// the name conflicts of overloaded functions. +// - In golang binding generation, the parameter(in function, event, error, +// and struct definition) name will be converted to camelcase style which +// may eventually lead to name conflicts. +// +// Name conflicts are mostly resolved by adding number suffix. e.g. 
if the abi contains +// Methods "send" and "send1", ResolveNameConflict would return "send2" for input "send". func ResolveNameConflict(rawName string, used func(string) bool) string { name := rawName ok := used(name) diff --git a/coreth/accounts/accounts.go b/coreth/accounts/accounts.go index b59fe580..acde6b43 100644 --- a/coreth/accounts/accounts.go +++ b/coreth/accounts/accounts.go @@ -187,7 +187,8 @@ type Backend interface { // safely used to calculate a signature from. // // The hash is calculated as -// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). +// +// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). // // This gives context to the signed message and prevents signing of transactions. func TextHash(data []byte) []byte { @@ -199,7 +200,8 @@ func TextHash(data []byte) []byte { // safely used to calculate a signature from. // // The hash is calculated as -// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). +// +// keccak256("\x19Ethereum Signed Message:\n"${message length}${message}). // // This gives context to the signed message and prevents signing of transactions. 
func TextAndHash(data []byte) ([]byte, string) { diff --git a/coreth/accounts/hd.go b/coreth/accounts/hd.go index e081038a..50f030fe 100644 --- a/coreth/accounts/hd.go +++ b/coreth/accounts/hd.go @@ -56,7 +56,7 @@ var LegacyLedgerBaseDerivationPath = DerivationPath{0x80000000 + 44, 0x80000000 // The BIP-32 spec https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki // defines derivation paths to be of the form: // -// m / purpose' / coin_type' / account' / change / address_index +// m / purpose' / coin_type' / account' / change / address_index // // The BIP-44 spec https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki // defines that the `purpose` be 44' (or 0x8000002C) for crypto currencies, and diff --git a/coreth/accounts/keystore/account_cache_test.go b/coreth/accounts/keystore/account_cache_test.go index c96c1dbd..a84a37e0 100644 --- a/coreth/accounts/keystore/account_cache_test.go +++ b/coreth/accounts/keystore/account_cache_test.go @@ -61,7 +61,9 @@ var ( ) func TestWatchNewFile(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } t.Parallel() dir, ks := tmpKeyStore(t, false) @@ -326,7 +328,9 @@ func waitForAccounts(wantAccounts []accounts.Account, ks *KeyStore) error { // TestUpdatedKeyfileContents tests that updating the contents of a keystore file // is noticed by the watcher, and the account cache is updated accordingly func TestUpdatedKeyfileContents(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } t.Parallel() // Create a temporary keystore to test with diff --git a/coreth/accounts/keystore/keystore_test.go b/coreth/accounts/keystore/keystore_test.go index 0a778647..b543dfd7 100644 --- a/coreth/accounts/keystore/keystore_test.go +++ b/coreth/accounts/keystore/keystore_test.go @@ -285,7 +285,9 @@ type walletEvent struct { // Tests that wallet notifications and correctly fired when accounts are added // or deleted from the keystore. 
func TestWalletNotifications(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } _, ks := tmpKeyStore(t, false) // Subscribe to the wallet feed and collect events. diff --git a/coreth/accounts/scwallet/wallet.go b/coreth/accounts/scwallet/wallet.go index c377c372..a95701b6 100644 --- a/coreth/accounts/scwallet/wallet.go +++ b/coreth/accounts/scwallet/wallet.go @@ -889,6 +889,7 @@ func (s *Session) walletStatus() (*walletStatus, error) { } // derivationPath fetches the wallet's current derivation path from the card. +// //lint:ignore U1000 needs to be added to the console interface func (s *Session) derivationPath() (accounts.DerivationPath, error) { response, err := s.Channel.transmitEncrypted(claSCWallet, insStatus, statusP1Path, 0, nil) @@ -1004,6 +1005,7 @@ func (s *Session) derive(path accounts.DerivationPath) (accounts.Account, error) } // keyExport contains information on an exported keypair. +// //lint:ignore U1000 needs to be added to the console interface type keyExport struct { PublicKey []byte `asn1:"tag:0"` @@ -1011,6 +1013,7 @@ type keyExport struct { } // publicKey returns the public key for the current derivation path. 
+// //lint:ignore U1000 needs to be added to the console interface func (s *Session) publicKey() ([]byte, error) { response, err := s.Channel.transmitEncrypted(claSCWallet, insExportKey, exportP1Any, exportP2Pubkey, nil) diff --git a/coreth/accounts/url.go b/coreth/accounts/url.go index 5764f977..d7ebeaef 100644 --- a/coreth/accounts/url.go +++ b/coreth/accounts/url.go @@ -102,10 +102,9 @@ func (u *URL) UnmarshalJSON(input []byte) error { // Cmp compares x and y and returns: // -// -1 if x < y -// 0 if x == y -// +1 if x > y -// +// -1 if x < y +// 0 if x == y +// +1 if x > y func (u URL) Cmp(url URL) int { if u.Scheme == url.Scheme { return strings.Compare(u.Path, url.Path) diff --git a/coreth/consensus/dummy/consensus.go b/coreth/consensus/dummy/consensus.go index 38c8c054..935ecdf9 100644 --- a/coreth/consensus/dummy/consensus.go +++ b/coreth/consensus/dummy/consensus.go @@ -96,32 +96,38 @@ func (self *DummyEngine) verifyHeaderGasFields(config *params.ChainConfig, heade if header.GasUsed > header.GasLimit { return fmt.Errorf("invalid gasUsed: have %d, gasLimit %d", header.GasUsed, header.GasLimit) } - if config.IsSongbirdCode() { - // Verify that the gas limit is correct for the current phase - if config.IsSongbirdTransition(timestamp) { - if header.GasLimit != params.SgbTransitionGasLimit { - return fmt.Errorf("expected gas limit to be %d in sgb transition but got %d", params.SgbTransitionGasLimit, header.GasLimit) - } - } else if config.IsApricotPhase5(timestamp) { - if header.GasLimit != params.SgbApricotPhase5GasLimit { - return fmt.Errorf("expected gas limit to be %d in apricot phase 5 but got %d", params.SgbApricotPhase5GasLimit, header.GasLimit) - } + if config.IsCortina(timestamp) { + if header.GasLimit != params.CortinaGasLimit { + return fmt.Errorf("expected gas limit to be %d in Cortina, but found %d", params.CortinaGasLimit, header.GasLimit) } } else { - if config.IsApricotPhase1(timestamp) { - if header.GasLimit != params.ApricotPhase1GasLimit { - 
return fmt.Errorf("expected gas limit to be %d, but found %d", params.ApricotPhase1GasLimit, header.GasLimit) + if config.IsSongbirdCode() { + // Verify that the gas limit is correct for the current phase + if config.IsSongbirdTransition(timestamp) { + if header.GasLimit != params.SgbTransitionGasLimit { + return fmt.Errorf("expected gas limit to be %d in SgbTransition but found %d", params.SgbTransitionGasLimit, header.GasLimit) + } + } else if config.IsApricotPhase5(timestamp) { + if header.GasLimit != params.SgbApricotPhase5GasLimit { + return fmt.Errorf("expected gas limit to be %d in ApricotPhase5 but found %d", params.SgbApricotPhase5GasLimit, header.GasLimit) + } } } else { - // Verify that the gas limit remains within allowed bounds - diff := int64(parent.GasLimit) - int64(header.GasLimit) - if diff < 0 { - diff *= -1 - } - limit := parent.GasLimit / params.GasLimitBoundDivisor - - if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit { - return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit) + if config.IsApricotPhase1(timestamp) { + if header.GasLimit != params.ApricotPhase1GasLimit { + return fmt.Errorf("expected gas limit to be %d in ApricotPhase1, but found %d", params.ApricotPhase1GasLimit, header.GasLimit) + } + } else { + // Verify that the gas limit remains within allowed bounds + diff := int64(parent.GasLimit) - int64(header.GasLimit) + if diff < 0 { + diff *= -1 + } + limit := parent.GasLimit / params.GasLimitBoundDivisor + + if uint64(diff) >= limit || header.GasLimit < params.MinGasLimit { + return fmt.Errorf("invalid gas limit: have %d, want %d += %d", header.GasLimit, parent.GasLimit, limit) + } } } } diff --git a/coreth/consensus/misc/dao.go b/coreth/consensus/misc/dao.go deleted file mode 100644 index a0ab4029..00000000 --- a/coreth/consensus/misc/dao.go +++ /dev/null @@ -1,95 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. 
-// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package misc - -import ( - "bytes" - "errors" - "math/big" - - "github.com/ava-labs/coreth/core/state" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/params" -) - -var ( - // ErrBadProDAOExtra is returned if a header doesn't support the DAO fork on a - // pro-fork client. - ErrBadProDAOExtra = errors.New("bad DAO pro-fork extra-data") - - // ErrBadNoDAOExtra is returned if a header does support the DAO fork on a no- - // fork client. - ErrBadNoDAOExtra = errors.New("bad DAO no-fork extra-data") -) - -// VerifyDAOHeaderExtraData validates the extra-data field of a block header to -// ensure it conforms to DAO hard-fork rules. 
-// -// DAO hard-fork extension to the header validity: -// a) if the node is no-fork, do not accept blocks in the [fork, fork+10) range -// with the fork specific extra-data set -// b) if the node is pro-fork, require blocks in the specific range to have the -// unique extra-data set. -func VerifyDAOHeaderExtraData(config *params.ChainConfig, header *types.Header) error { - // Short circuit validation if the node doesn't care about the DAO fork - if config.DAOForkBlock == nil { - return nil - } - // Make sure the block is within the fork's modified extra-data range - limit := new(big.Int).Add(config.DAOForkBlock, params.DAOForkExtraRange) - if header.Number.Cmp(config.DAOForkBlock) < 0 || header.Number.Cmp(limit) >= 0 { - return nil - } - // Depending on whether we support or oppose the fork, validate the extra-data contents - if config.DAOForkSupport { - if !bytes.Equal(header.Extra, params.DAOForkBlockExtra) { - return ErrBadProDAOExtra - } - } else { - if bytes.Equal(header.Extra, params.DAOForkBlockExtra) { - return ErrBadNoDAOExtra - } - } - // All ok, header has the same extra-data we expect - return nil -} - -// ApplyDAOHardFork modifies the state database according to the DAO hard-fork -// rules, transferring all balances of a set of DAO accounts to a single refund -// contract. 
-func ApplyDAOHardFork(statedb *state.StateDB) { - // Retrieve the contract to refund balances into - if !statedb.Exist(params.DAORefundContract) { - statedb.CreateAccount(params.DAORefundContract) - } - - // Move every DAO account and extra-balance account funds into the refund contract - for _, addr := range params.DAODrainList() { - statedb.AddBalance(params.DAORefundContract, statedb.GetBalance(addr)) - statedb.SetBalance(addr, new(big.Int)) - } -} diff --git a/coreth/core/bench_test.go b/coreth/core/bench_test.go index 331c676f..0bacc6f6 100644 --- a/coreth/core/bench_test.go +++ b/coreth/core/bench_test.go @@ -88,7 +88,7 @@ func genValueTx(nbytes int) func(int, *BlockGen) { return func(i int, gen *BlockGen) { toaddr := common.Address{} data := make([]byte, nbytes) - gas, _ := IntrinsicGas(data, nil, false, false, false) + gas, _ := IntrinsicGas(data, nil, false, false, false, false) tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(benchRootAddr), toaddr, big.NewInt(1), gas, big.NewInt(225000000000), data), types.HomesteadSigner{}, benchRootKey) gen.AddTx(tx) } @@ -156,16 +156,15 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { // Generate a chain of b.N blocks using the supplied block // generator function. - gspec := Genesis{ + gspec := &Genesis{ Config: params.TestChainConfig, Alloc: GenesisAlloc{benchRootAddr: {Balance: benchRootFunds}}, } - genesis := gspec.MustCommit(db) - chain, _, _ := GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, b.N, 10, gen) + _, chain, _, _ := GenerateChainWithGenesis(gspec, dummy.NewFaker(), b.N, 10, gen) // Time the insertion of the new chain. // State and blocks are stored in the same DB. 
- chainman, _ := NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + chainman, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) defer chainman.Stop() b.ReportAllocs() b.ResetTimer() @@ -230,6 +229,11 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) { rawdb.WriteHeader(db, header) rawdb.WriteCanonicalHash(db, hash, n) + if n == 0 { + rawdb.WriteChainConfig(db, hash, params.TestChainConfig) + } + rawdb.WriteHeadHeaderHash(db, hash) + if full || n == 0 { block := types.NewBlockWithHeader(header) rawdb.WriteBody(db, hash, n, block.Body()) @@ -268,7 +272,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } - chain, err := NewBlockChain(db, DefaultCacheConfig, params.TestChainConfig, dummy.NewFaker(), vm.Config{}, common.Hash{}) + chain, err := NewBlockChain(db, DefaultCacheConfig, nil, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) if err != nil { b.Fatalf("error creating chain: %v", err) } diff --git a/coreth/core/blockchain.go b/coreth/core/blockchain.go index 3d8c0f08..b102ce27 100644 --- a/coreth/core/blockchain.go +++ b/coreth/core/blockchain.go @@ -34,6 +34,7 @@ import ( "io" "math/big" "runtime" + "strings" "sync" "sync/atomic" "time" @@ -55,10 +56,41 @@ import ( ) var ( - acceptorQueueGauge = metrics.NewRegisteredGauge("blockchain/acceptor/queue/size", nil) - processedBlockGasUsedCounter = metrics.NewRegisteredCounter("blockchain/blocks/gas/used/processed", nil) - acceptedBlockGasUsedCounter = metrics.NewRegisteredCounter("blockchain/blocks/gas/used/accepted", nil) - badBlockCounter = metrics.NewRegisteredCounter("blockchain/blocks/bad/count", nil) + accountReadTimer = metrics.NewRegisteredCounter("chain/account/reads", nil) + accountHashTimer = metrics.NewRegisteredCounter("chain/account/hashes", nil) + accountUpdateTimer = 
metrics.NewRegisteredCounter("chain/account/updates", nil) + accountCommitTimer = metrics.NewRegisteredCounter("chain/account/commits", nil) + storageReadTimer = metrics.NewRegisteredCounter("chain/storage/reads", nil) + storageHashTimer = metrics.NewRegisteredCounter("chain/storage/hashes", nil) + storageUpdateTimer = metrics.NewRegisteredCounter("chain/storage/updates", nil) + storageCommitTimer = metrics.NewRegisteredCounter("chain/storage/commits", nil) + snapshotAccountReadTimer = metrics.NewRegisteredCounter("chain/snapshot/account/reads", nil) + snapshotStorageReadTimer = metrics.NewRegisteredCounter("chain/snapshot/storage/reads", nil) + snapshotCommitTimer = metrics.NewRegisteredCounter("chain/snapshot/commits", nil) + triedbCommitTimer = metrics.NewRegisteredCounter("chain/triedb/commits", nil) + + blockInsertTimer = metrics.NewRegisteredCounter("chain/block/inserts", nil) + blockInsertCount = metrics.NewRegisteredCounter("chain/block/inserts/count", nil) + blockContentValidationTimer = metrics.NewRegisteredCounter("chain/block/validations/content", nil) + blockStateInitTimer = metrics.NewRegisteredCounter("chain/block/inits/state", nil) + blockExecutionTimer = metrics.NewRegisteredCounter("chain/block/executions", nil) + blockTrieOpsTimer = metrics.NewRegisteredCounter("chain/block/trie", nil) + blockStateValidationTimer = metrics.NewRegisteredCounter("chain/block/validations/state", nil) + blockWriteTimer = metrics.NewRegisteredCounter("chain/block/writes", nil) + + acceptorQueueGauge = metrics.NewRegisteredGauge("chain/acceptor/queue/size", nil) + acceptorWorkTimer = metrics.NewRegisteredCounter("chain/acceptor/work", nil) + acceptorWorkCount = metrics.NewRegisteredCounter("chain/acceptor/work/count", nil) + processedBlockGasUsedCounter = metrics.NewRegisteredCounter("chain/block/gas/used/processed", nil) + acceptedBlockGasUsedCounter = metrics.NewRegisteredCounter("chain/block/gas/used/accepted", nil) + badBlockCounter = 
metrics.NewRegisteredCounter("chain/block/bad/count", nil) + + txUnindexTimer = metrics.NewRegisteredCounter("chain/txs/unindex", nil) + acceptedTxsCounter = metrics.NewRegisteredCounter("chain/txs/accepted", nil) + processedTxsCounter = metrics.NewRegisteredCounter("chain/txs/processed", nil) + + acceptedLogsCounter = metrics.NewRegisteredCounter("chain/logs/accepted", nil) + processedLogsCounter = metrics.NewRegisteredCounter("chain/logs/processed", nil) ErrRefuseToCorruptArchiver = errors.New("node has operated with pruning disabled, shutting down to prevent missing tries") @@ -72,7 +104,6 @@ const ( receiptsCacheLimit = 32 txLookupCacheLimit = 1024 badBlockLimit = 10 - TriesInMemory = 128 // BlockChainVersion ensures that an incompatible database forces a resync from scratch. // @@ -102,26 +133,34 @@ const ( // statsReportLimit is the time limit during import and export after which we // always print out progress. This avoids the user wondering what's going on. statsReportLimit = 8 * time.Second + + // trieCleanCacheStatsNamespace is the namespace to surface stats from the trie + // clean cache's underlying fastcache. + trieCleanCacheStatsNamespace = "trie/memcache/clean/fastcache" ) // CacheConfig contains the configuration values for the trie caching/pruning // that's resident in a blockchain. type CacheConfig struct { - TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory - TrieDirtyLimit int // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk - TrieDirtyCommitTarget int // Memory limit (MB) to target for the dirties cache before invoking commit - CommitInterval uint64 // Commit the trie every [CommitInterval] blocks. 
- Pruning bool // Whether to disable trie write caching and GC altogether (archive node) - AcceptorQueueLimit int // Blocks to queue before blocking during acceptance - PopulateMissingTries *uint64 // If non-nil, sets the starting height for re-generating historical tries. - PopulateMissingTriesParallelism int // Is the number of readers to use when trying to populate missing tries. - AllowMissingTries bool // Whether to allow an archive node to run with pruning enabled - SnapshotDelayInit bool // Whether to initialize snapshots on startup or wait for external call - SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory - SnapshotAsync bool // Generate snapshot tree async - SnapshotVerify bool // Verify generated snapshots - SkipSnapshotRebuild bool // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests) - Preimages bool // Whether to store preimage of trie key to the disk + TrieCleanLimit int // Memory allowance (MB) to use for caching trie nodes in memory + TrieCleanJournal string // Disk journal for saving clean cache entries. + TrieCleanRejournal time.Duration // Time interval to dump clean cache to disk periodically + TrieDirtyLimit int // Memory limit (MB) at which to block on insert and force a flush of dirty trie nodes to disk + TrieDirtyCommitTarget int // Memory limit (MB) to target for the dirties cache before invoking commit + CommitInterval uint64 // Commit the trie every [CommitInterval] blocks. + Pruning bool // Whether to disable trie write caching and GC altogether (archive node) + AcceptorQueueLimit int // Blocks to queue before blocking during acceptance + PopulateMissingTries *uint64 // If non-nil, sets the starting height for re-generating historical tries. + PopulateMissingTriesParallelism int // Is the number of readers to use when trying to populate missing tries. 
+ AllowMissingTries bool // Whether to allow an archive node to run with pruning enabled + SnapshotDelayInit bool // Whether to initialize snapshots on startup or wait for external call + SnapshotLimit int // Memory allowance (MB) to use for caching snapshot entries in memory + SnapshotAsync bool // Generate snapshot tree async + SnapshotVerify bool // Verify generated snapshots + SkipSnapshotRebuild bool // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests) + Preimages bool // Whether to store preimage of trie key to the disk + AcceptedCacheSize int // Depth of accepted headers cache and accepted logs cache at the accepted tip + TxLookupLimit uint64 // Number of recent blocks for which to maintain transaction lookup indices } var DefaultCacheConfig = &CacheConfig{ @@ -132,6 +171,7 @@ var DefaultCacheConfig = &CacheConfig{ CommitInterval: 4096, AcceptorQueueLimit: 64, // Provides 2 minutes of buffer (2s block target) for a commit delay SnapshotLimit: 256, + AcceptedCacheSize: 32, } // BlockChain represents the canonical chain given a database with a genesis @@ -215,19 +255,34 @@ type BlockChain struct { // during shutdown and in tests. acceptorWg sync.WaitGroup + // [wg] is used to wait for the async blockchain processes to finish on shutdown. + wg sync.WaitGroup + + // quit channel is used to listen for when the blockchain is shut down to close + // async processes. + // WaitGroups are used to ensure that async processes have finished during shutdown. + quit chan struct{} + // [acceptorTip] is the last block processed by the acceptor. This is // returned as the LastAcceptedBlock() to ensure clients get only fully // processed blocks. This may be equal to [lastAccepted]. acceptorTip *types.Block acceptorTipLock sync.Mutex + + // [flattenLock] prevents the [acceptor] from flattening snapshots while + // a block is being verified. 
+ flattenLock sync.Mutex + + // [acceptedLogsCache] stores recently accepted logs to improve the performance of eth_getLogs. + acceptedLogsCache FIFOCache[common.Hash, [][]*types.Log] } // NewBlockChain returns a fully initialised block chain using information // available in the database. It initialises the default Ethereum Validator and // Processor. func NewBlockChain( - db ethdb.Database, cacheConfig *CacheConfig, chainConfig *params.ChainConfig, engine consensus.Engine, - vmConfig vm.Config, lastAcceptedHash common.Hash, + db ethdb.Database, cacheConfig *CacheConfig, genesis *Genesis, engine consensus.Engine, + vmConfig vm.Config, lastAcceptedHash common.Hash, skipChainConfigCheckCompatible bool, ) (*BlockChain, error) { if cacheConfig == nil { return nil, errCacheConfigNotSpecified @@ -238,30 +293,51 @@ func NewBlockChain( txLookupCache, _ := lru.New(txLookupCacheLimit) badBlocks, _ := lru.New(badBlockLimit) + // Setup the genesis block, commit the provided genesis specification + // to database if the genesis block is not present yet, or load the + // stored one from database. + // Note: In go-ethereum, the code rewinds the chain on an incompatible config upgrade. + // We don't do this and expect the node operator to always update their node's configuration + // before network upgrades take effect. 
+ chainConfig, _, err := SetupGenesisBlock(db, genesis, lastAcceptedHash, skipChainConfigCheckCompatible) + if err != nil { + return nil, err + } + log.Info("") + log.Info(strings.Repeat("-", 153)) + for _, line := range strings.Split(chainConfig.String(), "\n") { + log.Info(line) + } + log.Info(strings.Repeat("-", 153)) + log.Info("") + bc := &BlockChain{ chainConfig: chainConfig, cacheConfig: cacheConfig, db: db, stateCache: state.NewDatabaseWithConfig(db, &trie.Config{ - Cache: cacheConfig.TrieCleanLimit, - Preimages: cacheConfig.Preimages, + Cache: cacheConfig.TrieCleanLimit, + Journal: cacheConfig.TrieCleanJournal, + Preimages: cacheConfig.Preimages, + StatsPrefix: trieCleanCacheStatsNamespace, }), - bodyCache: bodyCache, - receiptsCache: receiptsCache, - blockCache: blockCache, - txLookupCache: txLookupCache, - engine: engine, - vmConfig: vmConfig, - badBlocks: badBlocks, - senderCacher: newTxSenderCacher(runtime.NumCPU()), - acceptorQueue: make(chan *types.Block, cacheConfig.AcceptorQueueLimit), + bodyCache: bodyCache, + receiptsCache: receiptsCache, + blockCache: blockCache, + txLookupCache: txLookupCache, + engine: engine, + vmConfig: vmConfig, + badBlocks: badBlocks, + senderCacher: newTxSenderCacher(runtime.NumCPU()), + acceptorQueue: make(chan *types.Block, cacheConfig.AcceptorQueueLimit), + quit: make(chan struct{}), + acceptedLogsCache: NewFIFOCache[common.Hash, [][]*types.Log](cacheConfig.AcceptedCacheSize), } bc.validator = NewBlockValidator(chainConfig, bc, engine) bc.prefetcher = newStatePrefetcher(chainConfig, bc, engine) bc.processor = NewStateProcessor(chainConfig, bc, engine) - var err error - bc.hc, err = NewHeaderChain(db, chainConfig, engine) + bc.hc, err = NewHeaderChain(db, chainConfig, cacheConfig, engine) if err != nil { return nil, err } @@ -276,6 +352,13 @@ func NewBlockChain( // Create the state manager bc.stateManager = NewTrieWriter(bc.stateCache.TrieDB(), cacheConfig) + // loadLastState writes indices, so we should start the tx 
indexer after that. + // Start tx indexer/unindexer here. + if bc.cacheConfig.TxLookupLimit != 0 { + bc.wg.Add(1) + go bc.dispatchTxUnindexer() + } + // Re-generate current block state if it is missing if err := bc.loadLastState(lastAcceptedHash); err != nil { return nil, err @@ -312,12 +395,93 @@ func NewBlockChain( bc.initSnapshot(head) } + // Warm up [hc.acceptedNumberCache] and [acceptedLogsCache] + bc.warmAcceptedCaches() + // Start processing accepted blocks effects in the background go bc.startAcceptor() + // If periodic cache journal is required, spin it up. + if bc.cacheConfig.TrieCleanRejournal > 0 && len(bc.cacheConfig.TrieCleanJournal) > 0 { + log.Info("Starting to save trie clean cache periodically", "journalDir", bc.cacheConfig.TrieCleanJournal, "freq", bc.cacheConfig.TrieCleanRejournal) + + triedb := bc.stateCache.TrieDB() + bc.wg.Add(1) + go func() { + defer bc.wg.Done() + triedb.SaveCachePeriodically(bc.cacheConfig.TrieCleanJournal, bc.cacheConfig.TrieCleanRejournal, bc.quit) + }() + } + return bc, nil } +// dispatchTxUnindexer is responsible for the deletion of the +// transaction index. +// Invariant: If TxLookupLimit is 0, it means all tx indices will be preserved. +// Meaning that this function should never be called. +func (bc *BlockChain) dispatchTxUnindexer() { + defer bc.wg.Done() + txLookupLimit := bc.cacheConfig.TxLookupLimit + + // If the user just upgraded to a new version which supports transaction + // index pruning, write the new tail and remove anything older. 
+ if rawdb.ReadTxIndexTail(bc.db) == nil { + rawdb.WriteTxIndexTail(bc.db, 0) + } + + // unindexes transactions depending on user configuration + unindexBlocks := func(tail uint64, head uint64, done chan struct{}) { + start := time.Now() + defer func() { + txUnindexTimer.Inc(time.Since(start).Milliseconds()) + done <- struct{}{} + }() + + // Update the transaction index to the new chain state + if head-txLookupLimit+1 >= tail { + // Unindex a part of stale indices and forward index tail to HEAD-limit + rawdb.UnindexTransactions(bc.db, tail, head-txLookupLimit+1, bc.quit) + } + } + // Any reindexing done, start listening to chain events and moving the index window + var ( + done chan struct{} // Non-nil if background unindexing or reindexing routine is active. + headCh = make(chan ChainEvent, 1) // Buffered to avoid locking up the event feed + ) + sub := bc.SubscribeChainAcceptedEvent(headCh) + if sub == nil { + log.Warn("could not create chain accepted subscription to unindex txs") + return + } + defer sub.Unsubscribe() + + for { + select { + case head := <-headCh: + headNum := head.Block.NumberU64() + if headNum < txLookupLimit { + break + } + + if done == nil { + done = make(chan struct{}) + // Note: tail will not be nil since it is initialized in this function. + tail := rawdb.ReadTxIndexTail(bc.db) + go unindexBlocks(*tail, headNum, done) + } + case <-done: + done = nil + case <-bc.quit: + if done != nil { + log.Info("Waiting background transaction indexer to exit") + <-done + } + return + } + } +} + // writeBlockAcceptedIndices writes any indices that must be persisted for accepted block. // This includes the following: // - transaction lookup indices @@ -350,18 +514,60 @@ func (bc *BlockChain) flattenSnapshot(postAbortWork func() error, hash common.Ha return err } + // Ensure we avoid flattening the snapshot while we are processing a block, or + // block execution will fallback to reading from the trie (which is much + // slower). 
+ bc.flattenLock.Lock() + defer bc.flattenLock.Unlock() + // Flatten the entire snap Trie to disk // // Note: This resumes snapshot generation. return bc.snaps.Flatten(hash) } +// warmAcceptedCaches fetches previously accepted headers and logs from disk to +// pre-populate [hc.acceptedNumberCache] and [acceptedLogsCache]. +func (bc *BlockChain) warmAcceptedCaches() { + var ( + startTime = time.Now() + lastAccepted = bc.LastAcceptedBlock().NumberU64() + startIndex = uint64(1) + targetCacheSize = uint64(bc.cacheConfig.AcceptedCacheSize) + ) + if targetCacheSize == 0 { + log.Info("Not warming accepted cache because disabled") + return + } + if lastAccepted < startIndex { + // This could occur if we haven't accepted any blocks yet + log.Info("Not warming accepted cache because there are no accepted blocks") + return + } + cacheDiff := targetCacheSize - 1 // last accepted lookback is inclusive, so we reduce size by 1 + if cacheDiff < lastAccepted { + startIndex = lastAccepted - cacheDiff + } + for i := startIndex; i <= lastAccepted; i++ { + header := bc.GetHeaderByNumber(i) + if header == nil { + // This could happen if a node state-synced + log.Info("Exiting accepted cache warming early because header is nil", "height", i, "t", time.Since(startTime)) + break + } + bc.hc.acceptedNumberCache.Put(header.Number.Uint64(), header) + bc.acceptedLogsCache.Put(header.Hash(), rawdb.ReadLogs(bc.db, header.Hash(), header.Number.Uint64())) + } + log.Info("Warmed accepted caches", "start", startIndex, "end", lastAccepted, "t", time.Since(startTime)) +} + // startAcceptor starts processing items on the [acceptorQueue]. If a [nil] // object is placed on the [acceptorQueue], the [startAcceptor] will exit. 
func (bc *BlockChain) startAcceptor() { log.Info("Starting Acceptor", "queue length", bc.cacheConfig.AcceptorQueueLimit) for next := range bc.acceptorQueue { + start := time.Now() acceptorQueueGauge.Dec(1) if err := bc.flattenSnapshot(func() error { @@ -375,13 +581,16 @@ func (bc *BlockChain) startAcceptor() { log.Crit("failed to write accepted block effects", "err", err) } - // Fetch block logs - logs := bc.gatherBlockLogs(next.Hash(), next.NumberU64(), false) + // Ensure [hc.acceptedNumberCache] and [acceptedLogsCache] have latest content + bc.hc.acceptedNumberCache.Put(next.NumberU64(), next.Header()) + logs := rawdb.ReadLogs(bc.db, next.Hash(), next.NumberU64()) + bc.acceptedLogsCache.Put(next.Hash(), logs) // Update accepted feeds - bc.chainAcceptedFeed.Send(ChainEvent{Block: next, Hash: next.Hash(), Logs: logs}) - if len(logs) > 0 { - bc.logsAcceptedFeed.Send(logs) + flattenedLogs := types.FlattenLogs(logs) + bc.chainAcceptedFeed.Send(ChainEvent{Block: next, Hash: next.Hash(), Logs: flattenedLogs}) + if len(flattenedLogs) > 0 { + bc.logsAcceptedFeed.Send(flattenedLogs) } if len(next.Transactions()) != 0 { bc.txAcceptedFeed.Send(NewTxsEvent{next.Transactions()}) @@ -391,6 +600,12 @@ func (bc *BlockChain) startAcceptor() { bc.acceptorTip = next bc.acceptorTipLock.Unlock() bc.acceptorWg.Done() + + acceptorWorkTimer.Inc(time.Since(start).Milliseconds()) + acceptorWorkCount.Inc(1) + // Note: in contrast to most accepted metrics, we increment the accepted log metrics in the acceptor queue because + // the logs are already processed in the acceptor queue. + acceptedLogsCounter.Inc(int64(len(logs))) } } @@ -414,8 +629,8 @@ func (bc *BlockChain) addAcceptorQueue(b *types.Block) { // DrainAcceptorQueue blocks until all items in [acceptorQueue] have been // processed. 
func (bc *BlockChain) DrainAcceptorQueue() { - bc.acceptorClosingLock.Lock() - defer bc.acceptorClosingLock.Unlock() + bc.acceptorClosingLock.RLock() + defer bc.acceptorClosingLock.RUnlock() if bc.acceptorClosed { return @@ -641,7 +856,8 @@ func (bc *BlockChain) ValidateCanonicalChain() error { // Transactions are only indexed beneath the last accepted block, so we only check // that the transactions have been indexed, if we are checking below the last accepted // block. - if current.NumberU64() <= bc.lastAccepted.NumberU64() { + shouldIndexTxs := bc.cacheConfig.TxLookupLimit == 0 || bc.lastAccepted.NumberU64() < current.NumberU64()+bc.cacheConfig.TxLookupLimit + if current.NumberU64() <= bc.lastAccepted.NumberU64() && shouldIndexTxs { // Ensure that all of the transactions have been stored correctly in the canonical // chain for txIndex, tx := range txs { @@ -699,6 +915,8 @@ func (bc *BlockChain) Stop() { return } + log.Info("Closing quit channel") + close(bc.quit) // Wait for accepted feed to process all remaining items log.Info("Stopping Acceptor") start := time.Now() @@ -724,6 +942,10 @@ func (bc *BlockChain) Stop() { log.Info("Closing scope") bc.scope.Close() + // Waiting for background processes to complete + log.Info("Waiting for background processes to complete") + bc.wg.Wait() + log.Info("Blockchain stopped") } @@ -821,10 +1043,11 @@ func (bc *BlockChain) Accept(block *types.Block) error { } } + // Enqueue block in the acceptor bc.lastAccepted = block bc.addAcceptorQueue(block) acceptedBlockGasUsedCounter.Inc(int64(block.GasUsed())) - + acceptedTxsCounter.Inc(int64(len(block.Transactions()))) return nil } @@ -1006,26 +1229,11 @@ func (bc *BlockChain) InsertBlockManual(block *types.Block, writes bool) error { return err } -// gatherBlockLogs fetches logs from a previously inserted block. 
-func (bc *BlockChain) gatherBlockLogs(hash common.Hash, number uint64, removed bool) []*types.Log { - receipts := rawdb.ReadReceipts(bc.db, hash, number, bc.chainConfig) - var logs []*types.Log - for _, receipt := range receipts { - for _, log := range receipt.Logs { - l := *log - if removed { - l.Removed = true - } - logs = append(logs, &l) - } - } - - return logs -} - func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { + start := time.Now() bc.senderCacher.Recover(types.MakeSigner(bc.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())), block.Transactions()) + substart := time.Now() err := bc.engine.VerifyHeader(bc, block.Header()) if err == nil { err = bc.validator.ValidateBody(block) @@ -1057,6 +1265,8 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { bc.reportBlock(block, nil, err) return err } + blockContentValidationTimer.Inc(time.Since(substart).Milliseconds()) + // No validation errors for the block var activeState *state.StateDB defer func() { @@ -1069,15 +1279,21 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { } }() - // Retrieve the parent block and its state to execute on top - start := time.Now() - - // Retrieve the parent block and its state to execute block + // Retrieve the parent block to determine which root to build state on + substart = time.Now() parent := bc.GetHeader(block.ParentHash(), block.NumberU64()-1) + + // Instantiate the statedb to use for processing transactions + // + // NOTE: Flattening a snapshot during block execution requires fetching state + // entries directly from the trie (much slower). 
+ bc.flattenLock.Lock() + defer bc.flattenLock.Unlock() statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps) if err != nil { return err } + blockStateInitTimer.Inc(time.Since(substart).Milliseconds()) // Enable prefetching to pull in trie node paths while processing transactions statedb.StartPrefetcher("chain") @@ -1086,6 +1302,7 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { // If we have a followup block, run that against the current state to pre-cache // transactions and probabilistically some of the account/storage trie nodes. // Process block using the parent state as reference point + substart = time.Now() receipts, logs, usedGas, err := bc.processor.Process(block, parent, statedb, bc.vmConfig) if serr := statedb.Error(); serr != nil { log.Error("statedb error encountered", "err", serr, "number", block.Number(), "hash", block.Hash()) @@ -1095,12 +1312,32 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { return err } + // Update the metrics touched during block processing + accountReadTimer.Inc(statedb.AccountReads.Milliseconds()) // Account reads are complete, we can mark them + storageReadTimer.Inc(statedb.StorageReads.Milliseconds()) // Storage reads are complete, we can mark them + snapshotAccountReadTimer.Inc(statedb.SnapshotAccountReads.Milliseconds()) // Account reads are complete, we can mark them + snapshotStorageReadTimer.Inc(statedb.SnapshotStorageReads.Milliseconds()) // Storage reads are complete, we can mark them + trieproc := statedb.AccountHashes + statedb.StorageHashes // Save to not double count in validation + trieproc += statedb.SnapshotAccountReads + statedb.AccountReads + statedb.AccountUpdates + trieproc += statedb.SnapshotStorageReads + statedb.StorageReads + statedb.StorageUpdates + blockExecutionTimer.Inc((time.Since(substart) - trieproc).Milliseconds()) + // Validate the state using the default validator + substart = time.Now() if err := 
bc.validator.ValidateState(block, statedb, receipts, usedGas); err != nil { bc.reportBlock(block, receipts, err) return err } + // Update the metrics touched during block validation + accountUpdateTimer.Inc(statedb.AccountUpdates.Milliseconds()) // Account updates are complete, we can mark them + storageUpdateTimer.Inc(statedb.StorageUpdates.Milliseconds()) // Storage updates are complete, we can mark them + accountHashTimer.Inc(statedb.AccountHashes.Milliseconds()) // Account hashes are complete, we can mark them + storageHashTimer.Inc(statedb.StorageHashes.Milliseconds()) // Storage hashes are complete, we can mark them + additionalTrieProc := statedb.AccountHashes + statedb.StorageHashes + statedb.AccountUpdates + statedb.StorageUpdates - trieproc + blockStateValidationTimer.Inc((time.Since(substart) - additionalTrieProc).Milliseconds()) + blockTrieOpsTimer.Inc((trieproc + additionalTrieProc).Milliseconds()) + // If [writes] are disabled, skip [writeBlockWithState] so that we do not write the block // or the state trie to disk. // Note: in pruning mode, this prevents us from generating a reference to the state root. @@ -1112,9 +1349,18 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { // writeBlockWithState (called within writeBlockAndSethead) creates a reference that // will be cleaned up in Accept/Reject so we need to ensure an error cannot occur // later in verification, since that would cause the referenced root to never be dereferenced. 
+ substart = time.Now() if err := bc.writeBlockAndSetHead(block, receipts, logs, statedb); err != nil { return err } + // Update the metrics touched during block commit + accountCommitTimer.Inc(statedb.AccountCommits.Milliseconds()) // Account commits are complete, we can mark them + storageCommitTimer.Inc(statedb.StorageCommits.Milliseconds()) // Storage commits are complete, we can mark them + snapshotCommitTimer.Inc(statedb.SnapshotCommits.Milliseconds()) // Snapshot commits are complete, we can mark them + triedbCommitTimer.Inc(statedb.TrieDBCommits.Milliseconds()) // Triedb commits are complete, we can mark them + blockWriteTimer.Inc((time.Since(substart) - statedb.AccountCommits - statedb.StorageCommits - statedb.SnapshotCommits - statedb.TrieDBCommits).Milliseconds()) + blockInsertTimer.Inc(time.Since(start).Milliseconds()) + log.Debug("Inserted new block", "number", block.Number(), "hash", block.Hash(), "parentHash", block.ParentHash(), "uncles", len(block.Uncles()), "txs", len(block.Transactions()), "gas", block.GasUsed(), @@ -1123,6 +1369,9 @@ func (bc *BlockChain) insertBlock(block *types.Block, writes bool) error { ) processedBlockGasUsedCounter.Inc(int64(block.GasUsed())) + processedTxsCounter.Inc(int64(block.Transactions().Len())) + processedLogsCounter.Inc(int64(len(logs))) + blockInsertCount.Inc(1) return nil } @@ -1134,22 +1383,19 @@ func (bc *BlockChain) collectLogs(hash common.Hash, removed bool) []*types.Log { if number == nil { return nil } - return bc.gatherBlockLogs(hash, *number, removed) -} + receipts := rawdb.ReadReceipts(bc.db, hash, *number, bc.chainConfig) -// mergeLogs returns a merged log slice with specified sort order. -func mergeLogs(logs [][]*types.Log, reverse bool) []*types.Log { - var ret []*types.Log - if reverse { - for i := len(logs) - 1; i >= 0; i-- { - ret = append(ret, logs[i]...) - } - } else { - for i := 0; i < len(logs); i++ { - ret = append(ret, logs[i]...) 
+ var logs []*types.Log + for _, receipt := range receipts { + for _, log := range receipt.Logs { + l := *log + if removed { + l.Removed = true + } + logs = append(logs, &l) } } - return ret + return logs } // reorg takes two blocks, an old chain and a new chain and will reconstruct the @@ -1163,20 +1409,12 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { newChain types.Blocks oldChain types.Blocks commonBlock *types.Block - - deletedLogs [][]*types.Log - rebirthLogs [][]*types.Log ) // Reduce the longer chain to the same number as the shorter one if oldBlock.NumberU64() > newBlock.NumberU64() { // Old chain is longer, gather all transactions and logs as deleted ones for ; oldBlock != nil && oldBlock.NumberU64() != newBlock.NumberU64(); oldBlock = bc.GetBlock(oldBlock.ParentHash(), oldBlock.NumberU64()-1) { oldChain = append(oldChain, oldBlock) - // Collect deleted logs for notification - logs := bc.collectLogs(oldBlock.Hash(), true) - if len(logs) > 0 { - deletedLogs = append(deletedLogs, logs) - } } } else { // New chain is longer, stash all blocks away for subsequent insertion @@ -1200,12 +1438,6 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { } // Remove an old block as well as stash away a new block oldChain = append(oldChain, oldBlock) - // Collect deleted logs for notification - logs := bc.collectLogs(oldBlock.Hash(), true) - if len(logs) > 0 { - deletedLogs = append(deletedLogs, logs) - } - newChain = append(newChain, newBlock) // Step back with both chains @@ -1237,20 +1469,15 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { logFn(msg, "number", commonBlock.Number(), "hash", commonBlock.Hash(), "drop", len(oldChain), "dropfrom", oldChain[0].Hash(), "add", len(newChain), "addfrom", newChain[0].Hash()) } else { - log.Warn("Unlikely preference change (rewind to ancestor) occurred", "oldnum", oldHead.Number(), "oldhash", oldHead.Hash(), "newnum", newHead.Number(), "newhash", newHead.Hash()) 
+ log.Debug("Preference change (rewind to ancestor) occurred", "oldnum", oldHead.Number(), "oldhash", oldHead.Hash(), "newnum", newHead.Number(), "newhash", newHead.Hash()) } // Insert the new chain(except the head block(reverse order)), // taking care of the proper incremental order. for i := len(newChain) - 1; i >= 1; i-- { // Insert the block in the canonical way, re-writing history bc.writeHeadBlock(newChain[i]) - - // Collect reborn logs due to chain reorg - logs := bc.collectLogs(newChain[i].Hash(), false) - if len(logs) > 0 { - rebirthLogs = append(rebirthLogs, logs) - } } + // Delete any canonical number assignments above the new head indexesBatch := bc.db.NewBatch() @@ -1269,21 +1496,43 @@ func (bc *BlockChain) reorg(oldBlock, newBlock *types.Block) error { log.Crit("Failed to delete useless indexes", "err", err) } - // If any logs need to be fired, do it now. In theory we could avoid creating - // this goroutine if there are no events to fire, but realistcally that only - // ever happens if we're reorging empty blocks, which will only happen on idle - // networks where performance is not an issue either way. - if len(deletedLogs) > 0 { - bc.rmLogsFeed.Send(RemovedLogsEvent{mergeLogs(deletedLogs, true)}) + // Send out events for logs from the old canon chain, and 'reborn' + // logs from the new canon chain. The number of logs can be very + // high, so the events are sent in batches of size around 512. + + // Deleted logs + blocks: + var deletedLogs []*types.Log + for i := len(oldChain) - 1; i >= 0; i-- { + // Also send event for blocks removed from the canon chain. + bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) + + // Collect deleted logs for notification + if logs := bc.collectLogs(oldChain[i].Hash(), true); len(logs) > 0 { + deletedLogs = append(deletedLogs, logs...) 
+ } + if len(deletedLogs) > 512 { + bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) + deletedLogs = nil + } } - if len(rebirthLogs) > 0 { - bc.logsFeed.Send(mergeLogs(rebirthLogs, false)) + if len(deletedLogs) > 0 { + bc.rmLogsFeed.Send(RemovedLogsEvent{deletedLogs}) } - if len(oldChain) > 0 { - for i := len(oldChain) - 1; i >= 0; i-- { - bc.chainSideFeed.Send(ChainSideEvent{Block: oldChain[i]}) + + // New logs: + var rebirthLogs []*types.Log + for i := len(newChain) - 1; i >= 1; i-- { + if logs := bc.collectLogs(newChain[i].Hash(), false); len(logs) > 0 { + rebirthLogs = append(rebirthLogs, logs...) + } + if len(rebirthLogs) > 512 { + bc.logsFeed.Send(rebirthLogs) + rebirthLogs = nil } } + if len(rebirthLogs) > 0 { + bc.logsFeed.Send(rebirthLogs) + } return nil } @@ -1297,7 +1546,7 @@ type BadBlockReason struct { Receipts types.Receipts `json:"receipts"` Number uint64 `json:"number"` Hash common.Hash `json:"hash"` - Error error `json:"error"` + Error string `json:"error"` } func (b *BadBlockReason) String() string { @@ -1315,7 +1564,7 @@ func (b *BadBlockReason) String() string { Hash: %#x %v - Error: %v + Error: %s ############################## `, b.ChainConfig, b.Number, b.Hash, receiptString, b.Error) @@ -1353,7 +1602,7 @@ func (bc *BlockChain) reportBlock(block *types.Block, receipts types.Receipts, e Receipts: receipts, Number: block.NumberU64(), Hash: block.Hash(), - Error: err, + Error: err.Error(), } badBlockCounter.Inc(1) @@ -1766,11 +2015,11 @@ func (bc *BlockChain) gatherBlockRootsAboveLastAccepted() map[common.Hash]struct return blockRoots } -// ResetState reinitializes the state of the blockchain +// ResetToStateSyncedBlock reinitializes the state of the blockchain // to the trie represented by [block.Root()] after updating -// in-memory current block pointers to [block]. -// Only used in state sync. -func (bc *BlockChain) ResetState(block *types.Block) error { +// in-memory and on disk current block pointers to [block]. 
+// Only should be called after state sync has completed. +func (bc *BlockChain) ResetToStateSyncedBlock(block *types.Block) error { bc.chainmu.Lock() defer bc.chainmu.Unlock() @@ -1781,6 +2030,10 @@ func (bc *BlockChain) ResetState(block *types.Block) error { rawdb.WriteHeadHeaderHash(batch, block.Hash()) rawdb.WriteSnapshotBlockHash(batch, block.Hash()) rawdb.WriteSnapshotRoot(batch, block.Root()) + if err := rawdb.WriteSyncPerformed(batch, block.NumberU64()); err != nil { + return err + } + if err := batch.Write(); err != nil { return err } @@ -1793,8 +2046,10 @@ func (bc *BlockChain) ResetState(block *types.Block) error { lastAcceptedHash := block.Hash() bc.stateCache = state.NewDatabaseWithConfig(bc.db, &trie.Config{ - Cache: bc.cacheConfig.TrieCleanLimit, - Preimages: bc.cacheConfig.Preimages, + Cache: bc.cacheConfig.TrieCleanLimit, + Journal: bc.cacheConfig.TrieCleanJournal, + Preimages: bc.cacheConfig.Preimages, + StatsPrefix: trieCleanCacheStatsNamespace, }) if err := bc.loadLastState(lastAcceptedHash); err != nil { return err diff --git a/coreth/core/blockchain_reader.go b/coreth/core/blockchain_reader.go index e812fdd0..96163694 100644 --- a/coreth/core/blockchain_reader.go +++ b/coreth/core/blockchain_reader.go @@ -334,3 +334,12 @@ func (bc *BlockChain) SubscribeAcceptedLogsEvent(ch chan<- []*types.Log) event.S func (bc *BlockChain) SubscribeAcceptedTransactionEvent(ch chan<- NewTxsEvent) event.Subscription { return bc.scope.Track(bc.txAcceptedFeed.Subscribe(ch)) } + +// GetLogs fetches all logs from a given block. 
+func (bc *BlockChain) GetLogs(hash common.Hash, number uint64) [][]*types.Log { + logs, ok := bc.acceptedLogsCache.Get(hash) // this cache is thread-safe + if ok { + return logs + } + return rawdb.ReadLogs(bc.db, hash, number) +} diff --git a/coreth/core/blockchain_repair_test.go b/coreth/core/blockchain_repair_test.go index 0f5eaf8d..add53826 100644 --- a/coreth/core/blockchain_repair_test.go +++ b/coreth/core/blockchain_repair_test.go @@ -516,9 +516,12 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // Initialize a fresh chain var ( - genesis = (&Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee)}).MustCommit(db) - engine = dummy.NewFullFaker() - config = &CacheConfig{ + gspec = &Genesis{ + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + Config: params.TestChainConfig, + } + engine = dummy.NewFullFaker() + config = &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, SnapshotLimit: 0, // Disable snapshot by default @@ -528,7 +531,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { if snapshots { config.SnapshotLimit = 256 } - chain, err := NewBlockChain(db, config, params.TestChainConfig, engine, vm.Config{}, common.Hash{}) + chain, err := NewBlockChain(db, config, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("Failed to create chain: %v", err) } @@ -537,14 +540,14 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { // If sidechain blocks are needed, make a light chain and import it var sideblocks types.Blocks if tt.sidechainBlocks > 0 { - sideblocks, _, _ = GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, 10, func(i int, b *BlockGen) { + sideblocks, _, _ = GenerateChain(gspec.Config, gspec.ToBlock(nil), engine, rawdb.NewMemoryDatabase(), tt.sidechainBlocks, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0x01}) }) if _, err := chain.InsertChain(sideblocks); err 
!= nil { t.Fatalf("Failed to import side chain: %v", err) } } - canonblocks, _, _ := GenerateChain(params.TestChainConfig, genesis, engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, 10, func(i int, b *BlockGen) { + canonblocks, _, _ := GenerateChain(gspec.Config, gspec.ToBlock(nil), engine, rawdb.NewMemoryDatabase(), tt.canonicalBlocks, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0x02}) b.SetDifficulty(big.NewInt(1000000)) }) @@ -576,7 +579,7 @@ func testRepair(t *testing.T, tt *rewindTest, snapshots bool) { } defer db.Close() - newChain, err := NewBlockChain(db, config, params.TestChainConfig, engine, vm.Config{}, lastAcceptedHash) + newChain, err := NewBlockChain(db, config, gspec, engine, vm.Config{}, lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } diff --git a/coreth/core/blockchain_snapshot_test.go b/coreth/core/blockchain_snapshot_test.go index aae31baf..98c7456a 100644 --- a/coreth/core/blockchain_snapshot_test.go +++ b/coreth/core/blockchain_snapshot_test.go @@ -59,8 +59,9 @@ type snapshotTestBasic struct { // share fields, set in runtime datadir string db ethdb.Database - gendb ethdb.Database + genDb ethdb.Database engine consensus.Engine + gspec *Genesis lastAcceptedHash common.Hash } @@ -75,20 +76,22 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo } // Initialize a fresh chain var ( - genesis = (&Genesis{Config: params.TestChainConfig, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee)}).MustCommit(db) - engine = dummy.NewFullFaker() - gendb = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + Config: params.TestChainConfig, + } + engine = dummy.NewFullFaker() // Snapshot is enabled, the first snapshot is created from the Genesis. // The snapshot memory allowance is 256MB, it means no snapshot flush // will happen during the block insertion. 
cacheConfig = DefaultCacheConfig ) - chain, err := NewBlockChain(db, cacheConfig, params.TestChainConfig, engine, vm.Config{}, common.Hash{}) + chain, err := NewBlockChain(db, cacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("Failed to create chain: %v", err) } - blocks, _, _ := GenerateChain(params.TestChainConfig, genesis, engine, gendb, basic.chainBlocks, 10, func(i int, b *BlockGen) {}) + genDb, blocks, _, _ := GenerateChainWithGenesis(gspec, engine, basic.chainBlocks, 10, func(i int, b *BlockGen) {}) // genesis as last accepted basic.lastAcceptedHash = chain.GetBlockByNumber(0).Hash() @@ -126,8 +129,9 @@ func (basic *snapshotTestBasic) prepare(t *testing.T) (*BlockChain, []*types.Blo // Set runtime fields basic.datadir = datadir basic.db = db - basic.gendb = gendb + basic.genDb = genDb basic.engine = engine + basic.gspec = gspec return chain, blocks } @@ -146,11 +150,11 @@ func (basic *snapshotTestBasic) verify(t *testing.T, chain *BlockChain, blocks [ // Check the disk layer, ensure they are matched block := chain.GetBlockByNumber(basic.expSnapshotBottom) if block == nil { - t.Errorf("The correspnding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) + t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) } else if !bytes.Equal(chain.snaps.DiskRoot().Bytes(), block.Root().Bytes()) { t.Errorf("The snapshot disk layer root is incorrect, want %x, get %x", block.Root(), chain.snaps.DiskRoot()) } else if len(chain.snaps.Snapshots(block.Hash(), -1, false)) != 1 { - t.Errorf("The correspnding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) + t.Errorf("The corresponding block[%d] of snapshot disk layer is missing", basic.expSnapshotBottom) } // Check the snapshot, ensure it's integrated @@ -203,7 +207,7 @@ func (basic *snapshotTestBasic) dump() string { func (basic *snapshotTestBasic) teardown() { basic.db.Close() - basic.gendb.Close() + 
basic.genDb.Close() os.RemoveAll(basic.datadir) } @@ -221,7 +225,7 @@ func (snaptest *snapshotTest) test(t *testing.T) { // Restart the chain normally chain.Stop() - newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err := NewBlockChain(snaptest.db, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -257,13 +261,13 @@ func (snaptest *crashSnapshotTest) test(t *testing.T) { // the crash, we do restart twice here: one after the crash and one // after the normal stop. It's used to ensure the broken snapshot // can be detected all the time. - newchain, err := NewBlockChain(newdb, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err := NewBlockChain(newdb, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } newchain.Stop() - newchain, err = NewBlockChain(newdb, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err = NewBlockChain(newdb, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -290,7 +294,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { // Insert blocks without enabling snapshot if gapping is required. 
chain.Stop() - gappedBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.gapped, 10, func(i int, b *BlockGen) {}) + gappedBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.gapped, 10, func(i int, b *BlockGen) {}) // Insert a few more blocks without enabling snapshot var cacheConfig = &CacheConfig{ @@ -300,7 +304,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { Pruning: true, CommitInterval: 4096, } - newchain, err := NewBlockChain(snaptest.db, cacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err := NewBlockChain(snaptest.db, cacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -308,7 +312,7 @@ func (snaptest *gappedSnapshotTest) test(t *testing.T) { newchain.Stop() // Restart the chain with enabling the snapshot - newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } @@ -344,11 +348,11 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { Pruning: true, CommitInterval: 4096, } - newchain, err := NewBlockChain(snaptest.db, config, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err := NewBlockChain(snaptest.db, config, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } - newBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.gendb, snaptest.newBlocks, 10, func(i int, b 
*BlockGen) {}) + newBlocks, _, _ := GenerateChain(params.TestChainConfig, blocks[len(blocks)-1], snaptest.engine, snaptest.genDb, snaptest.newBlocks, 10, func(i int, b *BlockGen) {}) newchain.InsertChain(newBlocks) newchain.Stop() @@ -360,13 +364,12 @@ func (snaptest *wipeCrashSnapshotTest) test(t *testing.T) { Pruning: true, CommitInterval: 4096, } - _, err = NewBlockChain(snaptest.db, config, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + _, err = NewBlockChain(snaptest.db, config, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } // Simulate the blockchain crash. - - newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, params.TestChainConfig, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash) + newchain, err = NewBlockChain(snaptest.db, DefaultCacheConfig, snaptest.gspec, snaptest.engine, vm.Config{}, snaptest.lastAcceptedHash, false) if err != nil { t.Fatalf("Failed to recreate chain: %v", err) } diff --git a/coreth/core/blockchain_test.go b/coreth/core/blockchain_test.go index da5d7863..615c1757 100644 --- a/coreth/core/blockchain_test.go +++ b/coreth/core/blockchain_test.go @@ -6,7 +6,9 @@ package core import ( "fmt" "math/big" + "os" "testing" + "time" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/core/rawdb" @@ -18,6 +20,9 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" + "github.com/fsnotify/fsnotify" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -44,33 +49,25 @@ var ( func createBlockChain( db ethdb.Database, cacheConfig *CacheConfig, - chainConfig *params.ChainConfig, + gspec *Genesis, lastAcceptedHash common.Hash, ) (*BlockChain, error) { // Import the chain. This runs all block validation rules. 
blockchain, err := NewBlockChain( db, cacheConfig, - chainConfig, - dummy.NewDummyEngine(&dummy.ConsensusCallbacks{ - OnExtraStateChange: func(block *types.Block, sdb *state.StateDB) (*big.Int, *big.Int, error) { - sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64())) - return nil, nil, nil - }, - OnFinalizeAndAssemble: func(header *types.Header, sdb *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { - sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64())) - return nil, nil, nil, nil - }, - }), + gspec, + dummy.NewDummyEngine(&TestCallbacks), vm.Config{}, lastAcceptedHash, + false, ) return blockchain, err } func TestArchiveBlockChain(t *testing.T) { - createArchiveBlockChain := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, archiveConfig, chainConfig, lastAcceptedHash) + createArchiveBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, archiveConfig, gspec, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -79,8 +76,102 @@ func TestArchiveBlockChain(t *testing.T) { } } +// awaitWatcherEventsSubside waits for at least one event on [watcher] and then waits +// for at least [subsideTimeout] before returning +func awaitWatcherEventsSubside(watcher *fsnotify.Watcher, subsideTimeout time.Duration) { + done := make(chan struct{}) + + go func() { + defer func() { + close(done) + }() + + select { + case <-watcher.Events: + case <-watcher.Errors: + return + } + + for { + select { + case <-watcher.Events: + case <-watcher.Errors: + return + case <-time.After(subsideTimeout): + return + } + } + }() + <-done +} + +func TestTrieCleanJournal(t *testing.T) { + if os.Getenv("RUN_FLAKY_TESTS") != "true" 
{ + t.Skip("FLAKY") + } + require := require.New(t) + assert := assert.New(t) + + trieCleanJournal := t.TempDir() + trieCleanJournalWatcher, err := fsnotify.NewWatcher() + require.NoError(err) + defer func() { + assert.NoError(trieCleanJournalWatcher.Close()) + }() + require.NoError(trieCleanJournalWatcher.Add(trieCleanJournal)) + + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + config := *archiveConfig + config.TrieCleanJournal = trieCleanJournal + config.TrieCleanRejournal = 100 * time.Millisecond + return createBlockChain(db, &config, gspec, lastAcceptedHash) + } + + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + chainDB = rawdb.NewMemoryDatabase() + ) + + // Ensure that key1 has some funds in the genesis block. + genesisBalance := big.NewInt(1000000) + gspec := &Genesis{ + Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, + Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, + } + + blockchain, err := create(chainDB, gspec, common.Hash{}) + require.NoError(err) + defer blockchain.Stop() + + // This call generates a chain of 3 blocks. 
+ signer := types.HomesteadSigner{} + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { + tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) + gen.AddTx(tx) + }) + require.NoError(err) + + // Insert and accept the generated chain + _, err = blockchain.InsertChain(chain) + require.NoError(err) + + for _, block := range chain { + require.NoError(blockchain.Accept(block)) + } + blockchain.DrainAcceptorQueue() + + awaitWatcherEventsSubside(trieCleanJournalWatcher, time.Second) + // Assert that a new file is created in the trie clean journal + dirEntries, err := os.ReadDir(trieCleanJournal) + require.NoError(err) + require.NotEmpty(dirEntries) +} + func TestArchiveBlockChainSnapsDisabled(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { return createBlockChain( db, &CacheConfig{ @@ -91,7 +182,7 @@ func TestArchiveBlockChainSnapsDisabled(t *testing.T) { SnapshotLimit: 0, // Disable snapshots AcceptorQueueLimit: 64, }, - chainConfig, + gspec, lastAcceptedHash, ) } @@ -103,8 +194,8 @@ func TestArchiveBlockChainSnapsDisabled(t *testing.T) { } func TestPruningBlockChain(t *testing.T) { - createPruningBlockChain := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { - return createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + createPruningBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -114,7 +205,7 @@ func TestPruningBlockChain(t *testing.T) { } func TestPruningBlockChainSnapsDisabled(t 
*testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { return createBlockChain( db, &CacheConfig{ @@ -126,7 +217,7 @@ func TestPruningBlockChainSnapsDisabled(t *testing.T) { SnapshotLimit: 0, // Disable snapshots AcceptorQueueLimit: 64, }, - chainConfig, + gspec, lastAcceptedHash, ) } @@ -144,8 +235,8 @@ type wrappedStateManager struct { func (w *wrappedStateManager) Shutdown() error { return nil } func TestPruningBlockChainUngracefulShutdown(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { - blockchain, err := createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + blockchain, err := createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) if err != nil { return nil, err } @@ -163,7 +254,7 @@ func TestPruningBlockChainUngracefulShutdown(t *testing.T) { } func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { blockchain, err := createBlockChain( db, &CacheConfig{ @@ -175,7 +266,7 @@ func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { SnapshotLimit: 0, // Disable snapshots AcceptorQueueLimit: 64, }, - chainConfig, + gspec, lastAcceptedHash, ) if err != nil { @@ -197,7 +288,7 @@ func TestPruningBlockChainUngracefulShutdownSnapsDisabled(t *testing.T) { func TestEnableSnapshots(t *testing.T) { // Set snapshots to be disabled the first time, and then enable them on the restart snapLimit := 0 - create := func(db 
ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { // Import the chain. This runs all block validation rules. blockchain, err := createBlockChain( db, @@ -210,7 +301,7 @@ func TestEnableSnapshots(t *testing.T) { SnapshotLimit: snapLimit, AcceptorQueueLimit: 64, }, - chainConfig, + gspec, lastAcceptedHash, ) if err != nil { @@ -228,13 +319,13 @@ func TestEnableSnapshots(t *testing.T) { } func TestCorruptSnapshots(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { // Delete the snapshot block hash and state root to ensure that if we die in between writing a snapshot // diff layer to disk at any point, we can still recover on restart. rawdb.DeleteSnapshotBlockHash(db) rawdb.DeleteSnapshotRoot(db) - return createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { @@ -244,9 +335,9 @@ func TestCorruptSnapshots(t *testing.T) { } func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) { - create := func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { // Import the chain. This runs all block validation rules. 
- blockchain, err := createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + blockchain, err := createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) if err != nil { return nil, err } @@ -273,10 +364,11 @@ func TestBlockChainOfflinePruningUngracefulShutdown(t *testing.T) { return nil, fmt.Errorf("failed to prune blockchain with target root: %s due to: %w", targetRoot, err) } // Re-initialize the blockchain after pruning - return createBlockChain(db, pruningConfig, chainConfig, lastAcceptedHash) + return createBlockChain(db, pruningConfig, gspec, lastAcceptedHash) } for _, tt := range tests { t.Run(tt.Name, func(t *testing.T) { + tt := tt t.Parallel() tt.testFunc(t, create) }) @@ -289,11 +381,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() - chainDB = rawdb.NewMemoryDatabase() - lastAcceptedHash common.Hash + chainDB = rawdb.NewMemoryDatabase() ) // Ensure that key1 has some funds in the genesis block. @@ -302,10 +390,8 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := createBlockChain(chainDB, pruningConfig, gspec.Config, lastAcceptedHash) + blockchain, err := createBlockChain(chainDB, pruningConfig, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -313,9 +399,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { // This call generates a chain of 3 blocks. 
signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 10, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 10, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -333,10 +417,10 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { } blockchain.DrainAcceptorQueue() - lastAcceptedHash = blockchain.LastConsensusAcceptedBlock().Hash() + lastAcceptedHash := blockchain.LastConsensusAcceptedBlock().Hash() blockchain.Stop() - blockchain, err = createBlockChain(chainDB, pruningConfig, gspec.Config, lastAcceptedHash) + blockchain, err = createBlockChain(chainDB, pruningConfig, gspec, lastAcceptedHash) if err != nil { t.Fatal(err) } @@ -363,7 +447,7 @@ func testRepopulateMissingTriesParallel(t *testing.T, parallelism int) { PopulateMissingTriesParallelism: parallelism, AcceptorQueueLimit: 64, }, - gspec.Config, + gspec, lastAcceptedHash, ) if err != nil { @@ -386,7 +470,7 @@ func TestRepopulateMissingTries(t *testing.T) { func TestUngracefulAsyncShutdown(t *testing.T) { var ( - create = func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error) { + create = func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { blockchain, err := createBlockChain(db, &CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, @@ -396,7 +480,7 @@ func TestUngracefulAsyncShutdown(t *testing.T) { SnapshotLimit: 256, SkipSnapshotRebuild: true, // Ensure the test errors if snapshot initialization fails AcceptorQueueLimit: 1000, // ensure channel doesn't block - }, chainConfig, lastAcceptedHash) + }, gspec, 
lastAcceptedHash) if err != nil { return nil, err } @@ -407,9 +491,6 @@ func TestUngracefulAsyncShutdown(t *testing.T) { key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -419,10 +500,8 @@ func TestUngracefulAsyncShutdown(t *testing.T) { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -430,9 +509,7 @@ func TestUngracefulAsyncShutdown(t *testing.T) { // This call generates a chain of 10 blocks. signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. 
- chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 10, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 10, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -571,28 +648,25 @@ func TestCanonicalHashMarker(t *testing.T) { } for _, c := range cases { var ( - db = rawdb.NewMemoryDatabase() gspec = &Genesis{ Config: params.TestChainConfig, Alloc: GenesisAlloc{}, BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), } - genesis = gspec.MustCommit(db) - engine = dummy.NewFaker() + engine = dummy.NewFaker() ) - forkA, _, err := GenerateChain(params.TestChainConfig, genesis, engine, db, c.forkA, 10, func(i int, gen *BlockGen) {}) + _, forkA, _, err := GenerateChainWithGenesis(gspec, engine, c.forkA, 10, func(i int, gen *BlockGen) {}) if err != nil { t.Fatal(err) } - forkB, _, err := GenerateChain(params.TestChainConfig, genesis, engine, db, c.forkB, 10, func(i int, gen *BlockGen) {}) + _, forkB, _, err := GenerateChainWithGenesis(gspec, engine, c.forkB, 10, func(i int, gen *BlockGen) {}) if err != nil { t.Fatal(err) } // Initialize test chain diskdb := rawdb.NewMemoryDatabase() - gspec.MustCommit(diskdb) - chain, err := NewBlockChain(diskdb, DefaultCacheConfig, params.TestChainConfig, engine, vm.Config{}, common.Hash{}) + chain, err := NewBlockChain(diskdb, DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -645,3 +719,138 @@ func TestCanonicalHashMarker(t *testing.T) { } } } + +func TestTransactionIndices(t *testing.T) { + // Configure and generate a sample block chain + require := require.New(t) + var ( + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + key2, _ = 
crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = crypto.PubkeyToAddress(key2.PublicKey) + funds = big.NewInt(10000000000000) + gspec = &Genesis{ + Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, + Alloc: GenesisAlloc{addr1: {Balance: funds}}, + } + signer = types.LatestSigner(gspec.Config) + ) + height := uint64(128) + genDb, blocks, _, err := GenerateChainWithGenesis(gspec, dummy.NewDummyEngine(&TestCallbacks), int(height), 10, func(i int, block *BlockGen) { + tx, err := types.SignTx(types.NewTransaction(block.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) + require.NoError(err) + block.AddTx(tx) + }) + require.NoError(err) + + blocks2, _, err := GenerateChain(gspec.Config, blocks[len(blocks)-1], dummy.NewDummyEngine(&TestCallbacks), genDb, 10, 10, nil) + require.NoError(err) + + check := func(tail *uint64, chain *BlockChain) { + stored := rawdb.ReadTxIndexTail(chain.db) + require.EqualValues(tail, stored) + + if tail == nil { + return + } + for i := *tail; i <= chain.CurrentBlock().NumberU64(); i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.NotNilf(index, "Miss transaction indices, number %d hash %s", i, tx.Hash().Hex()) + } + } + + for i := uint64(0); i < *tail; i++ { + block := rawdb.ReadBlock(chain.db, rawdb.ReadCanonicalHash(chain.db, i), i) + if block.Transactions().Len() == 0 { + continue + } + for _, tx := range block.Transactions() { + index := rawdb.ReadTxLookupEntry(chain.db, tx.Hash()) + require.Nilf(index, "Transaction indices should be deleted, number %d hash %s", i, tx.Hash().Hex()) + } + } + } + + conf := &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + Pruning: true, + 
CommitInterval: 4096, + SnapshotLimit: 256, + SkipSnapshotRebuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, + } + + // Init block chain and check all needed indices has been indexed. + chainDB := rawdb.NewMemoryDatabase() + chain, err := createBlockChain(chainDB, conf, gspec, common.Hash{}) + require.NoError(err) + + _, err = chain.InsertChain(blocks) + require.NoError(err) + + for _, block := range blocks { + err := chain.Accept(block) + require.NoError(err) + } + chain.DrainAcceptorQueue() + + chain.Stop() + check(nil, chain) // check all indices has been indexed + + lastAcceptedHash := chain.CurrentHeader().Hash() + + // Reconstruct a block chain which only reserves limited tx indices + // 128 blocks were previously indexed. Now we add a new block at each test step. + limit := []uint64{130 /* 129 + 1 reserve all */, 64 /* drop stale */, 32 /* shorten history */} + tails := []uint64{0 /* reserve all */, 67 /* 130 - 64 + 1 */, 100 /* 131 - 32 + 1 */} + for i, l := range limit { + conf.TxLookupLimit = l + + chain, err := createBlockChain(chainDB, conf, gspec, lastAcceptedHash) + require.NoError(err) + + newBlks := blocks2[i : i+1] + _, err = chain.InsertChain(newBlks) // Feed chain a higher block to trigger indices updater. + require.NoError(err) + + err = chain.Accept(newBlks[0]) // Accept the block to trigger indices updater. 
+ require.NoError(err) + + chain.DrainAcceptorQueue() + time.Sleep(50 * time.Millisecond) // Wait for indices initialisation + + chain.Stop() + check(&tails[i], chain) + + lastAcceptedHash = chain.CurrentHeader().Hash() + } +} + +func TestTxLookupBlockChain(t *testing.T) { + cacheConf := &CacheConfig{ + TrieCleanLimit: 256, + TrieDirtyLimit: 256, + TrieDirtyCommitTarget: 20, + Pruning: true, + CommitInterval: 4096, + SnapshotLimit: 256, + SkipSnapshotRebuild: true, // Ensure the test errors if snapshot initialization fails + AcceptorQueueLimit: 64, // ensure channel doesn't block + TxLookupLimit: 5, + } + createTxLookupBlockChain := func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error) { + return createBlockChain(db, cacheConf, gspec, lastAcceptedHash) + } + for _, tt := range tests { + t.Run(tt.Name, func(t *testing.T) { + tt.testFunc(t, createTxLookupBlockChain) + }) + } +} diff --git a/coreth/core/bounded_buffer.go b/coreth/core/bounded_buffer.go index c99042fa..b6170682 100644 --- a/coreth/core/bounded_buffer.go +++ b/coreth/core/bounded_buffer.go @@ -3,37 +3,42 @@ package core -import ( - "github.com/ethereum/go-ethereum/common" -) - -// BoundedBuffer keeps [size] common.Hash entries in a buffer and calls -// [callback] on any item that is evicted. This is typically used for +// BoundedBuffer keeps [size] entries of type [K] in a buffer and calls +// [callback] on any item that is overwritten. This is typically used for // dereferencing old roots during block processing. -type BoundedBuffer struct { +// +// BoundedBuffer is not thread-safe and requires the caller synchronize usage. +type BoundedBuffer[K any] struct { lastPos int size int - callback func(common.Hash) - buffer []common.Hash + callback func(K) + buffer []K + + cycled bool } // NewBoundedBuffer creates a new [BoundedBuffer]. 
-func NewBoundedBuffer(size int, callback func(common.Hash)) *BoundedBuffer { - return &BoundedBuffer{ +func NewBoundedBuffer[K any](size int, callback func(K)) *BoundedBuffer[K] { + return &BoundedBuffer[K]{ + lastPos: -1, size: size, callback: callback, - buffer: make([]common.Hash, size), + buffer: make([]K, size), } } -// Insert adds a new common.Hash to the buffer. If the buffer is full, the -// oldest common.Hash will be evicted and [callback] will be invoked. -// -// WARNING: BoundedBuffer does not support the insertion of empty common.Hash. -// Inserting such data will cause unintended behavior. -func (b *BoundedBuffer) Insert(h common.Hash) { - nextPos := (b.lastPos + 1) % b.size // the first item added to the buffer will be at position 1 - if b.buffer[nextPos] != (common.Hash{}) { +// Insert adds a new value to the buffer. If the buffer is full, the +// oldest value will be overwritten and [callback] will be invoked. +func (b *BoundedBuffer[K]) Insert(h K) { + nextPos := b.lastPos + 1 // the first item added to the buffer will be at position 0 + if nextPos == b.size { + nextPos = 0 + // Set [cycled] since we are back to the 0th element + b.cycled = true + } + if b.cycled { + // We ensure we have cycled through the buffer once before invoking the + // [callback] to ensure we don't call it with unset values. b.callback(b.buffer[nextPos]) } b.buffer[nextPos] = h @@ -41,7 +46,12 @@ func (b *BoundedBuffer) Insert(h common.Hash) { } // Last retrieves the last item added to the buffer. -// If no items have been added to the buffer, Last returns an empty hash. -func (b *BoundedBuffer) Last() common.Hash { - return b.buffer[b.lastPos] +// +// If no items have been added to the buffer, Last returns the default value of +// [K] and [false]. 
+func (b *BoundedBuffer[K]) Last() (K, bool) { + if b.lastPos == -1 { + return *new(K), false + } + return b.buffer[b.lastPos], true } diff --git a/coreth/core/chain_makers.go b/coreth/core/chain_makers.go index c15b7798..1bb00ea8 100644 --- a/coreth/core/chain_makers.go +++ b/coreth/core/chain_makers.go @@ -32,7 +32,7 @@ import ( "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/consensus/misc" + "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" @@ -222,22 +222,6 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse b := &BlockGen{i: i, chain: blocks, parent: parent, statedb: statedb, config: config, engine: engine} b.header = makeHeader(chainreader, config, parent, gap, statedb, b.engine) - // Mutate the state and block according to any hard-fork specs - timestamp := new(big.Int).SetUint64(b.header.Time) - if !config.IsApricotPhase3(timestamp) { - // avoid dynamic fee extra data override - if daoBlock := config.DAOForkBlock; daoBlock != nil { - limit := new(big.Int).Add(daoBlock, params.DAOForkExtraRange) - if b.header.Number.Cmp(daoBlock) >= 0 && b.header.Number.Cmp(limit) < 0 { - if config.DAOForkSupport { - b.header.Extra = common.CopyBytes(params.DAOForkBlockExtra) - } - } - } - } - if config.DAOForkSupport && config.DAOForkBlock != nil && config.DAOForkBlock.Cmp(b.header.Number) == 0 { - misc.ApplyDAOHardFork(statedb) - } // Execute any user modifications to the block if gen != nil { gen(i, b) @@ -280,6 +264,19 @@ func GenerateChain(config *params.ChainConfig, parent *types.Block, engine conse return blocks, receipts, nil } +// GenerateChainWithGenesis is a wrapper of GenerateChain which will initialize +// genesis block to database first according to the provided genesis specification +// then generate chain on top. 
+func GenerateChainWithGenesis(genesis *Genesis, engine consensus.Engine, n int, gap uint64, gen func(int, *BlockGen)) (ethdb.Database, []*types.Block, []types.Receipts, error) { + db := rawdb.NewMemoryDatabase() + _, err := genesis.Commit(db) + if err != nil { + return nil, nil, nil, err + } + blocks, receipts, err := GenerateChain(genesis.Config, genesis.ToBlock(nil), engine, db, n, gap, gen) + return db, blocks, receipts, err +} + func makeHeader(chain consensus.ChainReader, config *params.ChainConfig, parent *types.Block, gap uint64, state *state.StateDB, engine consensus.Engine) *types.Header { var time uint64 if parent.Time() == 0 { @@ -290,21 +287,25 @@ func makeHeader(chain consensus.ChainReader, config *params.ChainConfig, parent timestamp := new(big.Int).SetUint64(time) var gasLimit uint64 - if config.IsSongbirdCode() { - if config.IsSongbirdTransition(timestamp) { - gasLimit = params.SgbTransitionGasLimit - } else if config.IsApricotPhase5(timestamp) { - gasLimit = params.SgbApricotPhase5GasLimit - } else if config.IsApricotPhase1(timestamp) { - gasLimit = params.ApricotPhase1GasLimit - } else { - gasLimit = CalcGasLimit(parent.GasUsed(), parent.GasLimit(), parent.GasLimit(), parent.GasLimit()) - } + if config.IsCortina(timestamp) { + gasLimit = params.CortinaGasLimit } else { - if config.IsApricotPhase1(timestamp) { - gasLimit = params.ApricotPhase1GasLimit + if config.IsSongbirdCode() { + if config.IsSongbirdTransition(timestamp) { + gasLimit = params.SgbTransitionGasLimit + } else if config.IsApricotPhase5(timestamp) { + gasLimit = params.SgbApricotPhase5GasLimit + } else if config.IsApricotPhase1(timestamp) { + gasLimit = params.ApricotPhase1GasLimit + } else { + gasLimit = CalcGasLimit(parent.GasUsed(), parent.GasLimit(), parent.GasLimit(), parent.GasLimit()) + } } else { - gasLimit = CalcGasLimit(parent.GasUsed(), parent.GasLimit(), parent.GasLimit(), parent.GasLimit()) + if config.IsApricotPhase1(timestamp) { + gasLimit = 
params.ApricotPhase1GasLimit + } else { + gasLimit = CalcGasLimit(parent.GasUsed(), parent.GasLimit(), parent.GasLimit(), parent.GasLimit()) + } } } diff --git a/coreth/core/chain_makers_test.go b/coreth/core/chain_makers_test.go index cd003932..a2e1bf2e 100644 --- a/coreth/core/chain_makers_test.go +++ b/coreth/core/chain_makers_test.go @@ -55,13 +55,12 @@ func ExampleGenerateChain() { Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, } - genesis := gspec.MustCommit(db) // This call generates a chain of 3 blocks. The function runs for // each block and adds different features to gen based on the // block index. signer := types.HomesteadSigner{} - chain, _, err := GenerateChain(gspec.Config, genesis, dummy.NewFaker(), db, 3, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, dummy.NewFaker(), 3, 10, func(i int, gen *BlockGen) { switch i { case 0: // In block 1, addr1 sends addr2 some ether. @@ -82,7 +81,7 @@ func ExampleGenerateChain() { } // Import the chain. This runs all block validation rules. - blockchain, _ := NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) defer blockchain.Stop() if i, err := blockchain.InsertChain(chain); err != nil { diff --git a/coreth/core/dao_test.go b/coreth/core/dao_test.go deleted file mode 100644 index 60c6c507..00000000 --- a/coreth/core/dao_test.go +++ /dev/null @@ -1,197 +0,0 @@ -// (c) 2021-2022, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. 
-// ********** -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package core - -import ( - "math/big" - "testing" - - "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" - "github.com/ava-labs/coreth/core/vm" - "github.com/ava-labs/coreth/params" - "github.com/ethereum/go-ethereum/common" -) - -// Tests that DAO-fork enabled clients can properly filter out fork-commencing -// blocks based on their extradata fields. 
-func TestDAOForkRangeExtradata(t *testing.T) { - forkBlock := big.NewInt(32) - - // Generate a common prefix for both pro-forkers and non-forkers - db := rawdb.NewMemoryDatabase() - gspec := &Genesis{ - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: params.TestApricotPhase2Config, - } - genesis := gspec.MustCommit(db) - prefix, _, _ := GenerateChain(params.TestApricotPhase2Config, genesis, dummy.NewFaker(), db, int(forkBlock.Int64()-1), 10, func(i int, gen *BlockGen) {}) - - // Create the concurrent, conflicting two nodes - proDb := rawdb.NewMemoryDatabase() - gspec.MustCommit(proDb) - - proConf := *params.TestApricotPhase2Config - proConf.DAOForkBlock = forkBlock - proConf.DAOForkSupport = true - - proBc, _ := NewBlockChain(proDb, DefaultCacheConfig, &proConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) - defer proBc.Stop() - - conDb := rawdb.NewMemoryDatabase() - gspec.MustCommit(conDb) - - conConf := *params.TestApricotPhase2Config - conConf.DAOForkBlock = forkBlock - conConf.DAOForkSupport = false - - conBc, _ := NewBlockChain(conDb, DefaultCacheConfig, &conConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) - defer conBc.Stop() - - if _, err := proBc.InsertChain(prefix); err != nil { - t.Fatalf("pro-fork: failed to import chain prefix: %v", err) - } - if _, err := conBc.InsertChain(prefix); err != nil { - t.Fatalf("con-fork: failed to import chain prefix: %v", err) - } - // Try to expand both pro-fork and non-fork chains iteratively with other camp's blocks - for i := int64(0); i < params.DAOForkExtraRange.Int64(); i++ { - // Create a pro-fork block, and try to feed into the no-fork chain - db = rawdb.NewMemoryDatabase() - gspec.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, &conConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) - defer bc.Stop() - - blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], 
blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import contra-fork chain for expansion: %v", err) - } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { - t.Fatalf("failed to commit contra-fork head for expansion: %v", err) - } - blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := conBc.InsertChain(blocks); err != nil { - t.Fatalf("contra-fork chain accepted pro-fork block: %v", blocks[0]) - } - // Create a proper no-fork block for the contra-forker - blocks, _, _ = GenerateChain(&conConf, conBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := conBc.InsertChain(blocks); err != nil { - t.Fatalf("contra-fork chain didn't accepted no-fork block: %v", err) - } - // Create a no-fork block, and try to feed into the pro-fork chain - db = rawdb.NewMemoryDatabase() - gspec.MustCommit(db) - bc, _ = NewBlockChain(db, DefaultCacheConfig, &proConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) - defer bc.Stop() - - blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import pro-fork chain for expansion: %v", err) - } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { - t.Fatalf("failed to commit pro-fork head for expansion: %v", err) - } - blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := proBc.InsertChain(blocks); err != nil { - t.Fatalf("pro-fork chain accepted contra-fork block: %v", blocks[0]) - } - // Create a proper pro-fork block for the pro-forker - blocks, 
_, _ = GenerateChain(&proConf, proBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := proBc.InsertChain(blocks); err != nil { - t.Fatalf("pro-fork chain didn't accepted pro-fork block: %v", err) - } - } - // Verify that contra-forkers accept pro-fork extra-datas after forking finishes - db = rawdb.NewMemoryDatabase() - gspec.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, &conConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) - defer bc.Stop() - - blocks := conBc.GetBlocksFromHash(conBc.CurrentBlock().Hash(), int(conBc.CurrentBlock().NumberU64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import contra-fork chain for expansion: %v", err) - } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { - t.Fatalf("failed to commit contra-fork head for expansion: %v", err) - } - blocks, _, _ = GenerateChain(&proConf, conBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := conBc.InsertChain(blocks); err != nil { - t.Fatalf("contra-fork chain didn't accept pro-fork block post-fork: %v", err) - } - // Verify that pro-forkers accept contra-fork extra-datas after forking finishes - db = rawdb.NewMemoryDatabase() - gspec.MustCommit(db) - bc, _ = NewBlockChain(db, DefaultCacheConfig, &proConf, dummy.NewFaker(), vm.Config{}, common.Hash{}) - defer bc.Stop() - - blocks = proBc.GetBlocksFromHash(proBc.CurrentBlock().Hash(), int(proBc.CurrentBlock().NumberU64())) - for j := 0; j < len(blocks)/2; j++ { - blocks[j], blocks[len(blocks)-1-j] = blocks[len(blocks)-1-j], blocks[j] - } - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import pro-fork chain for expansion: %v", err) - } - if err := bc.stateCache.TrieDB().Commit(bc.CurrentHeader().Root, true, nil); err != nil { - 
t.Fatalf("failed to commit pro-fork head for expansion: %v", err) - } - blocks, _, _ = GenerateChain(&conConf, proBc.CurrentBlock(), dummy.NewFaker(), db, 1, 10, func(i int, gen *BlockGen) {}) - if _, err := proBc.InsertChain(blocks); err != nil { - t.Fatalf("pro-fork chain didn't accept contra-fork block post-fork: %v", err) - } -} - -func TestDAOForkSupportPostApricotPhase3(t *testing.T) { - forkBlock := big.NewInt(0) - - conf := *params.TestChainConfig - conf.DAOForkSupport = true - conf.DAOForkBlock = forkBlock - - db := rawdb.NewMemoryDatabase() - gspec := &Genesis{ - BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), - Config: &conf, - } - genesis := gspec.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, &conf, dummy.NewFaker(), vm.Config{}, common.Hash{}) - defer bc.Stop() - - blocks, _, _ := GenerateChain(&conf, genesis, dummy.NewFaker(), db, 32, 10, func(i int, gen *BlockGen) {}) - - if _, err := bc.InsertChain(blocks); err != nil { - t.Fatalf("failed to import blocks: %v", err) - } -} diff --git a/coreth/core/error.go b/coreth/core/error.go index a90475fe..955049ee 100644 --- a/coreth/core/error.go +++ b/coreth/core/error.go @@ -96,7 +96,7 @@ var ( ErrFeeCapVeryHigh = errors.New("max fee per gas higher than 2^256-1") // ErrFeeCapTooLow is returned if the transaction fee cap is less than the - // the base fee of the block. + // base fee of the block. ErrFeeCapTooLow = errors.New("max fee per gas less than block base fee") // ErrSenderNoEOA is returned if the sender of a transaction is a contract. diff --git a/coreth/core/fifo_cache.go b/coreth/core/fifo_cache.go new file mode 100644 index 00000000..c941382f --- /dev/null +++ b/coreth/core/fifo_cache.go @@ -0,0 +1,70 @@ +// (c) 2021, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package core + +import "sync" + +var ( + _ FIFOCache[int, int] = (*BufferFIFOCache[int, int])(nil) + _ FIFOCache[int, int] = (*NoOpFIFOCache[int, int])(nil) +) + +// FIFOCache evicts the oldest element added to it after [limit] items are +// added. +type FIFOCache[K comparable, V any] interface { + Put(K, V) + Get(K) (V, bool) +} + +// NewFIFOCache creates a new First-In-First-Out cache of size [limit]. +// +// If a [limit] of 0 is passed as an argument, a no-op cache is returned that +// does nothing. +func NewFIFOCache[K comparable, V any](limit int) FIFOCache[K, V] { + if limit <= 0 { + return &NoOpFIFOCache[K, V]{} + } + + c := &BufferFIFOCache[K, V]{ + m: make(map[K]V, limit), + } + c.buffer = NewBoundedBuffer(limit, c.remove) + return c +} + +type BufferFIFOCache[K comparable, V any] struct { + l sync.RWMutex + + buffer *BoundedBuffer[K] + m map[K]V +} + +func (f *BufferFIFOCache[K, V]) Put(key K, val V) { + f.l.Lock() + defer f.l.Unlock() + + f.buffer.Insert(key) // Insert will remove the oldest [K] if we are at the [limit] + f.m[key] = val +} + +func (f *BufferFIFOCache[K, V]) Get(key K) (V, bool) { + f.l.RLock() + defer f.l.RUnlock() + + v, ok := f.m[key] + return v, ok +} + +// remove is used as the callback in [BoundedBuffer]. It is assumed that the +// [WriteLock] is held when this is accessed. 
+func (f *BufferFIFOCache[K, V]) remove(key K) { + delete(f.m, key) +} + +type NoOpFIFOCache[K comparable, V any] struct{} + +func (f *NoOpFIFOCache[K, V]) Put(_ K, _ V) {} +func (f *NoOpFIFOCache[K, V]) Get(_ K) (V, bool) { + return *new(V), false +} diff --git a/coreth/core/genesis.go b/coreth/core/genesis.go index 5c330ac3..54da8dff 100644 --- a/coreth/core/genesis.go +++ b/coreth/core/genesis.go @@ -131,7 +131,6 @@ func (h *storageJSON) UnmarshalText(text []byte) error { } offset := len(h) - len(text)/2 // pad on the left if _, err := hex.Decode(h[offset:], text); err != nil { - fmt.Println(err) return fmt.Errorf("invalid hex storage key/value %q", text) } return nil @@ -162,22 +161,24 @@ func (e *GenesisMismatchError) Error() string { // The stored chain configuration will be updated if it is compatible (i.e. does not // specify a fork block below the local head block). In case of a conflict, the // error is a *params.ConfigCompatError and the new, unwritten config is returned. -func SetupGenesisBlock(db ethdb.Database, genesis *Genesis, lastAcceptedHash common.Hash) (*params.ChainConfig, error) { +func SetupGenesisBlock( + db ethdb.Database, genesis *Genesis, lastAcceptedHash common.Hash, skipChainConfigCheckCompatible bool, +) (*params.ChainConfig, common.Hash, error) { if genesis == nil { - return nil, ErrNoGenesis + return nil, common.Hash{}, ErrNoGenesis } if genesis.Config == nil { - return nil, errGenesisNoConfig + return nil, common.Hash{}, errGenesisNoConfig } // Just commit the new block if there is no stored genesis block. stored := rawdb.ReadCanonicalHash(db, 0) if (stored == common.Hash{}) { log.Info("Writing genesis to database") - _, err := genesis.Commit(db) + block, err := genesis.Commit(db) if err != nil { - return genesis.Config, err + return genesis.Config, common.Hash{}, err } - return genesis.Config, nil + return genesis.Config, block.Hash(), nil } // We have the genesis block in database but the corresponding state is missing. 
header := rawdb.ReadHeader(db, stored, 0) @@ -185,28 +186,27 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis, lastAcceptedHash com // Ensure the stored genesis matches with the given one. hash := genesis.ToBlock(nil).Hash() if hash != stored { - return genesis.Config, &GenesisMismatchError{stored, hash} + return genesis.Config, common.Hash{}, &GenesisMismatchError{stored, hash} } _, err := genesis.Commit(db) - return genesis.Config, err + return genesis.Config, common.Hash{}, err } // Check whether the genesis block is already written. hash := genesis.ToBlock(nil).Hash() if hash != stored { - return genesis.Config, &GenesisMismatchError{stored, hash} + return genesis.Config, common.Hash{}, &GenesisMismatchError{stored, hash} } // Get the existing chain configuration. newcfg := genesis.Config if err := newcfg.CheckConfigForkOrder(); err != nil { - return newcfg, err + return newcfg, common.Hash{}, err } storedcfg := rawdb.ReadChainConfig(db, stored) if storedcfg == nil { log.Warn("Found genesis block without chain config") rawdb.WriteChainConfig(db, stored, newcfg) - return newcfg, nil + return newcfg, stored, nil } - // Check config compatibility and write the config. Compatibility errors // are returned to the caller unless we're already at block zero. // we use last accepted block for cfg compatibility check. 
Note this allows @@ -217,16 +217,20 @@ func SetupGenesisBlock(db ethdb.Database, genesis *Genesis, lastAcceptedHash com // when we start syncing from scratch, the last accepted block // will be genesis block if lastBlock == nil { - return newcfg, fmt.Errorf("missing last accepted block") + return newcfg, common.Hash{}, fmt.Errorf("missing last accepted block") } height := lastBlock.NumberU64() timestamp := lastBlock.Time() - compatErr := storedcfg.CheckCompatible(newcfg, height, timestamp) - if compatErr != nil && height != 0 && compatErr.RewindTo != 0 { - return newcfg, compatErr + if skipChainConfigCheckCompatible { + log.Info("skipping verifying activated network upgrades on chain config") + } else { + compatErr := storedcfg.CheckCompatible(newcfg, height, timestamp) + if compatErr != nil && height != 0 && compatErr.RewindTo != 0 { + return newcfg, stored, compatErr + } } rawdb.WriteChainConfig(db, stored, newcfg) - return newcfg, nil + return newcfg, stored, nil } // ToBlock creates the genesis block and writes state of a genesis specification diff --git a/coreth/core/genesis_test.go b/coreth/core/genesis_test.go index ba1fa841..ebfa92f5 100644 --- a/coreth/core/genesis_test.go +++ b/coreth/core/genesis_test.go @@ -43,9 +43,7 @@ import ( ) func setupGenesisBlock(db ethdb.Database, genesis *Genesis, lastAcceptedHash common.Hash) (*params.ChainConfig, common.Hash, error) { - conf, err := SetupGenesisBlock(db, genesis, lastAcceptedHash) - stored := rawdb.ReadCanonicalHash(db, 0) - return conf, stored, err + return SetupGenesisBlock(db, genesis, lastAcceptedHash, false) } func TestGenesisBlockForTesting(t *testing.T) { @@ -103,7 +101,6 @@ func TestSetupGenesis(t *testing.T) { return setupGenesisBlock(db, nil, common.Hash{}) }, wantErr: ErrNoGenesis, - wantHash: customghash, wantConfig: nil, }, { @@ -122,12 +119,12 @@ func TestSetupGenesis(t *testing.T) { // Advance to block #4, past the ApricotPhase1 transition block of customg. 
genesis := oldcustomg.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, oldcustomg.Config, dummy.NewFullFaker(), vm.Config{}, common.Hash{}) + bc, _ := NewBlockChain(db, DefaultCacheConfig, &oldcustomg, dummy.NewFullFaker(), vm.Config{}, genesis.Hash(), false) defer bc.Stop() blocks, _, _ := GenerateChain(oldcustomg.Config, genesis, dummy.NewFullFaker(), db, 4, 25, nil) bc.InsertChain(blocks) - bc.CurrentBlock() + for _, block := range blocks { if err := bc.Accept(block); err != nil { t.Fatal(err) @@ -183,12 +180,11 @@ func TestNetworkUpgradeBetweenHeadAndAcceptedBlock(t *testing.T) { {1}: {Balance: big.NewInt(1), Storage: map[common.Hash]common.Hash{{1}: {1}}}, }, } - genesis := customg.MustCommit(db) - bc, _ := NewBlockChain(db, DefaultCacheConfig, customg.Config, dummy.NewFullFaker(), vm.Config{}, common.Hash{}) + bc, _ := NewBlockChain(db, DefaultCacheConfig, &customg, dummy.NewFullFaker(), vm.Config{}, common.Hash{}, false) defer bc.Stop() // Advance header to block #4, past the ApricotPhase2 timestamp. - blocks, _, _ := GenerateChain(customg.Config, genesis, dummy.NewFullFaker(), db, 4, 25, nil) + _, blocks, _, _ := GenerateChainWithGenesis(&customg, dummy.NewFullFaker(), 4, 25, nil) require := require.New(t) _, err := bc.InsertChain(blocks) diff --git a/coreth/core/headerchain.go b/coreth/core/headerchain.go index 94b34058..b782959a 100644 --- a/coreth/core/headerchain.go +++ b/coreth/core/headerchain.go @@ -70,9 +70,10 @@ type HeaderChain struct { currentHeader atomic.Value // Current head of the header chain (may be above the block chain!) 
currentHeaderHash common.Hash // Hash of the current head of the header chain (prevent recomputing all the time) - headerCache *lru.Cache // Cache for the most recent block headers - tdCache *lru.Cache // Cache for the most recent block total difficulties - numberCache *lru.Cache // Cache for the most recent block numbers + headerCache *lru.Cache // Cache for the most recent block headers + tdCache *lru.Cache // Cache for the most recent block total difficulties + numberCache *lru.Cache // Cache for the most recent block numbers + acceptedNumberCache FIFOCache[uint64, *types.Header] // Cache for most recent accepted heights to headers (only modified in accept) rand *mrand.Rand engine consensus.Engine @@ -80,10 +81,11 @@ type HeaderChain struct { // NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points // to the parent's interrupt semaphore. -func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine) (*HeaderChain, error) { +func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, cacheConfig *CacheConfig, engine consensus.Engine) (*HeaderChain, error) { headerCache, _ := lru.New(headerCacheLimit) tdCache, _ := lru.New(tdCacheLimit) numberCache, _ := lru.New(numberCacheLimit) + acceptedNumberCache := NewFIFOCache[uint64, *types.Header](cacheConfig.AcceptedCacheSize) // Seed a fast but crypto originating random generator seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64)) @@ -92,13 +94,14 @@ func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine c } hc := &HeaderChain{ - config: config, - chainDb: chainDb, - headerCache: headerCache, - tdCache: tdCache, - numberCache: numberCache, - rand: mrand.New(mrand.NewSource(seed.Int64())), - engine: engine, + config: config, + chainDb: chainDb, + headerCache: headerCache, + tdCache: tdCache, + numberCache: numberCache, + acceptedNumberCache: acceptedNumberCache, + rand: mrand.New(mrand.NewSource(seed.Int64())), + engine: 
engine, } hc.genesisHeader = hc.GetHeaderByNumber(0) @@ -170,6 +173,9 @@ func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool { // GetHeaderByNumber retrieves a block header from the database by number, // caching it (associated with its hash) if found. func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header { + if cachedHeader, ok := hc.acceptedNumberCache.Get(number); ok { + return cachedHeader + } hash := rawdb.ReadCanonicalHash(hc.chainDb, number) if hash == (common.Hash{}) { return nil diff --git a/coreth/core/headerchain_test.go b/coreth/core/headerchain_test.go index 8c8b5985..2170dded 100644 --- a/coreth/core/headerchain_test.go +++ b/coreth/core/headerchain_test.go @@ -39,7 +39,6 @@ import ( "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" ) func verifyUnbrokenCanonchain(bc *BlockChain) error { @@ -74,13 +73,14 @@ func testInsert(t *testing.T, bc *BlockChain, chain []*types.Block, wantErr erro // This test checks status reporting of InsertHeaderChain. 
func TestHeaderInsertion(t *testing.T) { var ( - db = rawdb.NewMemoryDatabase() - genesis = (&Genesis{ + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), Config: params.TestChainConfig, - }).MustCommit(db) + } ) - chain, err := NewBlockChain(db, DefaultCacheConfig, params.TestChainConfig, dummy.NewFaker(), vm.Config{}, common.Hash{}) + genesis := gspec.ToBlock(nil) + chain, err := NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) if err != nil { t.Fatal(err) } @@ -92,7 +92,6 @@ func TestHeaderInsertion(t *testing.T) { chainB, _, _ := GenerateChain(params.TestChainConfig, types.NewBlockWithHeader(chainA[0].Header()), dummy.NewFaker(), db, 128, 10, func(i int, b *BlockGen) { b.SetCoinbase(common.Address{0: byte(10), 19: byte(i)}) }) - log.Root().SetHandler(log.StdoutHandler) // Inserting 64 headers on an empty chain testInsert(t, chain, chainA[:64], nil) diff --git a/coreth/core/mkalloc.go b/coreth/core/mkalloc.go index 76978a54..e7bdf8f1 100644 --- a/coreth/core/mkalloc.go +++ b/coreth/core/mkalloc.go @@ -28,12 +28,10 @@ // +build none /* +The mkalloc tool creates the genesis allocation constants in genesis_alloc.go +It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples. - The mkalloc tool creates the genesis allocation constants in genesis_alloc.go - It outputs a const declaration that contains an RLP-encoded list of (address, balance) tuples. 
- - go run mkalloc.go genesis.json - + go run mkalloc.go genesis.json */ package main diff --git a/coreth/core/rawdb/accessors_chain.go b/coreth/core/rawdb/accessors_chain.go index 647d4f3c..6d4b1e55 100644 --- a/coreth/core/rawdb/accessors_chain.go +++ b/coreth/core/rawdb/accessors_chain.go @@ -588,3 +588,23 @@ func ReadHeadBlock(db ethdb.Reader) *types.Block { } return ReadBlock(db, headBlockHash, *headBlockNumber) } + +// ReadTxIndexTail retrieves the number of oldest indexed block +// whose transaction indices has been indexed. If the corresponding entry +// is non-existent in database it means the indexing has been finished. +func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 { + data, _ := db.Get(txIndexTailKey) + if len(data) != 8 { + return nil + } + number := binary.BigEndian.Uint64(data) + return &number +} + +// WriteTxIndexTail stores the number of oldest indexed block +// into database. +func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) { + if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil { + log.Crit("Failed to store the transaction index tail", "err", err) + } +} diff --git a/coreth/core/rawdb/accessors_state_sync.go b/coreth/core/rawdb/accessors_state_sync.go index 4a16055f..ab947e9f 100644 --- a/coreth/core/rawdb/accessors_state_sync.go +++ b/coreth/core/rawdb/accessors_state_sync.go @@ -4,6 +4,9 @@ package rawdb import ( + "encoding/binary" + + "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/ethdb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -153,3 +156,24 @@ func packSyncStorageTrieKey(root common.Hash, account common.Hash) []byte { bytes = append(bytes, account[:]...) return bytes } + +// WriteSyncPerformed logs an entry in [db] indicating the VM state synced to [blockNumber]. 
+func WriteSyncPerformed(db ethdb.KeyValueWriter, blockNumber uint64) error { + syncPerformedPrefixLen := len(syncPerformedPrefix) + bytes := make([]byte, syncPerformedPrefixLen+wrappers.LongLen) + copy(bytes[:syncPerformedPrefixLen], syncPerformedPrefix) + binary.BigEndian.PutUint64(bytes[syncPerformedPrefixLen:], blockNumber) + return db.Put(bytes, []byte{0x01}) +} + +// NewSyncPerformedIterator returns an iterator over all block numbers the VM +// has state synced to. +func NewSyncPerformedIterator(db ethdb.Iteratee) ethdb.Iterator { + return NewKeyLengthIterator(db.NewIterator(syncPerformedPrefix, nil), syncPerformedKeyLength) +} + +// UnpackSyncPerformedKey returns the block number from keys the iterator returned +// from NewSyncPerformedIterator. +func UnpackSyncPerformedKey(key []byte) uint64 { + return binary.BigEndian.Uint64(key[len(syncPerformedPrefix):]) +} diff --git a/coreth/core/rawdb/chain_iterator.go b/coreth/core/rawdb/chain_iterator.go new file mode 100644 index 00000000..5a436f1d --- /dev/null +++ b/coreth/core/rawdb/chain_iterator.go @@ -0,0 +1,311 @@ +// (c) 2019-2022, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "runtime" + "sync/atomic" + "time" + + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/ethdb" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/prque" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" +) + +type blockTxHashes struct { + number uint64 + hashes []common.Hash +} + +// iterateTransactions iterates over all transactions in the (canon) block +// number(s) given, and yields the hashes on a channel. If there is a signal +// received from interrupt channel, the iteration will be aborted and result +// channel will be closed. 
+// Iterates blocks in the range [from, to) +func iterateTransactions(db ethdb.Database, from uint64, to uint64, reverse bool, interrupt chan struct{}) chan *blockTxHashes { + // One thread sequentially reads data from db + type numberRlp struct { + number uint64 + rlp rlp.RawValue + } + if to == from { + return nil + } + threads := to - from + if cpus := runtime.NumCPU(); threads > uint64(cpus) { + threads = uint64(cpus) + } + var ( + rlpCh = make(chan *numberRlp, threads*2) // we send raw rlp over this channel + hashesCh = make(chan *blockTxHashes, threads*2) // send hashes over hashesCh + ) + // lookup runs in one instance + lookup := func() { + n, end := from, to + if reverse { + n, end = to-1, from-1 + } + defer close(rlpCh) + for n != end { + data := ReadCanonicalBodyRLP(db, n) + // Feed the block to the aggregator, or abort on interrupt + select { + case rlpCh <- &numberRlp{n, data}: + case <-interrupt: + return + } + if reverse { + n-- + } else { + n++ + } + } + } + // process runs in parallel + nThreadsAlive := int32(threads) + process := func() { + defer func() { + // Last processor closes the result channel + if atomic.AddInt32(&nThreadsAlive, -1) == 0 { + close(hashesCh) + } + }() + for data := range rlpCh { + var body types.Body + if err := rlp.DecodeBytes(data.rlp, &body); err != nil { + log.Warn("Failed to decode block body", "block", data.number, "error", err) + return + } + var hashes []common.Hash + for _, tx := range body.Transactions { + hashes = append(hashes, tx.Hash()) + } + result := &blockTxHashes{ + hashes: hashes, + number: data.number, + } + // Feed the block to the aggregator, or abort on interrupt + select { + case hashesCh <- result: + case <-interrupt: + return + } + } + } + go lookup() // start the sequential db accessor + for i := 0; i < int(threads); i++ { + go process() + } + return hashesCh +} + +// indexTransactions creates txlookup indices of the specified block range. 
+// +// This function iterates canonical chain in reverse order, it has one main advantage: +// We can write the tx index tail flag periodically even before the whole indexing +// procedure is finished, so that we can resume the indexing procedure quickly next time. +// +// There is a passed channel, the whole procedure will be interrupted if any +// signal is received. +func indexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) { + // short circuit for invalid range + if from >= to { + return + } + var ( + hashesCh = iterateTransactions(db, from, to, true, interrupt) + batch = db.NewBatch() + start = time.Now() + logged = start.Add(-7 * time.Second) + // Since we iterate in reverse, we expect the first number to come + // in to be [to-1]. Therefore, setting lastNum to [to] means that the + // prqueue gap-evaluation will work correctly + lastNum = to + queue = prque.New(nil) + // for stats reporting + blocks, txs = 0, 0 + ) + for chanDelivery := range hashesCh { + // Push the delivery into the queue and process contiguous ranges. 
+ // Since we iterate in reverse, so lower numbers have lower prio, and + // we can use the number directly as prio marker + queue.Push(chanDelivery, int64(chanDelivery.number)) + for !queue.Empty() { + // If the next available item is gapped, return + if _, priority := queue.Peek(); priority != int64(lastNum-1) { + break + } + // For testing + if hook != nil && !hook(lastNum-1) { + break + } + // Next block available, pop it off and index it + delivery := queue.PopItem().(*blockTxHashes) + lastNum = delivery.number + WriteTxLookupEntries(batch, delivery.number, delivery.hashes) + blocks++ + txs += len(delivery.hashes) + // If enough data was accumulated in memory or we're at the last block, dump to disk + if batch.ValueSize() > ethdb.IdealBatchSize { + WriteTxIndexTail(batch, lastNum) // Also write the tail here + if err := batch.Write(); err != nil { + log.Crit("Failed writing batch to db", "error", err) + return + } + batch.Reset() + } + // If we've spent too much time already, notify the user of what we're doing + if time.Since(logged) > 8*time.Second { + log.Info("Indexing transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + } + } + // Flush the new indexing tail and the last committed data. It can also happen + // that the last batch is empty because nothing to index, but the tail has to + // be flushed anyway. 
+ WriteTxIndexTail(batch, lastNum) + if err := batch.Write(); err != nil { + log.Crit("Failed writing batch to db", "error", err) + return + } + select { + case <-interrupt: + log.Debug("Transaction indexing interrupted", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start))) + default: + log.Info("Indexed transactions", "blocks", blocks, "txs", txs, "tail", lastNum, "elapsed", common.PrettyDuration(time.Since(start))) + } +} + +// // IndexTransactions creates txlookup indices of the specified block range. The from +// // is included while to is excluded. +// // +// // This function iterates canonical chain in reverse order, it has one main advantage: +// // We can write tx index tail flag periodically even without the whole indexing +// // procedure is finished. So that we can resume indexing procedure next time quickly. +// // +// // There is a passed channel, the whole procedure will be interrupted if any +// // signal received. +// func IndexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) { +// indexTransactions(db, from, to, interrupt, nil) +// } + +// indexTransactionsForTesting is the internal debug version with an additional hook. +func indexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) { + indexTransactions(db, from, to, interrupt, hook) +} + +// unindexTransactions removes txlookup indices of the specified block range. +// +// There is a passed channel, the whole procedure will be interrupted if any +// signal received. 
+func unindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) { + // short circuit for invalid range + if from >= to { + return + } + var ( + hashesCh = iterateTransactions(db, from, to, false, interrupt) + batch = db.NewBatch() + start = time.Now() + logged = start.Add(-7 * time.Second) + // we expect the first number to come in to be [from]. Therefore, setting + // nextNum to from means that the prqueue gap-evaluation will work correctly + nextNum = from + queue = prque.New(nil) + // for stats reporting + blocks, txs = 0, 0 + ) + // Otherwise spin up the concurrent iterator and unindexer + for delivery := range hashesCh { + // Push the delivery into the queue and process contiguous ranges. + queue.Push(delivery, -int64(delivery.number)) + for !queue.Empty() { + // If the next available item is gapped, return + if _, priority := queue.Peek(); -priority != int64(nextNum) { + break + } + // For testing + if hook != nil && !hook(nextNum) { + break + } + delivery := queue.PopItem().(*blockTxHashes) + nextNum = delivery.number + 1 + DeleteTxLookupEntries(batch, delivery.hashes) + txs += len(delivery.hashes) + blocks++ + + // If enough data was accumulated in memory or we're at the last block, dump to disk + // A batch counts the size of deletion as '1', so we need to flush more + // often than that. + if blocks%1000 == 0 { + WriteTxIndexTail(batch, nextNum) + if err := batch.Write(); err != nil { + log.Crit("Failed writing batch to db", "error", err) + return + } + batch.Reset() + } + // If we've spent too much time already, notify the user of what we're doing + if time.Since(logged) > 8*time.Second { + log.Info("Unindexing transactions", "blocks", blocks, "txs", txs, "total", to-from, "elapsed", common.PrettyDuration(time.Since(start))) + logged = time.Now() + } + } + } + // Flush the new indexing tail and the last committed data. 
It can also happen + // that the last batch is empty because nothing to unindex, but the tail has to + // be flushed anyway. + WriteTxIndexTail(batch, nextNum) + if err := batch.Write(); err != nil { + log.Crit("Failed writing batch to db", "error", err) + return + } + select { + case <-interrupt: + log.Debug("Transaction unindexing interrupted", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start))) + default: + log.Info("Unindexed transactions", "blocks", blocks, "txs", txs, "tail", to, "elapsed", common.PrettyDuration(time.Since(start))) + } +} + +// UnindexTransactions removes txlookup indices of the specified block range. +// The from is included while to is excluded. +// +// There is a passed channel, the whole procedure will be interrupted if any +// signal received. +func UnindexTransactions(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}) { + unindexTransactions(db, from, to, interrupt, nil) +} + +// unindexTransactionsForTesting is the internal debug version with an additional hook. +func unindexTransactionsForTesting(db ethdb.Database, from uint64, to uint64, interrupt chan struct{}, hook func(uint64) bool) { + unindexTransactions(db, from, to, interrupt, hook) +} diff --git a/coreth/core/rawdb/chain_iterator_test.go b/coreth/core/rawdb/chain_iterator_test.go new file mode 100644 index 00000000..282849c6 --- /dev/null +++ b/coreth/core/rawdb/chain_iterator_test.go @@ -0,0 +1,218 @@ +// (c) 2019-2022, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rawdb + +import ( + "math/big" + "reflect" + "sort" + "sync" + "testing" + + "github.com/ava-labs/coreth/core/types" + "github.com/ethereum/go-ethereum/common" +) + +func TestChainIterator(t *testing.T) { + // Construct test chain db + chainDb := NewMemoryDatabase() + + var block *types.Block + var txs []*types.Transaction + to := common.BytesToAddress([]byte{0x11}) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher(), nil, true) // Empty genesis block + WriteBlock(chainDb, block) + WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) + for i := uint64(1); i <= 10; i++ { + var tx *types.Transaction + if i%2 == 0 { + tx = types.NewTx(&types.LegacyTx{ + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) + } else { + tx = types.NewTx(&types.AccessListTx{ + ChainID: big.NewInt(1337), + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) + } + txs = append(txs, tx) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, []*types.Transaction{tx}, nil, nil, newHasher(), nil, true) + WriteBlock(chainDb, block) + WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) 
+ } + + var cases = []struct { + from, to uint64 + reverse bool + expect []int + }{ + {0, 11, true, []int{10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}}, + {0, 0, true, nil}, + {0, 5, true, []int{4, 3, 2, 1, 0}}, + {10, 11, true, []int{10}}, + {0, 11, false, []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + {0, 0, false, nil}, + {10, 11, false, []int{10}}, + } + for i, c := range cases { + var numbers []int + hashCh := iterateTransactions(chainDb, c.from, c.to, c.reverse, nil) + if hashCh != nil { + for h := range hashCh { + numbers = append(numbers, int(h.number)) + if len(h.hashes) > 0 { + if got, exp := h.hashes[0], txs[h.number-1].Hash(); got != exp { + t.Fatalf("block %d: hash wrong, got %x exp %x", h.number, got, exp) + } + } + } + } + if !c.reverse { + sort.Ints(numbers) + } else { + sort.Sort(sort.Reverse(sort.IntSlice(numbers))) + } + if !reflect.DeepEqual(numbers, c.expect) { + t.Fatalf("Case %d failed, visit element mismatch, want %v, got %v", i, c.expect, numbers) + } + } +} + +func TestIndexTransactions(t *testing.T) { + // Construct test chain db + chainDb := NewMemoryDatabase() + + var block *types.Block + var txs []*types.Transaction + to := common.BytesToAddress([]byte{0x11}) + + // Write empty genesis block + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(0))}, nil, nil, nil, newHasher(), nil, true) + WriteBlock(chainDb, block) + WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) + + for i := uint64(1); i <= 10; i++ { + var tx *types.Transaction + if i%2 == 0 { + tx = types.NewTx(&types.LegacyTx{ + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) + } else { + tx = types.NewTx(&types.AccessListTx{ + ChainID: big.NewInt(1337), + Nonce: i, + GasPrice: big.NewInt(11111), + Gas: 1111, + To: &to, + Value: big.NewInt(111), + Data: []byte{0x11, 0x11, 0x11}, + }) + } + txs = append(txs, tx) + block = types.NewBlock(&types.Header{Number: big.NewInt(int64(i))}, 
[]*types.Transaction{tx}, nil, nil, newHasher(), nil, true) + WriteBlock(chainDb, block) + WriteCanonicalHash(chainDb, block.Hash(), block.NumberU64()) + } + // verify checks whether the tx indices in the range [from, to) + // is expected. + verify := func(from, to int, exist bool, tail uint64) { + for i := from; i < to; i++ { + if i == 0 { + continue + } + number := ReadTxLookupEntry(chainDb, txs[i-1].Hash()) + if exist && number == nil { + t.Fatalf("Transaction index %d missing", i) + } + if !exist && number != nil { + t.Fatalf("Transaction index %d is not deleted", i) + } + } + number := ReadTxIndexTail(chainDb) + if number == nil || *number != tail { + t.Fatalf("Transaction tail mismatch") + } + } + indexTransactionsForTesting(chainDb, 5, 11, nil, nil) + verify(5, 11, true, 5) + verify(0, 5, false, 5) + + indexTransactionsForTesting(chainDb, 0, 5, nil, nil) + verify(0, 11, true, 0) + + UnindexTransactions(chainDb, 0, 5, nil) + verify(5, 11, true, 5) + verify(0, 5, false, 5) + + UnindexTransactions(chainDb, 5, 11, nil) + verify(0, 11, false, 11) + + // Testing corner cases + signal := make(chan struct{}) + var once sync.Once + indexTransactionsForTesting(chainDb, 5, 11, signal, func(n uint64) bool { + if n <= 8 { + once.Do(func() { + close(signal) + }) + return false + } + return true + }) + verify(9, 11, true, 9) + verify(0, 9, false, 9) + indexTransactionsForTesting(chainDb, 0, 9, nil, nil) + + signal = make(chan struct{}) + var once2 sync.Once + unindexTransactionsForTesting(chainDb, 0, 11, signal, func(n uint64) bool { + if n >= 8 { + once2.Do(func() { + close(signal) + }) + return false + } + return true + }) + verify(8, 11, true, 8) + verify(0, 8, false, 8) +} diff --git a/coreth/core/rawdb/database.go b/coreth/core/rawdb/database.go index a4dfccb3..2989344a 100644 --- a/coreth/core/rawdb/database.go +++ b/coreth/core/rawdb/database.go @@ -131,9 +131,10 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { cliqueSnaps stat // State 
sync statistics - codeToFetch stat - syncProgress stat - syncSegments stat + codeToFetch stat + syncProgress stat + syncSegments stat + syncPerformed stat // Les statistic chtTrieNodes stat @@ -198,12 +199,14 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { syncSegments.Add(size) case bytes.HasPrefix(key, CodeToFetchPrefix) && len(key) == codeToFetchKeyLength: codeToFetch.Add(size) + case bytes.HasPrefix(key, syncPerformedPrefix) && len(key) == syncPerformedKeyLength: + syncPerformed.Add(size) default: var accounted bool for _, meta := range [][]byte{ databaseVersionKey, headHeaderKey, headBlockKey, snapshotRootKey, snapshotBlockHashKey, snapshotGeneratorKey, - uncleanShutdownKey, syncRootKey, + uncleanShutdownKey, syncRootKey, txIndexTailKey, } { if bytes.Equal(key, meta) { metadata.Add(size) @@ -242,6 +245,7 @@ func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error { {"State sync", "Trie segments", syncSegments.Size(), syncSegments.Count()}, {"State sync", "Storage tries to fetch", syncProgress.Size(), syncProgress.Count()}, {"State sync", "Code to fetch", codeToFetch.Size(), codeToFetch.Count()}, + {"State sync", "Block numbers synced to", syncPerformed.Size(), syncPerformed.Count()}, } table := tablewriter.NewWriter(os.Stdout) table.SetHeader([]string{"Database", "Category", "Size", "Items"}) diff --git a/coreth/core/rawdb/schema.go b/coreth/core/rawdb/schema.go index 12ad9086..f817d218 100644 --- a/coreth/core/rawdb/schema.go +++ b/coreth/core/rawdb/schema.go @@ -31,6 +31,7 @@ import ( "bytes" "encoding/binary" + "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/metrics" "github.com/ethereum/go-ethereum/common" ) @@ -55,6 +56,9 @@ var ( // snapshotGeneratorKey tracks the snapshot generation marker across restarts. snapshotGeneratorKey = []byte("SnapshotGenerator") + // txIndexTailKey tracks the oldest block whose transactions have been indexed. 
+ txIndexTailKey = []byte("TransactionIndexTail") + // uncleanShutdownKey tracks the list of local crashes uncleanShutdownKey = []byte("unclean-shutdown") // config prefix for the db @@ -96,6 +100,10 @@ var ( syncSegmentsKeyLength = len(syncSegmentsPrefix) + 2*common.HashLength codeToFetchKeyLength = len(CodeToFetchPrefix) + common.HashLength + // State sync metadata + syncPerformedPrefix = []byte("sync_performed") + syncPerformedKeyLength = len(syncPerformedPrefix) + wrappers.LongLen // prefix + block number as uint64 + preimagePrefix = []byte("secure-key-") // preimagePrefix + hash -> preimage configPrefix = []byte("ethereum-config-") // config prefix for the db diff --git a/coreth/core/rlp_test.go b/coreth/core/rlp_test.go index b6588ca6..9c9614f8 100644 --- a/coreth/core/rlp_test.go +++ b/coreth/core/rlp_test.go @@ -32,7 +32,6 @@ import ( "testing" "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" @@ -43,10 +42,9 @@ import ( func getBlock(transactions int, uncles int, dataSize int) *types.Block { var ( - aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") - // Generate a canonical chain to act as the main dataset + aa = common.HexToAddress("0x000000000000000000000000000000000000aaaa") engine = dummy.NewFaker() - db = rawdb.NewMemoryDatabase() + // A sender who makes transactions, has some funds key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") address = crypto.PubkeyToAddress(key.PublicKey) @@ -55,11 +53,9 @@ func getBlock(transactions int, uncles int, dataSize int) *types.Block { Config: params.TestChainConfig, Alloc: GenesisAlloc{address: {Balance: funds}}, } - genesis = gspec.MustCommit(db) ) - // We need to generate as many blocks +1 as uncles - blocks, _, _ := GenerateChain(params.TestChainConfig, genesis, engine, db, uncles+1, 10, + _, 
blocks, _, _ := GenerateChainWithGenesis(gspec, engine, uncles+1, 10, func(n int, b *BlockGen) { if n == uncles { // Add transactions and stuff on the last block diff --git a/coreth/core/state/database.go b/coreth/core/state/database.go index e861234d..181fbec4 100644 --- a/coreth/core/state/database.go +++ b/coreth/core/state/database.go @@ -64,6 +64,9 @@ type Database interface { // ContractCodeSize retrieves a particular contracts code's size. ContractCodeSize(addrHash, codeHash common.Hash) (int, error) + // DiskDB returns the underlying key-value disk database. + DiskDB() ethdb.KeyValueStore + // TrieDB retrieves the low level trie database used for data storage. TrieDB() *trie.Database } @@ -140,6 +143,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { csc, _ := lru.New(codeSizeCacheSize) return &cachingDB{ db: trie.NewDatabaseWithConfig(db, config), + disk: db, codeSizeCache: csc, codeCache: fastcache.New(codeCacheSize), } @@ -147,6 +151,7 @@ func NewDatabaseWithConfig(db ethdb.Database, config *trie.Config) Database { type cachingDB struct { db *trie.Database + disk ethdb.KeyValueStore codeSizeCache *lru.Cache codeCache *fastcache.Cache } @@ -184,7 +189,7 @@ func (db *cachingDB) ContractCode(addrHash, codeHash common.Hash) ([]byte, error if code := db.codeCache.Get(nil, codeHash.Bytes()); len(code) > 0 { return code, nil } - code := rawdb.ReadCode(db.db.DiskDB(), codeHash) + code := rawdb.ReadCode(db.disk, codeHash) if len(code) > 0 { db.codeCache.Set(codeHash.Bytes(), code) db.codeSizeCache.Add(codeHash, len(code)) @@ -202,6 +207,11 @@ func (db *cachingDB) ContractCodeSize(addrHash, codeHash common.Hash) (int, erro return len(code), err } +// DiskDB returns the underlying key-value disk database. +func (db *cachingDB) DiskDB() ethdb.KeyValueStore { + return db.disk +} + // TrieDB retrieves any intermediate trie-node caching layer. 
func (db *cachingDB) TrieDB() *trie.Database { return db.db diff --git a/coreth/core/state/pruner/pruner.go b/coreth/core/state/pruner/pruner.go index d7301562..665ed8f9 100644 --- a/coreth/core/state/pruner/pruner.go +++ b/coreth/core/state/pruner/pruner.go @@ -76,9 +76,9 @@ var ( // Pruner is an offline tool to prune the stale state with the // help of the snapshot. The workflow of pruner is very simple: // -// - iterate the snapshot, reconstruct the relevant state -// - iterate the database, delete all other state entries which -// don't belong to the target state and the genesis state +// - iterate the snapshot, reconstruct the relevant state +// - iterate the database, delete all other state entries which +// don't belong to the target state and the genesis state // // It can take several hours(around 2 hours for mainnet) to finish // the whole pruning work. It's recommended to run this offline tool diff --git a/coreth/core/state/snapshot/difflayer.go b/coreth/core/state/snapshot/difflayer.go index d96289ca..6b897a14 100644 --- a/coreth/core/state/snapshot/difflayer.go +++ b/coreth/core/state/snapshot/difflayer.go @@ -78,7 +78,7 @@ var ( bloomFuncs = math.Round((bloomSize / float64(aggregatorItemLimit)) * math.Log(2)) // the bloom offsets are runtime constants which determines which part of the - // the account/storage hash the hasher functions looks at, to determine the + // account/storage hash the hasher functions looks at, to determine the // bloom key for an account/slot. 
This is randomized at init(), so that the // global population of nodes do not all display the exact same behaviour with // regards to bloom content diff --git a/coreth/core/state/snapshot/difflayer_test.go b/coreth/core/state/snapshot/difflayer_test.go index 588bab08..29520fa8 100644 --- a/coreth/core/state/snapshot/difflayer_test.go +++ b/coreth/core/state/snapshot/difflayer_test.go @@ -31,8 +31,8 @@ import ( "math/rand" "testing" - "github.com/VictoriaMetrics/fastcache" "github.com/ava-labs/coreth/ethdb/memorydb" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) @@ -245,7 +245,7 @@ func TestInsertAndMerge(t *testing.T) { func emptyLayer() *diskLayer { return &diskLayer{ diskdb: memorydb.New(), - cache: fastcache.New(500 * 1024), + cache: utils.NewMeteredCache(500*1024, "", "", 0), } } diff --git a/coreth/core/state/snapshot/disklayer.go b/coreth/core/state/snapshot/disklayer.go index 07add6be..051a4ac5 100644 --- a/coreth/core/state/snapshot/disklayer.go +++ b/coreth/core/state/snapshot/disklayer.go @@ -31,10 +31,10 @@ import ( "sync" "time" - "github.com/VictoriaMetrics/fastcache" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/rlp" ) @@ -43,7 +43,7 @@ import ( type diskLayer struct { diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot triedb *trie.Database // Trie node cache for reconstruction purposes - cache *fastcache.Cache // Cache to avoid hitting the disk for direct access + cache *utils.MeteredCache // Cache to avoid hitting the disk for direct access blockHash common.Hash // Block hash of the base snapshot root common.Hash // Root hash of the base snapshot diff --git a/coreth/core/state/snapshot/generate.go b/coreth/core/state/snapshot/generate.go index 7362e530..c985abe2 100644 --- 
a/coreth/core/state/snapshot/generate.go +++ b/coreth/core/state/snapshot/generate.go @@ -33,10 +33,10 @@ import ( "math/big" "time" - "github.com/VictoriaMetrics/fastcache" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/crypto" @@ -44,6 +44,11 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) +const ( + snapshotCacheNamespace = "state/snapshot/clean/fastcache" // prefix for detailed stats from the snapshot fastcache + snapshotCacheStatsUpdateFrequency = 1000 // update stats from the snapshot fastcache once per 1000 ops +) + var ( // emptyRoot is the known root hash of an empty trie. emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") @@ -155,7 +160,7 @@ func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache i triedb: triedb, blockHash: blockHash, root: root, - cache: fastcache.New(cache * 1024 * 1024), + cache: newMeteredSnapshotCache(cache * 1024 * 1024), genMarker: genMarker, genPending: make(chan struct{}), genAbort: make(chan chan struct{}), @@ -399,3 +404,7 @@ func (dl *diskLayer) generate(stats *generatorStats) { abort := <-dl.genAbort close(abort) } + +func newMeteredSnapshotCache(size int) *utils.MeteredCache { + return utils.NewMeteredCache(size, "", snapshotCacheNamespace, snapshotCacheStatsUpdateFrequency) +} diff --git a/coreth/core/state/snapshot/generate_test.go b/coreth/core/state/snapshot/generate_test.go index b1ab8a49..973276f9 100644 --- a/coreth/core/state/snapshot/generate_test.go +++ b/coreth/core/state/snapshot/generate_test.go @@ -230,10 +230,12 @@ func (t *testHelper) CommitAndGenerate() (common.Hash, *diskLayer) { // - miss in the beginning // - miss in the middle // - miss in the end +// // - the contract(non-empty storage) has wrong storage slots // - 
wrong slots in the beginning // - wrong slots in the middle // - wrong slots in the end +// // - the contract(non-empty storage) has extra storage slots // - extra slots in the beginning // - extra slots in the middle @@ -499,12 +501,12 @@ func TestGenerateWithExtraAccounts(t *testing.T) { // Identical in the snap key := hashData([]byte("acc-1")) - rawdb.WriteAccountSnapshot(helper.triedb.DiskDB(), key, val) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-1")), []byte("val-1")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-2")), []byte("val-2")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-3")), []byte("val-3")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-4")), []byte("val-4")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("key-5")), []byte("val-5")) + rawdb.WriteAccountSnapshot(helper.diskdb, key, val) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-1")), []byte("val-1")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-2")), []byte("val-2")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-3")), []byte("val-3")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-4")), []byte("val-4")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("key-5")), []byte("val-5")) } { // Account two exists only in the snapshot @@ -516,15 +518,15 @@ func TestGenerateWithExtraAccounts(t *testing.T) { acc := &Account{Balance: big.NewInt(1), Root: stRoot, CodeHash: emptyCode.Bytes()} val, _ := rlp.EncodeToBytes(acc) key := hashData([]byte("acc-2")) - rawdb.WriteAccountSnapshot(helper.triedb.DiskDB(), key, val) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-1")), []byte("b-val-1")) - rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-2")), []byte("b-val-2")) - 
rawdb.WriteStorageSnapshot(helper.triedb.DiskDB(), key, hashData([]byte("b-key-3")), []byte("b-val-3")) + rawdb.WriteAccountSnapshot(helper.diskdb, key, val) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-1")), []byte("b-val-1")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-2")), []byte("b-val-2")) + rawdb.WriteStorageSnapshot(helper.diskdb, key, hashData([]byte("b-key-3")), []byte("b-val-3")) } root := helper.Commit() // To verify the test: If we now inspect the snap db, there should exist extraneous storage items - if data := rawdb.ReadStorageSnapshot(helper.triedb.DiskDB(), hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil { + if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data == nil { t.Fatalf("expected snap storage to exist") } snap := generateSnapshot(helper.diskdb, helper.triedb, 16, testBlockHash, root, nil) @@ -542,7 +544,7 @@ func TestGenerateWithExtraAccounts(t *testing.T) { snap.genAbort <- stop <-stop // If we now inspect the snap db, there should exist no extraneous storage items - if data := rawdb.ReadStorageSnapshot(helper.triedb.DiskDB(), hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil { + if data := rawdb.ReadStorageSnapshot(helper.diskdb, hashData([]byte("acc-2")), hashData([]byte("b-key-1"))); data != nil { t.Fatalf("expected slot to be removed, got %v", string(data)) } } diff --git a/coreth/core/state/snapshot/journal.go b/coreth/core/state/snapshot/journal.go index d970913a..31318da7 100644 --- a/coreth/core/state/snapshot/journal.go +++ b/coreth/core/state/snapshot/journal.go @@ -32,7 +32,6 @@ import ( "fmt" "time" - "github.com/VictoriaMetrics/fastcache" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" @@ -91,7 +90,7 @@ func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, snapshot := &diskLayer{ diskdb: 
diskdb, triedb: triedb, - cache: fastcache.New(cache * 1024 * 1024), + cache: newMeteredSnapshotCache(cache * 1024 * 1024), root: baseRoot, blockHash: baseBlockHash, created: time.Now(), diff --git a/coreth/core/state/snapshot/snapshot.go b/coreth/core/state/snapshot/snapshot.go index 8d0475d9..b5b62f2d 100644 --- a/coreth/core/state/snapshot/snapshot.go +++ b/coreth/core/state/snapshot/snapshot.go @@ -35,11 +35,11 @@ import ( "sync/atomic" "time" - "github.com/VictoriaMetrics/fastcache" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/trie" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) @@ -379,11 +379,13 @@ func (t *Tree) verifyIntegrity(base *diskLayer, waitBuild bool) error { // Note: a blockHash is used instead of a state root so that the exact state // transition between the two states is well defined. This is intended to // prevent the following edge case -// A -// / \ -// B C -// | -// D +// +// A +// / \ +// B C +// | +// D +// // In this scenario, it's possible For (A, B) and (A, C, D) to be two // different paths to the resulting state. We use block hashes and parent // block hashes to ensure that the exact path through which we flatten @@ -599,6 +601,7 @@ func diffToDisk(bottom *diffLayer) (*diskLayer, bool, error) { // Mark the original base as stale as we're going to create a new wrapper base.lock.Lock() if base.stale { + base.lock.Unlock() return nil, false, ErrStaleParentLayer // we've committed into the same base from two children, boo } base.stale = true @@ -934,7 +937,7 @@ func NewDiskLayer(diskdb ethdb.KeyValueStore) Snapshot { // state sync uses iterators to access data, so this cache is not used. // initializing it out of caution. 
- cache: fastcache.New(32 * 1024), + cache: utils.NewMeteredCache(32*1024, "", "", 0), } } @@ -944,7 +947,7 @@ func NewTestTree(diskdb ethdb.KeyValueStore, blockHash, root common.Hash) *Tree diskdb: diskdb, root: root, blockHash: blockHash, - cache: fastcache.New(128 * 256), + cache: utils.NewMeteredCache(128*256, "", "", 0), created: time.Now(), } return &Tree{ diff --git a/coreth/core/state/snapshot/snapshot_test.go b/coreth/core/state/snapshot/snapshot_test.go index 03879c48..5bebc406 100644 --- a/coreth/core/state/snapshot/snapshot_test.go +++ b/coreth/core/state/snapshot/snapshot_test.go @@ -396,17 +396,19 @@ func TestPostFlattenBasicDataAccess(t *testing.T) { // different blocks inserted with an identical state root. // In this example, (B, C) and (D, E) share the identical state root, but were // inserted under different blocks. -// A -// / \ -// B C -// | | -// D E +// +// A +// / \ +// B C +// | | +// D E // // `t.Flatten(C)` should result in: // -// B C -// | | -// D E +// B C +// | | +// D E +// // With the branch D, E, hanging and relying on Discard to be called to // garbage collect the references. 
func TestTreeFlattenDoesNotDropPendingLayers(t *testing.T) { diff --git a/coreth/core/state/statedb.go b/coreth/core/state/statedb.go index ca69d1bd..9af59926 100644 --- a/coreth/core/state/statedb.go +++ b/coreth/core/state/statedb.go @@ -129,6 +129,7 @@ type StateDB struct { SnapshotAccountReads time.Duration SnapshotStorageReads time.Duration SnapshotCommits time.Duration + TrieDBCommits time.Duration AccountUpdated int StorageUpdated int @@ -987,7 +988,7 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas storageTrieNodes int nodes = trie.NewMergedNodeSet() ) - codeWriter := s.db.TrieDB().DiskDB().NewBatch() + codeWriter := s.db.DiskDB().NewBatch() for addr := range s.stateObjectsDirty { if obj := s.stateObjects[addr]; !obj.deleted { // Write any contract code associated with the state object @@ -1068,6 +1069,9 @@ func (s *StateDB) commit(deleteEmptyObjects bool, snaps *snapshot.Tree, blockHas } } s.originalRoot = root + if metrics.EnabledExpensive { + s.TrieDBCommits += time.Since(start) + } return root, err } diff --git a/coreth/core/state/trie_prefetcher.go b/coreth/core/state/trie_prefetcher.go index 5da0e9cc..c058214a 100644 --- a/coreth/core/state/trie_prefetcher.go +++ b/coreth/core/state/trie_prefetcher.go @@ -50,7 +50,10 @@ type triePrefetcher struct { fetches map[string]Trie // Partially or fully fetcher tries fetchers map[string]*subfetcher // Subfetchers for each trie - deliveryMissMeter metrics.Meter + deliveryCopyMissMeter metrics.Meter + deliveryRequestMissMeter metrics.Meter + deliveryWaitMissMeter metrics.Meter + accountLoadMeter metrics.Meter accountDupMeter metrics.Meter accountSkipMeter metrics.Meter @@ -68,7 +71,10 @@ func newTriePrefetcher(db Database, root common.Hash, namespace string) *triePre root: root, fetchers: make(map[string]*subfetcher), // Active prefetchers use the fetchers map - deliveryMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss", nil), + deliveryCopyMissMeter: 
metrics.GetOrRegisterMeter(prefix+"/deliverymiss/copy", nil), + deliveryRequestMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss/request", nil), + deliveryWaitMissMeter: metrics.GetOrRegisterMeter(prefix+"/deliverymiss/wait", nil), + accountLoadMeter: metrics.GetOrRegisterMeter(prefix+"/account/load", nil), accountDupMeter: metrics.GetOrRegisterMeter(prefix+"/account/dup", nil), accountSkipMeter: metrics.GetOrRegisterMeter(prefix+"/account/skip", nil), @@ -123,7 +129,10 @@ func (p *triePrefetcher) copy() *triePrefetcher { root: p.root, fetches: make(map[string]Trie), // Active prefetchers use the fetches map - deliveryMissMeter: p.deliveryMissMeter, + deliveryCopyMissMeter: p.deliveryCopyMissMeter, + deliveryRequestMissMeter: p.deliveryRequestMissMeter, + deliveryWaitMissMeter: p.deliveryWaitMissMeter, + accountLoadMeter: p.accountLoadMeter, accountDupMeter: p.accountDupMeter, accountSkipMeter: p.accountSkipMeter, @@ -136,6 +145,9 @@ func (p *triePrefetcher) copy() *triePrefetcher { // If the prefetcher is already a copy, duplicate the data if p.fetches != nil { for root, fetch := range p.fetches { + if fetch == nil { + continue + } copy.fetches[root] = p.db.CopyTrie(fetch) } return copy @@ -171,7 +183,7 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { if p.fetches != nil { trie := p.fetches[id] if trie == nil { - p.deliveryMissMeter.Mark(1) + p.deliveryCopyMissMeter.Mark(1) return nil } return p.db.CopyTrie(trie) @@ -179,7 +191,7 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { // Otherwise the prefetcher is active, bail if no trie was prefetched for this root fetcher := p.fetchers[id] if fetcher == nil { - p.deliveryMissMeter.Mark(1) + p.deliveryRequestMissMeter.Mark(1) return nil } // Interrupt the prefetcher if it's by any chance still running and return @@ -188,7 +200,7 @@ func (p *triePrefetcher) trie(owner common.Hash, root common.Hash) Trie { trie := fetcher.peek() if trie == nil { - 
p.deliveryMissMeter.Mark(1) + p.deliveryWaitMissMeter.Mark(1) return nil } return trie diff --git a/coreth/core/state_manager.go b/coreth/core/state_manager.go index db0a7ba3..02521aa5 100644 --- a/coreth/core/state_manager.go +++ b/coreth/core/state_manager.go @@ -121,7 +121,7 @@ type cappedMemoryTrieWriter struct { imageCap common.StorageSize commitInterval uint64 - tipBuffer *BoundedBuffer + tipBuffer *BoundedBuffer[common.Hash] } func (cm *cappedMemoryTrieWriter) InsertTrie(block *types.Block) error { @@ -192,8 +192,8 @@ func (cm *cappedMemoryTrieWriter) RejectTrie(block *types.Block) error { func (cm *cappedMemoryTrieWriter) Shutdown() error { // If [tipBuffer] entry is empty, no need to do any cleanup on // shutdown. - last := cm.tipBuffer.Last() - if last == (common.Hash{}) { + last, exists := cm.tipBuffer.Last() + if !exists { return nil } diff --git a/coreth/core/state_processor.go b/coreth/core/state_processor.go index c6874e31..68d118fd 100644 --- a/coreth/core/state_processor.go +++ b/coreth/core/state_processor.go @@ -31,7 +31,6 @@ import ( "math/big" "github.com/ava-labs/coreth/consensus" - "github.com/ava-labs/coreth/consensus/misc" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" @@ -81,10 +80,6 @@ func (p *StateProcessor) Process(block *types.Block, parent *types.Header, state // Configure any stateful precompiles that should go into effect during this block. 
p.config.CheckConfigurePrecompiles(new(big.Int).SetUint64(parent.Time), block, statedb) - // Mutate the block and state according to any hard-fork specs - if p.config.DAOForkSupport && p.config.DAOForkBlock != nil && p.config.DAOForkBlock.Cmp(block.Number()) == 0 { - misc.ApplyDAOHardFork(statedb) - } blockContext := NewEVMBlockContext(header, p.bc, nil) vmenv := vm.NewEVM(blockContext, vm.TxContext{}, statedb, p.config, cfg) // Iterate over and process the individual transactions diff --git a/coreth/core/state_processor_test.go b/coreth/core/state_processor_test.go index 1ca66bf3..05e90592 100644 --- a/coreth/core/state_processor_test.go +++ b/coreth/core/state_processor_test.go @@ -27,6 +27,7 @@ package core import ( + "fmt" "math/big" "testing" @@ -42,31 +43,52 @@ import ( "golang.org/x/crypto/sha3" ) +var ( + config = params.TestChainConfig + signer = types.LatestSigner(config) + testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + testAddr = common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7") +) + +func makeTx(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, testKey) + return tx +} + +func makeContractTx(nonce uint64, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { + tx, _ := types.SignTx(types.NewContractCreation(nonce, amount, gasLimit, gasPrice, data), signer, testKey) + return tx +} + +func mkDynamicTx(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction { + tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ + Nonce: nonce, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Gas: gasLimit, + To: &to, + Value: big.NewInt(0), + }), signer, testKey) + return tx +} + +func mkDynamicCreationTx(nonce uint64, gasLimit uint64, gasTipCap, gasFeeCap 
*big.Int, data []byte) *types.Transaction { + tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ + Nonce: nonce, + GasTipCap: gasTipCap, + GasFeeCap: gasFeeCap, + Gas: gasLimit, + Value: big.NewInt(0), + Data: data, + }), signer, testKey) + return tx +} + // TestStateProcessorErrors tests the output from the 'core' errors // as defined in core/error.go. These errors are generated when the // blockchain imports bad blocks, meaning blocks which have valid headers but // contain invalid transactions func TestStateProcessorErrors(t *testing.T) { - var ( - config = params.TestChainConfig - signer = types.LatestSigner(config) - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") - ) - var makeTx = func(nonce uint64, to common.Address, amount *big.Int, gasLimit uint64, gasPrice *big.Int, data []byte) *types.Transaction { - tx, _ := types.SignTx(types.NewTransaction(nonce, to, amount, gasLimit, gasPrice, data), signer, testKey) - return tx - } - var mkDynamicTx = func(nonce uint64, to common.Address, gasLimit uint64, gasTipCap, gasFeeCap *big.Int) *types.Transaction { - tx, _ := types.SignTx(types.NewTx(&types.DynamicFeeTx{ - Nonce: nonce, - GasTipCap: gasTipCap, - GasFeeCap: gasFeeCap, - Gas: gasLimit, - To: &to, - Value: big.NewInt(0), - }), signer, testKey) - return tx - } { // Tests against a 'recent' chain definition var ( db = rawdb.NewMemoryDatabase() @@ -74,14 +96,13 @@ func TestStateProcessorErrors(t *testing.T) { Config: config, Alloc: GenesisAlloc{ common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ - Balance: big.NewInt(2000000000000000000), // 2 ether + Balance: big.NewInt(4000000000000000000), // 4 ether Nonce: 0, }, }, - GasLimit: params.ApricotPhase1GasLimit, + GasLimit: params.CortinaGasLimit, } - genesis = gspec.MustCommit(db) - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ = 
NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() bigNumber := new(big.Int).SetBytes(common.FromHex("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff")) @@ -106,21 +127,21 @@ func TestStateProcessorErrors(t *testing.T) { }, { // ErrGasLimitReached txs: []*types.Transaction{ - makeTx(0, common.Address{}, big.NewInt(0), 8000001, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), 15000001, big.NewInt(225000000000), nil), }, - want: "could not apply tx 0 [0xfbe38b817aaa760c2766b56c019fcdba506560a28fd41c69ae96bdaa4569e317]: gas limit reached", + want: "could not apply tx 0 [0x1354370681d2ab68247073d889736f8be4a8d87e35956f0c02658d3670803a66]: gas limit reached", }, { // ErrInsufficientFundsForTransfer txs: []*types.Transaction{ - makeTx(0, common.Address{}, big.NewInt(2000000000000000000), params.TxGas, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(4000000000000000000), params.TxGas, big.NewInt(225000000000), nil), }, - want: "could not apply tx 0 [0xae1601ef55b676ebb824ee7e16a0d14af725b7f9cf5ec79e21f14833c26b5b35]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 2000000000000000000 want 2004725000000000000", + want: "could not apply tx 0 [0x1632f2bffcce84a5c91dd8ab2016128fccdbcfbe0485d2c67457e1c793c72a4b]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 4004725000000000000", }, { // ErrInsufficientFunds txs: []*types.Transaction{ makeTx(0, common.Address{}, big.NewInt(0), params.TxGas, big.NewInt(900000000000000000), nil), }, - want: "could not apply tx 0 [0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 2000000000000000000 want 18900000000000000000000", + want: "could not apply tx 0 
[0x4a69690c4b0cd85e64d0d9ea06302455b01e10a83db964d60281739752003440]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 18900000000000000000000", }, // ErrGasUintOverflow // One missing 'core' error is ErrGasUintOverflow: "gas uint64 overflow", @@ -134,9 +155,9 @@ func TestStateProcessorErrors(t *testing.T) { }, { // ErrGasLimitReached txs: []*types.Transaction{ - makeTx(0, common.Address{}, big.NewInt(0), params.TxGas*381, big.NewInt(225000000000), nil), + makeTx(0, common.Address{}, big.NewInt(0), params.TxGas*762, big.NewInt(225000000000), nil), }, - want: "could not apply tx 0 [0x9ee548e001369418ae53aaa11b5d823f081cc7fa9c9a7ee71a978ae17a2aece0]: gas limit reached", + want: "could not apply tx 0 [0x76c07cc2b32007eb1a9c3fa066d579a3d77ec4ecb79bbc266624a601d7b08e46]: gas limit reached", }, { // ErrFeeCapTooLow txs: []*types.Transaction{ @@ -169,18 +190,18 @@ func TestStateProcessorErrors(t *testing.T) { // This test is designed to have the effective cost be covered by the balance, but // the extended requirement on FeeCap*gas < balance to fail txs: []*types.Transaction{ - mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(100000000000000)), + mkDynamicTx(0, common.Address{}, params.TxGas, big.NewInt(1), big.NewInt(200000000000000)), }, - want: "could not apply tx 0 [0x3388378ed60640e75d2edf728d5528a305f599997abc4f23ec46b351b6197499]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 2000000000000000000 want 2100000000000000000", + want: "could not apply tx 0 [0xa3840aa3cad37eec8607b9f4846813d4a80e70b462a793fa21f64138156f849b]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 4200000000000000000", }, { // Another ErrInsufficientFunds, this one to ensure that feecap/tip of max u256 is allowed txs: []*types.Transaction{ mkDynamicTx(0, 
common.Address{}, params.TxGas, bigNumber, bigNumber), }, - want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 2000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000", + want: "could not apply tx 0 [0xd82a0c2519acfeac9a948258c47e784acd20651d9d80f9a1c67b4137651c3a24]: insufficient funds for gas * price + value: address 0x71562b71999873DB5b286dF957af199Ec94617F7 have 4000000000000000000 want 2431633873983640103894990685182446064918669677978451844828609264166175722438635000", }, } { - block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -219,8 +240,7 @@ func TestStateProcessorErrors(t *testing.T) { }, GasLimit: params.ApricotPhase1GasLimit, } - genesis = gspec.MustCommit(db) - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() for i, tt := range []struct { @@ -234,7 +254,7 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: transaction type not supported", }, } { - block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -258,10 +278,9 @@ func TestStateProcessorErrors(t *testing.T) { Code: common.FromHex("0xB0B0FACE"), }, }, - GasLimit: 
params.ApricotPhase1GasLimit, + GasLimit: params.CortinaGasLimit, } - genesis = gspec.MustCommit(db) - blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec.Config, dummy.NewFaker(), vm.Config{}, common.Hash{}) + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) ) defer blockchain.Stop() for i, tt := range []struct { @@ -275,7 +294,80 @@ func TestStateProcessorErrors(t *testing.T) { want: "could not apply tx 0 [0x88626ac0d53cb65308f2416103c62bb1f18b805573d4f96a3640bbbfff13c14f]: sender not an eoa: address 0x71562b71999873DB5b286dF957af199Ec94617F7, codehash: 0x9280914443471259d4570a8661015ae4a5b80186dbc619658fb494bebc3da3d1", }, } { - block := GenerateBadBlock(genesis, dummy.NewFaker(), tt.txs, gspec.Config) + block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) + _, err := blockchain.InsertChain(types.Blocks{block}) + if err == nil { + t.Fatal("block imported without errors") + } + if have, want := err.Error(), tt.want; have != want { + t.Errorf("test %d:\nhave \"%v\"\nwant \"%v\"\n", i, have, want) + } + } + } + + // ErrMaxInitCodeSizeExceeded, for this we need extra Shanghai (DUpgrade/EIP-3860) enabled. 
+ { + var ( + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + Config: ¶ms.ChainConfig{ + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + MuirGlacierBlock: big.NewInt(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), + ApricotPhase2BlockTimestamp: big.NewInt(0), + ApricotPhase3BlockTimestamp: big.NewInt(0), + ApricotPhase4BlockTimestamp: big.NewInt(0), + ApricotPhase5BlockTimestamp: big.NewInt(0), + ApricotPhasePre6BlockTimestamp: big.NewInt(0), + ApricotPhase6BlockTimestamp: big.NewInt(0), + ApricotPhasePost6BlockTimestamp: big.NewInt(0), + BanffBlockTimestamp: big.NewInt(0), + CortinaBlockTimestamp: big.NewInt(0), + DUpgradeBlockTimestamp: big.NewInt(0), + }, + Alloc: GenesisAlloc{ + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ + Balance: big.NewInt(1000000000000000000), // 1 ether + Nonce: 0, + }, + }, + GasLimit: params.CortinaGasLimit, + } + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, dummy.NewFaker(), vm.Config{}, common.Hash{}, false) + tooBigInitCode = [params.MaxInitCodeSize + 1]byte{} + smallInitCode = [320]byte{} + ) + defer blockchain.Stop() + for i, tt := range []struct { + txs []*types.Transaction + want string + }{ + { // ErrMaxInitCodeSizeExceeded + txs: []*types.Transaction{ + + mkDynamicCreationTx(0, 500000, common.Big0, big.NewInt(params.ApricotPhase3InitialBaseFee), tooBigInitCode[:]), + }, + want: "could not apply tx 0 [0x18a05f40f29ff16d5287f6f88b21c9f3c7fbc268f707251144996294552c4cd6]: max initcode size exceeded: code size 49153 limit 49152", + }, + { // ErrIntrinsicGas: Not enough gas to cover 
init code + txs: []*types.Transaction{ + mkDynamicCreationTx(0, 54299, common.Big0, big.NewInt(params.ApricotPhase3InitialBaseFee), smallInitCode[:]), + }, + want: "could not apply tx 0 [0x849278f616d51ab56bba399551317213ce7a10e4d9cbc3d14bb663e50cb7ab99]: intrinsic gas too low: have 54299, want 54300", + }, + } { + block := GenerateBadBlock(gspec.ToBlock(nil), dummy.NewFaker(), tt.txs, gspec.Config) _, err := blockchain.InsertChain(types.Blocks{block}) if err == nil { t.Fatal("block imported without errors") @@ -332,3 +424,113 @@ func GenerateBadBlock(parent *types.Block, engine consensus.Engine, txs types.Tr // Assemble and return the final block for sealing return types.NewBlock(header, txs, nil, receipts, trie.NewStackTrie(nil), nil, true) } + +func CostOfUsingGasLimitEachBlock(gspec *Genesis) { + genesis := gspec.ToBlock(nil) + totalPaid := big.NewInt(0) + parent := genesis.Header() + gasLimit := new(big.Int).SetUint64(gspec.GasLimit) + totalGasUsed := big.NewInt(0) + + for i := 1; i < 20; i++ { + header := nextBlock(gspec.Config, parent, gspec.GasLimit) + baseFee := header.BaseFee + gasCost := new(big.Int).Mul(baseFee, gasLimit) + totalGasUsed = new(big.Int).Add(totalGasUsed, gasLimit) + totalPaid = new(big.Int).Add(totalPaid, gasCost) + parent = header + + avg := new(big.Int).Div(totalPaid, totalGasUsed) + fmt.Printf( + "Number: %d, BaseFee: %vGWei, TotalGasUsed: %d, TotalPaid (Ether): %d, AvgGasPrice: %dGWei\n", + header.Number, + new(big.Int).Div(baseFee, big.NewInt(params.GWei)), // baseFee in GWei + totalGasUsed, + new(big.Int).Div(totalPaid, big.NewInt(params.Ether)), // totalPaid in Ether + new(big.Int).Div(avg, big.NewInt(params.GWei)), // avgGasPrice in GWei + ) + } +} + +func ExampleCostOfUsingGasLimitEachBlock() { + banff := &Genesis{ + Config: params.TestBanffChainConfig, + Alloc: GenesisAlloc{ + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ + Balance: big.NewInt(4000000000000000000), // 4 ether + Nonce: 0, + }, 
+ }, + BaseFee: big.NewInt(225 * params.GWei), + GasLimit: params.ApricotPhase1GasLimit, + } + cortina := &Genesis{ + Config: params.TestCortinaChainConfig, + Alloc: GenesisAlloc{ + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ + Balance: big.NewInt(4000000000000000000), // 4 ether + Nonce: 0, + }, + }, + BaseFee: big.NewInt(225 * params.GWei), + GasLimit: params.CortinaGasLimit, + } + fmt.Println("----- banff ----") + CostOfUsingGasLimitEachBlock(banff) + fmt.Println("----- cortina ----") + CostOfUsingGasLimitEachBlock(cortina) + // Output: + // ----- banff ---- + // Number: 1, BaseFee: 225GWei, TotalGasUsed: 8000000, TotalPaid (Ether): 1, AvgGasPrice: 225GWei + // Number: 2, BaseFee: 222GWei, TotalGasUsed: 16000000, TotalPaid (Ether): 3, AvgGasPrice: 223GWei + // Number: 3, BaseFee: 222GWei, TotalGasUsed: 24000000, TotalPaid (Ether): 5, AvgGasPrice: 223GWei + // Number: 4, BaseFee: 226GWei, TotalGasUsed: 32000000, TotalPaid (Ether): 7, AvgGasPrice: 223GWei + // Number: 5, BaseFee: 233GWei, TotalGasUsed: 40000000, TotalPaid (Ether): 9, AvgGasPrice: 225GWei + // Number: 6, BaseFee: 240GWei, TotalGasUsed: 48000000, TotalPaid (Ether): 10, AvgGasPrice: 228GWei + // Number: 7, BaseFee: 248GWei, TotalGasUsed: 56000000, TotalPaid (Ether): 12, AvgGasPrice: 231GWei + // Number: 8, BaseFee: 256GWei, TotalGasUsed: 64000000, TotalPaid (Ether): 14, AvgGasPrice: 234GWei + // Number: 9, BaseFee: 264GWei, TotalGasUsed: 72000000, TotalPaid (Ether): 17, AvgGasPrice: 237GWei + // Number: 10, BaseFee: 272GWei, TotalGasUsed: 80000000, TotalPaid (Ether): 19, AvgGasPrice: 241GWei + // Number: 11, BaseFee: 281GWei, TotalGasUsed: 88000000, TotalPaid (Ether): 21, AvgGasPrice: 244GWei + // Number: 12, BaseFee: 289GWei, TotalGasUsed: 96000000, TotalPaid (Ether): 23, AvgGasPrice: 248GWei + // Number: 13, BaseFee: 298GWei, TotalGasUsed: 104000000, TotalPaid (Ether): 26, AvgGasPrice: 252GWei + // Number: 14, BaseFee: 308GWei, TotalGasUsed: 112000000, 
TotalPaid (Ether): 28, AvgGasPrice: 256GWei + // Number: 15, BaseFee: 318GWei, TotalGasUsed: 120000000, TotalPaid (Ether): 31, AvgGasPrice: 260GWei + // Number: 16, BaseFee: 328GWei, TotalGasUsed: 128000000, TotalPaid (Ether): 33, AvgGasPrice: 264GWei + // Number: 17, BaseFee: 338GWei, TotalGasUsed: 136000000, TotalPaid (Ether): 36, AvgGasPrice: 269GWei + // Number: 18, BaseFee: 349GWei, TotalGasUsed: 144000000, TotalPaid (Ether): 39, AvgGasPrice: 273GWei + // Number: 19, BaseFee: 360GWei, TotalGasUsed: 152000000, TotalPaid (Ether): 42, AvgGasPrice: 278GWei + // ----- cortina ---- + // Number: 1, BaseFee: 225GWei, TotalGasUsed: 15000000, TotalPaid (Ether): 3, AvgGasPrice: 225GWei + // Number: 2, BaseFee: 225GWei, TotalGasUsed: 30000000, TotalPaid (Ether): 6, AvgGasPrice: 225GWei + // Number: 3, BaseFee: 231GWei, TotalGasUsed: 45000000, TotalPaid (Ether): 10, AvgGasPrice: 227GWei + // Number: 4, BaseFee: 244GWei, TotalGasUsed: 60000000, TotalPaid (Ether): 13, AvgGasPrice: 231GWei + // Number: 5, BaseFee: 264GWei, TotalGasUsed: 75000000, TotalPaid (Ether): 17, AvgGasPrice: 237GWei + // Number: 6, BaseFee: 286GWei, TotalGasUsed: 90000000, TotalPaid (Ether): 22, AvgGasPrice: 246GWei + // Number: 7, BaseFee: 310GWei, TotalGasUsed: 105000000, TotalPaid (Ether): 26, AvgGasPrice: 255GWei + // Number: 8, BaseFee: 336GWei, TotalGasUsed: 120000000, TotalPaid (Ether): 31, AvgGasPrice: 265GWei + // Number: 9, BaseFee: 364GWei, TotalGasUsed: 135000000, TotalPaid (Ether): 37, AvgGasPrice: 276GWei + // Number: 10, BaseFee: 394GWei, TotalGasUsed: 150000000, TotalPaid (Ether): 43, AvgGasPrice: 288GWei + // Number: 11, BaseFee: 427GWei, TotalGasUsed: 165000000, TotalPaid (Ether): 49, AvgGasPrice: 300GWei + // Number: 12, BaseFee: 463GWei, TotalGasUsed: 180000000, TotalPaid (Ether): 56, AvgGasPrice: 314GWei + // Number: 13, BaseFee: 501GWei, TotalGasUsed: 195000000, TotalPaid (Ether): 64, AvgGasPrice: 328GWei + // Number: 14, BaseFee: 543GWei, TotalGasUsed: 210000000, TotalPaid 
(Ether): 72, AvgGasPrice: 344GWei + // Number: 15, BaseFee: 588GWei, TotalGasUsed: 225000000, TotalPaid (Ether): 81, AvgGasPrice: 360GWei + // Number: 16, BaseFee: 637GWei, TotalGasUsed: 240000000, TotalPaid (Ether): 90, AvgGasPrice: 377GWei + // Number: 17, BaseFee: 690GWei, TotalGasUsed: 255000000, TotalPaid (Ether): 101, AvgGasPrice: 396GWei + // Number: 18, BaseFee: 748GWei, TotalGasUsed: 270000000, TotalPaid (Ether): 112, AvgGasPrice: 415GWei + // Number: 19, BaseFee: 810GWei, TotalGasUsed: 285000000, TotalPaid (Ether): 124, AvgGasPrice: 436GWei +} + +func nextBlock(config *params.ChainConfig, parent *types.Header, gasUsed uint64) *types.Header { + header := &types.Header{ + ParentHash: parent.Hash(), + Number: new(big.Int).Add(parent.Number, common.Big1), + Time: parent.Time + 2, + } + if config.IsApricotPhase3(new(big.Int).SetUint64(header.Time)) { + header.Extra, header.BaseFee, _ = dummy.CalcBaseFee(config, parent, header.Time) + } + header.GasUsed = gasUsed + return header +} diff --git a/coreth/core/state_transition.go b/coreth/core/state_transition.go index 3c86ce76..d512ffbe 100644 --- a/coreth/core/state_transition.go +++ b/coreth/core/state_transition.go @@ -29,7 +29,6 @@ package core import ( "bytes" "encoding/binary" - "errors" "fmt" "math" "math/big" @@ -46,25 +45,26 @@ import ( var emptyCodeHash = crypto.Keccak256Hash(nil) -/* -The State Transitioning Model - -A state transition is a change made when a transaction is applied to the current world state -The state transitioning model does all the necessary work to work out a valid new state root. 
- -1) Nonce handling -2) Pre pay gas -3) Create a new state object if the recipient is \0*32 -4) Value transfer -== If contract creation == - - 4a) Attempt to run transaction data - 4b) If valid, use result as code for the new state object - -== end == -5) Run Script section -6) Derive new state root -*/ +// The State Transitioning Model +// +// A state transition is a change made when a transaction is applied to the current world +// state. The state transitioning model does all the necessary work to work out a valid new +// state root. +// +// 1. Nonce handling +// 2. Pre pay gas +// 3. Create a new state object if the recipient is \0*32 +// 4. Value transfer +// +// == If contract creation == +// +// 4a. Attempt to run transaction data +// 4b. If valid, use result as code for the new state object +// +// == end == +// +// 5. Run Script section +// 6. Derive new state root type StateTransition struct { gp *GasPool msg Message @@ -156,7 +156,7 @@ func (st *StateTransition) AddBalance(addr common.Address, amount *big.Int) { } // IntrinsicGas computes the 'intrinsic gas' for a message with the given data. 
-func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool) (uint64, error) { +func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation bool, isHomestead, isEIP2028 bool, isEIP3860 bool) (uint64, error) { // Set the starting gas for the raw transaction var gas uint64 if isContractCreation && isHomestead { @@ -164,8 +164,9 @@ func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation b } else { gas = params.TxGas } + dataLen := uint64(len(data)) // Bump the required gas by the amount of transactional data - if len(data) > 0 { + if dataLen > 0 { // Zero and non-zero bytes are priced differently var nz uint64 for _, byt := range data { @@ -183,11 +184,19 @@ func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation b } gas += nz * nonZeroGas - z := uint64(len(data)) - nz + z := dataLen - nz if (math.MaxUint64-gas)/params.TxDataZeroGas < z { return 0, ErrGasUintOverflow } gas += z * params.TxDataZeroGas + + if isContractCreation && isEIP3860 { + lenWords := toWordSize(dataLen) + if (math.MaxUint64-gas)/params.InitCodeWordGas < lenWords { + return 0, ErrGasUintOverflow + } + gas += lenWords * params.InitCodeWordGas + } } if accessList != nil { gas += uint64(len(accessList)) * params.TxAccessListAddressGas @@ -196,6 +205,15 @@ func IntrinsicGas(data []byte, accessList types.AccessList, isContractCreation b return gas, nil } +// toWordSize returns the ceiled word size required for init code payment calculation. +func toWordSize(size uint64) uint64 { + if size > math.MaxUint64-31 { + return math.MaxUint64/32 + 1 + } + + return (size + 31) / 32 +} + // NewStateTransition initialises and returns a new state transition object. 
func NewStateTransition(evm *vm.EVM, msg Message, gp *GasPool) *StateTransition { return &StateTransition{ @@ -307,13 +325,10 @@ func (st *StateTransition) preCheck() error { // TransitionDb will transition the state by applying the current message and // returning the evm execution result with following fields. // -// - used gas: -// total gas used (including gas being refunded) -// - returndata: -// the returned data from evm -// - concrete execution error: -// various **EVM** error which aborts the execution, -// e.g. ErrOutOfGas, ErrExecutionReverted +// - used gas: total gas used (including gas being refunded) +// - returndata: the returned data from evm +// - concrete execution error: various EVM errors which abort the execution, e.g. +// ErrOutOfGas, ErrExecutionReverted // // However if any consensus issue encountered, return the error directly with // nil evm execution result. @@ -348,7 +363,7 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { ) // Check clauses 4-5, subtract intrinsic gas if everything is correct - gas, err := IntrinsicGas(st.data, st.msg.AccessList(), contractCreation, rules.IsHomestead, rules.IsIstanbul) + gas, err := IntrinsicGas(st.data, st.msg.AccessList(), contractCreation, rules.IsHomestead, rules.IsIstanbul, rules.IsDUpgrade) if err != nil { return nil, err } @@ -362,6 +377,11 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { return nil, fmt.Errorf("%w: address %v", ErrInsufficientFundsForTransfer, msg.From().Hex()) } + // Check whether the init code size has been exceeded. + if rules.IsDUpgrade && contractCreation && len(st.data) > params.MaxInitCodeSize { + return nil, fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(st.data), params.MaxInitCodeSize) + } + // Set up the initial access list. 
if rules.IsApricotPhase2 { st.state.PrepareAccessList(msg.From(), msg.To(), vm.ActivePrecompiles(rules), msg.AccessList()) @@ -395,13 +415,6 @@ func (st *StateTransition) TransitionDb() (*ExecutionResult, error) { } } } - if errors.Is(vmerr, vmerrs.ErrToAddrProhibitedSoft) { // Only invalidate soft error here - return &ExecutionResult{ - UsedGas: st.gasUsed(), - Err: vmerr, - ReturnData: ret, - }, vmerr - } st.refundGas(rules.IsApricotPhase1) if vmerr == nil && IsPrioritisedContractCall(chainID, timestamp, msg.To(), st.data, ret, st.initialGas) { diff --git a/coreth/core/state_transition_ext_test.go b/coreth/core/state_transition_ext_test.go new file mode 100644 index 00000000..651a2298 --- /dev/null +++ b/coreth/core/state_transition_ext_test.go @@ -0,0 +1,224 @@ +package core + +import ( + "math/big" + "testing" + "time" + + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/state/snapshot" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/eth/tracers/logger" + "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/params" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" +) + +// Test prioritized contract (Submitter) being partially refunded when fee is high +func TestStateTransitionPrioritizedContract(t *testing.T) { + configs := []*params.ChainConfig{params.CostonChainConfig, params.CostwoChainConfig, params.SongbirdChainConfig, params.FlareChainConfig} + + for _, config := range configs { + key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + from := crypto.PubkeyToAddress(key.PublicKey) + gas := uint64(3000000) + to := prioritisedSubmitterContractAddress + daemon := common.HexToAddress(GetDaemonContractAddr(new(big.Int))) + signer := types.LatestSignerForChainID(config.ChainID) + tx, err := types.SignNewTx(key, signer, + &types.LegacyTx{ + 
Nonce: 1, + GasPrice: big.NewInt(1250000000000000), + Gas: gas, + To: &to, + }) + if err != nil { + t.Fatal(err) + } + txContext := vm.TxContext{ + Origin: from, + GasPrice: tx.GasPrice(), + } + context := vm.BlockContext{ + CanTransfer: CanTransfer, + Transfer: Transfer, + Coinbase: common.HexToAddress("0x0100000000000000000000000000000000000000"), + BlockNumber: big.NewInt(5), + Time: big.NewInt(time.Date(2024, time.May, 1, 0, 0, 0, 0, time.UTC).Unix()), // Time after setting Submitter contract address on all chains + Difficulty: big.NewInt(0xffffffff), + GasLimit: gas, + BaseFee: big.NewInt(8), + } + alloc := GenesisAlloc{} + balance := new(big.Int) + balance.SetString("10000000000000000000000000000000000", 10) + alloc[from] = GenesisAccount{ + Nonce: 1, + Code: []byte{}, + Balance: balance, + } + alloc[to] = GenesisAccount{ + Nonce: 2, + Code: code, + Balance: balance, + } + alloc[daemon] = GenesisAccount{ + Nonce: 3, + Code: daemonCode, + Balance: balance, + } + _, statedb := makePreState(rawdb.NewMemoryDatabase(), alloc, false) + + // Create the tracer, the EVM environment and run it + tracer := logger.NewStructLogger(&logger.Config{ + Debug: false, + }) + cfg := vm.Config{Debug: true, Tracer: tracer} + evm := vm.NewEVM(context, txContext, statedb, config, cfg) + msg, err := tx.AsMessage(signer, nil) + if err != nil { + t.Fatalf("failed to prepare transaction for tracing: %v", err) + } + + st := NewStateTransition(evm, msg, new(GasPool).AddGas(tx.Gas())) + + balanceBefore := st.state.GetBalance(st.msg.From()) + _, err = st.TransitionDb() + if err != nil { + t.Fatal(err) + } + balanceAfter := st.state.GetBalance(st.msg.From()) + + // max fee (funds above which are returned) depends on the chain used + _, limit, _, _, _ := stateTransitionVariants.GetValue(config.ChainID)(st) + maxFee := new(big.Int).Mul(new(big.Int).SetUint64(params.TxGas), new(big.Int).SetUint64(limit)) + diff := new(big.Int).Sub(balanceBefore, balanceAfter) + + if maxFee.Cmp(diff) != 0 { + 
t.Fatalf("want %v, have %v", maxFee, diff) + } + } +} + +// Test that daemon contract is invoked after a transaction is successfully executed +func TestStateTransitionDaemon(t *testing.T) { + configs := []*params.ChainConfig{params.CostonChainConfig, params.CostwoChainConfig, params.SongbirdChainConfig, params.FlareChainConfig} + + for _, config := range configs { + key, _ := crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + from := crypto.PubkeyToAddress(key.PublicKey) + gas := uint64(3000000) + daemon := common.HexToAddress(GetDaemonContractAddr(new(big.Int))) + to := common.HexToAddress("0x7e22C4A78675ae3Be11Fb389Da9b9fb15996bb6a") + signer := types.LatestSignerForChainID(config.ChainID) + tx, err := types.SignNewTx(key, signer, + &types.LegacyTx{ + Nonce: 1, + GasPrice: big.NewInt(1250000000000000), + Gas: gas, + To: &to, + }) + if err != nil { + t.Fatal(err) + } + txContext := vm.TxContext{ + Origin: from, + GasPrice: tx.GasPrice(), + } + context := vm.BlockContext{ + CanTransfer: CanTransfer, + Transfer: Transfer, + Coinbase: common.HexToAddress("0x0100000000000000000000000000000000000000"), + BlockNumber: big.NewInt(5), + Time: big.NewInt(time.Date(2024, time.May, 1, 0, 0, 0, 0, time.UTC).Unix()), + Difficulty: big.NewInt(0xffffffff), + GasLimit: gas, + BaseFee: big.NewInt(8), + } + alloc := GenesisAlloc{} + balance := new(big.Int) + balance.SetString("10000000000000000000000000000000000", 10) + alloc[from] = GenesisAccount{ + Nonce: 1, + Code: []byte{}, + Balance: balance, + } + alloc[to] = GenesisAccount{ + Nonce: 2, + Code: code, // Reuse the code from the previous test + Balance: balance, + } + alloc[daemon] = GenesisAccount{ + Nonce: 3, + Code: daemonCode, + Balance: balance, + } + _, statedb := makePreState(rawdb.NewMemoryDatabase(), alloc, false) + + // Create the tracer, the EVM environment and run it + tracer := logger.NewStructLogger(&logger.Config{ + Debug: false, + }) + cfg := vm.Config{Debug: true, Tracer: 
tracer} + evm := vm.NewEVM(context, txContext, statedb, config, cfg) + msg, err := tx.AsMessage(signer, nil) + if err != nil { + t.Fatalf("failed to prepare transaction for tracing: %v", err) + } + + st := NewStateTransition(evm, msg, new(GasPool).AddGas(tx.Gas())) + + balanceBefore := st.state.GetBalance(daemon) + _, err = st.TransitionDb() + if err != nil { + t.Fatal(err) + } + balanceAfter := st.state.GetBalance(daemon) + + if balanceAfter.Cmp(balanceBefore) <= 0 { + t.Fatalf("want daemon balance increase, have %v before and %v after", balanceBefore, balanceAfter) + } + } +} + +// This is a copy of the function from tests/state_test_util.go, to create a starting state for the test EVM +// We need to copy it here due to import cycle. +func makePreState(db ethdb.Database, accounts GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) { + sdb := state.NewDatabase(db) + statedb, _ := state.New(common.Hash{}, sdb, nil) + for addr, a := range accounts { + statedb.SetCode(addr, a.Code) + statedb.SetNonce(addr, a.Nonce) + statedb.SetBalance(addr, a.Balance) + for k, v := range a.Storage { + statedb.SetState(addr, k, v) + } + } + // Commit and re-open to start with a clean state. 
+ root, _ := statedb.Commit(false, false) + + var snaps *snapshot.Tree + if snapshotter { + snaps, _ = snapshot.New(db, sdb.TrieDB(), 1, common.Hash{}, root, false, true, false) + } + statedb, _ = state.New(root, sdb, snaps) + return snaps, statedb +} + +// This is a simple EVM code that returns 0x01 (necessary for a prioritized contract to be refunded) +var code = []byte{ + byte(vm.PUSH1), 0x01, byte(vm.PUSH1), 0x0, byte(vm.MSTORE8), // store 1 memory at offset 0 for return + byte(vm.PUSH1), 0x01, byte(vm.PUSH1), 0x0, // set return value size to 1, offset 0 + byte(vm.RETURN), // return 0x01 +} + +// return a 32-bit value, set to 1 for daemon balance change +var daemonCode = []byte{ + byte(vm.PUSH1), 0x01, byte(vm.PUSH1), 0x0, byte(vm.MSTORE), // store 1 memory at offset 0 for return + byte(vm.PUSH1), 0x20, byte(vm.PUSH1), 0x0, // set return value size to 32 bits, offset 0 + byte(vm.RETURN), // return 0x0..01 +} diff --git a/coreth/core/state_transition_test.go b/coreth/core/state_transition_test.go new file mode 100644 index 00000000..028a8f09 --- /dev/null +++ b/coreth/core/state_transition_test.go @@ -0,0 +1,248 @@ +// (c) 2019-2021, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. + +package core + +import ( + "encoding/hex" + "math/big" + "testing" + + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/params" + "github.com/ethereum/go-ethereum/common" + ethCrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +/* +//SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +// FromWithinContract creates a contract with a single fallback function that invokes Native Asset Call +// so that a transaction does not need to specify any input data to hit the call. +contract FromWithinContract { + fallback() external { + address precompile = 0x0100000000000000000000000000000000000002; + precompile.call(abi.encodePacked()); + } +} + + +// FromWithinContractConstructor creates a contract that hits Native Asset Call within the contract constructor. 
+contract FromWithinContractConstructor { + constructor () { + address precompile = 0x0100000000000000000000000000000000000002; + precompile.call(abi.encodePacked()); + } +} +*/ + +type stateTransitionTest struct { + config *params.ChainConfig + txs []*types.Transaction + gasUsed []uint64 + want string +} + +func executeStateTransitionTest(t *testing.T, st stateTransitionTest) { + require := require.New(t) + + require.Equal(len(st.txs), len(st.gasUsed), "length of gas used must match length of txs") + + var ( + db = rawdb.NewMemoryDatabase() + gspec = &Genesis{ + Config: st.config, + Alloc: GenesisAlloc{ + common.HexToAddress("0x71562b71999873DB5b286dF957af199Ec94617F7"): GenesisAccount{ + Balance: big.NewInt(2000000000000000000), // 2 ether + Nonce: 0, + }, + }, + GasLimit: params.ApricotPhase1GasLimit, + } + genesis = gspec.ToBlock(nil) + engine = dummy.NewFaker() + blockchain, _ = NewBlockChain(db, DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) + ) + defer blockchain.Stop() + + statedb, err := state.New(genesis.Root(), blockchain.stateCache, blockchain.snaps) + require.NoError(err) + + block := GenerateBadBlock(genesis, engine, st.txs, blockchain.chainConfig) + receipts, _, _, err := blockchain.processor.Process(block, genesis.Header(), statedb, blockchain.vmConfig) + + if st.want == "" { + // If no error is expected, require no error and verify the correct gas used amounts from the receipts + require.NoError(err) + + for i, gasUsed := range st.gasUsed { + require.Equal(gasUsed, receipts[i].GasUsed, "expected gas used to match for index %d", i) + } + } else { + require.ErrorContains(err, st.want) + } +} + +func TestNativeAssetContractCall(t *testing.T) { + require := require.New(t) + + data, err := 
hex.DecodeString("608060405234801561001057600080fd5b5061016e806100206000396000f3fe608060405234801561001057600080fd5b50600073010000000000000000000000000000000000000290508073ffffffffffffffffffffffffffffffffffffffff166040516020016040516020818303038152906040526040516100639190610121565b6000604051808303816000865af19150503d80600081146100a0576040519150601f19603f3d011682016040523d82523d6000602084013e6100a5565b606091505b005b600081519050919050565b600081905092915050565b60005b838110156100db5780820151818401526020810190506100c0565b838111156100ea576000848401525b50505050565b60006100fb826100a7565b61010581856100b2565b93506101158185602086016100bd565b80840191505092915050565b600061012d82846100f0565b91508190509291505056fea2646970667358221220a297c3e133143287abef22b1c1d4e45f588efc3db99a84b364560a2079876fc364736f6c634300080d0033") + require.NoError(err) + + contractAddr := ethCrypto.CreateAddress(testAddr, 0) + txs := []*types.Transaction{ + makeContractTx(0, common.Big0, 500_000, big.NewInt(params.LaunchMinGasPrice), data), + makeTx(1, contractAddr, common.Big0, 100_000, big.NewInt(params.LaunchMinGasPrice), nil), // No input data is necessary, since this will hit the contract's fallback function. 
+ } + + phase6Tests := map[string]stateTransitionTest{ + "phase5": { + config: params.TestApricotPhase5Config, + txs: txs, + gasUsed: []uint64{132091, 41618}, + want: "", + }, + "prePhase6": { + config: params.TestApricotPhasePre6Config, + txs: txs, + gasUsed: []uint64{132091, 21618}, + want: "", + }, + "phase6": { + config: params.TestApricotPhase6Config, + txs: txs, + gasUsed: []uint64{132091, 41618}, + want: "", + }, + "banff": { + config: params.TestBanffChainConfig, + txs: txs, + gasUsed: []uint64{132091, 21618}, + want: "", + }, + } + + for name, stTest := range phase6Tests { + t.Run(name, func(t *testing.T) { + executeStateTransitionTest(t, stTest) + }) + } +} + +func TestNativeAssetContractConstructor(t *testing.T) { + require := require.New(t) + + data, err := hex.DecodeString("608060405234801561001057600080fd5b50600073010000000000000000000000000000000000000290508073ffffffffffffffffffffffffffffffffffffffff166040516020016040516020818303038152906040526040516100639190610128565b6000604051808303816000865af19150503d80600081146100a0576040519150601f19603f3d011682016040523d82523d6000602084013e6100a5565b606091505b5050505061013f565b600081519050919050565b600081905092915050565b60005b838110156100e25780820151818401526020810190506100c7565b838111156100f1576000848401525b50505050565b6000610102826100ae565b61010c81856100b9565b935061011c8185602086016100c4565b80840191505092915050565b600061013482846100f7565b915081905092915050565b603f8061014d6000396000f3fe6080604052600080fdfea26469706673582212208a8a2e0bb031a4d5bdfa861a6e43ae57e6f4e0cc40d069ad6f52585406790ac864736f6c634300080d0033") + require.NoError(err) + + txs := []*types.Transaction{ + makeContractTx(0, common.Big0, 100_000, big.NewInt(params.LaunchMinGasPrice), data), + } + + phase6Tests := map[string]stateTransitionTest{ + "phase5": { + config: params.TestApricotPhase5Config, + txs: txs, + gasUsed: []uint64{92046}, + want: "", + }, + "prePhase6": { + config: params.TestApricotPhasePre6Config, + txs: txs, + gasUsed: 
[]uint64{72046}, + want: "", + }, + "phase6": { + config: params.TestApricotPhase6Config, + txs: txs, + gasUsed: []uint64{92046}, + want: "", + }, + "banff": { + config: params.TestBanffChainConfig, + txs: txs, + gasUsed: []uint64{72046}, + want: "", + }, + } + + for name, stTest := range phase6Tests { + t.Run(name, func(t *testing.T) { + executeStateTransitionTest(t, stTest) + }) + } +} + +func TestNativeAssetDirectEOACall(t *testing.T) { + txs := []*types.Transaction{ + makeTx(0, vm.NativeAssetCallAddr, common.Big0, 100_000, big.NewInt(params.LaunchMinGasPrice), nil), + } + + phase6Tests := map[string]stateTransitionTest{ + "phase5": { + config: params.TestApricotPhase5Config, + txs: txs, + gasUsed: []uint64{41000}, + want: "", + }, + // Note: PrePhase6 used a soft error to ensure the Native Asset Call precompile was not used from an EOA, however, + // after PrePhase6 was over, this soft error was no longer needed since it would never be included in the chain, so + // it has been removed. + // Therefore, there is no need for an error to be returned in this test case even though a soft error would have been + // returned during PrePhase6. 
+ "prePhase6": { + config: params.TestApricotPhasePre6Config, + txs: txs, + gasUsed: []uint64{21000}, + want: "", + }, + "phase6": { + config: params.TestApricotPhase6Config, + txs: txs, + gasUsed: []uint64{41000}, + want: "", + }, + "banff": { + config: params.TestBanffChainConfig, + txs: txs, + gasUsed: []uint64{21000}, + want: "", + }, + } + + for name, stTest := range phase6Tests { + t.Run(name, func(t *testing.T) { + executeStateTransitionTest(t, stTest) + }) + } +} diff --git a/coreth/core/test_blockchain.go b/coreth/core/test_blockchain.go index ae26b9a6..10a7887a 100644 --- a/coreth/core/test_blockchain.go +++ b/coreth/core/test_blockchain.go @@ -19,11 +19,22 @@ import ( "github.com/ethereum/go-ethereum/crypto" ) +var TestCallbacks = dummy.ConsensusCallbacks{ + OnExtraStateChange: func(block *types.Block, sdb *state.StateDB) (*big.Int, *big.Int, error) { + sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64())) + return nil, nil, nil + }, + OnFinalizeAndAssemble: func(header *types.Header, sdb *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { + sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64())) + return nil, nil, nil, nil + }, +} + type ChainTest struct { Name string testFunc func( t *testing.T, - create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error), + create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error), ) } @@ -95,13 +106,12 @@ func copyMemDB(db ethdb.Database) (ethdb.Database, error) { func checkBlockChainState( t *testing.T, bc *BlockChain, - genesis *Genesis, + gspec *Genesis, originalDB ethdb.Database, - create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error), + create func(db ethdb.Database, gspec *Genesis, 
lastAcceptedHash common.Hash) (*BlockChain, error), checkState func(sdb *state.StateDB) error, ) (*BlockChain, *BlockChain, *BlockChain) { var ( - chainConfig = bc.Config() lastAcceptedBlock = bc.LastConsensusAcceptedBlock() newDB = rawdb.NewMemoryDatabase() ) @@ -114,9 +124,7 @@ func checkBlockChainState( t.Fatalf("Check state failed for original blockchain due to: %s", err) } - _ = genesis.MustCommit(newDB) - - newBlockChain, err := create(newDB, chainConfig, common.Hash{}) + newBlockChain, err := create(newDB, gspec, common.Hash{}) if err != nil { t.Fatalf("Failed to create new blockchain instance: %s", err) } @@ -154,7 +162,7 @@ func checkBlockChainState( if err != nil { t.Fatal(err) } - restartedChain, err := create(originalDB, chainConfig, lastAcceptedBlock.Hash()) + restartedChain, err := create(originalDB, gspec, lastAcceptedBlock.Hash()) if err != nil { t.Fatal(err) } @@ -178,15 +186,12 @@ func checkBlockChainState( return bc, newBlockChain, restartedChain } -func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -196,10 +201,8 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -207,9 +210,7 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) }) @@ -255,15 +256,12 @@ func TestInsertChainAcceptSingleBlock(t *testing.T, create func(db ethdb.Databas checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use 
two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -273,10 +271,8 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chai Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -284,9 +280,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chai numBlocks := 129 signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { + _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -296,7 +290,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chai } // Generate the forked chain to be longer than the original chain to check for a regression where // a longer chain can trigger a reorg. - chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks+1, 10, func(i int, gen *BlockGen) { + _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks+1, 10, func(i int, gen *BlockGen) { // Generate a transaction with a different amount to ensure [chain2] is different than [chain1]. 
tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -425,7 +419,7 @@ func TestInsertLongForkedChain(t *testing.T, create func(db ethdb.Database, chai checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -433,7 +427,6 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch addr2 = crypto.PubkeyToAddress(key2.PublicKey) // We use two separate databases since GenerateChain commits the state roots to its underlying // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -443,10 +436,8 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -454,9 +445,7 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch numBlocks := 3 signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. 
- chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { + _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -464,7 +453,7 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch if err != nil { t.Fatal(err) } - chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { + _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction with a different amount to create a chain of blocks different from [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(5000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -539,15 +528,12 @@ func TestAcceptNonCanonicalBlock(t *testing.T, create func(db ethdb.Database, ch checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -557,10 +543,8 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, chainC Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -568,9 +552,7 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, chainC numBlocks := 3 signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, numBlocks, 10, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, numBlocks, 10, func(i int, gen *BlockGen) { // Generate a transaction to create a unique block tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(10000), params.TxGas, nil, nil), signer, key1) gen.AddTx(tx) @@ -676,7 +658,7 @@ func TestSetPreferenceRewind(t *testing.T, create func(db ethdb.Database, chainC checkBlockChainState(t, blockchain, gspec, chainDB, create, checkUpdatedState) } -func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -684,9 +666,6 @@ func TestBuildOnVariousStages(t *testing.T, create 
func(db ethdb.Database, chain addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) addr3 = crypto.PubkeyToAddress(key3.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -699,10 +678,8 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, chain addr3: {Balance: genesisBalance}, }, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -710,9 +687,7 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, chain // This call generates a chain of 3 blocks. signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 20, 10, func(i int, gen *BlockGen) { + genDB, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 20, 10, func(i int, gen *BlockGen) { // Send all funds back and forth between the two accounts if i%2 == 0 { tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, genesisBalance, params.TxGas, nil, nil), signer, key1) @@ -849,31 +824,22 @@ func TestBuildOnVariousStages(t *testing.T, create func(db ethdb.Database, chain checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestEmptyBlocks(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { - var ( - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() - chainDB = rawdb.NewMemoryDatabase() - ) +func TestEmptyBlocks(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { + chainDB := rawdb.NewMemoryDatabase() // Ensure that key1 has some funds in the genesis block. gspec := &Genesis{ Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } defer blockchain.Stop() - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) {}) + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) {}) if err != nil { t.Fatal(err) } @@ -897,7 +863,7 @@ func TestEmptyBlocks(t *testing.T, create func(db ethdb.Database, chainConfig *p checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -915,10 +881,9 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, chainConfig Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = 
gspec.MustCommit(chainDB) + genesis := gspec.ToBlock(nil) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -997,22 +962,22 @@ func TestReorgReInsert(t *testing.T, create func(db ethdb.Database, chainConfig // Insert two different chains that result in the identical state root. // Once we accept one of the chains, we insert and accept A3 on top of the shared // state root -// G (genesis) -// / \ -// A1 B1 -// | | -// A2 B2 (A2 and B2 represent two different paths to the identical state trie) -// | -// A3 -func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +// +// G (genesis) +// / \ +// A1 B1 +// | | +// A2 B2 (A2 and B2 represent two different paths to the identical state trie) +// | +// A3 +// +//nolint:goimports +func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1022,19 +987,15 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } defer blockchain.Stop() signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { + _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { if i < 2 { // Send half the funds from addr1 to addr2 in one transaction per each of the two blocks in [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(500000000), params.TxGas, nil, nil), signer, key1) @@ -1045,7 +1006,7 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa if err != nil { t.Fatal(err) } - chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 2, 10, func(i int, gen *BlockGen) { + _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1]. 
if i == 0 { @@ -1144,22 +1105,22 @@ func TestAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Databa // Once we insert both of the chains, we restart, insert both the chains again, // and then we accept one of the chains and accept A3 on top of the shared state // root -// G (genesis) -// / \ -// A1 B1 -// | | -// A2 B2 (A2 and B2 represent two different paths to the identical state trie) -// | -// A3 -func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +// +// G (genesis) +// / \ +// A1 B1 +// | | +// A2 B2 (A2 and B2 represent two different paths to the identical state trie) +// | +// A3 +// +//nolint:goimports +func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. 
- genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1169,18 +1130,14 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth Config: ¶ms.ChainConfig{HomesteadBlock: new(big.Int)}, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } signer := types.HomesteadSigner{} - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - chain1, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 10, func(i int, gen *BlockGen) { + _, chain1, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 10, func(i int, gen *BlockGen) { if i < 2 { // Send half the funds from addr1 to addr2 in one transaction per each of the two blocks in [chain1] tx, _ := types.SignTx(types.NewTransaction(gen.TxNonce(addr1), addr2, big.NewInt(500000000), params.TxGas, nil, nil), signer, key1) @@ -1191,7 +1148,7 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth if err != nil { t.Fatal(err) } - chain2, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 2, 10, func(i int, gen *BlockGen) { + _, chain2, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 2, 10, func(i int, gen *BlockGen) { // Send 1/4 of the funds from addr1 to addr2 in tx1 and 3/4 of the funds in tx2. This will produce the identical state // root in the second block of [chain2] as is present in the second block of [chain1]. 
if i == 0 { @@ -1229,7 +1186,8 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth blockchain.Stop() - blockchain, err = create(chainDB, gspec.Config, common.Hash{}) + chainDB = rawdb.NewMemoryDatabase() + blockchain, err = create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1310,15 +1268,12 @@ func TestReprocessAcceptBlockIdenticalStateRoot(t *testing.T, create func(db eth checkBlockChainState(t, blockchain, gspec, chainDB, create, checkState) } -func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1328,10 +1283,8 @@ func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Databas Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1339,9 +1292,7 @@ func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Databas // This call generates a chain of 3 blocks. 
signer := types.LatestSigner(params.TestChainConfig) - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - _, _, err = GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 0, func(i int, gen *BlockGen) { + _, _, _, err = GenerateChainWithGenesis(gspec, blockchain.engine, 3, 0, func(i int, gen *BlockGen) { tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1366,15 +1317,12 @@ func TestGenerateChainInvalidBlockFee(t *testing.T, create func(db ethdb.Databas } } -func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") addr1 = crypto.PubkeyToAddress(key1.PublicKey) addr2 = crypto.PubkeyToAddress(key2.PublicKey) - // We use two separate databases since GenerateChain commits the state roots to its underlying - // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1384,10 +1332,8 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1395,19 +1341,8 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, // This call generates a chain of 3 blocks. 
signer := types.LatestSigner(params.TestChainConfig) - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. - eng := dummy.NewComplexETHFaker(&dummy.ConsensusCallbacks{ - OnExtraStateChange: func(block *types.Block, sdb *state.StateDB) (*big.Int, *big.Int, error) { - sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(block.Number().Int64())) - return nil, nil, nil - }, - OnFinalizeAndAssemble: func(header *types.Header, sdb *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { - sdb.SetBalanceMultiCoin(common.HexToAddress("0xdeadbeef"), common.HexToHash("0xdeadbeef"), big.NewInt(header.Number.Int64())) - return nil, nil, nil, nil - }, - }) - chain, _, err := GenerateChain(params.TestChainConfig, genesis, eng, genDB, 3, 0, func(i int, gen *BlockGen) { + eng := dummy.NewComplexETHFaker(&TestCallbacks) + _, chain, _, err := GenerateChainWithGenesis(gspec, eng, 3, 0, func(i int, gen *BlockGen) { tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, Nonce: gen.TxNonce(addr1), @@ -1436,7 +1371,7 @@ func TestInsertChainInvalidBlockFee(t *testing.T, create func(db ethdb.Database, } } -func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, chainConfig *params.ChainConfig, lastAcceptedHash common.Hash) (*BlockChain, error)) { +func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, gspec *Genesis, lastAcceptedHash common.Hash) (*BlockChain, error)) { var ( key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") key2, _ = crypto.HexToECDSA("8a1f9a8f95be41cd7ccb6168179afb4504aefe388d1e14474d32c45c72ce7b7a") @@ -1444,7 +1379,6 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, c addr2 = crypto.PubkeyToAddress(key2.PublicKey) // We use two separate databases since GenerateChain 
commits the state roots to its underlying // database. - genDB = rawdb.NewMemoryDatabase() chainDB = rawdb.NewMemoryDatabase() ) @@ -1454,10 +1388,8 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, c Config: params.TestChainConfig, Alloc: GenesisAlloc{addr1: {Balance: genesisBalance}}, } - genesis := gspec.MustCommit(genDB) - _ = gspec.MustCommit(chainDB) - blockchain, err := create(chainDB, gspec.Config, common.Hash{}) + blockchain, err := create(chainDB, gspec, common.Hash{}) if err != nil { t.Fatal(err) } @@ -1465,11 +1397,9 @@ func TestInsertChainValidBlockFee(t *testing.T, create func(db ethdb.Database, c // This call generates a chain of 3 blocks. signer := types.LatestSigner(params.TestChainConfig) - // Generate chain of blocks using [genDB] instead of [chainDB] to avoid writing - // to the BlockChain's database while generating blocks. tip := big.NewInt(50000 * params.GWei) transfer := big.NewInt(10000) - chain, _, err := GenerateChain(gspec.Config, genesis, blockchain.engine, genDB, 3, 0, func(i int, gen *BlockGen) { + _, chain, _, err := GenerateChainWithGenesis(gspec, blockchain.engine, 3, 0, func(i int, gen *BlockGen) { feeCap := new(big.Int).Add(gen.BaseFee(), tip) tx := types.NewTx(&types.DynamicFeeTx{ ChainID: params.TestChainConfig.ChainID, diff --git a/coreth/core/tx_noncer.go b/coreth/core/tx_noncer.go index 0dcd31c4..948453cf 100644 --- a/coreth/core/tx_noncer.go +++ b/coreth/core/tx_noncer.go @@ -74,7 +74,7 @@ func (txn *txNoncer) set(addr common.Address, nonce uint64) { } // setIfLower updates a new virtual nonce into the virtual state database if the -// the new one is lower. +// new one is lower. 
func (txn *txNoncer) setIfLower(addr common.Address, nonce uint64) { txn.lock.Lock() defer txn.lock.Unlock() diff --git a/coreth/core/tx_pool.go b/coreth/core/tx_pool.go index 513b61cb..9c58b545 100644 --- a/coreth/core/tx_pool.go +++ b/coreth/core/tx_pool.go @@ -41,6 +41,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/metrics" "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/prque" "github.com/ethereum/go-ethereum/event" @@ -63,7 +64,7 @@ const ( // to validate whether they fit into the pool or not. // // Note: the max contract size is 24KB - txMaxSize = 32 * 1024 // 32 KB + txMaxSize = 4 * txSlotSize // 128KB ) var ( @@ -150,7 +151,6 @@ const ( TxStatusUnknown TxStatus = iota TxStatusQueued TxStatusPending - TxStatusIncluded ) // blockChain provides the state of blockchain and current gas limit to do @@ -261,6 +261,7 @@ type TxPool struct { istanbul bool // Fork indicator whether we are in the istanbul stage. eip2718 bool // Fork indicator whether we are using EIP-2718 type transactions. eip1559 bool // Fork indicator whether we are using EIP-1559 type transactions. + eip3860 bool // Fork indicator whether EIP-3860 is activated. (activated in Shanghai Upgrade in Ethereum) currentHead *types.Header // [currentState] is the state of the blockchain head. It is reset whenever @@ -676,6 +677,10 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { if txSize := uint64(tx.Size()); txSize > txMaxSize { return fmt.Errorf("%w tx size %d > max size %d", ErrOversizedData, txSize, txMaxSize) } + // Check whether the init code size has been exceeded. + if pool.eip3860 && tx.To() == nil && len(tx.Data()) > params.MaxInitCodeSize { + return fmt.Errorf("%w: code size %v limit %v", vmerrs.ErrMaxInitCodeSizeExceeded, len(tx.Data()), params.MaxInitCodeSize) + } // Transactions can't be negative. 
This may never happen using RLP decoded // transactions but may occur if you create a transaction using the RPC. if tx.Value().Sign() < 0 { @@ -723,7 +728,7 @@ func (pool *TxPool) validateTx(tx *types.Transaction, local bool) error { } // Ensure the transaction has more gas than the basic tx fee. - intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul) + intrGas, err := IntrinsicGas(tx.Data(), tx.AccessList(), tx.To() == nil, true, pool.istanbul, pool.eip3860) if err != nil { return err } @@ -1417,6 +1422,7 @@ func (pool *TxPool) reset(oldHead, newHead *types.Header) { timestamp := new(big.Int).SetUint64(newHead.Time) pool.eip2718 = pool.chainconfig.IsApricotPhase2(timestamp) pool.eip1559 = pool.chainconfig.IsApricotPhase3(timestamp) + pool.eip3860 = pool.chainconfig.IsDUpgrade(timestamp) } // promoteExecutables moves transactions that have become processable from the diff --git a/coreth/core/types/block.go b/coreth/core/types/block.go index 38cfd322..caed1144 100644 --- a/coreth/core/types/block.go +++ b/coreth/core/types/block.go @@ -110,9 +110,6 @@ type Header struct { // BlockGasCost was added by Apricot Phase 4 and is ignored in legacy // headers. BlockGasCost *big.Int `json:"blockGasCost" rlp:"optional"` - - // ExtraStateRoot root was added by Cortina and is ignored in legacy headers. 
- ExtraStateRoot common.Hash `json:"extraStateRoot" rlp:"optional"` } // field type overrides for gencodec diff --git a/coreth/core/types/gen_header_json.go b/coreth/core/types/gen_header_json.go index 7036c7b5..26f934c5 100644 --- a/coreth/core/types/gen_header_json.go +++ b/coreth/core/types/gen_header_json.go @@ -35,7 +35,6 @@ func (h Header) MarshalJSON() ([]byte, error) { BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"` BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` - ExtraStateRoot common.Hash `json:"extraStateRoot" rlp:"optional"` Hash common.Hash `json:"hash"` } var enc Header @@ -58,7 +57,6 @@ func (h Header) MarshalJSON() ([]byte, error) { enc.BaseFee = (*hexutil.Big)(h.BaseFee) enc.ExtDataGasUsed = (*hexutil.Big)(h.ExtDataGasUsed) enc.BlockGasCost = (*hexutil.Big)(h.BlockGasCost) - enc.ExtraStateRoot = h.ExtraStateRoot enc.Hash = h.Hash() return json.Marshal(&enc) } @@ -85,7 +83,6 @@ func (h *Header) UnmarshalJSON(input []byte) error { BaseFee *hexutil.Big `json:"baseFeePerGas" rlp:"optional"` ExtDataGasUsed *hexutil.Big `json:"extDataGasUsed" rlp:"optional"` BlockGasCost *hexutil.Big `json:"blockGasCost" rlp:"optional"` - ExtraStateRoot *common.Hash `json:"extraStateRoot" rlp:"optional"` } var dec Header if err := json.Unmarshal(input, &dec); err != nil { @@ -162,8 +159,5 @@ func (h *Header) UnmarshalJSON(input []byte) error { if dec.BlockGasCost != nil { h.BlockGasCost = (*big.Int)(dec.BlockGasCost) } - if dec.ExtraStateRoot != nil { - h.ExtraStateRoot = *dec.ExtraStateRoot - } return nil } diff --git a/coreth/core/types/gen_header_rlp.go b/coreth/core/types/gen_header_rlp.go index 84e20cb7..1dea7fce 100644 --- a/coreth/core/types/gen_header_rlp.go +++ b/coreth/core/types/gen_header_rlp.go @@ -5,7 +5,6 @@ package types -import "github.com/ethereum/go-ethereum/common" import "github.com/ethereum/go-ethereum/rlp" import "io" @@ -45,8 +44,7 @@ func (obj *Header) 
EncodeRLP(_w io.Writer) error { _tmp1 := obj.BaseFee != nil _tmp2 := obj.ExtDataGasUsed != nil _tmp3 := obj.BlockGasCost != nil - _tmp4 := obj.ExtraStateRoot != (common.Hash{}) - if _tmp1 || _tmp2 || _tmp3 || _tmp4 { + if _tmp1 || _tmp2 || _tmp3 { if obj.BaseFee == nil { w.Write(rlp.EmptyString) } else { @@ -56,7 +54,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.BaseFee) } } - if _tmp2 || _tmp3 || _tmp4 { + if _tmp2 || _tmp3 { if obj.ExtDataGasUsed == nil { w.Write(rlp.EmptyString) } else { @@ -66,7 +64,7 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.ExtDataGasUsed) } } - if _tmp3 || _tmp4 { + if _tmp3 { if obj.BlockGasCost == nil { w.Write(rlp.EmptyString) } else { @@ -76,9 +74,6 @@ func (obj *Header) EncodeRLP(_w io.Writer) error { w.WriteBigInt(obj.BlockGasCost) } } - if _tmp4 { - w.WriteBytes(obj.ExtraStateRoot[:]) - } w.ListEnd(_tmp0) return w.Flush() } diff --git a/coreth/core/types/log.go b/coreth/core/types/log.go index 8c429e9c..131ef859 100644 --- a/coreth/core/types/log.go +++ b/coreth/core/types/log.go @@ -148,3 +148,12 @@ func (l *LogForStorage) DecodeRLP(s *rlp.Stream) error { } return err } + +// FlattenLogs converts a nested array of logs to a single array of logs. +func FlattenLogs(list [][]*Log) []*Log { + var flat []*Log + for _, logs := range list { + flat = append(flat, logs...) 
+ } + return flat +} diff --git a/coreth/core/types/transaction_signing_test.go b/coreth/core/types/transaction_signing_test.go index 081b61b1..674173dc 100644 --- a/coreth/core/types/transaction_signing_test.go +++ b/coreth/core/types/transaction_signing_test.go @@ -121,6 +121,9 @@ func TestEIP155SigningVitalik(t *testing.T) { if from != addr { t.Errorf("%d: expected %x got %x", i, addr, from) } + if !tx.Protected() { + t.Errorf("%d: expected to be protected", i) + } } } diff --git a/coreth/core/vm/contracts.go b/coreth/core/vm/contracts.go index 4b959384..578bf0c5 100644 --- a/coreth/core/vm/contracts.go +++ b/coreth/core/vm/contracts.go @@ -43,6 +43,7 @@ import ( "github.com/ethereum/go-ethereum/crypto/blake2b" "github.com/ethereum/go-ethereum/crypto/bls12381" "github.com/ethereum/go-ethereum/crypto/bn256" + big2 "github.com/holiman/big" "golang.org/x/crypto/ripemd160" ) @@ -107,6 +108,40 @@ var PrecompiledContractsApricotPhase2 = map[common.Address]precompile.StatefulPr NativeAssetCallAddr: &nativeAssetCall{gasCost: params.AssetCallApricot}, } +// PrecompiledContractsApricotPhasePre6 contains the default set of pre-compiled Ethereum +// contracts used in the PrecompiledContractsApricotPhasePre6 release. 
+var PrecompiledContractsApricotPhasePre6 = map[common.Address]precompile.StatefulPrecompiledContract{ + common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), + common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), + common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), + common.BytesToAddress([]byte{4}): newWrappedPrecompiledContract(&dataCopy{}), + common.BytesToAddress([]byte{5}): newWrappedPrecompiledContract(&bigModExp{eip2565: true}), + common.BytesToAddress([]byte{6}): newWrappedPrecompiledContract(&bn256AddIstanbul{}), + common.BytesToAddress([]byte{7}): newWrappedPrecompiledContract(&bn256ScalarMulIstanbul{}), + common.BytesToAddress([]byte{8}): newWrappedPrecompiledContract(&bn256PairingIstanbul{}), + common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}), + genesisContractAddr: &deprecatedContract{}, + NativeAssetBalanceAddr: &deprecatedContract{}, + NativeAssetCallAddr: &deprecatedContract{}, +} + +// PrecompiledContractsApricotPhase6 contains the default set of pre-compiled Ethereum +// contracts used in the Apricot Phase 6 release. 
+var PrecompiledContractsApricotPhase6 = map[common.Address]precompile.StatefulPrecompiledContract{ + common.BytesToAddress([]byte{1}): newWrappedPrecompiledContract(&ecrecover{}), + common.BytesToAddress([]byte{2}): newWrappedPrecompiledContract(&sha256hash{}), + common.BytesToAddress([]byte{3}): newWrappedPrecompiledContract(&ripemd160hash{}), + common.BytesToAddress([]byte{4}): newWrappedPrecompiledContract(&dataCopy{}), + common.BytesToAddress([]byte{5}): newWrappedPrecompiledContract(&bigModExp{eip2565: true}), + common.BytesToAddress([]byte{6}): newWrappedPrecompiledContract(&bn256AddIstanbul{}), + common.BytesToAddress([]byte{7}): newWrappedPrecompiledContract(&bn256ScalarMulIstanbul{}), + common.BytesToAddress([]byte{8}): newWrappedPrecompiledContract(&bn256PairingIstanbul{}), + common.BytesToAddress([]byte{9}): newWrappedPrecompiledContract(&blake2F{}), + genesisContractAddr: &deprecatedContract{}, + NativeAssetBalanceAddr: &nativeAssetBalance{gasCost: params.AssetBalanceApricot}, + NativeAssetCallAddr: &nativeAssetCall{gasCost: params.AssetCallApricot}, +} + // PrecompiledContractsBanff contains the default set of pre-compiled Ethereum // contracts used in the Banff release. 
var PrecompiledContractsBanff = map[common.Address]precompile.StatefulPrecompiledContract{ @@ -125,12 +160,14 @@ var PrecompiledContractsBanff = map[common.Address]precompile.StatefulPrecompile } var ( - PrecompiledAddressesBanff []common.Address - PrecompiledAddressesApricotPhase2 []common.Address - PrecompiledAddressesIstanbul []common.Address - PrecompiledAddressesByzantium []common.Address - PrecompiledAddressesHomestead []common.Address - PrecompileAllNativeAddresses map[common.Address]struct{} + PrecompiledAddressesBanff []common.Address + PrecompiledAddressesApricotPhase6 []common.Address + PrecompiledAddressesApricotPhasePre6 []common.Address + PrecompiledAddressesApricotPhase2 []common.Address + PrecompiledAddressesIstanbul []common.Address + PrecompiledAddressesByzantium []common.Address + PrecompiledAddressesHomestead []common.Address + PrecompileAllNativeAddresses map[common.Address]struct{} ) func init() { @@ -146,6 +183,12 @@ func init() { for k := range PrecompiledContractsApricotPhase2 { PrecompiledAddressesApricotPhase2 = append(PrecompiledAddressesApricotPhase2, k) } + for k := range PrecompiledContractsApricotPhasePre6 { + PrecompiledAddressesApricotPhasePre6 = append(PrecompiledAddressesApricotPhasePre6, k) + } + for k := range PrecompiledContractsApricotPhase6 { + PrecompiledAddressesApricotPhase6 = append(PrecompiledAddressesApricotPhase6, k) + } for k := range PrecompiledContractsBanff { PrecompiledAddressesBanff = append(PrecompiledAddressesBanff, k) } @@ -156,6 +199,8 @@ func init() { addrsList := append(PrecompiledAddressesHomestead, PrecompiledAddressesByzantium...) addrsList = append(addrsList, PrecompiledAddressesIstanbul...) addrsList = append(addrsList, PrecompiledAddressesApricotPhase2...) + addrsList = append(addrsList, PrecompiledAddressesApricotPhasePre6...) + addrsList = append(addrsList, PrecompiledAddressesApricotPhase6...) addrsList = append(addrsList, PrecompiledAddressesBanff...) 
for _, k := range addrsList { PrecompileAllNativeAddresses[k] = struct{}{} @@ -324,10 +369,10 @@ var ( // modexpMultComplexity implements bigModexp multComplexity formula, as defined in EIP-198 // -// def mult_complexity(x): -// if x <= 64: return x ** 2 -// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 -// else: return x ** 2 // 16 + 480 * x - 199680 +// def mult_complexity(x): +// if x <= 64: return x ** 2 +// elif x <= 1024: return x ** 2 // 4 + 96 * x - 3072 +// else: return x ** 2 // 16 + 480 * x - 199680 // // where is x is max(length_of_MODULUS, length_of_BASE) func modexpMultComplexity(x *big.Int) *big.Int { @@ -438,15 +483,22 @@ func (c *bigModExp) Run(input []byte) ([]byte, error) { } // Retrieve the operands and execute the exponentiation var ( - base = new(big.Int).SetBytes(getData(input, 0, baseLen)) - exp = new(big.Int).SetBytes(getData(input, baseLen, expLen)) - mod = new(big.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + base = new(big2.Int).SetBytes(getData(input, 0, baseLen)) + exp = new(big2.Int).SetBytes(getData(input, baseLen, expLen)) + mod = new(big2.Int).SetBytes(getData(input, baseLen+expLen, modLen)) + v []byte ) - if mod.BitLen() == 0 { + switch { + case mod.BitLen() == 0: // Modulo 0 is undefined, return zero return common.LeftPadBytes([]byte{}, int(modLen)), nil + case base.BitLen() == 1: // a bit length of 1 means it's 1 (or -1). 
+ //If base == 1, then we can just return base % mod (if mod >= 1, which it is) + v = base.Mod(base, mod).Bytes() + default: + v = base.Exp(base, exp, mod).Bytes() } - return common.LeftPadBytes(base.Exp(base, exp, mod).Bytes(), int(modLen)), nil + return common.LeftPadBytes(v, int(modLen)), nil } // newCurvePoint unmarshals a binary blob into a bn256 elliptic curve point, diff --git a/coreth/core/vm/eips.go b/coreth/core/vm/eips.go index 819ecf9e..dfcd548d 100644 --- a/coreth/core/vm/eips.go +++ b/coreth/core/vm/eips.go @@ -36,6 +36,7 @@ import ( var activators = map[int]func(*JumpTable){ 3855: enable3855, + 3860: enable3860, 3198: enable3198, 2929: enable2929, 2200: enable2200, @@ -206,3 +207,10 @@ func opPush0(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]by scope.Stack.push(new(uint256.Int)) return nil, nil } + +// enable3860 enables "EIP-3860: Limit and meter initcode" +// https://eips.ethereum.org/EIPS/eip-3860 +func enable3860(jt *JumpTable) { + jt[CREATE].dynamicGas = gasCreateEip3860 + jt[CREATE2].dynamicGas = gasCreate2Eip3860 +} diff --git a/coreth/core/vm/evm.go b/coreth/core/vm/evm.go index 8ece50f1..d0bf6279 100644 --- a/coreth/core/vm/evm.go +++ b/coreth/core/vm/evm.go @@ -59,27 +59,6 @@ func IsProhibited(addr common.Address) bool { return false } -// TODO: deprecate after Banff activation. 
-func (evm *EVM) isProhibitedWithTimestamp(addr common.Address) error { - if addr != NativeAssetCallAddr { - return nil - } - - // Return error depending on the phase - switch { - case evm.chainRules.IsBanff: // Disable the soft fork as of Banff - return nil - case evm.chainRules.IsApricotPhasePost6: // If we are in the soft fork, return the soft error - return vmerrs.ErrToAddrProhibitedSoft - case evm.chainRules.IsApricotPhase6: // If we are in Phase6, return nil - return nil - case evm.chainRules.IsApricotPhasePre6: // If we are in PrePhase6, return Prohibited6 - return vmerrs.ErrToAddrProhibited6 - default: // Prior to Pre6, don't alter behavior at all - return nil - } -} - // emptyCodeHash is used by create to ensure deployment is disallowed to already // deployed contract addresses (relevant after the account abstraction). var emptyCodeHash = crypto.Keccak256Hash(nil) @@ -101,6 +80,10 @@ func (evm *EVM) precompile(addr common.Address) (precompile.StatefulPrecompiledC switch { case evm.chainRules.IsBanff: precompiles = PrecompiledContractsBanff + case evm.chainRules.IsApricotPhase6: + precompiles = PrecompiledContractsApricotPhase6 + case evm.chainRules.IsApricotPhasePre6: + precompiles = PrecompiledContractsApricotPhasePre6 case evm.chainRules.IsApricotPhase2: precompiles = PrecompiledContractsApricotPhase2 case evm.chainRules.IsIstanbul: @@ -357,9 +340,6 @@ func (evm *EVM) CallWithoutSnapshot(caller ContractRef, addr common.Address, inp // the necessary steps to create accounts and reverses the state in case of an // execution error or failed value transfer. 
func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { - if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { - return nil, gas, prohibitErr - } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -443,9 +423,6 @@ func (evm *EVM) Call(caller ContractRef, addr common.Address, input []byte, gas // This allows the user transfer balance of a specified coinId in addition to a normal Call(). func (evm *EVM) CallExpert(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int, coinID common.Hash, value2 *big.Int) (ret []byte, leftOverGas uint64, err error) { - if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { - return nil, gas, prohibitErr - } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -529,9 +506,6 @@ func (evm *EVM) CallExpert(caller ContractRef, addr common.Address, input []byte // CallCode differs from Call in the sense that it executes the given address' // code with the caller as context. func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, gas uint64, value *big.Int) (ret []byte, leftOverGas uint64, err error) { - if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { - return nil, gas, prohibitErr - } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -583,9 +557,6 @@ func (evm *EVM) CallCode(caller ContractRef, addr common.Address, input []byte, // DelegateCall differs from CallCode in the sense that it executes the given address' // code with the caller as context and the caller is set to the caller of the caller. 
func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) { - if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { - return nil, gas, prohibitErr - } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -625,9 +596,6 @@ func (evm *EVM) DelegateCall(caller ContractRef, addr common.Address, input []by // Opcodes that attempt to perform such modifications will result in exceptions // instead of performing the modifications. func (evm *EVM) StaticCall(caller ContractRef, addr common.Address, input []byte, gas uint64) (ret []byte, leftOverGas uint64, err error) { - if prohibitErr := evm.isProhibitedWithTimestamp(addr); prohibitErr != nil { - return nil, gas, prohibitErr - } // Fail if we're trying to execute above the call depth limit if evm.depth > int(params.CallCreateDepth) { return nil, gas, vmerrs.ErrDepth @@ -816,17 +784,6 @@ func (evm *EVM) NativeAssetCall(caller common.Address, input []byte, suppliedGas return nil, 0, vmerrs.ErrOutOfGas } remainingGas = suppliedGas - gasCost - if evm.Context.Time.Cmp(constants.NativeAssetCallDeprecationTime) >= 0 { - return nil, remainingGas, vmerrs.ErrNativeAssetCallDeprecated - } - return evm.NativeAssetCallDeprecated(caller, input, suppliedGas, gasCost, readOnly) -} - -func (evm *EVM) NativeAssetCallDeprecated(caller common.Address, input []byte, suppliedGas uint64, gasCost uint64, readOnly bool) (ret []byte, remainingGas uint64, err error) { - if suppliedGas < gasCost { - return nil, 0, vmerrs.ErrOutOfGas - } - remainingGas = suppliedGas - gasCost if readOnly { return nil, remainingGas, vmerrs.ErrExecutionReverted diff --git a/coreth/core/vm/gas_table.go b/coreth/core/vm/gas_table.go index d361e23d..86e6fdef 100644 --- a/coreth/core/vm/gas_table.go +++ b/coreth/core/vm/gas_table.go @@ -128,20 +128,21 @@ func gasSStore(evm *EVM, 
contract *Contract, stack *Stack, mem *Memory, memorySi return params.SstoreResetGas, nil } } + // The new gas metering is based on net gas costs (EIP-1283): // - // 1. If current value equals new value (this is a no-op), 200 gas is deducted. - // 2. If current value does not equal new value - // 2.1. If original value equals current value (this storage slot has not been changed by the current execution context) - // 2.1.1. If original value is 0, 20000 gas is deducted. - // 2.1.2. Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter. - // 2.2. If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. - // 2.2.1. If original value is not 0 - // 2.2.1.1. If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. We can prove that refund counter will never go below 0. - // 2.2.1.2. If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. - // 2.2.2. If original value equals new value (this storage slot is reset) - // 2.2.2.1. If original value is 0, add 19800 gas to refund counter. - // 2.2.2.2. Otherwise, add 4800 gas to refund counter. + // (1.) If current value equals new value (this is a no-op), 200 gas is deducted. + // (2.) If current value does not equal new value + // (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context) + // (2.1.1.) If original value is 0, 20000 gas is deducted. + // (2.1.2.) Otherwise, 5000 gas is deducted. If new value is 0, add 15000 gas to refund counter. + // (2.2.) If original value does not equal current value (this storage slot is dirty), 200 gas is deducted. Apply both of the following clauses. + // (2.2.1.) If original value is not 0 + // (2.2.1.1.) If current value is 0 (also means that new value is not 0), remove 15000 gas from refund counter. 
We can prove that refund counter will never go below 0. + // (2.2.1.2.) If new value is 0 (also means that current value is not 0), add 15000 gas to refund counter. + // (2.2.2.) If original value equals new value (this storage slot is reset) + // (2.2.2.1.) If original value is 0, add 19800 gas to refund counter. + // (2.2.2.2.) Otherwise, add 4800 gas to refund counter. value := common.Hash(y.Bytes32()) if current == value { // noop (1) return params.NetSstoreNoopGas, nil @@ -173,19 +174,21 @@ func gasSStore(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySi return params.NetSstoreDirtyGas, nil } -// 0. If *gasleft* is less than or equal to 2300, fail the current call. -// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. -// 2. If current value does not equal new value: -// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): -// 2.1.1. If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. -// 2.1.2. Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. -// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: -// 2.2.1. If original value is not 0: -// 2.2.1.1. If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter. -// 2.2.1.2. If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter. -// 2.2.2. If original value equals new value (this storage slot is reset): -// 2.2.2.1. If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter. -// 2.2.2.2. Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter. +// Here come the EIP2200 rules: +// +// (0.) If *gasleft* is less than or equal to 2300, fail the current call. +// (1.) 
If current value equals new value (this is a no-op), SLOAD_GAS is deducted. +// (2.) If current value does not equal new value: +// (2.1.) If original value equals current value (this storage slot has not been changed by the current execution context): +// (2.1.1.) If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. +// (2.1.2.) Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. +// (2.2.) If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: +// (2.2.1.) If original value is not 0: +// (2.2.1.1.) If current value is 0 (also means that new value is not 0), subtract SSTORE_CLEARS_SCHEDULE gas from refund counter. +// (2.2.1.2.) If new value is 0 (also means that current value is not 0), add SSTORE_CLEARS_SCHEDULE gas to refund counter. +// (2.2.2.) If original value equals new value (this storage slot is reset): +// (2.2.2.1.) If original value is 0, add SSTORE_SET_GAS - SLOAD_GAS to refund counter. +// (2.2.2.2.) Otherwise, add SSTORE_RESET_GAS - SLOAD_GAS gas to refund counter. func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { // If we fail the minimum gas availability invariant, fail (0) if contract.Gas <= params.SstoreSentryGasEIP2200 { @@ -230,13 +233,13 @@ func gasSStoreEIP2200(evm *EVM, contract *Contract, stack *Stack, mem *Memory, m // gasSStoreAP1 simplifies the dynamic gas cost of SSTORE by removing all refund logic // -// 0. If *gasleft* is less than or equal to 2300, fail the current call. -// 1. If current value equals new value (this is a no-op), SLOAD_GAS is deducted. -// 2. If current value does not equal new value: -// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): +// 0. If *gasleft* is less than or equal to 2300, fail the current call. +// 1. 
If current value equals new value (this is a no-op), SLOAD_GAS is deducted. +// 2. If current value does not equal new value: +// 2.1. If original value equals current value (this storage slot has not been changed by the current execution context): // 2.1.1. If original value is 0, SSTORE_SET_GAS (20K) gas is deducted. // 2.1.2. Otherwise, SSTORE_RESET_GAS gas is deducted. If new value is 0, add SSTORE_CLEARS_SCHEDULE to refund counter. -// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: +// 2.2. If original value does not equal current value (this storage slot is dirty), SLOAD_GAS gas is deducted. Apply both of the following clauses: func gasSStoreAP1(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { // If we fail the minimum gas availability invariant, fail (0) if contract.Gas <= params.SstoreSentryGasEIP2200 { @@ -345,6 +348,39 @@ func gasCreate2(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memoryS return gas, nil } +func gasCreateEip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas, err := memoryGasCost(mem, memorySize) + if err != nil { + return 0, err + } + size, overflow := stack.Back(2).Uint64WithOverflow() + if overflow || size > params.MaxInitCodeSize { + return 0, vmerrs.ErrGasUintOverflow + } + // Since size <= params.MaxInitCodeSize, these multiplication cannot overflow + moreGas := params.InitCodeWordGas * ((size + 31) / 32) + if gas, overflow = math.SafeAdd(gas, moreGas); overflow { + return 0, vmerrs.ErrGasUintOverflow + } + return gas, nil +} +func gasCreate2Eip3860(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { + gas, err := memoryGasCost(mem, memorySize) + if err != nil { + return 0, err + } + size, overflow := stack.Back(2).Uint64WithOverflow() + if overflow || size > params.MaxInitCodeSize { + 
return 0, vmerrs.ErrGasUintOverflow + } + // Since size <= params.MaxInitCodeSize, these multiplication cannot overflow + moreGas := (params.InitCodeWordGas + params.Keccak256WordGas) * ((size + 31) / 32) + if gas, overflow = math.SafeAdd(gas, moreGas); overflow { + return 0, vmerrs.ErrGasUintOverflow + } + return gas, nil +} + func gasExpFrontier(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { expByteLen := uint64((stack.data[stack.len()-2].BitLen() + 7) / 8) diff --git a/coreth/core/vm/gas_table_test.go b/coreth/core/vm/gas_table_test.go index be7fb2c5..e2388c2f 100644 --- a/coreth/core/vm/gas_table_test.go +++ b/coreth/core/vm/gas_table_test.go @@ -27,8 +27,10 @@ package vm import ( + "bytes" "math" "math/big" + "sort" "testing" "github.com/ava-labs/coreth/core/rawdb" @@ -116,3 +118,73 @@ func TestEIP2200(t *testing.T) { } } } + +var createGasTests = []struct { + code string + eip3860 bool + gasUsed uint64 + minimumGas uint64 +}{ + // legacy create(0, 0, 0xc000) without 3860 used + {"0x61C00060006000f0" + "600052" + "60206000F3", false, 41237, 41237}, + // legacy create(0, 0, 0xc000) _with_ 3860 + {"0x61C00060006000f0" + "600052" + "60206000F3", true, 44309, 44309}, + // create2(0, 0, 0xc001, 0) without 3860 + {"0x600061C00160006000f5" + "600052" + "60206000F3", false, 50471, 50471}, + // create2(0, 0, 0xc001, 0) (too large), with 3860 + {"0x600061C00160006000f5" + "600052" + "60206000F3", true, 32012, 100_000}, + // create2(0, 0, 0xc000, 0) + // This case is trying to deploy code at (within) the limit + {"0x600061C00060006000f5" + "600052" + "60206000F3", true, 53528, 53528}, + // create2(0, 0, 0xc001, 0) + // This case is trying to deploy code exceeding the limit + {"0x600061C00160006000f5" + "600052" + "60206000F3", true, 32024, 100000}, +} + +func TestCreateGas(t *testing.T) { + for i, tt := range createGasTests { + var gasUsed = uint64(0) + doCheck := func(testGas int) bool { + address := 
common.BytesToAddress([]byte("contract")) + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.CreateAccount(address) + statedb.SetCode(address, hexutil.MustDecode(tt.code)) + statedb.Finalise(true) + vmctx := BlockContext{ + CanTransfer: func(StateDB, common.Address, *big.Int) bool { return true }, + Transfer: func(StateDB, common.Address, common.Address, *big.Int) {}, + BlockNumber: big.NewInt(0), + } + config := Config{} + if tt.eip3860 { + config.ExtraEips = []int{3860} + } + + vmenv := NewEVM(vmctx, TxContext{}, statedb, params.TestChainConfig, config) + var startGas = uint64(testGas) + ret, gas, err := vmenv.Call(AccountRef(common.Address{}), address, nil, startGas, new(big.Int)) + if err != nil { + return false + } + gasUsed = startGas - gas + if len(ret) != 32 { + t.Fatalf("test %d: expected 32 bytes returned, have %d", i, len(ret)) + } + if bytes.Equal(ret, make([]byte, 32)) { + // Failure + return false + } + return true + } + minGas := sort.Search(100_000, doCheck) + if uint64(minGas) != tt.minimumGas { + t.Fatalf("test %d: min gas error, want %d, have %d", i, tt.minimumGas, minGas) + } + // If the deployment succeeded, we also check the gas used + if minGas < 100_000 { + if gasUsed != tt.gasUsed { + t.Errorf("test %d: gas used mismatch: have %v, want %v", i, gasUsed, tt.gasUsed) + } + } + } +} diff --git a/coreth/core/vm/instructions.go b/coreth/core/vm/instructions.go index 960df8bc..e79f059d 100644 --- a/coreth/core/vm/instructions.go +++ b/coreth/core/vm/instructions.go @@ -415,29 +415,29 @@ func opExtCodeCopy(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) // opExtCodeHash returns the code hash of a specified account. // There are several cases when the function is called, while we can relay everything // to `state.GetCodeHash` function to ensure the correctness. 
-// (1) Caller tries to get the code hash of a normal contract account, state -// should return the relative code hash and set it as the result. // -// (2) Caller tries to get the code hash of a non-existent account, state should -// return common.Hash{} and zero will be set as the result. +// 1. Caller tries to get the code hash of a normal contract account, state +// should return the relative code hash and set it as the result. // -// (3) Caller tries to get the code hash for an account without contract code, -// state should return emptyCodeHash(0xc5d246...) as the result. +// 2. Caller tries to get the code hash of a non-existent account, state should +// return common.Hash{} and zero will be set as the result. // -// (4) Caller tries to get the code hash of a precompiled account, the result -// should be zero or emptyCodeHash. +// 3. Caller tries to get the code hash for an account without contract code, state +// should return emptyCodeHash(0xc5d246...) as the result. // -// It is worth noting that in order to avoid unnecessary create and clean, -// all precompile accounts on mainnet have been transferred 1 wei, so the return -// here should be emptyCodeHash. -// If the precompile account is not transferred any amount on a private or +// 4. Caller tries to get the code hash of a precompiled account, the result should be +// zero or emptyCodeHash. +// +// It is worth noting that in order to avoid unnecessary create and clean, all precompile +// accounts on mainnet have been transferred 1 wei, so the return here should be +// emptyCodeHash. If the precompile account is not transferred any amount on a private or // customized chain, the return value will be zero. // -// (5) Caller tries to get the code hash for an account which is marked as suicided -// in the current transaction, the code hash of this account should be returned. +// 5. 
Caller tries to get the code hash for an account which is marked as suicided +// in the current transaction, the code hash of this account should be returned. // -// (6) Caller tries to get the code hash for an account which is marked as deleted, -// this account should be regarded as a non-existent account and zero should be returned. +// 6. Caller tries to get the code hash for an account which is marked as deleted, this +// account should be regarded as a non-existent account and zero should be returned. func opExtCodeHash(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byte, error) { slot := scope.Stack.peek() address := common.Address(slot.Bytes20()) @@ -618,10 +618,6 @@ func opCreate(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]b } res, addr, returnGas, suberr := interpreter.evm.Create(scope.Contract, input, gas, bigVal) - // Special case the error in the op code. TODO remove. - if errors.Is(suberr, vmerrs.ErrToAddrProhibitedSoft) { - return nil, suberr - } // Push item on the stack based on the returned error. If the ruleset is // homestead we must check for CodeStoreOutOfGasError (homestead only // rule) and treat as an error, if the ruleset is frontier we must @@ -655,7 +651,6 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] input = scope.Memory.GetCopy(int64(offset.Uint64()), int64(size.Uint64())) gas = scope.Contract.Gas ) - // Apply EIP150 gas -= gas / 64 scope.Contract.UseGas(gas) @@ -668,10 +663,6 @@ func opCreate2(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([] } res, addr, returnGas, suberr := interpreter.evm.Create2(scope.Contract, input, gas, bigEndowment, &salt) - // Special case the error in the op code. TODO remove. - if errors.Is(suberr, vmerrs.ErrToAddrProhibitedSoft) { - return nil, suberr - } // Push item on the stack based on the returned error. 
if suberr != nil { stackvalue.Clear() @@ -714,10 +705,6 @@ func opCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([]byt } ret, returnGas, err := interpreter.evm.Call(scope.Contract, toAddr, args, gas, bigVal) - // Special case the error in the op code. TODO remove. - if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { - return nil, err - } if err != nil { temp.Clear() } else { @@ -768,10 +755,6 @@ func opCallExpert(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) } ret, returnGas, err := interpreter.evm.CallExpert(scope.Contract, toAddr, args, gas, bigVal, coinID, bigVal2) - // Special case the error in the op code. TODO remove. - if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { - return nil, err - } if err != nil { temp.Clear() } else { @@ -807,10 +790,6 @@ func opCallCode(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) ([ } ret, returnGas, err := interpreter.evm.CallCode(scope.Contract, toAddr, args, gas, bigVal) - // Special case the error in the op code. TODO remove. - if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { - return nil, err - } if err != nil { temp.Clear() } else { @@ -840,10 +819,6 @@ func opDelegateCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) ret, returnGas, err := interpreter.evm.DelegateCall(scope.Contract, toAddr, args, gas) - // Special case the error in the op code. TODO remove. - if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { - return nil, err - } if err != nil { temp.Clear() } else { @@ -873,10 +848,6 @@ func opStaticCall(pc *uint64, interpreter *EVMInterpreter, scope *ScopeContext) args := scope.Memory.GetPtr(int64(inOffset.Uint64()), int64(inSize.Uint64())) ret, returnGas, err := interpreter.evm.StaticCall(scope.Contract, toAddr, args, gas) - // Special case the error in the op code. TODO remove. 
- if errors.Is(err, vmerrs.ErrToAddrProhibitedSoft) { - return nil, err - } if err != nil { temp.Clear() } else { diff --git a/coreth/core/vm/interpreter.go b/coreth/core/vm/interpreter.go index a02d7f8c..b9ce3f46 100644 --- a/coreth/core/vm/interpreter.go +++ b/coreth/core/vm/interpreter.go @@ -90,6 +90,8 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { // If jump table was not initialised we set the default one. if cfg.JumpTable == nil { switch { + case evm.chainRules.IsDUpgrade: + cfg.JumpTable = &dUpgradeInstructionSet case evm.chainRules.IsApricotPhase3: cfg.JumpTable = &apricotPhase3InstructionSet case evm.chainRules.IsApricotPhase2: @@ -111,14 +113,17 @@ func NewEVMInterpreter(evm *EVM, cfg Config) *EVMInterpreter { default: cfg.JumpTable = &frontierInstructionSet } + // TODO: update this to the new go-ethereum code when the relevant code is merged. + if len(evm.Config.ExtraEips) > 0 { + // Deep-copy jumptable to prevent modification of opcodes in other tables + cfg.JumpTable = copyJumpTable(cfg.JumpTable) + } for i, eip := range cfg.ExtraEips { - copy := *cfg.JumpTable - if err := EnableEIP(eip, ©); err != nil { + if err := EnableEIP(eip, cfg.JumpTable); err != nil { // Disable it, so caller can check if it's activated or not cfg.ExtraEips = append(cfg.ExtraEips[:i], cfg.ExtraEips[i+1:]...) log.Error("EIP activation failed", "eip", eip, "error", err) } - cfg.JumpTable = © } } diff --git a/coreth/core/vm/jump_table.go b/coreth/core/vm/jump_table.go index ce641430..6e51d9b4 100644 --- a/coreth/core/vm/jump_table.go +++ b/coreth/core/vm/jump_table.go @@ -65,6 +65,7 @@ var ( apricotPhase1InstructionSet = newApricotPhase1InstructionSet() apricotPhase2InstructionSet = newApricotPhase2InstructionSet() apricotPhase3InstructionSet = newApricotPhase3InstructionSet() + dUpgradeInstructionSet = newDUpgradeInstructionSet() ) // JumpTable contains the EVM opcodes supported at a given fork. 
@@ -88,6 +89,13 @@ func validate(jt JumpTable) JumpTable { return jt } +func newDUpgradeInstructionSet() JumpTable { + instructionSet := newApricotPhase3InstructionSet() + enable3855(&instructionSet) // PUSH0 instruction + enable3860(&instructionSet) // Limit and meter initcode + return validate(instructionSet) +} + // newApricotPhase3InstructionSet returns the frontier, homestead, byzantium, // contantinople, istanbul, petersburg, apricotPhase1, 2, and 3 instructions. func newApricotPhase3InstructionSet() JumpTable { @@ -1071,3 +1079,14 @@ func newFrontierInstructionSet() JumpTable { return validate(tbl) } + +func copyJumpTable(source *JumpTable) *JumpTable { + dest := *source + for i, op := range source { + if op != nil { + opCopy := *op + dest[i] = &opCopy + } + } + return &dest +} diff --git a/coreth/core/vm/operations_acl.go b/coreth/core/vm/operations_acl.go index 07e6e07e..7b36e8c1 100644 --- a/coreth/core/vm/operations_acl.go +++ b/coreth/core/vm/operations_acl.go @@ -45,7 +45,7 @@ import ( // SLOAD_GAS 800 = WARM_STORAGE_READ_COST // SSTORE_RESET_GAS 5000 5000 - COLD_SLOAD_COST // -//The other parameters defined in EIP 2200 are unchanged. +// The other parameters defined in EIP 2200 are unchanged. // see gasSStoreEIP2200(...) 
in core/vm/gas_table.go for more info about how EIP 2200 is specified func gasSStoreEIP2929(evm *EVM, contract *Contract, stack *Stack, mem *Memory, memorySize uint64) (uint64, error) { // If we fail the minimum gas availability invariant, fail (0) diff --git a/coreth/core/vm/runtime/runtime_test.go b/coreth/core/vm/runtime/runtime_test.go index 1b2106a2..58daa614 100644 --- a/coreth/core/vm/runtime/runtime_test.go +++ b/coreth/core/vm/runtime/runtime_test.go @@ -46,8 +46,8 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/asm" - // force-load native tracers to trigger registration - _ "github.com/ava-labs/coreth/eth/tracers/native" + // force-load js tracers to trigger registration + _ "github.com/ava-labs/coreth/eth/tracers/js" ) func TestDefaults(t *testing.T) { @@ -691,3 +691,242 @@ func TestColdAccountAccessCost(t *testing.T) { } } } + +func TestRuntimeJSTracer(t *testing.T) { + jsTracers := []string{ + `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0, + step: function() { this.steps++}, + fault: function() {}, + result: function() { + return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") + }, + enter: function(frame) { + this.enters++; + this.enterGas = frame.getGas(); + }, + exit: function(res) { + this.exits++; + this.gasUsed = res.getGasUsed(); + }}`, + `{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, steps:0, + fault: function() {}, + result: function() { + return [this.enters, this.exits,this.enterGas,this.gasUsed, this.steps].join(",") + }, + enter: function(frame) { + this.enters++; + this.enterGas = frame.getGas(); + }, + exit: function(res) { + this.exits++; + this.gasUsed = res.getGasUsed(); + }}`} + tests := []struct { + code []byte + // One result per tracer + results []string + }{ + { + // CREATE + code: []byte{ + // Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes) + byte(vm.PUSH5), + // Init code: PUSH1 0, PUSH1 0, RETURN (3 steps) + byte(vm.PUSH1), 0, 
byte(vm.PUSH1), 0, byte(vm.RETURN), + byte(vm.PUSH1), 0, + byte(vm.MSTORE), + // length, offset, value + byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0, + byte(vm.CREATE), + byte(vm.POP), + }, + results: []string{`"1,1,952855,6,12"`, `"1,1,952855,6,0"`}, + }, + { + // CREATE2 + code: []byte{ + // Store initcode in memory at 0x00 (5 bytes left-padded to 32 bytes) + byte(vm.PUSH5), + // Init code: PUSH1 0, PUSH1 0, RETURN (3 steps) + byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN), + byte(vm.PUSH1), 0, + byte(vm.MSTORE), + // salt, length, offset, value + byte(vm.PUSH1), 1, byte(vm.PUSH1), 5, byte(vm.PUSH1), 27, byte(vm.PUSH1), 0, + byte(vm.CREATE2), + byte(vm.POP), + }, + results: []string{`"1,1,952846,6,13"`, `"1,1,952846,6,0"`}, + }, + { + // CALL + code: []byte{ + // outsize, outoffset, insize, inoffset + byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, + byte(vm.PUSH1), 0, // value + byte(vm.PUSH1), 0xbb, //address + byte(vm.GAS), // gas + byte(vm.CALL), + byte(vm.POP), + }, + results: []string{`"1,1,981796,6,13"`, `"1,1,981796,6,0"`}, + }, + { + // CALLCODE + code: []byte{ + // outsize, outoffset, insize, inoffset + byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, + byte(vm.PUSH1), 0, // value + byte(vm.PUSH1), 0xcc, //address + byte(vm.GAS), // gas + byte(vm.CALLCODE), + byte(vm.POP), + }, + results: []string{`"1,1,981796,6,13"`, `"1,1,981796,6,0"`}, + }, + { + // STATICCALL + code: []byte{ + // outsize, outoffset, insize, inoffset + byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, + byte(vm.PUSH1), 0xdd, //address + byte(vm.GAS), // gas + byte(vm.STATICCALL), + byte(vm.POP), + }, + results: []string{`"1,1,981799,6,12"`, `"1,1,981799,6,0"`}, + }, + { + // DELEGATECALL + code: []byte{ + // outsize, outoffset, insize, inoffset + byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, + byte(vm.PUSH1), 0xee, //address + byte(vm.GAS), // gas + 
byte(vm.DELEGATECALL), + byte(vm.POP), + }, + results: []string{`"1,1,981799,6,12"`, `"1,1,981799,6,0"`}, + }, + { + // CALL self-destructing contract + code: []byte{ + // outsize, outoffset, insize, inoffset + byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, + byte(vm.PUSH1), 0, // value + byte(vm.PUSH1), 0xff, //address + byte(vm.GAS), // gas + byte(vm.CALL), + byte(vm.POP), + }, + results: []string{`"2,2,0,5003,12"`, `"2,2,0,5003,0"`}, + }, + } + calleeCode := []byte{ + byte(vm.PUSH1), 0, + byte(vm.PUSH1), 0, + byte(vm.RETURN), + } + depressedCode := []byte{ + byte(vm.PUSH1), 0xaa, + byte(vm.SELFDESTRUCT), + } + main := common.HexToAddress("0xaa") + for i, jsTracer := range jsTracers { + for j, tc := range tests { + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.SetCode(main, tc.code) + statedb.SetCode(common.HexToAddress("0xbb"), calleeCode) + statedb.SetCode(common.HexToAddress("0xcc"), calleeCode) + statedb.SetCode(common.HexToAddress("0xdd"), calleeCode) + statedb.SetCode(common.HexToAddress("0xee"), calleeCode) + statedb.SetCode(common.HexToAddress("0xff"), depressedCode) + + tracer, err := tracers.New(jsTracer, new(tracers.Context), nil) + if err != nil { + t.Fatal(err) + } + _, _, err = Call(main, nil, &Config{ + GasLimit: 1000000, + State: statedb, + EVMConfig: vm.Config{ + Debug: true, + Tracer: tracer, + }}) + if err != nil { + t.Fatal("didn't expect error", err) + } + res, err := tracer.GetResult() + if err != nil { + t.Fatal(err) + } + if have, want := string(res), tc.results[i]; have != want { + t.Errorf("wrong result for tracer %d testcase %d, have \n%v\nwant\n%v\n", i, j, have, want) + } + } + } +} + +func TestJSTracerCreateTx(t *testing.T) { + jsTracer := ` + {enters: 0, exits: 0, + step: function() {}, + fault: function() {}, + result: function() { return [this.enters, this.exits].join(",") }, + enter: function(frame) { this.enters++ }, + exit: function(res) { 
this.exits++ }}` + code := []byte{byte(vm.PUSH1), 0, byte(vm.PUSH1), 0, byte(vm.RETURN)} + + statedb, _ := state.New(common.Hash{}, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + tracer, err := tracers.New(jsTracer, new(tracers.Context), nil) + if err != nil { + t.Fatal(err) + } + _, _, _, err = Create(code, &Config{ + State: statedb, + EVMConfig: vm.Config{ + Debug: true, + Tracer: tracer, + }}) + if err != nil { + t.Fatal(err) + } + + res, err := tracer.GetResult() + if err != nil { + t.Fatal(err) + } + if have, want := string(res), `"0,0"`; have != want { + t.Errorf("wrong result for tracer, have \n%v\nwant\n%v\n", have, want) + } +} + +func BenchmarkTracerStepVsCallFrame(b *testing.B) { + // Simply pushes and pops some values in a loop + code := []byte{ + byte(vm.JUMPDEST), + byte(vm.PUSH1), 0, + byte(vm.PUSH1), 0, + byte(vm.POP), + byte(vm.POP), + byte(vm.PUSH1), 0, // jumpdestination + byte(vm.JUMP), + } + + stepTracer := ` + { + step: function() {}, + fault: function() {}, + result: function() {}, + }` + callFrameTracer := ` + { + enter: function() {}, + exit: function() {}, + fault: function() {}, + result: function() {}, + }` + + benchmarkNonModifyingCode(10000000, code, "tracer-step-10M", stepTracer, b) + benchmarkNonModifyingCode(10000000, code, "tracer-call-frame-10M", callFrameTracer, b) +} diff --git a/coreth/core/vm/testdata/precompiles/bn256ScalarMul.json b/coreth/core/vm/testdata/precompiles/bn256ScalarMul.json index 2a28f630..b0427fcc 100644 --- a/coreth/core/vm/testdata/precompiles/bn256ScalarMul.json +++ b/coreth/core/vm/testdata/precompiles/bn256ScalarMul.json @@ -124,5 +124,12 @@ "Name": "cdetrio15", "Gas": 6000, "NoBenchmark": true + }, + { + "Input": "039730ea8dff1254c0fee9c0ea777d29a9c710b7e616683f194f18c43b43b869073a5ffcc6fc7a28c30723d6e58ce577356982d65b833a5a5c15bf9024b43d980000000000000000000000000000000000000000000000000000000000000000", + "Expected": 
"00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "Name": "zeroScalar", + "Gas": 6000, + "NoBenchmark": true } ] \ No newline at end of file diff --git a/coreth/eth/api.go b/coreth/eth/api.go index 5152ef20..17ba6525 100644 --- a/coreth/eth/api.go +++ b/coreth/eth/api.go @@ -294,10 +294,12 @@ func (api *DebugAPI) StorageRangeAt(blockHash common.Hash, txIndex int, contract if block == nil { return StorageRangeResult{}, fmt.Errorf("block %#x not found", blockHash) } - _, _, statedb, err := api.eth.stateAtTransaction(block, txIndex, 0) + _, _, statedb, release, err := api.eth.stateAtTransaction(block, txIndex, 0) if err != nil { return StorageRangeResult{}, err } + defer release() + st := statedb.StorageTrie(contractAddress) if st == nil { return StorageRangeResult{}, fmt.Errorf("account %x doesn't exist", contractAddress) diff --git a/coreth/eth/api_backend.go b/coreth/eth/api_backend.go index 6fb13ec0..b387407c 100644 --- a/coreth/eth/api_backend.go +++ b/coreth/eth/api_backend.go @@ -42,6 +42,7 @@ import ( "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" "github.com/ava-labs/coreth/eth/gasprice" + "github.com/ava-labs/coreth/eth/tracers" "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" @@ -53,10 +54,11 @@ var ErrUnfinalizedData = errors.New("cannot query unfinalized data") // EthAPIBackend implements ethapi.Backend for full nodes type EthAPIBackend struct { - extRPCEnabled bool - allowUnprotectedTxs bool - eth *Ethereum - gpo *gasprice.Oracle + extRPCEnabled bool + allowUnprotectedTxs bool + allowUnprotectedTxHashes map[common.Hash]struct{} // Invariant: read-only after creation. + eth *Ethereum + gpo *gasprice.Oracle } // ChainConfig returns the active chain configuration. 
@@ -246,7 +248,10 @@ func (b *EthAPIBackend) GetReceipts(ctx context.Context, hash common.Hash) (type } func (b *EthAPIBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { - return rawdb.ReadLogs(b.eth.chainDb, hash, number), nil + if err := ctx.Err(); err != nil { + return nil, err + } + return b.eth.blockchain.GetLogs(hash, number), nil } func (b *EthAPIBackend) GetEVM(ctx context.Context, msg core.Message, state *state.StateDB, header *types.Header, vmConfig *vm.Config) (*vm.EVM, func() error, error) { @@ -386,8 +391,24 @@ func (b *EthAPIBackend) ExtRPCEnabled() bool { return b.extRPCEnabled } -func (b *EthAPIBackend) UnprotectedAllowed() bool { - return b.allowUnprotectedTxs +func (b *EthAPIBackend) UnprotectedAllowed(tx *types.Transaction) bool { + if b.allowUnprotectedTxs { + return true + } + + // Check for special cased transaction hashes: + // Note: this map is read-only after creation, so it is safe to read from it on multiple threads. 
+ if _, ok := b.allowUnprotectedTxHashes[tx.Hash()]; ok { + return true + } + + // Check for "predictable pattern" (Nick's Signature: https://weka.medium.com/how-to-send-ether-to-11-440-people-187e332566b7) + v, r, s := tx.RawSignatureValues() + if v == nil || r == nil || s == nil { + return false + } + + return tx.Nonce() == 0 && r.Cmp(s) == 0 } func (b *EthAPIBackend) RPCGasCap() uint64 { @@ -425,11 +446,11 @@ func (b *EthAPIBackend) GetMaxBlocksPerRequest() int64 { return b.eth.settings.MaxBlocksPerRequest } -func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) { - return b.eth.StateAtBlock(block, reexec, base, checkLive, preferDisk) +func (b *EthAPIBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, tracers.StateReleaseFunc, error) { + return b.eth.StateAtBlock(block, reexec, base, readOnly, preferDisk) } -func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { +func (b *EthAPIBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { return b.eth.stateAtTransaction(block, txIndex, reexec) } diff --git a/coreth/eth/api_backend_test.go b/coreth/eth/api_backend_test.go new file mode 100644 index 00000000..e58fab09 --- /dev/null +++ b/coreth/eth/api_backend_test.go @@ -0,0 +1,73 @@ +// (c) 2019-2020, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. 
+// ********** +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package eth + +import ( + "fmt" + "testing" + + "github.com/ava-labs/coreth/core/types" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/rlp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestUnprotectedAllowed(t *testing.T) { + allowedTxHashes := make(map[common.Hash]struct{}) + allowedTxHashes[common.HexToHash("0x803351deb6d745e91545a6a3e1c0ea3e9a6a02a1a4193b70edfcd2f40f71a01c")] = struct{}{} // Special case EIP-2470 tx, since it's R and S values are not quite the same + + backend := &EthAPIBackend{ + allowUnprotectedTxs: false, + allowUnprotectedTxHashes: allowedTxHashes, + } + + for i, test := range []struct { + txRlp, addr string + }{ + {"f8a58085174876e800830186a08080b853604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf31ba02222222222222222222222222222222222222222222222222222222222222222a02222222222222222222222222222222222222222222222222222222222222222", "0x3fab184622dc19b6109349b94811493bf2a45362"}, // https://github.com/Arachnid/deterministic-deployment-proxy + 
{"f90a388085174876e800830c35008080b909e5608060405234801561001057600080fd5b506109c5806100206000396000f3fe608060405234801561001057600080fd5b50600436106100a5576000357c010000000000000000000000000000000000000000000000000000000090048063a41e7d5111610078578063a41e7d51146101d4578063aabbb8ca1461020a578063b705676514610236578063f712f3e814610280576100a5565b806329965a1d146100aa5780633d584063146100e25780635df8122f1461012457806365ba36c114610152575b600080fd5b6100e0600480360360608110156100c057600080fd5b50600160a060020a038135811691602081013591604090910135166102b6565b005b610108600480360360208110156100f857600080fd5b5035600160a060020a0316610570565b60408051600160a060020a039092168252519081900360200190f35b6100e06004803603604081101561013a57600080fd5b50600160a060020a03813581169160200135166105bc565b6101c26004803603602081101561016857600080fd5b81019060208101813564010000000081111561018357600080fd5b82018360208201111561019557600080fd5b803590602001918460018302840111640100000000831117156101b757600080fd5b5090925090506106b3565b60408051918252519081900360200190f35b6100e0600480360360408110156101ea57600080fd5b508035600160a060020a03169060200135600160e060020a0319166106ee565b6101086004803603604081101561022057600080fd5b50600160a060020a038135169060200135610778565b61026c6004803603604081101561024c57600080fd5b508035600160a060020a03169060200135600160e060020a0319166107ef565b604080519115158252519081900360200190f35b61026c6004803603604081101561029657600080fd5b508035600160a060020a03169060200135600160e060020a0319166108aa565b6000600160a060020a038416156102cd57836102cf565b335b9050336102db82610570565b600160a060020a031614610339576040805160e560020a62461bcd02815260206004820152600f60248201527f4e6f7420746865206d616e616765720000000000000000000000000000000000604482015290519081900360640190fd5b6103428361092a565b15610397576040805160e560020a62461bcd02815260206004820152601a60248201527f4d757374206e6f7420626520616e204552433136352068617368000000000000604482015290519081900360640190fd5b600160a060020a038216158015906103b85750600160a060020a0382
163314155b156104ff5760405160200180807f455243313832305f4143434550545f4d4147494300000000000000000000000081525060140190506040516020818303038152906040528051906020012082600160a060020a031663249cb3fa85846040518363ffffffff167c01000000000000000000000000000000000000000000000000000000000281526004018083815260200182600160a060020a0316600160a060020a031681526020019250505060206040518083038186803b15801561047e57600080fd5b505afa158015610492573d6000803e3d6000fd5b505050506040513d60208110156104a857600080fd5b5051146104ff576040805160e560020a62461bcd02815260206004820181905260248201527f446f6573206e6f7420696d706c656d656e742074686520696e74657266616365604482015290519081900360640190fd5b600160a060020a03818116600081815260208181526040808320888452909152808220805473ffffffffffffffffffffffffffffffffffffffff19169487169485179055518692917f93baa6efbd2244243bfee6ce4cfdd1d04fc4c0e9a786abd3a41313bd352db15391a450505050565b600160a060020a03818116600090815260016020526040812054909116151561059a5750806105b7565b50600160a060020a03808216600090815260016020526040902054165b919050565b336105c683610570565b600160a060020a031614610624576040805160e560020a62461bcd02815260206004820152600f60248201527f4e6f7420746865206d616e616765720000000000000000000000000000000000604482015290519081900360640190fd5b81600160a060020a031681600160a060020a0316146106435780610646565b60005b600160a060020a03838116600081815260016020526040808220805473ffffffffffffffffffffffffffffffffffffffff19169585169590951790945592519184169290917f605c2dbf762e5f7d60a546d42e7205dcb1b011ebc62a61736a57c9089d3a43509190a35050565b600082826040516020018083838082843780830192505050925050506040516020818303038152906040528051906020012090505b92915050565b6106f882826107ef565b610703576000610705565b815b600160a060020a03928316600081815260208181526040808320600160e060020a031996909616808452958252808320805473ffffffffffffffffffffffffffffffffffffffff19169590971694909417909555908152600284528181209281529190925220805460ff19166001179055565b600080600160a060020a038416156107905783610792565b335b905061079d8361092a
565b156107c357826107ad82826108aa565b6107b85760006107ba565b815b925050506106e8565b600160a060020a0390811660009081526020818152604080832086845290915290205416905092915050565b6000808061081d857f01ffc9a70000000000000000000000000000000000000000000000000000000061094c565b909250905081158061082d575080155b1561083d576000925050506106e8565b61084f85600160e060020a031961094c565b909250905081158061086057508015155b15610870576000925050506106e8565b61087a858561094c565b909250905060018214801561088f5750806001145b1561089f576001925050506106e8565b506000949350505050565b600160a060020a0382166000908152600260209081526040808320600160e060020a03198516845290915281205460ff1615156108f2576108eb83836107ef565b90506106e8565b50600160a060020a03808316600081815260208181526040808320600160e060020a0319871684529091529020549091161492915050565b7bffffffffffffffffffffffffffffffffffffffffffffffffffffffff161590565b6040517f01ffc9a7000000000000000000000000000000000000000000000000000000008082526004820183905260009182919060208160248189617530fa90519096909550935050505056fea165627a7a72305820377f4a2d4301ede9949f163f319021a6e9c687c292a5e2b2c4734c126b524e6c00291ba01820182018201820182018201820182018201820182018201820182018201820a01820182018201820182018201820182018201820182018201820182018201820", "0xa990077c3205cbDf861e17Fa532eeB069cE9fF96"}, // EIP-1820 https://eips.ethereum.org/EIPS/eip-1820 + 
{"f9016c8085174876e8008303c4d88080b90154608060405234801561001057600080fd5b50610134806100206000396000f3fe6080604052348015600f57600080fd5b506004361060285760003560e01c80634af63f0214602d575b600080fd5b60cf60048036036040811015604157600080fd5b810190602081018135640100000000811115605b57600080fd5b820183602082011115606c57600080fd5b80359060200191846001830284011164010000000083111715608d57600080fd5b91908080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250929550509135925060eb915050565b604080516001600160a01b039092168252519081900360200190f35b6000818351602085016000f5939250505056fea26469706673582212206b44f8a82cb6b156bfcc3dc6aadd6df4eefd204bc928a4397fd15dacf6d5320564736f6c634300060200331b83247000822470", "0xBb6e024b9cFFACB947A71991E386681B1Cd1477D"}, // https://github.com/status-im/EIPs/blob/singleton-factory/EIPS/eip-2470.md + } { + t.Run(fmt.Sprintf("nicks method %d", i), func(t *testing.T) { + signer := types.HomesteadSigner{} + + var tx *types.Transaction + err := rlp.DecodeBytes(common.Hex2Bytes(test.txRlp), &tx) + require.NoError(t, err) + + txHash := tx.Hash() + from, err := types.Sender(signer, tx) + require.NoError(t, err, "tx hash %s", txHash) + + addr := common.HexToAddress(test.addr) + assert.Equal(t, addr, from, "tx hash %s", txHash) + + assert.True(t, backend.UnprotectedAllowed(tx), "tx hash %s", txHash) + }) + } +} diff --git a/coreth/eth/backend.go b/coreth/eth/backend.go index 7dd9b1a6..db74165f 100644 --- a/coreth/eth/backend.go +++ b/coreth/eth/backend.go @@ -30,7 +30,6 @@ package eth import ( "errors" "fmt" - "strings" "sync" "time" @@ -126,18 +125,6 @@ func New( if chainDb == nil { return nil, errors.New("chainDb cannot be nil") } - if !config.Pruning && config.TrieDirtyCache > 0 { - // If snapshots are enabled, allocate 2/5 of the TrieDirtyCache memory cap to the snapshot cache - if config.SnapshotCache > 0 { - config.TrieCleanCache += config.TrieDirtyCache * 3 / 5 - config.SnapshotCache += config.TrieDirtyCache * 2 / 
5 - } else { - // If snapshots are disabled, the TrieDirtyCache will be written through to the clean cache - // so move the cache allocation from the dirty cache to the clean cache - config.TrieCleanCache += config.TrieDirtyCache - config.TrieDirtyCache = 0 - } - } // round TrieCleanCache and SnapshotCache up to nearest 64MB, since fastcache will mmap // memory in 64MBs chunks. @@ -145,23 +132,12 @@ func New( config.SnapshotCache = roundUpCacheSize(config.SnapshotCache, 64) log.Info( - "Allocated trie memory caches", - "clean", common.StorageSize(config.TrieCleanCache)*1024*1024, - "dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024, + "Allocated memory caches", + "trie clean", common.StorageSize(config.TrieCleanCache)*1024*1024, + "trie dirty", common.StorageSize(config.TrieDirtyCache)*1024*1024, + "snapshot clean", common.StorageSize(config.SnapshotCache)*1024*1024, ) - chainConfig, genesisErr := core.SetupGenesisBlock(chainDb, config.Genesis, lastAcceptedHash) - if genesisErr != nil { - return nil, genesisErr - } - log.Info("") - log.Info(strings.Repeat("-", 153)) - for _, line := range strings.Split(chainConfig.String(), "\n") { - log.Info(line) - } - log.Info(strings.Repeat("-", 153)) - log.Info("") - // Note: RecoverPruning must be called to handle the case that we are midway through offline pruning. // If the data directory is changed in between runs preventing RecoverPruning from performing its job correctly, // it may cause DB corruption. 
@@ -171,6 +147,7 @@ func New( if err := pruner.RecoverPruning(config.OfflinePruningDataDirectory, chainDb); err != nil { log.Error("Failed to recover state", "error", err) } + eth := &Ethereum{ config: config, chainDb: chainDb, @@ -208,6 +185,8 @@ func New( } cacheConfig = &core.CacheConfig{ TrieCleanLimit: config.TrieCleanCache, + TrieCleanJournal: config.TrieCleanJournal, + TrieCleanRejournal: config.TrieCleanRejournal, TrieDirtyLimit: config.TrieDirtyCache, TrieDirtyCommitTarget: config.TrieDirtyCommitTarget, Pruning: config.Pruning, @@ -222,6 +201,8 @@ func New( SnapshotVerify: config.SnapshotVerify, SkipSnapshotRebuild: config.SkipSnapshotRebuild, Preimages: config.Preimages, + AcceptedCacheSize: config.AcceptedCacheSize, + TxLookupLimit: config.TxLookupLimit, } ) @@ -230,33 +211,38 @@ func New( } var err error - eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, chainConfig, eth.engine, vmConfig, lastAcceptedHash) + eth.blockchain, err = core.NewBlockChain(chainDb, cacheConfig, config.Genesis, eth.engine, vmConfig, lastAcceptedHash, config.SkipUpgradeCheck) if err != nil { return nil, err } - if err := eth.handleOfflinePruning(cacheConfig, chainConfig, vmConfig, lastAcceptedHash); err != nil { + if err := eth.handleOfflinePruning(cacheConfig, config.Genesis, vmConfig, lastAcceptedHash); err != nil { return nil, err } eth.bloomIndexer.Start(eth.blockchain) config.TxPool.Journal = "" - eth.txPool = core.NewTxPool(config.TxPool, chainConfig, eth.blockchain) + eth.txPool = core.NewTxPool(config.TxPool, eth.blockchain.Config(), eth.blockchain) + + eth.miner = miner.New(eth, &config.Miner, eth.blockchain.Config(), eth.EventMux(), eth.engine, clock) - eth.miner = miner.New(eth, &config.Miner, chainConfig, eth.EventMux(), eth.engine, clock) + allowUnprotectedTxHashes := make(map[common.Hash]struct{}) + for _, txHash := range config.AllowUnprotectedTxHashes { + allowUnprotectedTxHashes[txHash] = struct{}{} + } eth.APIBackend = &EthAPIBackend{ - 
extRPCEnabled: stack.Config().ExtRPCEnabled(), - allowUnprotectedTxs: config.AllowUnprotectedTxs, - eth: eth, + extRPCEnabled: stack.Config().ExtRPCEnabled(), + allowUnprotectedTxs: config.AllowUnprotectedTxs, + allowUnprotectedTxHashes: allowUnprotectedTxHashes, + eth: eth, } if config.AllowUnprotectedTxs { log.Info("Unprotected transactions allowed") } gpoParams := config.GPO - eth.APIBackend.gpo = gasprice.NewOracle(eth.APIBackend, gpoParams) - + eth.APIBackend.gpo, err = gasprice.NewOracle(eth.APIBackend, gpoParams) if err != nil { return nil, err } @@ -284,10 +270,8 @@ func (s *Ethereum) APIs() []rpc.API { apis = append(apis, s.stackRPCs...) // Create [filterSystem] with the log cache size set in the config. - ethcfg := s.APIBackend.eth.config filterSystem := filters.NewFilterSystem(s.APIBackend, filters.Config{ - LogCacheSize: ethcfg.FilterLogCacheSize, - Timeout: 5 * time.Minute, + Timeout: 5 * time.Minute, }) // Append all the local APIs and return @@ -423,7 +407,7 @@ func (s *Ethereum) precheckPopulateMissingTries() error { return nil } -func (s *Ethereum) handleOfflinePruning(cacheConfig *core.CacheConfig, chainConfig *params.ChainConfig, vmConfig vm.Config, lastAcceptedHash common.Hash) error { +func (s *Ethereum) handleOfflinePruning(cacheConfig *core.CacheConfig, gspec *core.Genesis, vmConfig vm.Config, lastAcceptedHash common.Hash) error { if s.config.OfflinePruning && !s.config.Pruning { return core.ErrRefuseToCorruptArchiver } @@ -464,7 +448,7 @@ func (s *Ethereum) handleOfflinePruning(cacheConfig *core.CacheConfig, chainConf } // Note: Time Marker is written inside of [Prune] before compaction begins // (considered an optional optimization) - s.blockchain, err = core.NewBlockChain(s.chainDb, cacheConfig, chainConfig, s.engine, vmConfig, lastAcceptedHash) + s.blockchain, err = core.NewBlockChain(s.chainDb, cacheConfig, gspec, s.engine, vmConfig, lastAcceptedHash, s.config.SkipUpgradeCheck) if err != nil { return fmt.Errorf("failed to re-initialize 
blockchain after offline pruning: %w", err) } diff --git a/coreth/eth/ethconfig/config.go b/coreth/eth/ethconfig/config.go index bb9c2671..740cc5c3 100644 --- a/coreth/eth/ethconfig/config.go +++ b/coreth/eth/ethconfig/config.go @@ -27,11 +27,14 @@ package ethconfig import ( + "os" + "strconv" "time" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/eth/gasprice" "github.com/ava-labs/coreth/miner" + "github.com/ethereum/go-ethereum/common" ) // DefaultFullGPOConfig contains default gasprice oracle settings for full node. @@ -60,17 +63,26 @@ var DefaultFullGPOSgbConfig = gasprice.Config{ // DefaultConfig contains default settings for use on the Avalanche main net. var DefaultConfig = NewDefaultConfig() +func init() { + // Set the gas price percentile from the environment variable "GAS_PRICE_PERCENTILE" + if gasPricePercentileStr := os.Getenv("GAS_PRICE_PERCENTILE"); gasPricePercentileStr != "" { + gasPricePercentile, err := strconv.Atoi(gasPricePercentileStr) + if err != nil || gasPricePercentile < 0 || gasPricePercentile > 100 { + panic("GAS_PRICE_PERCENTILE must be a value between 0 and 100") + } + DefaultFullGPOConfig.Percentile = gasPricePercentile + DefaultFullGPOSgbConfig.Percentile = gasPricePercentile + } +} + func NewDefaultConfig() Config { return Config{ NetworkId: 1, - LightPeers: 100, - UltraLightFraction: 75, - DatabaseCache: 512, - TrieCleanCache: 256, + TrieCleanCache: 512, TrieDirtyCache: 256, TrieDirtyCommitTarget: 20, - SnapshotCache: 128, - FilterLogCacheSize: 32, + SnapshotCache: 256, + AcceptedCacheSize: 32, Miner: miner.Config{}, TxPool: core.DefaultTxPoolConfig, RPCGasCap: 25000000, @@ -83,13 +95,11 @@ func NewDefaultConfig() Config { func NewDefaultSgbConfig() Config { return Config{ NetworkId: 1, - LightPeers: 100, - UltraLightFraction: 75, - DatabaseCache: 512, - TrieCleanCache: 256, + TrieCleanCache: 512, TrieDirtyCache: 256, TrieDirtyCommitTarget: 20, - SnapshotCache: 128, + SnapshotCache: 256, + AcceptedCacheSize: 32, Miner: 
miner.Config{}, TxPool: core.DefaultTxPoolConfig, RPCGasCap: 25000000, @@ -110,10 +120,6 @@ type Config struct { // Protocol options NetworkId uint64 // Network ID to use for selecting peers to connect to - // This can be set to list of enrtree:// URLs which will be queried for - // for nodes to connect to. - DiscoveryURLs []string - Pruning bool // Whether to disable pruning and flush everything to disk AcceptorQueueLimit int // Maximum blocks to queue before blocking during acceptance CommitInterval uint64 // If pruning is enabled, specified the interval at which to commit an entire trie to disk. @@ -125,32 +131,21 @@ type Config struct { SnapshotVerify bool // Whether to verify generated snapshots SkipSnapshotRebuild bool // Whether to skip rebuilding the snapshot in favor of returning an error (only set to true for tests) - // Light client options - LightServ int `toml:",omitempty"` // Maximum percentage of time allowed for serving LES requests - LightIngress int `toml:",omitempty"` // Incoming bandwidth limit for light servers - LightEgress int `toml:",omitempty"` // Outgoing bandwidth limit for light servers - LightPeers int `toml:",omitempty"` // Maximum number of LES client peers - LightNoPrune bool `toml:",omitempty"` // Whether to disable light chain pruning - - // Ultra Light client options - UltraLightServers []string `toml:",omitempty"` // List of trusted ultra light servers - UltraLightFraction int `toml:",omitempty"` // Percentage of trusted servers to accept an announcement - UltraLightOnlyAnnounce bool `toml:",omitempty"` // Whether to only announce headers, or also serve them - // Database options SkipBcVersionCheck bool `toml:"-"` - DatabaseHandles int `toml:"-"` - DatabaseCache int - // DatabaseFreezer string + // TrieDB and snapshot options TrieCleanCache int + TrieCleanJournal string + TrieCleanRejournal time.Duration TrieDirtyCache int TrieDirtyCommitTarget int SnapshotCache int Preimages bool - // This is the number of blocks for which logs 
will be cached in the filter system. - FilterLogCacheSize int + // AcceptedCacheSize is the depth of accepted headers cache and accepted + // logs cache at the accepted tip. + AcceptedCacheSize int // Mining options Miner miner.Config @@ -164,9 +159,6 @@ type Config struct { // Enables tracking of SHA3 preimages in the VM EnablePreimageRecording bool - // Miscellaneous options - DocRoot string `toml:"-"` - // RPCGasCap is the global gas cap for eth-call variants. RPCGasCap uint64 `toml:",omitempty"` @@ -184,10 +176,25 @@ type Config struct { // Unprotected transactions are transactions that are signed without EIP-155 // replay protection. AllowUnprotectedTxs bool + // AllowUnprotectedTxHashes provides a list of transaction hashes, which will be allowed + // to be issued without replay protection over the API even if AllowUnprotectedTxs is false. + AllowUnprotectedTxHashes []common.Hash // OfflinePruning enables offline pruning on startup of the node. If a node is started // with this configuration option, it must finish pruning before resuming normal operation. OfflinePruning bool OfflinePruningBloomFilterSize uint64 OfflinePruningDataDirectory string + + // SkipUpgradeCheck disables checking that upgrades must take place before the last + // accepted block. Skipping this check is useful when a node operator does not update + // their node before the network upgrade and their node accepts blocks that have + // identical state with the pre-upgrade ruleset. + SkipUpgradeCheck bool + + // TxLookupLimit is the maximum number of blocks from head whose tx indices + // are reserved: + // * 0: means no limit + // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes + TxLookupLimit uint64 } diff --git a/coreth/eth/filters/bench_test.go b/coreth/eth/filters/bench_test.go new file mode 100644 index 00000000..7100e9fc --- /dev/null +++ b/coreth/eth/filters/bench_test.go @@ -0,0 +1,201 @@ +// (c) 2019-2022, Ava Labs, Inc. 
+// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package filters + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/ava-labs/coreth/core/bloombits" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/ethdb" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/bitutil" + "github.com/stretchr/testify/require" +) + +func BenchmarkBloomBits512(b *testing.B) { + benchmarkBloomBits(b, 512) +} + +func BenchmarkBloomBits1k(b *testing.B) { + benchmarkBloomBits(b, 1024) +} + +func BenchmarkBloomBits2k(b *testing.B) { + benchmarkBloomBits(b, 2048) +} + +func BenchmarkBloomBits4k(b *testing.B) { + benchmarkBloomBits(b, 4096) +} + +func BenchmarkBloomBits8k(b *testing.B) { + benchmarkBloomBits(b, 8192) +} + +func BenchmarkBloomBits16k(b *testing.B) { + benchmarkBloomBits(b, 16384) +} + +func BenchmarkBloomBits32k(b *testing.B) { + benchmarkBloomBits(b, 32768) +} + +const benchFilterCnt = 2000 + +func benchmarkBloomBits(b *testing.B, sectionSize uint64) { + b.Skip("test disabled: this tests presume (and modify) an existing datadir.") + benchDataDir := b.TempDir() + "/coreth/chaindata" + b.Log("Running bloombits benchmark section size:", sectionSize) + + db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) + if err != nil { + b.Fatalf("error opening database at %v: %v", benchDataDir, err) + } + head := rawdb.ReadHeadBlockHash(db) + if head == (common.Hash{}) { + b.Fatalf("chain data not found at %v", benchDataDir) + } + + clearBloomBits(db) + b.Log("Generating bloombits data...") + headNum := rawdb.ReadHeaderNumber(db, head) + if headNum == nil || *headNum < sectionSize+512 { + b.Fatalf("not enough blocks for running a benchmark") + } + + start := time.Now() + cnt := (*headNum - 512) / sectionSize + var dataSize, compSize uint64 + for sectionIdx := uint64(0); sectionIdx < cnt; sectionIdx++ { + bc, err := bloombits.NewGenerator(uint(sectionSize)) + if err != nil { + b.Fatalf("failed to create 
generator: %v", err) + } + var header *types.Header + for i := sectionIdx * sectionSize; i < (sectionIdx+1)*sectionSize; i++ { + hash := rawdb.ReadCanonicalHash(db, i) + if header = rawdb.ReadHeader(db, hash, i); header == nil { + b.Fatalf("Error creating bloomBits data") + return + } + bc.AddBloom(uint(i-sectionIdx*sectionSize), header.Bloom) + } + sectionHead := rawdb.ReadCanonicalHash(db, (sectionIdx+1)*sectionSize-1) + for i := 0; i < types.BloomBitLength; i++ { + data, err := bc.Bitset(uint(i)) + if err != nil { + b.Fatalf("failed to retrieve bitset: %v", err) + } + comp := bitutil.CompressBytes(data) + dataSize += uint64(len(data)) + compSize += uint64(len(comp)) + rawdb.WriteBloomBits(db, uint(i), sectionIdx, sectionHead, comp) + } + //if sectionIdx%50 == 0 { + // b.Log(" section", sectionIdx, "/", cnt) + //} + } + + d := time.Since(start) + b.Log("Finished generating bloombits data") + b.Log(" ", d, "total ", d/time.Duration(cnt*sectionSize), "per block") + b.Log(" data size:", dataSize, " compressed size:", compSize, " compression ratio:", float64(compSize)/float64(dataSize)) + + b.Log("Running filter benchmarks...") + start = time.Now() + + var ( + backend *testBackend + sys *FilterSystem + ) + for i := 0; i < benchFilterCnt; i++ { + if i%20 == 0 { + db.Close() + db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) + backend = &testBackend{db: db, sections: cnt} + sys = NewFilterSystem(backend, Config{}) + } + var addr common.Address + addr[0] = byte(i) + addr[1] = byte(i / 256) + filter, err := sys.NewRangeFilter(0, int64(cnt*sectionSize-1), []common.Address{addr}, nil) + require.NoError(b, err) + if _, err := filter.Logs(context.Background()); err != nil { + b.Error("filter.Logs error:", err) + } + } + + d = time.Since(start) + b.Log("Finished running filter benchmarks") + b.Log(" ", d, "total ", d/time.Duration(benchFilterCnt), "per address", d*time.Duration(1000000)/time.Duration(benchFilterCnt*cnt*sectionSize), "per million blocks") + 
db.Close() +} + +//nolint:unused +func clearBloomBits(db ethdb.Database) { + var bloomBitsPrefix = []byte("bloomBits-") + fmt.Println("Clearing bloombits data...") + it := db.NewIterator(bloomBitsPrefix, nil) + for it.Next() { + db.Delete(it.Key()) + } + it.Release() +} + +func BenchmarkNoBloomBits(b *testing.B) { + b.Skip("test disabled: this tests presume (and modify) an existing datadir.") + benchDataDir := b.TempDir() + "/coreth/chaindata" + b.Log("Running benchmark without bloombits") + db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) + if err != nil { + b.Fatalf("error opening database at %v: %v", benchDataDir, err) + } + head := rawdb.ReadHeadBlockHash(db) + if head == (common.Hash{}) { + b.Fatalf("chain data not found at %v", benchDataDir) + } + headNum := rawdb.ReadHeaderNumber(db, head) + + clearBloomBits(db) + + _, sys := newTestFilterSystem(b, db, Config{}) + + b.Log("Running filter benchmarks...") + start := time.Now() + filter, err := sys.NewRangeFilter(0, int64(*headNum), []common.Address{{}}, nil) + require.NoError(b, err) + filter.Logs(context.Background()) + d := time.Since(start) + b.Log("Finished running filter benchmarks") + b.Log(" ", d, "total ", d*time.Duration(1000000)/time.Duration(*headNum+1), "per million blocks") + db.Close() +} diff --git a/coreth/eth/filters/filter.go b/coreth/eth/filters/filter.go index 02b0d57e..17c7b104 100644 --- a/coreth/eth/filters/filter.go +++ b/coreth/eth/filters/filter.go @@ -45,8 +45,8 @@ type Filter struct { addresses []common.Address topics [][]common.Hash - block common.Hash // Block hash if filtering a single block - begin, end int64 // Range interval if filtering multiple blocks + block *common.Hash // Block hash if filtering a single block + begin, end int64 // Range interval if filtering multiple blocks matcher *bloombits.Matcher } @@ -102,7 +102,7 @@ func (sys *FilterSystem) NewRangeFilter(begin, end int64, addresses []common.Add func (sys *FilterSystem) NewBlockFilter(block 
common.Hash, addresses []common.Address, topics [][]common.Hash) *Filter { // Create a generic filter and convert it into a block filter filter := newFilter(sys, addresses, topics) - filter.block = block + filter.block = &block return filter } @@ -120,8 +120,8 @@ func newFilter(sys *FilterSystem, addresses []common.Address, topics [][]common. // first block that contains matches, updating the start of the filter accordingly. func (f *Filter) Logs(ctx context.Context) ([]*types.Log, error) { // If we're doing singleton block filtering, execute and return - if f.block != (common.Hash{}) { - header, err := f.sys.backend.HeaderByHash(ctx, f.block) + if f.block != nil { + header, err := f.sys.backend.HeaderByHash(ctx, *f.block) if err != nil { return nil, err } @@ -264,11 +264,11 @@ func (f *Filter) unindexedLogs(ctx context.Context, end uint64) ([]*types.Log, e func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom bool) ([]*types.Log, error) { // Fast track: no filtering criteria if len(f.addresses) == 0 && len(f.topics) == 0 { - list, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64()) + list, err := f.sys.getLogs(ctx, header.Hash(), header.Number.Uint64()) if err != nil { return nil, err } - return flatten(list), nil + return types.FlattenLogs(list), nil } else if skipBloom || bloomFilter(header.Bloom, f.addresses, f.topics) { return f.checkMatches(ctx, header) } @@ -278,12 +278,12 @@ func (f *Filter) blockLogs(ctx context.Context, header *types.Header, skipBloom // checkMatches checks if the receipts belonging to the given header contain any log events that // match the filter criteria. This function is called when the bloom filter signals a potential match. 
func (f *Filter) checkMatches(ctx context.Context, header *types.Header) ([]*types.Log, error) { - logsList, err := f.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64()) + logsList, err := f.sys.getLogs(ctx, header.Hash(), header.Number.Uint64()) if err != nil { return nil, err } - unfiltered := flatten(logsList) + unfiltered := types.FlattenLogs(logsList) logs := filterLogs(unfiltered, nil, nil, f.addresses, f.topics) if len(logs) > 0 { // We have matching logs, check if we need to resolve full logs via the light client @@ -377,11 +377,3 @@ func bloomFilter(bloom types.Bloom, addresses []common.Address, topics [][]commo } return true } - -func flatten(list [][]*types.Log) []*types.Log { - var flat []*types.Log - for _, logs := range list { - flat = append(flat, logs...) - } - return flat -} diff --git a/coreth/eth/filters/filter_system.go b/coreth/eth/filters/filter_system.go index 50191198..1b5e33fc 100644 --- a/coreth/eth/filters/filter_system.go +++ b/coreth/eth/filters/filter_system.go @@ -45,22 +45,17 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" - lru "github.com/hashicorp/golang-lru" ) // Config represents the configuration of the filter system. type Config struct { - LogCacheSize int // maximum number of cached blocks (default: 32) - Timeout time.Duration // how long filters stay active (default: 5min) + Timeout time.Duration // how long filters stay active (default: 5min) } func (cfg Config) withDefaults() Config { if cfg.Timeout == 0 { cfg.Timeout = 5 * time.Minute } - if cfg.LogCacheSize == 0 { - cfg.LogCacheSize = 32 - } return cfg } @@ -93,33 +88,22 @@ type Backend interface { // FilterSystem holds resources shared by all filters. type FilterSystem struct { - backend Backend - logsCache *lru.Cache - cfg *Config + backend Backend + cfg *Config } // NewFilterSystem creates a filter system. 
func NewFilterSystem(backend Backend, config Config) *FilterSystem { config = config.withDefaults() - - cache, err := lru.New(config.LogCacheSize) - if err != nil { - panic(err) - } return &FilterSystem{ - backend: backend, - logsCache: cache, - cfg: &config, + backend: backend, + cfg: &config, } } -// cachedGetLogs loads block logs from the backend and caches the result. -func (sys *FilterSystem) cachedGetLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { - cached, ok := sys.logsCache.Get(blockHash) - if ok { - return cached.([][]*types.Log), nil - } - +// getLogs loads block logs from the backend. The backend is responsible for +// performing any log caching. +func (sys *FilterSystem) getLogs(ctx context.Context, blockHash common.Hash, number uint64) ([][]*types.Log, error) { logs, err := sys.backend.GetLogs(ctx, blockHash, number) if err != nil { return nil, err @@ -127,7 +111,6 @@ func (sys *FilterSystem) cachedGetLogs(ctx context.Context, blockHash common.Has if logs == nil { return nil, fmt.Errorf("failed to get logs for block #%d (0x%s)", number, blockHash.TerminalString()) } - sys.logsCache.Add(blockHash, logs) return logs, nil } @@ -626,7 +609,7 @@ func (es *EventSystem) lightFilterLogs(header *types.Header, addresses []common. // Get the logs of the block ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() - logsList, err := es.sys.cachedGetLogs(ctx, header.Hash(), header.Number.Uint64()) + logsList, err := es.sys.getLogs(ctx, header.Hash(), header.Number.Uint64()) if err != nil { return nil } diff --git a/coreth/eth/filters/filter_system_test.go b/coreth/eth/filters/filter_system_test.go new file mode 100644 index 00000000..5b708e77 --- /dev/null +++ b/coreth/eth/filters/filter_system_test.go @@ -0,0 +1,783 @@ +// (c) 2019-2022, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. 
+// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package filters + +import ( + "context" + "fmt" + "math/big" + "math/rand" + "reflect" + "runtime" + "testing" + "time" + + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/bloombits" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/ethdb" + "github.com/ava-labs/coreth/interfaces" + "github.com/ava-labs/coreth/params" + "github.com/ava-labs/coreth/rpc" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/event" + "github.com/stretchr/testify/require" +) + +type testBackend struct { + db ethdb.Database + sections uint64 + txFeed event.Feed + acceptedTxFeed event.Feed + logsFeed event.Feed + rmLogsFeed event.Feed + pendingLogsFeed event.Feed + chainFeed event.Feed +} + +func (b *testBackend) ChainDb() ethdb.Database { + return b.db +} + +func (b *testBackend) GetVMConfig() *vm.Config { + return 
&vm.Config{AllowUnfinalizedQueries: true} +} + +func (b *testBackend) GetMaxBlocksPerRequest() int64 { + return 0 +} + +func (b *testBackend) LastAcceptedBlock() *types.Block { + return rawdb.ReadHeadBlock(b.db) +} + +func (b *testBackend) HeaderByNumber(ctx context.Context, blockNr rpc.BlockNumber) (*types.Header, error) { + var ( + hash common.Hash + num uint64 + ) + if blockNr == rpc.LatestBlockNumber { + hash = rawdb.ReadHeadBlockHash(b.db) + number := rawdb.ReadHeaderNumber(b.db, hash) + if number == nil { + return nil, nil + } + num = *number + } else { + num = uint64(blockNr) + hash = rawdb.ReadCanonicalHash(b.db, num) + } + return rawdb.ReadHeader(b.db, hash, num), nil +} + +func (b *testBackend) HeaderByHash(ctx context.Context, hash common.Hash) (*types.Header, error) { + number := rawdb.ReadHeaderNumber(b.db, hash) + if number == nil { + return nil, nil + } + return rawdb.ReadHeader(b.db, hash, *number), nil +} + +func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) { + if number := rawdb.ReadHeaderNumber(b.db, hash); number != nil { + return rawdb.ReadReceipts(b.db, hash, *number, params.TestChainConfig), nil + } + return nil, nil +} + +func (b *testBackend) GetLogs(ctx context.Context, hash common.Hash, number uint64) ([][]*types.Log, error) { + logs := rawdb.ReadLogs(b.db, hash, number) + return logs, nil +} + +func (b *testBackend) SubscribeNewTxsEvent(ch chan<- core.NewTxsEvent) event.Subscription { + return b.txFeed.Subscribe(ch) +} + +func (b *testBackend) SubscribeRemovedLogsEvent(ch chan<- core.RemovedLogsEvent) event.Subscription { + return b.rmLogsFeed.Subscribe(ch) +} + +func (b *testBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscription { + return b.logsFeed.Subscribe(ch) +} + +func (b *testBackend) SubscribePendingLogsEvent(ch chan<- []*types.Log) event.Subscription { + return b.pendingLogsFeed.Subscribe(ch) +} + +func (b *testBackend) SubscribeAcceptedLogsEvent(ch chan<- 
[]*types.Log) event.Subscription { + return b.logsFeed.Subscribe(ch) +} + +func (b *testBackend) SubscribeAcceptedTransactionEvent(ch chan<- core.NewTxsEvent) event.Subscription { + return b.acceptedTxFeed.Subscribe(ch) +} + +func (b *testBackend) SubscribeChainEvent(ch chan<- core.ChainEvent) event.Subscription { + return b.chainFeed.Subscribe(ch) +} + +func (b *testBackend) SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) event.Subscription { + return b.chainFeed.Subscribe(ch) +} + +func (b *testBackend) BloomStatus() (uint64, uint64) { + return params.BloomBitsBlocks, b.sections +} + +func (b *testBackend) ServiceFilter(ctx context.Context, session *bloombits.MatcherSession) { + requests := make(chan chan *bloombits.Retrieval) + + go session.Multiplex(16, 0, requests) + go func() { + for { + // Wait for a service request or a shutdown + select { + case <-ctx.Done(): + return + + case request := <-requests: + task := <-request + + task.Bitsets = make([][]byte, len(task.Sections)) + for i, section := range task.Sections { + if rand.Int()%4 != 0 { // Handle occasional missing deliveries + head := rawdb.ReadCanonicalHash(b.db, (section+1)*params.BloomBitsBlocks-1) + task.Bitsets[i], _ = rawdb.ReadBloomBits(b.db, task.Bit, section, head) + } + } + request <- task + } + } + }() +} + +func newTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config) (*testBackend, *FilterSystem) { + backend := &testBackend{db: db} + sys := NewFilterSystem(backend, cfg) + return backend, sys +} + +func newSectionedTestFilterSystem(t testing.TB, db ethdb.Database, cfg Config, sections uint64) (*testBackend, *FilterSystem) { + backend := &testBackend{db: db, sections: sections} + sys := NewFilterSystem(backend, cfg) + return backend, sys +} + +func TestBlockSubscription(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + genesis = &core.Genesis{ + Config: 
params.TestChainConfig, + BaseFee: big.NewInt(params.ApricotPhase4MinBaseFee), + } + _, chain, _, _ = core.GenerateChainWithGenesis(genesis, dummy.NewFaker(), 10, 10, func(i int, b *core.BlockGen) {}) + chainEvents = []core.ChainEvent{} + ) + + for _, blk := range chain { + chainEvents = append(chainEvents, core.ChainEvent{Hash: blk.Hash(), Block: blk}) + } + + chan0 := make(chan *types.Header) + sub0 := api.events.SubscribeNewHeads(chan0) + chan1 := make(chan *types.Header) + sub1 := api.events.SubscribeNewHeads(chan1) + + go func() { // simulate client + i1, i2 := 0, 0 + for i1 != len(chainEvents) || i2 != len(chainEvents) { + select { + case header := <-chan0: + if chainEvents[i1].Hash != header.Hash() { + t.Errorf("sub0 received invalid hash on index %d, want %x, got %x", i1, chainEvents[i1].Hash, header.Hash()) + } + i1++ + case header := <-chan1: + if chainEvents[i2].Hash != header.Hash() { + t.Errorf("sub1 received invalid hash on index %d, want %x, got %x", i2, chainEvents[i2].Hash, header.Hash()) + } + i2++ + } + } + + sub0.Unsubscribe() + sub1.Unsubscribe() + }() + + time.Sleep(1 * time.Second) + for _, e := range chainEvents { + backend.chainFeed.Send(e) + } + + <-sub0.Err() + <-sub1.Err() +} + +// TestPendingTxFilter tests whether pending tx filters retrieve all pending transactions that are posted to the event mux. 
+func TestPendingTxFilter(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + + transactions = []*types.Transaction{ + types.NewTransaction(0, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(1, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(2, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(3, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + types.NewTransaction(4, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil), + } + + hashes []common.Hash + ) + + fid0 := api.NewPendingTransactionFilter() + + time.Sleep(1 * time.Second) + backend.txFeed.Send(core.NewTxsEvent{Txs: transactions}) + + timeout := time.Now().Add(1 * time.Second) + for { + results, err := api.GetFilterChanges(fid0) + if err != nil { + t.Fatalf("Unable to retrieve logs: %v", err) + } + + h := results.([]common.Hash) + hashes = append(hashes, h...) + if len(hashes) >= len(transactions) { + break + } + // check timeout + if time.Now().After(timeout) { + break + } + + time.Sleep(100 * time.Millisecond) + } + + if len(hashes) != len(transactions) { + t.Errorf("invalid number of transactions, want %d transactions(s), got %d", len(transactions), len(hashes)) + return + } + for i := range hashes { + if hashes[i] != transactions[i].Hash() { + t.Errorf("hashes[%d] invalid, want %x, got %x", i, transactions[i].Hash(), hashes[i]) + } + } +} + +// TestLogFilterCreation test whether a given filter criteria makes sense. +// If not it must return an error. 
+func TestLogFilterCreation(t *testing.T) { + var ( + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + + testCases = []struct { + crit FilterCriteria + success bool + }{ + // defaults + {FilterCriteria{}, true}, + // valid block number range + {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, true}, + // "mined" block range to pending + {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, true}, + // new mined and pending blocks + {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, true}, + // from block "higher" than to block + {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(1)}, false}, + // from block "higher" than to block + {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, + // from block "higher" than to block + {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, false}, + // from block "higher" than to block + {FilterCriteria{FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, false}, + } + ) + + for i, test := range testCases { + id, err := api.NewFilter(test.crit) + if err != nil && test.success { + t.Errorf("expected filter creation for case %d to success, got %v", i, err) + } + if err == nil { + api.UninstallFilter(id) + if !test.success { + t.Errorf("expected testcase %d to fail with an error", i) + } + } + } +} + +// TestInvalidLogFilterCreation tests whether invalid filter log criteria results in an error +// when the filter is created. 
+func TestInvalidLogFilterCreation(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + ) + + // different situations where log filter creation should fail. + // Reason: fromBlock > toBlock + testCases := []FilterCriteria{ + 0: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, + 1: {FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(100)}, + 2: {FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(100)}, + } + + for i, test := range testCases { + if _, err := api.NewFilter(test); err == nil { + t.Errorf("Expected NewFilter for case #%d to fail", i) + } + } +} + +func TestInvalidGetLogsRequest(t *testing.T) { + var ( + db = rawdb.NewMemoryDatabase() + _, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + blockHash = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + ) + + // Reason: Cannot specify both BlockHash and FromBlock/ToBlock) + testCases := []FilterCriteria{ + 0: {BlockHash: &blockHash, FromBlock: big.NewInt(100)}, + 1: {BlockHash: &blockHash, ToBlock: big.NewInt(500)}, + 2: {BlockHash: &blockHash, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, + } + + for i, test := range testCases { + if _, err := api.GetLogs(context.Background(), test); err == nil { + t.Errorf("Expected Logs for case #%d to fail", i) + } + } +} + +// TestLogFilter tests whether log filters match the correct logs that are posted to the event feed. 
+func TestLogFilter(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + + firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") + secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") + thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") + notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") + firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") + notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") + + // posted twice, once as regular logs and once as pending logs. + allLogs = []*types.Log{ + {Address: firstAddr}, + {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, + {Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}, + {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 2}, + {Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}, + } + + expectedCase7 = []*types.Log{allLogs[3], allLogs[4], allLogs[0], allLogs[1], allLogs[2], allLogs[3], allLogs[4]} + expectedCase11 = []*types.Log{allLogs[1], allLogs[2], allLogs[1], allLogs[2]} + + testCases = []struct { + crit FilterCriteria + expected []*types.Log + id rpc.ID + }{ + // match all + 0: {FilterCriteria{}, allLogs, ""}, + // match none due to no matching addresses + 1: {FilterCriteria{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}}, []*types.Log{}, ""}, + // match logs based on addresses, ignore topics + 2: {FilterCriteria{Addresses: []common.Address{firstAddr}}, allLogs[:2], ""}, + // match none due to no matching topics (match with address) + 3: {FilterCriteria{Addresses: 
[]common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}}, []*types.Log{}, ""}, + // match logs based on addresses and topics + 4: {FilterCriteria{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[3:5], ""}, + // match logs based on multiple addresses and "or" topics + 5: {FilterCriteria{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}}, allLogs[2:5], ""}, + // logs in the pending block + 6: {FilterCriteria{Addresses: []common.Address{firstAddr}, FromBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, allLogs[:2], ""}, + // mined logs with block num >= 2 or pending logs + 7: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, expectedCase7, ""}, + // all "mined" logs with block num >= 2 + 8: {FilterCriteria{FromBlock: big.NewInt(2), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs[3:], ""}, + // all "mined" logs + 9: {FilterCriteria{ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, allLogs, ""}, + // all "mined" logs with 1>= block num <=2 and topic secondTopic + 10: {FilterCriteria{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2), Topics: [][]common.Hash{{secondTopic}}}, allLogs[3:4], ""}, + // all "mined" and pending logs with topic firstTopic + 11: {FilterCriteria{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64()), Topics: [][]common.Hash{{firstTopic}}}, expectedCase11, ""}, + // match all logs due to wildcard topic + 12: {FilterCriteria{Topics: [][]common.Hash{nil}}, allLogs[1:], ""}, + } + ) + + // create all filters + for i := range testCases { + testCases[i].id, _ = api.NewFilter(testCases[i].crit) + } + + // raise events + time.Sleep(1 * time.Second) + if nsend := backend.logsFeed.Send(allLogs); nsend == 0 { + t.Fatal("Logs event not delivered") + } + if nsend := 
backend.pendingLogsFeed.Send(allLogs); nsend == 0 { + t.Fatal("Pending logs event not delivered") + } + + for i, tt := range testCases { + var fetched []*types.Log + timeout := time.Now().Add(1 * time.Second) + for { // fetch all expected logs + results, err := api.GetFilterChanges(tt.id) + if err != nil { + t.Fatalf("Unable to fetch logs: %v", err) + } + + fetched = append(fetched, results.([]*types.Log)...) + if len(fetched) >= len(tt.expected) { + break + } + // check timeout + if time.Now().After(timeout) { + break + } + + time.Sleep(100 * time.Millisecond) + } + + if len(fetched) != len(tt.expected) { + t.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) + return + } + + for l := range fetched { + if fetched[l].Removed { + t.Errorf("expected log not to be removed for log %d in case %d", l, i) + } + if !reflect.DeepEqual(fetched[l], tt.expected[l]) { + t.Errorf("invalid log on index %d for case %d", l, i) + } + } + } +} + +// TestPendingLogsSubscription tests if a subscription receives the correct pending logs that are posted to the event feed. 
+func TestPendingLogsSubscription(t *testing.T) { + t.Parallel() + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{}) + api = NewFilterAPI(sys, false) + + firstAddr = common.HexToAddress("0x1111111111111111111111111111111111111111") + secondAddr = common.HexToAddress("0x2222222222222222222222222222222222222222") + thirdAddress = common.HexToAddress("0x3333333333333333333333333333333333333333") + notUsedAddress = common.HexToAddress("0x9999999999999999999999999999999999999999") + firstTopic = common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111") + secondTopic = common.HexToHash("0x2222222222222222222222222222222222222222222222222222222222222222") + thirdTopic = common.HexToHash("0x3333333333333333333333333333333333333333333333333333333333333333") + fourthTopic = common.HexToHash("0x4444444444444444444444444444444444444444444444444444444444444444") + notUsedTopic = common.HexToHash("0x9999999999999999999999999999999999999999999999999999999999999999") + + allLogs = [][]*types.Log{ + {{Address: firstAddr, Topics: []common.Hash{}, BlockNumber: 0}}, + {{Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 1}}, + {{Address: secondAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 2}}, + {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 3}}, + {{Address: thirdAddress, Topics: []common.Hash{secondTopic}, BlockNumber: 4}}, + { + {Address: thirdAddress, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, + {Address: thirdAddress, Topics: []common.Hash{thirdTopic}, BlockNumber: 5}, + {Address: thirdAddress, Topics: []common.Hash{fourthTopic}, BlockNumber: 5}, + {Address: firstAddr, Topics: []common.Hash{firstTopic}, BlockNumber: 5}, + }, + } + + pendingBlockNumber = big.NewInt(rpc.PendingBlockNumber.Int64()) + + testCases = []struct { + crit interfaces.FilterQuery + expected []*types.Log + c chan []*types.Log + sub *Subscription + err chan error + }{ + // 
match all + { + interfaces.FilterQuery{FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, + flattenLogs(allLogs), + nil, nil, nil, + }, + // match none due to no matching addresses + { + interfaces.FilterQuery{Addresses: []common.Address{{}, notUsedAddress}, Topics: [][]common.Hash{nil}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, + nil, + nil, nil, nil, + }, + // match logs based on addresses, ignore topics + { + interfaces.FilterQuery{Addresses: []common.Address{firstAddr}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, + append(flattenLogs(allLogs[:2]), allLogs[5][3]), + nil, nil, nil, + }, + // match none due to no matching topics (match with address) + { + interfaces.FilterQuery{Addresses: []common.Address{secondAddr}, Topics: [][]common.Hash{{notUsedTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, + nil, + nil, nil, nil, + }, + // match logs based on addresses and topics + { + interfaces.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, + append(flattenLogs(allLogs[3:5]), allLogs[5][0]), + nil, nil, nil, + }, + // match logs based on multiple addresses and "or" topics + { + interfaces.FilterQuery{Addresses: []common.Address{secondAddr, thirdAddress}, Topics: [][]common.Hash{{firstTopic, secondTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, + append(flattenLogs(allLogs[2:5]), allLogs[5][0]), + nil, nil, nil, + }, + // multiple pending logs, should match only 2 topics from the logs in block 5 + { + interfaces.FilterQuery{Addresses: []common.Address{thirdAddress}, Topics: [][]common.Hash{{firstTopic, fourthTopic}}, FromBlock: pendingBlockNumber, ToBlock: pendingBlockNumber}, + []*types.Log{allLogs[5][0], allLogs[5][2]}, + nil, nil, nil, + }, + // match none due to only matching new mined logs + { + interfaces.FilterQuery{}, + nil, + nil, nil, nil, + }, + // match 
none due to only matching mined logs within a specific block range + { + interfaces.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(2)}, + nil, + nil, nil, nil, + }, + // match all due to matching mined and pending logs + { + interfaces.FilterQuery{FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64()), ToBlock: big.NewInt(rpc.PendingBlockNumber.Int64())}, + flattenLogs(allLogs), + nil, nil, nil, + }, + // match none due to matching logs from a specific block number to new mined blocks + { + interfaces.FilterQuery{FromBlock: big.NewInt(1), ToBlock: big.NewInt(rpc.LatestBlockNumber.Int64())}, + nil, + nil, nil, nil, + }, + } + ) + + // create all subscriptions, this ensures all subscriptions are created before the events are posted. + // on slow machines this could otherwise lead to missing events when the subscription is created after + // (some) events are posted. + for i := range testCases { + testCases[i].c = make(chan []*types.Log) + testCases[i].err = make(chan error, 1) + + var err error + testCases[i].sub, err = api.events.SubscribeLogs(testCases[i].crit, testCases[i].c) + if err != nil { + t.Fatalf("SubscribeLogs %d failed: %v\n", i, err) + } + } + + for n, test := range testCases { + i := n + tt := test + go func() { + defer tt.sub.Unsubscribe() + + var fetched []*types.Log + + timeout := time.After(1 * time.Second) + fetchLoop: + for { + select { + case logs := <-tt.c: + // Do not break early if we've fetched greater, or equal, + // to the number of logs expected. This ensures we do not + // deadlock the filter system because it will do a blocking + // send on this channel if another log arrives. + fetched = append(fetched, logs...) 
+ case <-timeout: + break fetchLoop + } + } + + if len(fetched) != len(tt.expected) { + tt.err <- fmt.Errorf("invalid number of logs for case %d, want %d log(s), got %d", i, len(tt.expected), len(fetched)) + return + } + + for l := range fetched { + if fetched[l].Removed { + tt.err <- fmt.Errorf("expected log not to be removed for log %d in case %d", l, i) + return + } + if !reflect.DeepEqual(fetched[l], tt.expected[l]) { + tt.err <- fmt.Errorf("invalid log on index %d for case %d\n", l, i) + return + } + } + tt.err <- nil + }() + } + + // raise events + for _, ev := range allLogs { + backend.pendingLogsFeed.Send(ev) + } + + for i := range testCases { + err := <-testCases[i].err + if err != nil { + t.Fatalf("test %d failed: %v", i, err) + } + <-testCases[i].sub.Err() + } +} + +// TestPendingTxFilterDeadlock tests if the event loop hangs when pending +// txes arrive at the same time that one of multiple filters is timing out. +// Please refer to #22131 for more details. +func TestPendingTxFilterDeadlock(t *testing.T) { + t.Parallel() + timeout := 100 * time.Millisecond + + var ( + db = rawdb.NewMemoryDatabase() + backend, sys = newTestFilterSystem(t, db, Config{Timeout: timeout}) + api = NewFilterAPI(sys, false) + done = make(chan struct{}) + ) + + go func() { + // Bombard feed with txes until signal was received to stop + i := uint64(0) + for { + select { + case <-done: + return + default: + } + + tx := types.NewTransaction(i, common.HexToAddress("0xb794f5ea0ba39494ce83a213fffba74279579268"), new(big.Int), 0, new(big.Int), nil) + backend.txFeed.Send(core.NewTxsEvent{Txs: []*types.Transaction{tx}}) + i++ + } + }() + + // Create a bunch of filters that will + // timeout either in 100ms or 200ms + fids := make([]rpc.ID, 20) + for i := 0; i < len(fids); i++ { + fid := api.NewPendingTransactionFilter() + fids[i] = fid + // Wait for at least one tx to arrive in filter + for { + hashes, err := api.GetFilterChanges(fid) + if err != nil { + t.Fatalf("Filter should exist: 
%v\n", err) + } + if len(hashes.([]common.Hash)) > 0 { + break + } + runtime.Gosched() + } + } + + // Wait until filters have timed out + time.Sleep(3 * timeout) + + // If tx loop doesn't consume `done` after a second + // it's hanging. + select { + case done <- struct{}{}: + // Check that all filters have been uninstalled + for _, fid := range fids { + if _, err := api.GetFilterChanges(fid); err == nil { + t.Errorf("Filter %s should have been uninstalled\n", fid) + } + } + case <-time.After(1 * time.Second): + t.Error("Tx sending loop hangs") + } +} + +func flattenLogs(pl [][]*types.Log) []*types.Log { + var logs []*types.Log + for _, l := range pl { + logs = append(logs, l...) + } + return logs +} + +func TestGetLogsRegression(t *testing.T) { + var ( + db = rawdb.NewMemoryDatabase() + _, sys = newSectionedTestFilterSystem(t, db, Config{}, 4096) + api = NewFilterAPI(sys, false) + genesis = &core.Genesis{ + Config: params.TestChainConfig, + } + _, _, _, _ = core.GenerateChainWithGenesis(genesis, dummy.NewFaker(), 10, 10, func(i int, gen *core.BlockGen) {}) + ) + + test := FilterCriteria{BlockHash: &common.Hash{}, FromBlock: big.NewInt(rpc.LatestBlockNumber.Int64())} + + _, err := api.GetLogs(context.Background(), test) + require.Error(t, err, "unknown block") +} diff --git a/coreth/eth/filters/filter_test.go b/coreth/eth/filters/filter_test.go new file mode 100644 index 00000000..a49ecd45 --- /dev/null +++ b/coreth/eth/filters/filter_test.go @@ -0,0 +1,255 @@ +// (c) 2019-2022, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package filters + +import ( + "context" + "math/big" + "testing" + + "github.com/ava-labs/coreth/consensus/dummy" + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/rawdb" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/crypto" + "github.com/stretchr/testify/require" +) + +func makeReceipt(addr common.Address) *types.Receipt { + receipt := types.NewReceipt(nil, false, 0) + receipt.Logs = []*types.Log{ + {Address: addr}, + } + receipt.Bloom = types.CreateBloom(types.Receipts{receipt}) + return receipt +} + +func BenchmarkFilters(b *testing.B) { + var ( + db, _ = rawdb.NewLevelDBDatabase(b.TempDir(), 0, 0, "", false) + _, sys = newTestFilterSystem(b, db, Config{}) + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr1 = crypto.PubkeyToAddress(key1.PublicKey) + addr2 = common.BytesToAddress([]byte("jeff")) + addr3 = common.BytesToAddress([]byte("ethereum")) + addr4 = common.BytesToAddress([]byte("random addresses please")) + + gspec = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{addr1: {Balance: big.NewInt(1000000)}}, + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + } + ) + 
defer db.Close() + + _, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 100010, 10, func(i int, gen *core.BlockGen) { + switch i { + case 2403: + receipt := makeReceipt(addr1) + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) + case 1034: + receipt := makeReceipt(addr2) + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) + case 34: + receipt := makeReceipt(addr3) + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) + case 99999: + receipt := makeReceipt(addr4) + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) + } + }) + require.NoError(b, err) + // The test txs are not properly signed, can't simply create a chain + // and then import blocks. TODO(rjl493456442) try to get rid of the + // manual database writes. 
+ gspec.MustCommit(db) + for i, block := range chain { + rawdb.WriteBlock(db, block) + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) + rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i]) + } + b.ResetTimer() + + filter, err := sys.NewRangeFilter(0, -1, []common.Address{addr1, addr2, addr3, addr4}, nil) + require.NoError(b, err) + + for i := 0; i < b.N; i++ { + logs, _ := filter.Logs(context.Background()) + if len(logs) != 4 { + b.Fatal("expected 4 logs, got", len(logs)) + } + } +} + +func TestFilters(t *testing.T) { + var ( + db, _ = rawdb.NewLevelDBDatabase(t.TempDir(), 0, 0, "", false) + _, sys = newTestFilterSystem(t, db, Config{}) + key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + addr = crypto.PubkeyToAddress(key1.PublicKey) + + hash1 = common.BytesToHash([]byte("topic1")) + hash2 = common.BytesToHash([]byte("topic2")) + hash3 = common.BytesToHash([]byte("topic3")) + hash4 = common.BytesToHash([]byte("topic4")) + + gspec = &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{addr: {Balance: big.NewInt(1000000)}}, + BaseFee: big.NewInt(params.ApricotPhase3InitialBaseFee), + } + ) + defer db.Close() + + _, chain, receipts, err := core.GenerateChainWithGenesis(gspec, dummy.NewFaker(), 1000, 10, func(i int, gen *core.BlockGen) { + switch i { + case 1: + receipt := types.NewReceipt(nil, false, 0) + receipt.Logs = []*types.Log{ + { + Address: addr, + Topics: []common.Hash{hash1}, + }, + } + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(1, common.HexToAddress("0x1"), big.NewInt(1), 1, gen.BaseFee(), nil)) + case 2: + receipt := types.NewReceipt(nil, false, 0) + receipt.Logs = []*types.Log{ + { + Address: addr, + Topics: []common.Hash{hash2}, + }, + } + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(2, common.HexToAddress("0x2"), big.NewInt(2), 2, gen.BaseFee(), 
nil)) + + case 998: + receipt := types.NewReceipt(nil, false, 0) + receipt.Logs = []*types.Log{ + { + Address: addr, + Topics: []common.Hash{hash3}, + }, + } + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(998, common.HexToAddress("0x998"), big.NewInt(998), 998, gen.BaseFee(), nil)) + case 999: + receipt := types.NewReceipt(nil, false, 0) + receipt.Logs = []*types.Log{ + { + Address: addr, + Topics: []common.Hash{hash4}, + }, + } + gen.AddUncheckedReceipt(receipt) + gen.AddUncheckedTx(types.NewTransaction(999, common.HexToAddress("0x999"), big.NewInt(999), 999, gen.BaseFee(), nil)) + } + }) + require.NoError(t, err) + // The test txs are not properly signed, can't simply create a chain + // and then import blocks. TODO(rjl493456442) try to get rid of the + // manual database writes. + gspec.MustCommit(db) + for i, block := range chain { + rawdb.WriteBlock(db, block) + rawdb.WriteCanonicalHash(db, block.Hash(), block.NumberU64()) + rawdb.WriteHeadBlockHash(db, block.Hash()) + rawdb.WriteReceipts(db, block.Hash(), block.NumberU64(), receipts[i]) + } + + filter, err := sys.NewRangeFilter(0, -1, []common.Address{addr}, [][]common.Hash{{hash1, hash2, hash3, hash4}}) + require.NoError(t, err) + + logs, _ := filter.Logs(context.Background()) + if len(logs) != 4 { + t.Error("expected 4 log, got", len(logs)) + } + + filter, err = sys.NewRangeFilter(900, 999, []common.Address{addr}, [][]common.Hash{{hash3}}) + require.NoError(t, err) + + logs, _ = filter.Logs(context.Background()) + if len(logs) != 1 { + t.Error("expected 1 log, got", len(logs)) + } + if len(logs) > 0 && logs[0].Topics[0] != hash3 { + t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) + } + + filter, err = sys.NewRangeFilter(990, -1, []common.Address{addr}, [][]common.Hash{{hash3}}) + require.NoError(t, err) + + logs, _ = filter.Logs(context.Background()) + if len(logs) != 1 { + t.Error("expected 1 log, got", len(logs)) + } + if len(logs) > 0 && 
logs[0].Topics[0] != hash3 { + t.Errorf("expected log[0].Topics[0] to be %x, got %x", hash3, logs[0].Topics[0]) + } + + filter, err = sys.NewRangeFilter(1, 10, nil, [][]common.Hash{{hash1, hash2}}) + require.NoError(t, err) + + logs, _ = filter.Logs(context.Background()) + if len(logs) != 2 { + t.Error("expected 2 log, got", len(logs)) + } + + failHash := common.BytesToHash([]byte("fail")) + filter, err = sys.NewRangeFilter(0, -1, nil, [][]common.Hash{{failHash}}) + require.NoError(t, err) + + logs, _ = filter.Logs(context.Background()) + if len(logs) != 0 { + t.Error("expected 0 log, got", len(logs)) + } + + failAddr := common.BytesToAddress([]byte("failmenow")) + filter, err = sys.NewRangeFilter(0, -1, []common.Address{failAddr}, nil) + require.NoError(t, err) + + logs, _ = filter.Logs(context.Background()) + if len(logs) != 0 { + t.Error("expected 0 log, got", len(logs)) + } + + filter, err = sys.NewRangeFilter(0, -1, nil, [][]common.Hash{{failHash}, {hash1}}) + require.NoError(t, err) + + logs, _ = filter.Logs(context.Background()) + if len(logs) != 0 { + t.Error("expected 0 log, got", len(logs)) + } +} diff --git a/coreth/eth/gasprice/fee_info_provider.go b/coreth/eth/gasprice/fee_info_provider.go new file mode 100644 index 00000000..61a874ed --- /dev/null +++ b/coreth/eth/gasprice/fee_info_provider.go @@ -0,0 +1,145 @@ +// (c) 2019-2022, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package gasprice + +import ( + "context" + "math/big" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/rpc" + lru "github.com/hashicorp/golang-lru" +) + +// additional slots in the header cache to allow processing queries +// for previous blocks (with full number of blocks desired) if new +// blocks are added concurrently. +const feeCacheExtraSlots = 5 + +type feeInfoProvider struct { + cache *lru.Cache + backend OracleBackend + // [minGasUsed] ensures we don't recommend users pay non-zero tips when other + // users are paying a tip to unnecessarily expedite block production. + minGasUsed uint64 + newHeaderAdded func() // callback used in tests +} + +// feeInfo is the type of data stored in feeInfoProvider's cache. +type feeInfo struct { + baseFee, tip *big.Int // baseFee and min. suggested tip for tx to be included in the block + timestamp uint64 // timestamp of the block header +} + +// newFeeInfoProvider returns a bounded buffer with [size] slots to +// store [*feeInfo] for the most recently accepted blocks. 
+func newFeeInfoProvider(backend OracleBackend, minGasUsed uint64, size int) (*feeInfoProvider, error) { + fc := &feeInfoProvider{ + backend: backend, + minGasUsed: minGasUsed, + } + if size == 0 { + // if size is zero, we return early as there is no + // reason for a goroutine to subscribe to the chain's + // accepted event. + fc.cache, _ = lru.New(size) + return fc, nil + } + + fc.cache, _ = lru.New(size + feeCacheExtraSlots) + // subscribe to the chain accepted event + acceptedEvent := make(chan core.ChainEvent, 1) + backend.SubscribeChainAcceptedEvent(acceptedEvent) + go func() { + for ev := range acceptedEvent { + fc.addHeader(context.Background(), ev.Block.Header()) + if fc.newHeaderAdded != nil { + fc.newHeaderAdded() + } + } + }() + return fc, fc.populateCache(size) +} + +// addHeader processes header into a feeInfo struct and caches the result. +func (f *feeInfoProvider) addHeader(ctx context.Context, header *types.Header) (*feeInfo, error) { + feeInfo := &feeInfo{ + timestamp: header.Time, + baseFee: header.BaseFee, + } + // Don't bias the estimate with blocks containing a limited number of transactions paying to + // expedite block production. + var err error + if f.minGasUsed <= header.GasUsed { + // Compute minimum required tip to be included in previous block + // + // NOTE: Using this approach, we will never recommend that the caller + // provides a non-zero tip unless some block is produced faster than the + // target rate (which could only occur if some set of callers manually override the + // suggested tip). In the future, we may wish to start suggesting a non-zero + // tip when most blocks are full otherwise callers may observe an unexpected + // delay in transaction inclusion. + feeInfo.tip, err = f.backend.MinRequiredTip(ctx, header) + } + + f.cache.Add(header.Number.Uint64(), feeInfo) + return feeInfo, err +} + +// get returns the feeInfo for block with [number] if present in the cache +// and a boolean representing if it was found. 
+func (f *feeInfoProvider) get(number uint64) (*feeInfo, bool) { + // Note: use Peek on LRU to use it as a bounded buffer. + feeInfoIntf, ok := f.cache.Peek(number) + if ok { + return feeInfoIntf.(*feeInfo), ok + } + return nil, ok +} + +// populateCache populates [f] with [size] blocks up to last accepted. +// Note: assumes [size] is greater than zero. +func (f *feeInfoProvider) populateCache(size int) error { + lastAccepted := f.backend.LastAcceptedBlock().NumberU64() + lowerBlockNumber := uint64(0) + if uint64(size-1) <= lastAccepted { // Note: "size-1" because we need a total of size blocks. + lowerBlockNumber = lastAccepted - uint64(size-1) + } + + for i := lowerBlockNumber; i <= lastAccepted; i++ { + header, err := f.backend.HeaderByNumber(context.Background(), rpc.BlockNumber(i)) + if err != nil { + return err + } + _, err = f.addHeader(context.Background(), header) + if err != nil { + return err + } + } + return nil +} diff --git a/coreth/eth/gasprice/fee_info_provider_test.go b/coreth/eth/gasprice/fee_info_provider_test.go new file mode 100644 index 00000000..182c3378 --- /dev/null +++ b/coreth/eth/gasprice/fee_info_provider_test.go @@ -0,0 +1,73 @@ +// (c) 2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package gasprice + +import ( + "context" + "math/big" + "sync" + "testing" + + "github.com/ava-labs/coreth/core" + "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/params" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +func TestFeeInfoProvider(t *testing.T) { + backend := newTestBackend(t, params.TestChainConfig, 2, common.Big0, testGenBlock(t, 55, 370)) + f, err := newFeeInfoProvider(backend, 1, 2) + require.NoError(t, err) + + // check that accepted event was subscribed + require.NotNil(t, backend.acceptedEvent) + + // check fee infos were cached + require.Equal(t, 2, f.cache.Len()) + + // some block that extends the current chain + var wg sync.WaitGroup + wg.Add(1) + f.newHeaderAdded = func() { wg.Done() } + header := &types.Header{Number: big.NewInt(3), ParentHash: backend.LastAcceptedBlock().Hash()} + block := types.NewBlockWithHeader(header) + backend.acceptedEvent <- core.ChainEvent{Block: block} + + // wait for the event to process before validating the new header was added. + wg.Wait() + feeInfo, ok := f.get(3) + require.True(t, ok) + require.NotNil(t, feeInfo) +} + +func TestFeeInfoProviderCacheSize(t *testing.T) { + size := 5 + overflow := 3 + backend := newTestBackend(t, params.TestChainConfig, 0, common.Big0, testGenBlock(t, 55, 370)) + f, err := newFeeInfoProvider(backend, 1, size) + require.NoError(t, err) + + // add [overflow] more elements than what will fit in the cache + // to test eviction behavior. 
+ for i := 0; i < size+feeCacheExtraSlots+overflow; i++ { + header := &types.Header{Number: big.NewInt(int64(i))} + _, err := f.addHeader(context.Background(), header) + require.NoError(t, err) + } + + // these numbers should be evicted + for i := 0; i < overflow; i++ { + feeInfo, ok := f.get(uint64(i)) + require.False(t, ok) + require.Nil(t, feeInfo) + } + + // these numbers should be present + for i := overflow; i < size+feeCacheExtraSlots+overflow; i++ { + feeInfo, ok := f.get(uint64(i)) + require.True(t, ok) + require.NotNil(t, feeInfo) + } +} diff --git a/coreth/eth/gasprice/feehistory.go b/coreth/eth/gasprice/feehistory.go index 4841f89a..39f61840 100644 --- a/coreth/eth/gasprice/feehistory.go +++ b/coreth/eth/gasprice/feehistory.go @@ -170,10 +170,11 @@ func (oracle *Oracle) resolveBlockRange(ctx context.Context, lastBlock rpc.Block // actually processed range is returned to avoid ambiguity when parts of the requested range // are not available or when the head has changed during processing this request. // Three arrays are returned based on the processed blocks: -// - reward: the requested percentiles of effective priority fees per gas of transactions in each -// block, sorted in ascending order and weighted by gas used. -// - baseFee: base fee per gas in the given block -// - gasUsedRatio: gasUsed/gasLimit in the given block +// - reward: the requested percentiles of effective priority fees per gas of transactions in each +// block, sorted in ascending order and weighted by gas used. +// - baseFee: base fee per gas in the given block +// - gasUsedRatio: gasUsed/gasLimit in the given block +// // Note: baseFee includes the next block after the newest of the returned range, because this // value can be derived from the newest block. 
func (oracle *Oracle) FeeHistory(ctx context.Context, blocks int, unresolvedLastBlock rpc.BlockNumber, rewardPercentiles []float64) (*big.Int, [][]*big.Int, []*big.Int, []float64, error) { diff --git a/coreth/eth/gasprice/feehistory_test.go b/coreth/eth/gasprice/feehistory_test.go index 6989ce35..7a90a37b 100644 --- a/coreth/eth/gasprice/feehistory_test.go +++ b/coreth/eth/gasprice/feehistory_test.go @@ -34,6 +34,7 @@ import ( "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/types" + "github.com/stretchr/testify/require" "github.com/ava-labs/coreth/params" "github.com/ava-labs/coreth/rpc" @@ -105,7 +106,8 @@ func TestFeeHistory(t *testing.T) { } b.AddTx(tx) }) - oracle := NewOracle(backend, config) + oracle, err := NewOracle(backend, config) + require.NoError(t, err) first, reward, baseFee, ratio, err := oracle.FeeHistory(context.Background(), c.count, c.last, c.percent) diff --git a/coreth/eth/gasprice/gasprice.go b/coreth/eth/gasprice/gasprice.go index 94229743..85ec33b0 100644 --- a/coreth/eth/gasprice/gasprice.go +++ b/coreth/eth/gasprice/gasprice.go @@ -59,9 +59,6 @@ const ( // [DefaultMaxBlockHistory] to ensure all block lookups can be cached when // serving a fee history query. DefaultFeeHistoryCacheSize int = 30_000 - // concurrentLookbackThreads sets the number of concurrent workers to fetch - // blocks to be included in fee estimations. 
- concurrentLookbackThreads int = 10 ) var ( @@ -100,6 +97,7 @@ type OracleBackend interface { GetReceipts(ctx context.Context, hash common.Hash) (types.Receipts, error) ChainConfig() *params.ChainConfig SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription + SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) event.Subscription MinRequiredTip(ctx context.Context, header *types.Header) (*big.Int, error) LastAcceptedBlock() *types.Block } @@ -115,13 +113,10 @@ type Oracle struct { // sink to 0 during a period of slow block production, such that nobody's // transactions will be included until the full block fee duration has // elapsed. - minPrice *big.Int - maxPrice *big.Int - // [minGasUsed] ensures we don't recommend users pay non-zero tips when other - // users are paying a tip to unnecessarily expedite block production. - minGasUsed *big.Int - cacheLock sync.RWMutex - fetchLock sync.Mutex + minPrice *big.Int + maxPrice *big.Int + cacheLock sync.RWMutex + fetchLock sync.Mutex // clock to decide what set of rules to use when recommending a gas price clock mockable.Clock @@ -131,11 +126,12 @@ type Oracle struct { maxCallBlockHistory int maxBlockHistory int historyCache *lru.Cache + feeInfoProvider *feeInfoProvider } // NewOracle returns a new gasprice oracle which can recommend suitable // gasprice for newly created transaction. 
-func NewOracle(backend OracleBackend, config Config) *Oracle { +func NewOracle(backend OracleBackend, config Config) (*Oracle, error) { blocks := config.Blocks if blocks < 1 { blocks = 1 @@ -196,21 +192,24 @@ func NewOracle(backend OracleBackend, config Config) *Oracle { lastHead = ev.Block.Hash() } }() - + feeInfoProvider, err := newFeeInfoProvider(backend, minGasUsed.Uint64(), config.Blocks) + if err != nil { + return nil, err + } return &Oracle{ backend: backend, lastPrice: minPrice, lastBaseFee: DefaultMinBaseFee, minPrice: minPrice, maxPrice: maxPrice, - minGasUsed: minGasUsed, checkBlocks: blocks, percentile: percent, maxLookbackSeconds: maxLookbackSeconds, maxCallBlockHistory: maxCallBlockHistory, maxBlockHistory: maxBlockHistory, historyCache: cache, - } + feeInfoProvider: feeInfoProvider, + }, nil } // EstimateBaseFee returns an estimate of what the base fee will be on a block @@ -245,20 +244,20 @@ func (oracle *Oracle) EstimateBaseFee(ctx context.Context) (*big.Int, error) { // If the latest block has a nil base fee, this function will return nil as the base fee // of the next block. func (oracle *Oracle) estimateNextBaseFee(ctx context.Context) (*big.Int, error) { - // Fetch the most recent block by number - block, err := oracle.backend.BlockByNumber(ctx, rpc.LatestBlockNumber) + // Fetch the most recent header by number + header, err := oracle.backend.HeaderByNumber(ctx, rpc.LatestBlockNumber) if err != nil { return nil, err } // If the fetched block does not have a base fee, return nil as the base fee - if block.BaseFee() == nil { + if header.BaseFee == nil { return nil, nil } // If the block does have a baseFee, calculate the next base fee // based on the current time and add it to the tip to estimate the // total gas price estimate. 
- _, nextBaseFee, err := dummy.EstimateNextBaseFee(oracle.backend.ChainConfig(), block.Header(), oracle.clock.Unix()) + _, nextBaseFee, err := dummy.EstimateNextBaseFee(oracle.backend.ChainConfig(), header, oracle.clock.Unix()) return nextBaseFee, err } @@ -324,77 +323,36 @@ func (oracle *Oracle) suggestDynamicFees(ctx context.Context) (*big.Int, *big.In return new(big.Int).Set(lastPrice), new(big.Int).Set(lastBaseFee), nil } var ( - latestBlockNumber = head.Number.Uint64() - lowerBlockNumberLimit = uint64(0) - result = make(chan results, oracle.checkBlocks) - tipResults []*big.Int - baseFeeResults []*big.Int - workerChannel = make(chan uint64, concurrentLookbackThreads) - wg sync.WaitGroup - lookBackContext, lookbackCancel = context.WithCancel(ctx) + latestBlockNumber = head.Number.Uint64() + lowerBlockNumberLimit = uint64(0) + currentTime = oracle.clock.Unix() + tipResults []*big.Int + baseFeeResults []*big.Int ) - defer lookbackCancel() - if uint64(oracle.checkBlocks) <= latestBlockNumber { lowerBlockNumberLimit = latestBlockNumber - uint64(oracle.checkBlocks) } - // Producer adds block requests from [latestBlockNumber] to [lowerBlockLimit] inclusive. - go func() { - defer close(workerChannel) - for i := latestBlockNumber; i > lowerBlockNumberLimit; i-- { - select { - case <-lookBackContext.Done(): - // If a worker signals that it encountered a block past the max lookback time, stop - // adding more block numbers to [workerChannel] since they will not be included. - return - - case workerChannel <- i: - } + // Process block headers in the range calculated for this gas price estimation. 
+ for i := latestBlockNumber; i > lowerBlockNumberLimit; i-- { + feeInfo, err := oracle.getFeeInfo(ctx, i) + if err != nil { + return new(big.Int).Set(lastPrice), new(big.Int).Set(lastBaseFee), err } - }() - - // Create [concurrentLookbackThreads] consumer threads to fetch blocks for the requested heights - for i := 0; i <= concurrentLookbackThreads; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for blockNumber := range workerChannel { - blockNumber := blockNumber - currentTime := oracle.clock.Unix() - // Pass in [lookbackCancel] here, so that if the worker finds a block past the oldest timestamp - // the worker can signal to the producer that there's no need to add work past that point. - // Since the producer adds numbers in order, we guarantee that the producer has already - // added work for any block with a timestamp greater than the point at which the producer - // will stop adding work requests. - oracle.getBlockTips(ctx, blockNumber, result, currentTime, lookbackCancel) - } - }() - } - // Wait for all workers to complete. Only the workers add to the result channel, so once they have terminated - // we can safely close the result channel. - // This ensures that the results channel will be closed once there are no more results to add. - go func() { - defer close(result) - wg.Wait() - }() - - // Process all of the results sequentially. This will terminate when the [result] channel has been closed. 
- for res := range result { - if res.err != nil { - return new(big.Int).Set(lastPrice), new(big.Int).Set(lastBaseFee), res.err + if feeInfo.timestamp+oracle.maxLookbackSeconds < currentTime { + break } - if res.tip != nil { - tipResults = append(tipResults, res.tip) + if feeInfo.tip != nil { + tipResults = append(tipResults, feeInfo.tip) } else { tipResults = append(tipResults, new(big.Int).Set(common.Big0)) } - if res.baseFee != nil { - baseFeeResults = append(baseFeeResults, res.baseFee) + if feeInfo.baseFee != nil { + baseFeeResults = append(baseFeeResults, feeInfo.baseFee) } else { baseFeeResults = append(baseFeeResults, new(big.Int).Set(common.Big0)) } @@ -426,45 +384,20 @@ func (oracle *Oracle) suggestDynamicFees(ctx context.Context) (*big.Int, *big.In return new(big.Int).Set(price), new(big.Int).Set(baseFee), nil } -type results struct { - tip *big.Int - baseFee *big.Int - err error -} - -// getBlockTips calculates the minimum required tip to be included in a given -// block and sends the value to the result channel. -func (oracle *Oracle) getBlockTips(ctx context.Context, blockNumber uint64, result chan results, currentTime uint64, cancel context.CancelFunc) { - header, err := oracle.backend.HeaderByNumber(ctx, rpc.BlockNumber(blockNumber)) - if header == nil { - result <- results{nil, nil, err} - return +// getFeeInfo calculates the minimum required tip to be included in a given +// block and returns the value as a feeInfo struct. +func (oracle *Oracle) getFeeInfo(ctx context.Context, number uint64) (*feeInfo, error) { + feeInfo, ok := oracle.feeInfoProvider.get(number) + if ok { + return feeInfo, nil } - // If we see a block thats older than maxLookbackSeconds, we should cancel all contexts and - // stop looking back blocks - if currentTime-header.Time > oracle.maxLookbackSeconds { - cancel() - return - } - - // Don't bias the estimate with blocks containing a limited number of transactions paying to - // expedite block production. 
- if header.GasUsed < oracle.minGasUsed.Uint64() { - result <- results{nil, header.BaseFee, nil} - return + // on cache miss, read from database + header, err := oracle.backend.HeaderByNumber(ctx, rpc.BlockNumber(number)) + if err != nil { + return nil, err } - - // Compute minimum required tip to be included in previous block - // - // NOTE: Using this approach, we will never recommend that the caller - // provides a non-zero tip unless some block is produced faster than the - // target rate (which could only occur if some set of callers manually override the - // suggested tip). In the future, we may wish to start suggesting a non-zero - // tip when most blocks are full otherwise callers may observe an unexpected - // delay in transaction inclusion. - minTip, err := oracle.backend.MinRequiredTip(ctx, header) - result <- results{minTip, header.BaseFee, err} + return oracle.feeInfoProvider.addHeader(ctx, header) } type bigIntArray []*big.Int diff --git a/coreth/eth/gasprice/gasprice_test.go b/coreth/eth/gasprice/gasprice_test.go index c009ef44..376b6933 100644 --- a/coreth/eth/gasprice/gasprice_test.go +++ b/coreth/eth/gasprice/gasprice_test.go @@ -43,10 +43,9 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/event" + "github.com/stretchr/testify/require" ) -const testHead = 32 - var ( key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key.PublicKey) @@ -54,8 +53,8 @@ var ( ) type testBackend struct { - chain *core.BlockChain - pending bool // pending block available + chain *core.BlockChain + acceptedEvent chan<- core.ChainEvent } func (b *testBackend) HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) { @@ -76,14 +75,6 @@ func (b *testBackend) GetReceipts(ctx context.Context, hash common.Hash) (types. 
return b.chain.GetReceiptsByHash(hash), nil } -func (b *testBackend) PendingBlockAndReceipts() (*types.Block, types.Receipts) { - if b.pending { - block := b.chain.GetBlockByNumber(testHead + 1) - return block, b.chain.GetReceiptsByHash(block.Hash()) - } - return nil, nil -} - func (b *testBackend) ChainConfig() *params.ChainConfig { return b.chain.Config() } @@ -92,6 +83,11 @@ func (b *testBackend) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) eve return nil } +func (b *testBackend) SubscribeChainAcceptedEvent(ch chan<- core.ChainEvent) event.Subscription { + b.acceptedEvent = ch + return nil +} + func newTestBackendFakerEngine(t *testing.T, config *params.ChainConfig, numBlocks int, extDataGasUsage *big.Int, genBlocks func(i int, b *core.BlockGen)) *testBackend { var gspec = &core.Genesis{ Config: config, @@ -99,18 +95,15 @@ func newTestBackendFakerEngine(t *testing.T, config *params.ChainConfig, numBloc } engine := dummy.NewETHFaker() - db := rawdb.NewMemoryDatabase() - genesis := gspec.MustCommit(db) // Generate testing blocks - blocks, _, err := core.GenerateChain(gspec.Config, genesis, engine, db, numBlocks, 0, genBlocks) + _, blocks, _, err := core.GenerateChainWithGenesis(gspec, engine, numBlocks, 0, genBlocks) if err != nil { t.Fatal(err) } // Construct testing chain diskdb := rawdb.NewMemoryDatabase() - gspec.Commit(diskdb) - chain, err := core.NewBlockChain(diskdb, core.DefaultCacheConfig, gspec.Config, engine, vm.Config{}, common.Hash{}) + chain, err := core.NewBlockChain(diskdb, core.DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("Failed to create local chain, %v", err) } @@ -134,19 +127,14 @@ func newTestBackend(t *testing.T, config *params.ChainConfig, numBlocks int, ext return common.Big0, extDataGasUsage, nil }, }) - db := rawdb.NewMemoryDatabase() - genesis := gspec.MustCommit(db) // Generate testing blocks - - blocks, _, err := core.GenerateChain(gspec.Config, genesis, engine, db, 
numBlocks, 1, genBlocks) + _, blocks, _, err := core.GenerateChainWithGenesis(gspec, engine, numBlocks, 1, genBlocks) if err != nil { t.Fatal(err) } // Construct testing chain - diskdb := rawdb.NewMemoryDatabase() - gspec.Commit(diskdb) - chain, err := core.NewBlockChain(diskdb, core.DefaultCacheConfig, gspec.Config, engine, vm.Config{}, common.Hash{}) + chain, err := core.NewBlockChain(rawdb.NewMemoryDatabase(), core.DefaultCacheConfig, gspec, engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("Failed to create local chain, %v", err) } @@ -188,7 +176,7 @@ func defaultOracleConfig() Config { } } -// timeCrunchOracleConfig returns a config with [MaxLookbackSeconds] set to 5 +// timeCrunchOracleConfig returns a config with [MaxLookbackSeconds] set to 5 // to ensure that during gas price estimation, we will hit the time based look back limit func timeCrunchOracleConfig() Config { return Config{ @@ -203,117 +191,73 @@ func applyGasPriceTest(t *testing.T, test suggestTipCapTest, config Config) { test.genBlock = func(i int, b *core.BlockGen) {} } backend := newTestBackend(t, test.chainConfig, test.numBlocks, test.extDataGasUsage, test.genBlock) - oracle := NewOracle(backend, config) + oracle, err := NewOracle(backend, config) + require.NoError(t, err) // mock time to be consistent across different CI runs // sets currentTime to be 20 seconds oracle.clock.Set(time.Unix(20, 0)) got, err := oracle.SuggestTipCap(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) + if got.Cmp(test.expectedTip) != 0 { t.Fatalf("Expected tip (%d), got tip (%d)", test.expectedTip, got) } } +func testGenBlock(t *testing.T, tip int64, numTx int) func(int, *core.BlockGen) { + return func(i int, b *core.BlockGen) { + b.SetCoinbase(common.Address{1}) + + txTip := big.NewInt(tip * params.GWei) + signer := types.LatestSigner(params.TestChainConfig) + baseFee := b.BaseFee() + feeCap := new(big.Int).Add(baseFee, txTip) + for j := 0; j < numTx; j++ { + tx 
:= types.NewTx(&types.DynamicFeeTx{ + ChainID: params.TestChainConfig.ChainID, + Nonce: b.TxNonce(addr), + To: &common.Address{}, + Gas: params.TxGas, + GasFeeCap: feeCap, + GasTipCap: txTip, + Data: []byte{}, + }) + tx, err := types.SignTx(tx, signer, key) + require.NoError(t, err, "failed to create tx") + b.AddTx(tx) + } + } +} + func TestSuggestTipCapEmptyExtDataGasUsage(t *testing.T) { - txTip := big.NewInt(55 * params.GWei) applyGasPriceTest(t, suggestTipCapTest{ chainConfig: params.TestChainConfig, numBlocks: 3, extDataGasUsage: nil, - genBlock: func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - - signer := types.LatestSigner(params.TestChainConfig) - baseFee := b.BaseFee() - feeCap := new(big.Int).Add(baseFee, txTip) - for j := 0; j < 370; j++ { - tx := types.NewTx(&types.DynamicFeeTx{ - ChainID: params.TestChainConfig.ChainID, - Nonce: b.TxNonce(addr), - To: &common.Address{}, - Gas: params.TxGas, - GasFeeCap: feeCap, - GasTipCap: txTip, - Data: []byte{}, - }) - tx, err := types.SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to create tx: %s", err) - } - b.AddTx(tx) - } - }, - expectedTip: big.NewInt(5_713_963_963), + genBlock: testGenBlock(t, 55, 370), + expectedTip: big.NewInt(5_713_963_963), }, defaultOracleConfig()) } func TestSuggestTipCapSimple(t *testing.T) { - txTip := big.NewInt(55 * params.GWei) applyGasPriceTest(t, suggestTipCapTest{ chainConfig: params.TestChainConfig, numBlocks: 3, extDataGasUsage: common.Big0, - genBlock: func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - - signer := types.LatestSigner(params.TestChainConfig) - baseFee := b.BaseFee() - feeCap := new(big.Int).Add(baseFee, txTip) - for j := 0; j < 370; j++ { - tx := types.NewTx(&types.DynamicFeeTx{ - ChainID: params.TestChainConfig.ChainID, - Nonce: b.TxNonce(addr), - To: &common.Address{}, - Gas: params.TxGas, - GasFeeCap: feeCap, - GasTipCap: txTip, - Data: []byte{}, - }) - tx, err := types.SignTx(tx, signer, key) - if err != 
nil { - t.Fatalf("failed to create tx: %s", err) - } - b.AddTx(tx) - } - }, - expectedTip: big.NewInt(5_713_963_963), + genBlock: testGenBlock(t, 55, 370), + expectedTip: big.NewInt(5_713_963_963), }, defaultOracleConfig()) } func TestSuggestTipCapSimpleFloor(t *testing.T) { - txTip := big.NewInt(55 * params.GWei) applyGasPriceTest(t, suggestTipCapTest{ chainConfig: params.TestChainConfig, numBlocks: 1, extDataGasUsage: common.Big0, - genBlock: func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - - signer := types.LatestSigner(params.TestChainConfig) - baseFee := b.BaseFee() - feeCap := new(big.Int).Add(baseFee, txTip) - for j := 0; j < 370; j++ { - tx := types.NewTx(&types.DynamicFeeTx{ - ChainID: params.TestChainConfig.ChainID, - Nonce: b.TxNonce(addr), - To: &common.Address{}, - Gas: params.TxGas, - GasFeeCap: feeCap, - GasTipCap: txTip, - Data: []byte{}, - }) - tx, err := types.SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to create tx: %s", err) - } - b.AddTx(tx) - } - }, - expectedTip: common.Big0, + genBlock: testGenBlock(t, 55, 370), + expectedTip: common.Big0, }, defaultOracleConfig()) } @@ -354,9 +298,7 @@ func TestSuggestTipCapSmallTips(t *testing.T) { Data: []byte{}, }) tx, err = types.SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to create tx: %s", err) - } + require.NoError(t, err, "failed to create tx") b.AddTx(tx) } }, @@ -366,68 +308,22 @@ func TestSuggestTipCapSmallTips(t *testing.T) { } func TestSuggestTipCapExtDataUsage(t *testing.T) { - txTip := big.NewInt(55 * params.GWei) applyGasPriceTest(t, suggestTipCapTest{ chainConfig: params.TestChainConfig, numBlocks: 3, extDataGasUsage: big.NewInt(10_000), - genBlock: func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - - signer := types.LatestSigner(params.TestChainConfig) - baseFee := b.BaseFee() - feeCap := new(big.Int).Add(baseFee, txTip) - for j := 0; j < 370; j++ { - tx := types.NewTx(&types.DynamicFeeTx{ - ChainID: 
params.TestChainConfig.ChainID, - Nonce: b.TxNonce(addr), - To: &common.Address{}, - Gas: params.TxGas, - GasFeeCap: feeCap, - GasTipCap: txTip, - Data: []byte{}, - }) - tx, err := types.SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to create tx: %s", err) - } - b.AddTx(tx) - } - }, - expectedTip: big.NewInt(5_706_726_649), + genBlock: testGenBlock(t, 55, 370), + expectedTip: big.NewInt(5_706_726_649), }, defaultOracleConfig()) } func TestSuggestTipCapMinGas(t *testing.T) { - txTip := big.NewInt(500 * params.GWei) applyGasPriceTest(t, suggestTipCapTest{ chainConfig: params.TestChainConfig, numBlocks: 3, extDataGasUsage: common.Big0, - genBlock: func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - - signer := types.LatestSigner(params.TestChainConfig) - baseFee := b.BaseFee() - feeCap := new(big.Int).Add(baseFee, txTip) - for j := 0; j < 50; j++ { - tx := types.NewTx(&types.DynamicFeeTx{ - ChainID: params.TestChainConfig.ChainID, - Nonce: b.TxNonce(addr), - To: &common.Address{}, - Gas: params.TxGas, - GasFeeCap: feeCap, - GasTipCap: txTip, - Data: []byte{}, - }) - tx, err := types.SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to create tx: %s", err) - } - b.AddTx(tx) - } - }, - expectedTip: big.NewInt(0), + genBlock: testGenBlock(t, 500, 50), + expectedTip: big.NewInt(0), }, defaultOracleConfig()) } @@ -454,83 +350,33 @@ func TestSuggestGasPricePreAP3(t *testing.T) { Data: []byte{}, }) tx, err := types.SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to create tx: %s", err) - } + require.NoError(t, err, "failed to create tx") b.AddTx(tx) } }) - oracle := NewOracle(backend, config) + oracle, err := NewOracle(backend, config) + require.NoError(t, err) - _, err := oracle.SuggestPrice(context.Background()) - if err != nil { - t.Fatal(err) - } + _, err = oracle.SuggestPrice(context.Background()) + require.NoError(t, err) } func TestSuggestTipCapMaxBlocksLookback(t *testing.T) { - txTip := big.NewInt(550 * 
params.GWei) - applyGasPriceTest(t, suggestTipCapTest{ chainConfig: params.TestChainConfig, numBlocks: 20, extDataGasUsage: common.Big0, - genBlock: func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - - signer := types.LatestSigner(params.TestChainConfig) - baseFee := b.BaseFee() - feeCap := new(big.Int).Add(baseFee, txTip) - for j := 0; j < 370; j++ { - tx := types.NewTx(&types.DynamicFeeTx{ - ChainID: params.TestChainConfig.ChainID, - Nonce: b.TxNonce(addr), - To: &common.Address{}, - Gas: params.TxGas, - GasFeeCap: feeCap, - GasTipCap: txTip, - Data: []byte{}, - }) - tx, err := types.SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to create tx: %s", err) - } - b.AddTx(tx) - } - }, - expectedTip: big.NewInt(51_565_264_256), + genBlock: testGenBlock(t, 550, 370), + expectedTip: big.NewInt(51_565_264_256), }, defaultOracleConfig()) } func TestSuggestTipCapMaxBlocksSecondsLookback(t *testing.T) { - txTip := big.NewInt(550 * params.GWei) applyGasPriceTest(t, suggestTipCapTest{ chainConfig: params.TestChainConfig, numBlocks: 20, extDataGasUsage: big.NewInt(1), - genBlock: func(i int, b *core.BlockGen) { - b.SetCoinbase(common.Address{1}) - - signer := types.LatestSigner(params.TestChainConfig) - baseFee := b.BaseFee() - feeCap := new(big.Int).Add(baseFee, txTip) - for j := 0; j < 370; j++ { - tx := types.NewTx(&types.DynamicFeeTx{ - ChainID: params.TestChainConfig.ChainID, - Nonce: b.TxNonce(addr), - To: &common.Address{}, - Gas: params.TxGas, - GasFeeCap: feeCap, - GasTipCap: txTip, - Data: []byte{}, - }) - tx, err := types.SignTx(tx, signer, key) - if err != nil { - t.Fatalf("failed to create tx: %s", err) - } - b.AddTx(tx) - } - }, - expectedTip: big.NewInt(92_212_529_423), + genBlock: testGenBlock(t, 550, 370), + expectedTip: big.NewInt(92_212_529_423), }, timeCrunchOracleConfig()) } diff --git a/coreth/eth/state_accessor.go b/coreth/eth/state_accessor.go index 3e5f4c99..7197e8e6 100644 --- a/coreth/eth/state_accessor.go +++ 
b/coreth/eth/state_accessor.go @@ -36,40 +36,60 @@ import ( "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/eth/tracers" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" ) +// noopReleaser is returned in case there is no operation expected +// for releasing state. +var noopReleaser = tracers.StateReleaseFunc(func() {}) + // StateAtBlock retrieves the state database associated with a certain block. // If no state is locally available for the given block, a number of blocks // are attempted to be reexecuted to generate the desired state. The optional -// base layer statedb can be passed then it's regarded as the statedb of the +// base layer statedb can be provided which is regarded as the statedb of the // parent block. +// +// An additional release function will be returned if the requested state is +// available. Release is expected to be invoked when the returned state is no longer needed. +// Its purpose is to prevent resource leaking. Though it can be noop in some cases. +// // Parameters: -// - block: The block for which we want the state (== state at the stateRoot of the parent) -// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state -// - base: If the caller is tracing multiple blocks, the caller can provide the parent state -// continuously from the callsite. -// - checklive: if true, then the live 'blockchain' state database is used. If the caller want to -// perform Commit or other 'save-to-disk' changes, this should be set to false to avoid -// storing trash persistently -// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is provided, -// it would be preferable to start from a fresh state, if we have it on disk. 
-func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (statedb *state.StateDB, err error) { +// - block: The block for which we want the state(state = block.Root) +// - reexec: The maximum number of blocks to reprocess trying to obtain the desired state +// - base: If the caller is tracing multiple blocks, the caller can provide the parent +// state continuously from the callsite. +// - readOnly: If true, then the live 'blockchain' state database is used. No mutation should +// be made from caller, e.g. perform Commit or other 'save-to-disk' changes. +// Otherwise, the trash generated by caller may be persisted permanently. +// - preferDisk: this arg can be used by the caller to signal that even though the 'base' is +// provided, it would be preferable to start from a fresh state, if we have it +// on disk. +func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (statedb *state.StateDB, release tracers.StateReleaseFunc, err error) { var ( current *types.Block database state.Database report = true origin = block.NumberU64() ) - // Check the live database first if we have the state fully available, use that. - if checkLive { - statedb, err = eth.blockchain.StateAt(block.Root()) - if err == nil { - return statedb, nil + // The state is only for reading purposes, check the state presence in + // live database. + if readOnly { + // The state is available in live database, create a reference + // on top to prevent garbage collection and return a release + // function to deref it. 
+ if statedb, err = eth.blockchain.StateAt(block.Root()); err == nil { + statedb.Database().TrieDB().Reference(block.Root(), common.Hash{}) + return statedb, func() { + statedb.Database().TrieDB().Dereference(block.Root()) + }, nil } } + // The state is both for reading and writing, or it's unavailable in disk, + // try to construct/recover the state over an ephemeral trie.Database for + // isolating the live one. if base != nil { if preferDisk { // Create an ephemeral trie.Database for isolating the live one. Otherwise @@ -77,37 +97,37 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) if statedb, err = state.New(block.Root(), database, nil); err == nil { log.Info("Found disk backend for state trie", "root", block.Root(), "number", block.Number()) - return statedb, nil + return statedb, noopReleaser, nil } } // The optional base statedb is given, mark the start point as parent block statedb, database, report = base, base.Database(), false current = eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) } else { - // Otherwise try to reexec blocks until we find a state or reach our limit + // Otherwise, try to reexec blocks until we find a state or reach our limit current = block // Create an ephemeral trie.Database for isolating the live one. Otherwise // the internal junks created by tracing will be persisted into the disk. database = state.NewDatabaseWithConfig(eth.chainDb, &trie.Config{Cache: 16}) - // If we didn't check the dirty database, do check the clean one, otherwise - // we would rewind past a persisted block (specific corner case is chain - // tracing from the genesis). - if !checkLive { + // If we didn't check the live database, do check state over ephemeral database, + // otherwise we would rewind past a persisted block (specific corner case is + // chain tracing from the genesis). 
+ if !readOnly { statedb, err = state.New(current.Root(), database, nil) if err == nil { - return statedb, nil + return statedb, noopReleaser, nil } } // Database does not have the state for the given block, try to regenerate for i := uint64(0); i < reexec; i++ { if current.NumberU64() == 0 { - return nil, errors.New("genesis state is missing") + return nil, nil, errors.New("genesis state is missing") } parent := eth.blockchain.GetBlock(current.ParentHash(), current.NumberU64()-1) if parent == nil { - return nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) + return nil, nil, fmt.Errorf("missing block %v %d", current.ParentHash(), current.NumberU64()-1) } current = parent @@ -119,13 +139,14 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state if err != nil { switch err.(type) { case *trie.MissingNodeError: - return nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) + return nil, nil, fmt.Errorf("required historical state unavailable (reexec=%d)", reexec) default: - return nil, err + return nil, nil, err } } } - // State was available at historical point, regenerate + // State is available at historical point, re-execute the blocks on top for + // the desired state. 
var ( start = time.Now() logged time.Time @@ -141,22 +162,24 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state parentHeader := current.Header() next := current.NumberU64() + 1 if current = eth.blockchain.GetBlockByNumber(next); current == nil { - return nil, fmt.Errorf("block #%d not found", next) + return nil, nil, fmt.Errorf("block #%d not found", next) } _, _, _, err := eth.blockchain.Processor().Process(current, parentHeader, statedb, vm.Config{}) if err != nil { - return nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) + return nil, nil, fmt.Errorf("processing block %d failed: %v", current.NumberU64(), err) } // Finalize the state so any modifications are written to the trie root, err := statedb.Commit(eth.blockchain.Config().IsEIP158(current.Number()), true) if err != nil { - return nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w", + return nil, nil, fmt.Errorf("stateAtBlock commit failed, number %d root %v: %w", current.NumberU64(), current.Root().Hex(), err) } statedb, err = state.New(root, database, nil) if err != nil { - return nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) + return nil, nil, fmt.Errorf("state reset after block %d failed: %v", current.NumberU64(), err) } + // Note: In coreth, the state reference is held by passing true to [statedb.Commit]. + // Drop the parent state to prevent accumulating too many nodes in memory. 
if parent != (common.Hash{}) { database.TrieDB().Dereference(parent) } @@ -166,28 +189,28 @@ func (eth *Ethereum) StateAtBlock(block *types.Block, reexec uint64, base *state nodes, imgs := database.TrieDB().Size() log.Info("Historical state regenerated", "block", current.NumberU64(), "elapsed", time.Since(start), "nodes", nodes, "preimages", imgs) } - return statedb, nil + return statedb, func() { database.TrieDB().Dereference(block.Root()) }, nil } // stateAtTransaction returns the execution environment of a certain transaction. -func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { +func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, tracers.StateReleaseFunc, error) { // Short circuit if it's genesis block. if block.NumberU64() == 0 { - return nil, vm.BlockContext{}, nil, errors.New("no transaction in genesis") + return nil, vm.BlockContext{}, nil, nil, errors.New("no transaction in genesis") } // Create the parent state database parent := eth.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("parent %#x not found", block.ParentHash()) } // Lookup the statedb of parent block from the live database, // otherwise regenerate it on the flight. - statedb, err := eth.StateAtBlock(parent, reexec, nil, true, false) + statedb, release, err := eth.StateAtBlock(parent, reexec, nil, true, false) if err != nil { - return nil, vm.BlockContext{}, nil, err + return nil, vm.BlockContext{}, nil, nil, err } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, nil + return nil, vm.BlockContext{}, statedb, release, nil } // Recompute transactions up to the target index. 
signer := types.MakeSigner(eth.blockchain.Config(), block.Number(), new(big.Int).SetUint64(block.Time())) @@ -197,17 +220,17 @@ func (eth *Ethereum) stateAtTransaction(block *types.Block, txIndex int, reexec txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), eth.blockchain, nil) if idx == txIndex { - return msg, context, statedb, nil + return msg, context, statedb, release, nil } // Not yet the searched for transaction, execute on top of the current state vmenv := vm.NewEVM(context, txContext, statedb, eth.blockchain.Config(), vm.Config{}) statedb.Prepare(tx.Hash(), idx) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } // Ensure any modifications are committed to the state // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) } diff --git a/coreth/eth/tracers/api.go b/coreth/eth/tracers/api.go index 03fb5be8..0e0f7b2f 100644 --- a/coreth/eth/tracers/api.go +++ b/coreth/eth/tracers/api.go @@ -27,12 +27,14 @@ package tracers import ( + "bufio" "bytes" "context" "encoding/json" "errors" "fmt" "math/big" + "os" "runtime" "sync" "time" @@ -71,6 +73,10 @@ const ( defaultTracechainMemLimit = common.StorageSize(500 * 1024 * 1024) ) +// StateReleaseFunc is used to deallocate resources held by constructing a +// historical state for tracing purposes. 
+type StateReleaseFunc func() + // Backend interface provides the common API services (that are provided by // both full and light clients) with access to necessary functions. type Backend interface { @@ -84,22 +90,39 @@ type Backend interface { ChainConfig() *params.ChainConfig Engine() consensus.Engine ChainDb() ethdb.Database - StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) - StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) + StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) + StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) +} + +// baseAPI holds the collection of common methods for API and FileTracerAPI. +type baseAPI struct { + backend Backend } // API is the collection of tracing APIs exposed over the private debugging endpoint. type API struct { - backend Backend + baseAPI } // NewAPI creates a new API definition for the tracing methods of the Ethereum service. func NewAPI(backend Backend) *API { - return &API{backend: backend} + return &API{baseAPI{backend: backend}} +} + +// FileTracerAPI is the collection of additional tracing APIs exposed over the private +// debugging endpoint that log their output to a file. +type FileTracerAPI struct { + baseAPI +} + +// NewFileTracerAPI creates a new API definition for the tracing methods of the Ethereum +// service that log their output to a file.
+func NewFileTracerAPI(backend Backend) *FileTracerAPI { + return &FileTracerAPI{baseAPI{backend: backend}} } type chainContext struct { - api *API + api *baseAPI ctx context.Context } @@ -124,13 +147,13 @@ func (context *chainContext) GetHeader(hash common.Hash, number uint64) *types.H // chainContext constructs the context reader which is used by the evm for reading // the necessary chain context. -func (api *API) chainContext(ctx context.Context) core.ChainContext { +func (api *baseAPI) chainContext(ctx context.Context) core.ChainContext { return &chainContext{api: api, ctx: ctx} } // blockByNumber is the wrapper of the chain access function offered by the backend. // It will return an error if the block is not found. -func (api *API) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { +func (api *baseAPI) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Block, error) { block, err := api.backend.BlockByNumber(ctx, number) if err != nil { return nil, err @@ -143,7 +166,7 @@ func (api *API) blockByNumber(ctx context.Context, number rpc.BlockNumber) (*typ // blockByHash is the wrapper of the chain access function offered by the backend. // It will return an error if the block is not found. -func (api *API) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { +func (api *baseAPI) blockByHash(ctx context.Context, hash common.Hash) (*types.Block, error) { block, err := api.backend.BlockByHash(ctx, hash) if err != nil { return nil, err @@ -159,7 +182,7 @@ func (api *API) blockByHash(ctx context.Context, hash common.Hash) (*types.Block // // Note this function is friendly for the light client which can only retrieve the // historical(before the CHT) header/block by number. 
-func (api *API) blockByNumberAndHash(ctx context.Context, number rpc.BlockNumber, hash common.Hash) (*types.Block, error) { +func (api *baseAPI) blockByNumberAndHash(ctx context.Context, number rpc.BlockNumber, hash common.Hash) (*types.Block, error) { block, err := api.blockByNumber(ctx, number) if err != nil { return nil, err @@ -211,7 +234,7 @@ func (t *txTraceResult) String() string { type blockTraceTask struct { statedb *state.StateDB // Intermediate state prepped for tracing block *types.Block // Block to trace the transactions from - rootref common.Hash // Trie root reference held for this task + release StateReleaseFunc // The function to release the held resource for this task results []*txTraceResult // Trace results procudes by the task } @@ -244,13 +267,6 @@ func (api *API) TraceChain(ctx context.Context, start, end rpc.BlockNumber, conf if from.Number().Cmp(to.Number()) >= 0 { return nil, fmt.Errorf("end block (#%d) needs to come after start block (#%d)", end, start) } - return api.traceChain(ctx, from, to, config) -} - -// traceChain configures a new tracer according to the provided configuration, and -// executes all the transactions contained within. The return value will be one item -// per transaction, dependent on the requested tracer. -func (api *API) traceChain(ctx context.Context, start, end *types.Block, config *TraceConfig) (*rpc.Subscription, error) { // Tracing a chain is a **long** operation, only do with subscriptions notifier, supported := rpc.NotifierFromContext(ctx) if !supported { @@ -258,8 +274,45 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config } sub := notifier.CreateSubscription() - // Prepare all the states for tracing. Note this procedure can take very - // long time. Timeout mechanism is necessary. 
+ resCh := api.traceChain(from, to, config, notifier.Closed()) + go func() { + for result := range resCh { + notifier.Notify(sub.ID, result) + } + }() + return sub, nil +} + +// releaser is a helper tool responsible for caching the release +// callbacks of tracing state. +type releaser struct { + releases []StateReleaseFunc + lock sync.Mutex +} + +func (r *releaser) add(release StateReleaseFunc) { + r.lock.Lock() + defer r.lock.Unlock() + + r.releases = append(r.releases, release) +} + +func (r *releaser) call() { + r.lock.Lock() + defer r.lock.Unlock() + + for _, release := range r.releases { + release() + } + r.releases = r.releases[:0] +} + +// traceChain configures a new tracer according to the provided configuration, and +// executes all the transactions contained within. The tracing chain range includes +// the end block but excludes the start one. The return value will be one item per +// transaction, dependent on the requested tracer. +// The tracing procedure should be aborted in case the closed signal is received. 
+func (api *API) traceChain(start, end *types.Block, config *TraceConfig, closed <-chan interface{}) chan *blockTraceResult { reexec := defaultTraceReexec if config != nil && config.Reexec != nil { reexec = *config.Reexec @@ -270,20 +323,23 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config threads = blocks } var ( - pend = new(sync.WaitGroup) - tasks = make(chan *blockTraceTask, threads) - results = make(chan *blockTraceTask, threads) - localctx = context.Background() + pend = new(sync.WaitGroup) + ctx = context.Background() + taskCh = make(chan *blockTraceTask, threads) + resCh = make(chan *blockTraceTask, threads) + reler = new(releaser) ) for th := 0; th < threads; th++ { pend.Add(1) go func() { defer pend.Done() - // Fetch and execute the next block trace tasks - for task := range tasks { - signer := types.MakeSigner(api.backend.ChainConfig(), task.block.Number(), new(big.Int).SetUint64(task.block.Time())) - blockCtx := core.NewEVMBlockContext(task.block.Header(), api.chainContext(localctx), nil) + // Fetch and execute the block trace taskCh + for task := range taskCh { + var ( + signer = types.MakeSigner(api.backend.ChainConfig(), task.block.Number(), task.block.Timestamp()) + blockCtx = core.NewEVMBlockContext(task.block.Header(), api.chainContext(ctx), nil) + ) // Trace all the transactions contained within for i, tx := range task.block.Transactions() { msg, _ := tx.AsMessage(signer, task.block.BaseFee()) @@ -292,7 +348,7 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config TxIndex: i, TxHash: tx.Hash(), } - res, err := api.traceTx(localctx, msg, txctx, blockCtx, task.statedb, config) + res, err := api.traceTx(ctx, msg, txctx, blockCtx, task.statedb, config) if err != nil { task.results[i] = &txTraceResult{Error: err.Error()} log.Warn("Tracing failed", "hash", tx.Hash(), "block", task.block.NumberU64(), "err", err) @@ -302,36 +358,38 @@ func (api *API) traceChain(ctx context.Context, start, end 
*types.Block, config task.statedb.Finalise(api.backend.ChainConfig().IsEIP158(task.block.Number())) task.results[i] = &txTraceResult{Result: res} } - // Stream the result back to the user or abort on teardown + // Tracing state is used up, queue it for de-referencing + reler.add(task.release) + + // Stream the result back to the result catcher or abort on teardown select { - case results <- task: - case <-notifier.Closed(): + case resCh <- task: + case <-closed: return } } }() } // Start a goroutine to feed all the blocks into the tracers - var ( - begin = time.Now() - derefTodo []common.Hash // list of hashes to dereference from the db - derefsMu sync.Mutex // mutex for the derefs - ) - go func() { var ( logged time.Time + begin = time.Now() number uint64 traced uint64 failed error - parent common.Hash statedb *state.StateDB + release StateReleaseFunc ) // Ensure everything is properly cleaned up on any exit path defer func() { - close(tasks) + close(taskCh) pend.Wait() + // Clean out any pending derefs. 
+ reler.call() + + // Log the chain result switch { case failed != nil: log.Warn("Chain tracing failed", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin), "err", failed) @@ -340,105 +398,97 @@ func (api *API) traceChain(ctx context.Context, start, end *types.Block, config default: log.Info("Chain tracing finished", "start", start.NumberU64(), "end", end.NumberU64(), "transactions", traced, "elapsed", time.Since(begin)) } - close(results) + close(resCh) }() - var preferDisk bool // Feed all the blocks both into the tracer, as well as fast process concurrently for number = start.NumberU64(); number < end.NumberU64(); number++ { // Stop tracing if interruption was requested select { - case <-notifier.Closed(): + case <-closed: return default: } - // clean out any derefs - derefsMu.Lock() - for _, h := range derefTodo { - statedb.Database().TrieDB().Dereference(h) - } - derefTodo = derefTodo[:0] - derefsMu.Unlock() - // Print progress logs if long enough time elapsed if time.Since(logged) > 8*time.Second { logged = time.Now() log.Info("Tracing chain segment", "start", start.NumberU64(), "end", end.NumberU64(), "current", number, "transactions", traced, "elapsed", time.Since(begin)) } - // Retrieve the parent state to trace on top - block, err := api.blockByNumber(localctx, rpc.BlockNumber(number)) + // Retrieve the parent block and target block for tracing. + block, err := api.blockByNumber(ctx, rpc.BlockNumber(number)) if err != nil { failed = err break } - // Prepare the statedb for tracing. Don't use the live database for - // tracing to avoid persisting state junks into the database. 
- statedb, err = api.backend.StateAtBlock(localctx, block, reexec, statedb, false, preferDisk) + next, err := api.blockByNumber(ctx, rpc.BlockNumber(number+1)) if err != nil { failed = err break } - if trieDb := statedb.Database().TrieDB(); trieDb != nil { - // Hold the reference for tracer, will be released at the final stage - trieDb.Reference(block.Root(), common.Hash{}) - - // Release the parent state because it's already held by the tracer - if parent != (common.Hash{}) { - trieDb.Dereference(parent) - } - // Prefer disk if the trie db memory grows too much - s1, s2 := trieDb.Size() - if !preferDisk && (s1+s2) > defaultTracechainMemLimit { - log.Info("Switching to prefer-disk mode for tracing", "size", s1+s2) - preferDisk = true - } + // Prepare the statedb for tracing. Don't use the live database for + // tracing to avoid persisting state junks into the database. Switch + // over to `preferDisk` mode only if the memory usage exceeds the + // limit, the trie database will be reconstructed from scratch only + // if the relevant state is available in disk. + var preferDisk bool + if statedb != nil { + s1, s2 := statedb.Database().TrieDB().Size() + preferDisk = s1+s2 > defaultTracechainMemLimit } - parent = block.Root() - - next, err := api.blockByNumber(localctx, rpc.BlockNumber(number+1)) + statedb, release, err = api.backend.StateAtBlock(ctx, block, reexec, statedb, false, preferDisk) if err != nil { failed = err break } + // Clean out any pending derefs. Note this step must be done after + // constructing tracing state, because the tracing state of block + // next depends on the parent state and construction may fail if + // we release too early. 
+ reler.call() + // Send the block over to the concurrent tracers (if not in the fast-forward phase) txs := next.Transactions() select { - case tasks <- &blockTraceTask{statedb: statedb.Copy(), block: next, rootref: block.Root(), results: make([]*txTraceResult, len(txs))}: - case <-notifier.Closed(): + case taskCh <- &blockTraceTask{statedb: statedb.Copy(), block: next, release: release, results: make([]*txTraceResult, len(txs))}: + case <-closed: + reler.add(release) return } traced += uint64(len(txs)) } }() - // Keep reading the trace results and stream the to the user + // Keep reading the trace results and stream them to result channel. + retCh := make(chan *blockTraceResult) go func() { + defer close(retCh) var ( - done = make(map[uint64]*blockTraceResult) next = start.NumberU64() + 1 + done = make(map[uint64]*blockTraceResult) ) - for res := range results { + for res := range resCh { // Queue up next received result result := &blockTraceResult{ Block: hexutil.Uint64(res.block.NumberU64()), Hash: res.block.Hash(), Traces: res.results, } - // Schedule any parent tries held in memory by this task for dereferencing done[uint64(result.Block)] = result - derefsMu.Lock() - derefTodo = append(derefTodo, res.rootref) - derefsMu.Unlock() - // Stream completed traces to the user, aborting on the first error + + // Stream completed traces to the result channel for result, ok := done[next]; ok; result, ok = done[next] { if len(result.Traces) > 0 || next == end.NumberU64() { - notifier.Notify(sub.ID, result) + // It will be blocked in case the channel consumer doesn't take the + // tracing result in time(e.g. the websocket connect is not stable) + // which will eventually block the entire chain tracer. It's the + // expected behavior to not waste node resources for a non-active user. 
+ retCh <- result } delete(done, next) next++ } } }() - return sub, nil + return retCh } // TraceBlockByNumber returns the structured logs created during the execution of @@ -463,7 +513,7 @@ func (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config * // TraceBlock returns the structured logs created during the execution of EVM // and returns them as a JSON object. -func (api *API) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *TraceConfig) ([]*txTraceResult, error) { +func (api *baseAPI) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *TraceConfig) ([]*txTraceResult, error) { block := new(types.Block) if err := rlp.Decode(bytes.NewReader(blob), block); err != nil { return nil, fmt.Errorf("could not decode block: %v", err) @@ -471,6 +521,16 @@ func (api *API) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *Trac return api.traceBlock(ctx, block, config) } +// TraceBlockFromFile returns the structured logs created during the execution of +// EVM and returns them as a JSON object. +func (api *FileTracerAPI) TraceBlockFromFile(ctx context.Context, file string, config *TraceConfig) ([]*txTraceResult, error) { + blob, err := os.ReadFile(file) + if err != nil { + return nil, fmt.Errorf("could not read file: %v", err) + } + return api.TraceBlock(ctx, blob, config) +} + // TraceBadBlock returns the structured logs created during the execution of // EVM against a block pulled from the pool of bad ones and returns them as a JSON // object. @@ -492,6 +552,17 @@ func (api *API) TraceBadBlock(ctx context.Context, hash common.Hash, config *Tra return api.traceBlock(ctx, block, config) } +// StandardTraceBlockToFile dumps the structured logs created during the +// execution of EVM to the local file system and returns a list of files +// to the caller. 
+func (api *FileTracerAPI) StandardTraceBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) { + block, err := api.blockByHash(ctx, hash) + if err != nil { + return nil, err + } + return api.standardTraceBlockToFile(ctx, block, config) +} + // IntermediateRoots executes a block (bad- or canon- or side-), and returns a list // of intermediate roots: the stateroot after each transaction. func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config *TraceConfig) ([]common.Hash, error) { @@ -510,10 +581,12 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } + defer release() + var ( roots []common.Hash signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), new(big.Int).SetUint64(block.Time())) @@ -545,10 +618,31 @@ func (api *API) IntermediateRoots(ctx context.Context, hash common.Hash, config return roots, nil } +// StandardTraceBadBlockToFile dumps the structured logs created during the +// execution of EVM against a block pulled from the pool of bad ones to the +// local file system and returns a list of files to the caller. +func (api *FileTracerAPI) StandardTraceBadBlockToFile(ctx context.Context, hash common.Hash, config *StdTraceConfig) ([]string, error) { + // Search for the bad block corresponding to [hash]. 
+ var ( + badBlocks, _ = api.backend.BadBlocks() + block *types.Block + ) + for _, badBlock := range badBlocks { + if hash == badBlock.Hash() { + block = badBlock + break + } + } + if block == nil { + return nil, fmt.Errorf("bad block %#x not found", hash) + } + return api.standardTraceBlockToFile(ctx, block, config) +} + // traceBlock configures a new tracer according to the provided configuration, and // executes all the transactions contained within. The return value will be one item // per transaction, dependent on the requested tracer. -func (api *API) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) { +func (api *baseAPI) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) { if block.NumberU64() == 0 { return nil, errors.New("genesis is not traceable") } @@ -560,10 +654,12 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { return nil, err } + defer release() + // Execute all the transaction contained within the block concurrently var ( signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), new(big.Int).SetUint64(block.Time())) @@ -630,6 +726,128 @@ func (api *API) traceBlock(ctx context.Context, block *types.Block, config *Trac return results, nil } +// standardTraceBlockToFile configures a new tracer which uses standard JSON output, +// and traces either a full block or an individual transaction. The return value will +// be one filename per transaction traced.
+func (api *FileTracerAPI) standardTraceBlockToFile(ctx context.Context, block *types.Block, config *StdTraceConfig) ([]string, error) { + // If we're tracing a single transaction, make sure it's present + if config != nil && config.TxHash != (common.Hash{}) { + if !containsTx(block, config.TxHash) { + return nil, fmt.Errorf("transaction %#x not found in block", config.TxHash) + } + } + if block.NumberU64() == 0 { + return nil, errors.New("genesis is not traceable") + } + parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash()) + if err != nil { + return nil, err + } + reexec := defaultTraceReexec + if config != nil && config.Reexec != nil { + reexec = *config.Reexec + } + statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false) + if err != nil { + return nil, err + } + defer release() + + // Retrieve the tracing configurations, or use default values + var ( + logConfig logger.Config + txHash common.Hash + ) + if config != nil { + logConfig = config.Config + txHash = config.TxHash + } + logConfig.Debug = true + + // Execute transaction, either tracing all or just the requested one + var ( + dumps []string + signer = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Timestamp()) + chainConfig = api.backend.ChainConfig() + vmctx = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) + canon = true + ) + // Check if there are any overrides: the caller may wish to enable a future + // fork when executing this block. Note, such overrides are only applicable to the + // actual specified block, not any preceding blocks that we have to go through + // in order to obtain the state. 
+ // Therefore, it's perfectly valid to specify `"futureForkBlock": 0`, to enable `futureFork` + if config != nil && config.Overrides != nil { + chainConfig, canon = overrideConfig(chainConfig, config.Overrides) + } + for i, tx := range block.Transactions() { + // Prepare the transaction for un-traced execution + var ( + msg, _ = tx.AsMessage(signer, block.BaseFee()) + txContext = core.NewEVMTxContext(msg) + vmConf vm.Config + dump *os.File + writer *bufio.Writer + err error + ) + // If the transaction needs tracing, swap out the configs + if tx.Hash() == txHash || txHash == (common.Hash{}) { + // Generate a unique temporary file to dump it into + prefix := fmt.Sprintf("block_%#x-%d-%#x-", block.Hash().Bytes()[:4], i, tx.Hash().Bytes()[:4]) + if !canon { + prefix = fmt.Sprintf("%valt-", prefix) + } + dump, err = os.CreateTemp(os.TempDir(), prefix) + if err != nil { + return nil, err + } + dumps = append(dumps, dump.Name()) + + // Swap out the noop logger to the standard tracer + writer = bufio.NewWriter(dump) + vmConf = vm.Config{ + Debug: true, + Tracer: logger.NewJSONLogger(&logConfig, writer), + EnablePreimageRecording: true, + } + } + // Execute the transaction and flush any traces to disk + vmenv := vm.NewEVM(vmctx, txContext, statedb, chainConfig, vmConf) + statedb.Prepare(tx.Hash(), i) + _, err = core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(msg.Gas())) + if writer != nil { + writer.Flush() + } + if dump != nil { + dump.Close() + log.Info("Wrote standard trace", "file", dump.Name()) + } + if err != nil { + return dumps, err + } + // Finalize the state so any modifications are written to the trie + // Only delete empty objects if EIP158/161 (a.k.a Spurious Dragon) is in effect + statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) + + // If we've traced the transaction we were looking for, abort + if tx.Hash() == txHash { + break + } + } + return dumps, nil +} + +// containsTx reports whether the transaction with a certain hash +// is 
contained within the specified block. +func containsTx(block *types.Block, hash common.Hash) bool { + for _, tx := range block.Transactions() { + if tx.Hash() == hash { + return true + } + } + return false +} + // TraceTransaction returns the structured logs created during the execution of EVM // and returns them as a JSON object. func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config *TraceConfig) (interface{}, error) { @@ -649,10 +867,12 @@ func (api *API) TraceTransaction(ctx context.Context, hash common.Hash, config * if err != nil { return nil, err } - msg, vmctx, statedb, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) + msg, vmctx, statedb, release, err := api.backend.StateAtTransaction(ctx, block, int(index), reexec) if err != nil { return nil, err } + defer release() + txctx := &Context{ BlockHash: blockHash, TxIndex: int(index), @@ -693,10 +913,12 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc if config != nil && config.Reexec != nil { reexec = *config.Reexec } - statedb, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) + statedb, release, err := api.backend.StateAtBlock(ctx, block, reexec, nil, true, false) if err != nil { return nil, err } + defer release() + vmctx := core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil) // Apply the customization rules if required. if config != nil { @@ -726,7 +948,7 @@ func (api *API) TraceCall(ctx context.Context, args ethapi.TransactionArgs, bloc // traceTx configures a new tracer according to the provided configuration, and // executes the given message in the provided environment. The return value will // be tracer dependent. 
-func (api *API) traceTx(ctx context.Context, message core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { +func (api *baseAPI) traceTx(ctx context.Context, message core.Message, txctx *Context, vmctx vm.BlockContext, statedb *state.StateDB, config *TraceConfig) (interface{}, error) { var ( tracer Tracer err error @@ -778,5 +1000,63 @@ func APIs(backend Backend) []rpc.API { Service: NewAPI(backend), Name: "debug-tracer", }, + { + Namespace: "debug", + Service: NewFileTracerAPI(backend), + Name: "debug-file-tracer", + }, + } +} + +// overrideConfig returns a copy of [original] with network upgrades enabled by [override] enabled, +// along with a boolean that indicates whether the copy is canonical (equivalent to the original). +func overrideConfig(original *params.ChainConfig, override *params.ChainConfig) (*params.ChainConfig, bool) { + copy := new(params.ChainConfig) + *copy = *original + canon := true + + // Apply network upgrades (after Berlin) to the copy. + // Note in coreth, ApricotPhase2 is the "equivalent" to Berlin. 
+ if timestamp := override.ApricotPhase2BlockTimestamp; timestamp != nil { + copy.ApricotPhase2BlockTimestamp = timestamp + canon = false + } + if timestamp := override.ApricotPhase3BlockTimestamp; timestamp != nil { + copy.ApricotPhase3BlockTimestamp = timestamp + canon = false } + if timestamp := override.ApricotPhase4BlockTimestamp; timestamp != nil { + copy.ApricotPhase4BlockTimestamp = timestamp + canon = false + } + if timestamp := override.ApricotPhase5BlockTimestamp; timestamp != nil { + copy.ApricotPhase5BlockTimestamp = timestamp + canon = false + } + if timestamp := override.ApricotPhasePre6BlockTimestamp; timestamp != nil { + copy.ApricotPhasePre6BlockTimestamp = timestamp + canon = false + } + if timestamp := override.ApricotPhase6BlockTimestamp; timestamp != nil { + copy.ApricotPhase6BlockTimestamp = timestamp + canon = false + } + if timestamp := override.ApricotPhasePost6BlockTimestamp; timestamp != nil { + copy.ApricotPhasePost6BlockTimestamp = timestamp + canon = false + } + if timestamp := override.BanffBlockTimestamp; timestamp != nil { + copy.BanffBlockTimestamp = timestamp + canon = false + } + if timestamp := override.CortinaBlockTimestamp; timestamp != nil { + copy.CortinaBlockTimestamp = timestamp + canon = false + } + if timestamp := override.DUpgradeBlockTimestamp; timestamp != nil { + copy.DUpgradeBlockTimestamp = timestamp + canon = false + } + + return copy, canon } diff --git a/coreth/eth/tracers/api_test.go b/coreth/eth/tracers/api_test.go index 18220495..be367340 100644 --- a/coreth/eth/tracers/api_test.go +++ b/coreth/eth/tracers/api_test.go @@ -36,6 +36,7 @@ import ( "math/big" "reflect" "sort" + "sync/atomic" "testing" "github.com/ava-labs/coreth/consensus" @@ -66,34 +67,31 @@ type testBackend struct { engine consensus.Engine chaindb ethdb.Database chain *core.BlockChain + + refHook func() // Hook is invoked when the requested state is referenced + relHook func() // Hook is invoked when the requested state is released } func 
newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend { backend := &testBackend{ - chainConfig: params.TestChainConfig, + chainConfig: gspec.Config, engine: dummy.NewETHFaker(), chaindb: rawdb.NewMemoryDatabase(), } // Generate blocks for testing - gspec.Config = backend.chainConfig - var ( - gendb = rawdb.NewMemoryDatabase() - genesis = gspec.MustCommit(gendb) - ) - blocks, _, err := core.GenerateChain(backend.chainConfig, genesis, backend.engine, gendb, n, 10, generator) + _, blocks, _, err := core.GenerateChainWithGenesis(gspec, backend.engine, n, 10, generator) if err != nil { t.Fatal(err) } // Import the canonical chain - gspec.MustCommit(backend.chaindb) cacheConfig := &core.CacheConfig{ TrieCleanLimit: 256, TrieDirtyLimit: 256, SnapshotLimit: 128, Pruning: false, // Archive mode } - chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, backend.chainConfig, backend.engine, vm.Config{}, common.Hash{}) + chain, err := core.NewBlockChain(backend.chaindb, cacheConfig, gspec, backend.engine, vm.Config{}, common.Hash{}, false) if err != nil { t.Fatalf("failed to create tester chain: %v", err) } @@ -158,25 +156,33 @@ func (b *testBackend) ChainDb() ethdb.Database { return b.chaindb } -func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, checkLive bool, preferDisk bool) (*state.StateDB, error) { +func (b *testBackend) StateAtBlock(ctx context.Context, block *types.Block, reexec uint64, base *state.StateDB, readOnly bool, preferDisk bool) (*state.StateDB, StateReleaseFunc, error) { statedb, err := b.chain.StateAt(block.Root()) if err != nil { - return nil, errStateNotFound + return nil, nil, errStateNotFound + } + if b.refHook != nil { + b.refHook() + } + release := func() { + if b.relHook != nil { + b.relHook() + } } - return statedb, nil + return statedb, release, nil } -func (b *testBackend) StateAtTransaction(ctx context.Context, block 
*types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, error) { +func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block, txIndex int, reexec uint64) (core.Message, vm.BlockContext, *state.StateDB, StateReleaseFunc, error) { parent := b.chain.GetBlock(block.ParentHash(), block.NumberU64()-1) if parent == nil { - return nil, vm.BlockContext{}, nil, errBlockNotFound + return nil, vm.BlockContext{}, nil, nil, errBlockNotFound } - statedb, err := b.chain.StateAt(parent.Root()) + statedb, release, err := b.StateAtBlock(ctx, parent, reexec, nil, true, false) if err != nil { - return nil, vm.BlockContext{}, nil, errStateNotFound + return nil, vm.BlockContext{}, nil, nil, errStateNotFound } if txIndex == 0 && len(block.Transactions()) == 0 { - return nil, vm.BlockContext{}, statedb, nil + return nil, vm.BlockContext{}, statedb, release, nil } // Recompute transactions up to the target index. signer := types.MakeSigner(b.chainConfig, block.Number(), new(big.Int).SetUint64(block.Time())) @@ -185,15 +191,15 @@ func (b *testBackend) StateAtTransaction(ctx context.Context, block *types.Block txContext := core.NewEVMTxContext(msg) context := core.NewEVMBlockContext(block.Header(), b.chain, nil) if idx == txIndex { - return msg, context, statedb, nil + return msg, context, statedb, release, nil } vmenv := vm.NewEVM(context, txContext, statedb, b.chainConfig, vm.Config{}) if _, err := core.ApplyMessage(vmenv, msg, new(core.GasPool).AddGas(tx.Gas())); err != nil { - return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction %#x failed: %v", tx.Hash(), err) } statedb.Finalise(vmenv.ChainConfig().IsEIP158(block.Number())) } - return nil, vm.BlockContext{}, nil, fmt.Errorf("transaction index %d out of range for block %#x", txIndex, block.Hash()) + return nil, vm.BlockContext{}, nil, nil, fmt.Errorf("transaction index %d out of 
range for block %#x", txIndex, block.Hash()) } func TestTraceCall(t *testing.T) { @@ -201,11 +207,13 @@ func TestTraceCall(t *testing.T) { // Initialize test accounts accounts := newAccounts(3) - genesis := &core.Genesis{Alloc: core.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }} + genesis := &core.Genesis{ + Config: params.TestBanffChainConfig, // TODO: go-ethereum has not enabled Shanghai yet, so we use Banff here so tests pass. + Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(params.Ether)}, + }} genBlocks := 10 signer := types.HomesteadSigner{} api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { @@ -331,10 +339,13 @@ func TestTraceTransaction(t *testing.T) { // Initialize test accounts accounts := newAccounts(2) - genesis := &core.Genesis{Alloc: core.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - }} + genesis := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + }, + } target := common.Hash{} signer := types.HomesteadSigner{} api := NewAPI(newTestBackend(t, 1, genesis, func(i int, b *core.BlockGen) { @@ -369,11 +380,14 @@ func TestTraceBlock(t *testing.T) { // Initialize test accounts accounts := newAccounts(3) - genesis := &core.Genesis{Alloc: core.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }} + genesis := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + 
accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(params.Ether)}, + }, + } genBlocks := 10 signer := types.HomesteadSigner{} api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { @@ -444,11 +458,14 @@ func TestTracingWithOverrides(t *testing.T) { t.Parallel() // Initialize test accounts accounts := newAccounts(3) - genesis := &core.Genesis{Alloc: core.GenesisAlloc{ - accounts[0].addr: {Balance: big.NewInt(params.Ether)}, - accounts[1].addr: {Balance: big.NewInt(params.Ether)}, - accounts[2].addr: {Balance: big.NewInt(params.Ether)}, - }} + genesis := &core.Genesis{ + Config: params.TestBanffChainConfig, // TODO: go-ethereum has not enabled Shanghai yet, so we use Banff here so tests pass. + Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(params.Ether)}, + }, + } genBlocks := 10 signer := types.HomesteadSigner{} api := NewAPI(newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { @@ -636,3 +653,78 @@ func newStates(keys []common.Hash, vals []common.Hash) *map[common.Hash]common.H } return &m } + +func TestTraceChain(t *testing.T) { + // Initialize test accounts + // Note: the balances in this test have been increased compared to go-ethereum. 
+ accounts := newAccounts(3) + genesis := &core.Genesis{ + Config: params.TestChainConfig, + Alloc: core.GenesisAlloc{ + accounts[0].addr: {Balance: big.NewInt(5 * params.Ether)}, + accounts[1].addr: {Balance: big.NewInt(5 * params.Ether)}, + accounts[2].addr: {Balance: big.NewInt(5 * params.Ether)}, + }, + } + genBlocks := 50 + signer := types.HomesteadSigner{} + + var ( + ref uint32 // total refs has made + rel uint32 // total rels has made + nonce uint64 + ) + backend := newTestBackend(t, genBlocks, genesis, func(i int, b *core.BlockGen) { + // Transfer from account[0] to account[1] + // value: 1000 wei + // fee: 0 wei + for j := 0; j < i+1; j++ { + tx, _ := types.SignTx(types.NewTransaction(nonce, accounts[1].addr, big.NewInt(1000), params.TxGas, b.BaseFee(), nil), signer, accounts[0].key) + b.AddTx(tx) + nonce += 1 + } + }) + backend.refHook = func() { atomic.AddUint32(&ref, 1) } + backend.relHook = func() { atomic.AddUint32(&rel, 1) } + api := NewAPI(backend) + + single := `{"result":{"gas":21000,"failed":false,"returnValue":"","structLogs":[]}}` + var cases = []struct { + start uint64 + end uint64 + config *TraceConfig + }{ + {0, 50, nil}, // the entire chain range, blocks [1, 50] + {10, 20, nil}, // the middle chain range, blocks [11, 20] + } + for _, c := range cases { + ref, rel = 0, 0 // clean up the counters + + from, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.start)) + to, _ := api.blockByNumber(context.Background(), rpc.BlockNumber(c.end)) + resCh := api.traceChain(from, to, c.config, nil) + + next := c.start + 1 + for result := range resCh { + if next != uint64(result.Block) { + t.Error("Unexpected tracing block") + } + if len(result.Traces) != int(next) { + t.Error("Unexpected tracing result") + } + for _, trace := range result.Traces { + blob, _ := json.Marshal(trace) + if string(blob) != single { + t.Error("Unexpected tracing result") + } + } + next += 1 + } + if next != c.end+1 { + t.Error("Missing tracing block") + } + if ref 
!= rel { + t.Errorf("Ref and deref actions are not equal, ref %d rel %d", ref, rel) + } + } +} diff --git a/coreth/eth/tracers/internal/tracetest/calltrace_test.go b/coreth/eth/tracers/internal/tracetest/calltrace_test.go index 340c35d0..1964c420 100644 --- a/coreth/eth/tracers/internal/tracetest/calltrace_test.go +++ b/coreth/eth/tracers/internal/tracetest/calltrace_test.go @@ -49,7 +49,8 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/rlp" - // Force-load native, to trigger registration + // Force-load native and js packages to trigger registration + _ "github.com/ava-labs/coreth/eth/tracers/js" _ "github.com/ava-labs/coreth/eth/tracers/native" ) @@ -134,6 +135,12 @@ type callTracerTest struct { Result *callTrace `json:"result"` } +// Iterates over all the input-output datasets in the tracer test harness and +// runs the JavaScript tracers against them. +func TestCallTracerLegacy(t *testing.T) { + testCallTracer("callTracerLegacy", "call_tracer_legacy", t) +} + func TestCallTracerNative(t *testing.T) { testCallTracer("callTracer", "call_tracer", t) } diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json new file mode 100644 index 00000000..8699bf3e --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/create.json @@ -0,0 +1,58 @@ +{ + "context": { + "difficulty": "3755480783", + "gasLimit": "5401723", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "number": "2294702", + "timestamp": "1513676146" + }, + "genesis": { + "alloc": { + "0x13e4acefe6a6700604929946e70e6443e4e73447": { + "balance": "0xcf3e0938579f000", + "code": "0x", + "nonce": "9", + "storage": {} + }, + "0x7dc9c9730689ff0b0fd506c67db815f12d90a448": { + "balance": "0x0", + "code": "0x", + "nonce": "0", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": 
true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3757315409", + "extraData": "0x566961425443", + "gasLimit": "5406414", + "hash": "0xae107f592eebdd9ff8d6ba00363676096e6afb0e1007a7d3d0af88173077378d", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "mixHash": "0xc927aa05a38bc3de864e95c33b3ae559d3f39c4ccd51cef6f113f9c50ba0caf1", + "nonce": "0x93363bbd2c95f410", + "number": "2294701", + "stateRoot": "0x6b6737d5bde8058990483e915866bd1578014baeff57bd5e4ed228a2bfad635c", + "timestamp": "1513676127", + "totalDifficulty": "7160808139332585" + }, + "input": "0xf907ef098504e3b29200830897be8080b9079c606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffff
ffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a9004
60ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a1129a01060f46676a5dff6f407f0f51eb6f37f5c8c54e238c70221e18e65fc29d3ea65a0557b01c50ff4ffaac8ed6e5d31237a4ecbac843ab1bfe8bb0165a0060df7c54f", + "result": { + "from": "0x13e4acefe6a6700604929946e70e6443e4e73447", + "gas": "0x5e106", + "gasUsed": "0x5e106", + "input": "0x606060405260405160208061077c83398101604052808051906020019091905050600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415151561007d57600080fd5b336000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555080600160006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506001600460006101000a81548160ff02191690831515021790555050610653806101296000396000f300606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc60048
08035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffff
fffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029000000000000000000000000c65e620a3a55451316168d57e268f5702ef56a11", + "output": "0x606060405260043610610083576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806305e4382a146100855780631c02708d146100ae5780632e1a7d4d146100c35780635114cb52146100e6578063a37dda2c146100fe578063ae200e7914610153578063b5769f70146101a8575b005b341561009057600080fd5b6100986101d1565b6040518082815260200191505060405180910390f35b34156100b957600080fd5b6100c16101d7565b005b34156100ce57600080fd5b6100e460048080359060200190919050506102eb565b005b6100fc6004808035906020019091905050610513565b005b341561010957600080fd5b6101116105d6565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561015e57600080fd5b6101666105fc565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b34156101b357600080fd5b6101bb610621565b6040518082815260200191505060405180910390f35b60025481565b60011515600460009054906101000a900460ff1615151415156101f957600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373fffffffffffffffffffffffff
fffffffffffffff1614806102a15750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b15156102ac57600080fd5b6000600460006101000a81548160ff0219169083151502179055506003543073ffffffffffffffffffffffffffffffffffffffff163103600281905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614806103935750600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16145b151561039e57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141561048357600060025411801561040757506002548111155b151561041257600080fd5b80600254036002819055506000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561047e57600080fd5b610510565b600060035411801561049757506003548111155b15156104a257600080fd5b8060035403600381905550600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561050f57600080fd5b5b50565b60011515600460009054906101000a900460ff16151514151561053557600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614801561059657506003548160035401115b80156105bd575080600354013073ffffffffffffffffffffffffffffffffffffffff163110155b15156105c857600080fd5b806003540160038190555050565b600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600354815600a
165627a7a72305820c3b849e8440987ce43eae3097b77672a69234d516351368b03fe5b7de03807910029", + "to": "0x7dc9c9730689ff0b0fd506c67db815f12d90a448", + "type": "CREATE", + "value": "0x0" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json new file mode 100644 index 00000000..0353d4cf --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/deep_calls.json @@ -0,0 +1,415 @@ +{ + "context": { + "difficulty": "117066904", + "gasLimit": "4712384", + "miner": "0x1977c248e1014cc103929dd7f154199c916e39ec", + "number": "25001", + "timestamp": "1479891545" + }, + "genesis": { + "alloc": { + "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38": { + "balance": "0x0", + "code": "0x606060405236156100825760e060020a600035046302d05d3f811461008a5780630accce061461009c5780631ab9075a146100c757806331ed274614610102578063645a3b7214610133578063772fdae314610155578063a7f4377914610180578063ae5f80801461019e578063c9bded21146101ea578063f905c15a14610231575b61023a610002565b61023c600054600160a060020a031681565b61023a600435602435604435606435608435600254600160a060020a03166000141561024657610002565b61023a600435600254600160a060020a03166000148015906100f8575060025433600160a060020a03908116911614155b156102f457610002565b61023a60043560243560443560643560843560a43560c435600254600160a060020a03166000141561031657610002565b61023a600435602435600254600160a060020a0316600014156103d057610002565b61023a600435602435604435606435608435600254600160a060020a03166000141561046157610002565b61023a60025433600160a060020a0390811691161461051657610002565b61023a6004356024356044356060828152600160a060020a0382169060ff8516907fa6c2f0913db6f79ff0a4365762c61718973b3413d6e40382e704782a9a5099f690602090a3505050565b61023a600435602435600160a060020a038116606090815260ff8316907fee6348a7ec70f74e3d6cba55a53e9f9110d180d7698e9117fc466ae29a43e34790602090a25050565b61023c60035481565b005b6060908152602090f35b60025460e
060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061029d57610002565b60408051858152602081018390528151600160a060020a03858116939087169260ff8a16927f5a690ecd0cb15c1c1fd6b6f8a32df0d4f56cb41a54fea7e94020f013595de796929181900390910190a45050505050565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061036d57610002565b6040805186815260208101869052808201859052606081018490529051600160a060020a03831691889160ff8b16917fd65d9ddafbad8824e2bbd6f56cc9f4ac27ba60737035c10a321ea2f681c94d47919081900360800190a450505050505050565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f115610002575050604051511515905061042757610002565b60408051828152905183917fa9c6cbc4bd352a6940479f6d802a1001550581858b310d7f68f7bea51218cda6919081900360200190a25050565b60025460e060020a6313bc6d4b02606090815233600160a060020a0390811660645291909116906313bc6d4b906084906020906024816000876161da5a03f11561000257505060405151151590506104b857610002565b80600160a060020a031684600160a060020a03168660ff167f69bdaf789251e1d3a0151259c0c715315496a7404bce9fd0b714674685c2cab78686604051808381526020018281526020019250505060405180910390a45050505050565b600254600160a060020a0316ff", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396" + } + }, + "0x2cccf5e0538493c235d1c5ef6580f77d99e91396": { + "balance": "0x0", + "code": 
"0x606060405236156100775760e060020a600035046302d05d3f811461007f57806313bc6d4b146100915780633688a877146100b95780635188f9961461012f5780637eadc976146101545780638ad79680146101d3578063a43e04d814610238578063a7f437791461025e578063e16c7d981461027c575b61029f610002565b6102a1600054600160a060020a031681565b6102be600435600160a060020a03811660009081526002602052604090205460ff165b919050565b6102d26004356040805160208181018352600080835284815260038252835190849020805460026001821615610100026000190190911604601f8101849004840283018401909552848252929390929183018282801561037d5780601f106103525761010080835404028352916020019161037d565b61029f6004356024356000805433600160a060020a039081169116146104a957610002565b61034060043560008181526001602090815260408083205481517ff905c15a0000000000000000000000000000000000000000000000000000000081529151600160a060020a03909116928392839263f905c15a92600483810193919291829003018189876161da5a03f1156100025750506040515195945050505050565b60408051602060248035600481810135601f810185900485028601850190965285855261029f9581359591946044949293909201918190840183828082843750949650505050505050600054600160a060020a0390811633909116146104f657610002565b61029f6004355b600080548190600160a060020a0390811633909116146105a457610002565b61029f60005433600160a060020a0390811691161461072957610002565b6102a1600435600081815260016020526040902054600160a060020a03166100b4565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60405180806020018281038252838181518152602001915080519060200190808383829060006004602084601f0104600f02600301f150905090810190601f1680156103325780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60408051918252519081900360200190f35b820191906000526020600020905b81548152906001019060200180831161036057829003601f168201915b505050505090506100b4565b506000828152600160208181526040808420805473ffffffffffffffffffffffffffffffffffffffff191686179055600160a060020a038581168086526002909352818520805460ff191690941790935580517f1ab9075a000000000
0000000000000000000000000000000000000000000000081523090931660048401525184939192631ab9075a926024828101939192829003018183876161da5a03f11561000257505060408051602081018690528082019290925243606083015260808083526003908301527f414444000000000000000000000000000000000000000000000000000000000060a0830152517f8ac68d4e97d65912f220b4c5f87978b8186320a5e378c1369850b5b5f90323d39181900360c00190a15b505050565b600083815260016020526040902054600160a060020a03838116911614156104d0576104a4565b600083815260016020526040812054600160a060020a031614610389576103898361023f565b600082815260036020908152604082208054845182855293839020919360026001831615610100026000190190921691909104601f90810184900483019391929186019083901061056a57805160ff19168380011785555b5061059a9291505b808211156105a05760008155600101610556565b8280016001018555821561054e579182015b8281111561054e57825182600050559160200191906001019061057c565b50505050565b5090565b600083815260016020526040812054600160a060020a031614156105c757610002565b50506000818152600160205260408082205481517fa7f437790000000000000000000000000000000000000000000000000000000081529151600160a060020a0391909116928392839263a7f4377992600483810193919291829003018183876161da5a03f11561000257505050600160005060008460001916815260200190815260200160002060006101000a815490600160a060020a0302191690556002600050600083600160a060020a0316815260200190815260200160002060006101000a81549060ff02191690557f8ac68d4e97d65912f220b4c5f87978b8186320a5e378c1369850b5b5f90323d383834360405180806020018560001916815260200184600160a060020a03168152602001838152602001828103825260038152602001807f44454c000000000000000000000000000000000000000000000000000000000081526020015060200194505050505060405180910390a1505050565b600054600160a060020a0316ff", + "nonce": "1", + "storage": { + "0x0684ac65a9fa32414dda56996f4183597d695987fdb82b145d722743891a6fe8": "0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "0x1cd76f78169a420d99346e3501dd3e541622c38a226f9b63e01cfebc69879dc7": 
"0x000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f", + "0x8e54a4494fe5da016bfc01363f4f6cdc91013bb5434bd2a4a3359f13a23afa2f": "0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf", + "0x94edf7f600ba56655fd65fca1f1424334ce369326c1dc3e53151dcd1ad06bc13": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xbbee47108b275f55f98482c6800f6372165e88b0330d3f5dae6419df4734366c": "0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "0xd38c0c4e84de118cfdcc775130155d83b8bbaaf23dc7f3c83a626b10473213bd": "0x0000000000000000000000000000000000000000000000000000000000000001", + "0xfb3aa5c655c2ec9d40609401f88d505d1da61afaa550e36ef5da0509ada257ba": "0x0000000000000000000000007986bad81f4cbd9317f5a46861437dae58d69113" + } + }, + "0x3e9286eafa2db8101246c2131c09b49080d00690": { + "balance": "0x0", + "code": "0x606060405236156100cf5760e060020a600035046302d05d3f81146100d7578063056d4470146100e957806316c66cc61461010c5780631ab9075a146101935780633ae1005c146101ce57806358541662146101fe5780635ed61af014610231578063644e3b791461025457806384dbac3b146102db578063949ae479146102fd5780639859387b14610321578063a7f4377914610340578063ab03fc261461035e578063e8161b7814610385578063e964d4e114610395578063f905c15a146103a5578063f92eb774146103ae575b6103be610002565b6103c0600054600160a060020a031681565b6103be6004356002546000908190600160a060020a031681141561040357610002565b6103dd60043560006108365b6040805160025460e360020a631c2d8fb30282527f636f6e747261637464620000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b6103be600435600254600160a060020a03166000148015906101c4575060025433600160a060020a03908116911614155b1561088d57610002565b6103be600435602435604435606435600254600090819081908190600160a060020a03168114156108af57610002565b6103c0600435602435604435606435608435600254600090819081908190600160a060020a03168114156110e857610002565b6103b
e6004356002546000908190600160a060020a03168114156115ec57610002565b6103c06004356000611b635b6040805160025460e360020a631c2d8fb30282527f6d61726b6574646200000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b6103be600435602435600254600160a060020a031660001415611bb557610002565b6103be600435602435600254600090600160a060020a0316811415611d2e57610002565b6103be600435600254600160a060020a031660001415611fc657610002565b6103be60025433600160a060020a0390811691161461207e57610002565b6103be600435602435604435600254600090600160a060020a031681141561208c57610002565b6103dd60043560006124b8610260565b6103c0600435600061250a610118565b6103f160035481565b6103f16004356000612561610260565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061046557610002565b8291506104e55b6040805160025460e360020a631c2d8fb30282527f63706f6f6c00000000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f115610002575050604051519150505b90565b600160a060020a031663b2206e6d83600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fb2206e6d0000000000000000000000000000000000000000000000000000000082526004820152600160a060020a038816602482015290516044808301935060209282900301816000876161da5a03f11561000257505060405151915061059b90506106ba565b600160a060020a031663d5b205ce83600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a636ad902e7028252600160a060020a0390811660048301526024820187905288166044820152905160648281019
350600092829003018183876161da5a03f115610002575050506107355b6040805160025460e360020a631c2d8fb30282527f6c6f676d6772000000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b50826120ee5b6040805160025460e360020a631c2d8fb30282527f6163636f756e7463746c0000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b600160a060020a0316630accce06600684600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d02825291519192899290916336da446891600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150866040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050505050565b600160a060020a03166316c66cc6836040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051519150505b919050565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061091157610002565b87935061091c610260565b600160a060020a031663bdbdb08685600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fbdbdb0860000000000000000000000000000000000000000000000000000000082526004820152602481018a905290516044808301935060209282900301816000876161da5a03f1156100025750506040515193506109ca90506106ba565b600160a060020a03166381982a7a8885876040518460e060020a0281526004018084600160a060020a031681526020018381526020018260016
0a060020a0316815260200193505050506000604051808303816000876161da5a03f11561000257505050610a3661046c565b600160a060020a03166308636bdb85600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517f08636bdb000000000000000000000000000000000000000000000000000000008252600482015260248101889052604481019290925251606482810192602092919082900301816000876161da5a03f11561000257505060408051805160e160020a630a5d50db028252600482018190529151919450600160a060020a03871692506314baa1b6916024828101926000929190829003018183876161da5a03f11561000257505050610b3561046c565b600160a060020a0316630a3b6ede85600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63051db76f0282526004820152600160a060020a038d16602482015290516044808301935060209282900301816000876161da5a03f115610002575050604051519150610bd590506106ba565b600160a060020a031663d5b205ce87838b6040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f11561000257505050610c41610118565b600160a060020a031663988db79c888a6040518360e060020a0281526004018083600160a060020a0316815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050610ca5610260565b600160a060020a031663f4f2821b896040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050610d6f5b6040805160025460e360020a631c2d8fb30282527f747261646564620000000000000000000000000000000000000000000000000060048301529151600092600160a060020a03169163e16c7d98916024828101926020929190829003018187876161da5a03f1156100025750506040515191506104e29050565b600160a060020a0316635f539d69896040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050610dc2610639565b600160a060020a0316630accce06600386600160a060020a0316632e94420f6040518160e060020a028
1526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6315b1ea01028252915191928e928e9263ad8f500891600482810192602092919082900301816000876161da5a03f11561000257505050604051805190602001506040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050610ec5610639565b600160a060020a0316630accce06600386600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6315b1ea01028252915191928e928d9263ad8f500891600482810192602092919082900301816000876161da5a03f11561000257505050604051805190602001506040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050610fc8610639565b600160a060020a031663645a3b7285600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151905061101e610260565b600160a060020a031663f92eb77488600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f115610002575050505050505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061114a57610002565b604051600254600160a060020a0316908a908a908a908a908a90611579806125b38339018087600160a060020a0316815260200186600160a060020a03168152602001856000191681526020018481526020018381526020018281526020019650505050505050604051809103906000f092506111c5610118565b600160a060020a031663b9858a288
a856040518360e060020a0281526004018083600160a060020a0316815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611229610260565b600160a060020a0316635188f99689856040518360e060020a028152600401808360001916815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611288610260565b600160a060020a031663bdbdb08689896040518360e060020a0281526004018083600019168152602001828152602001925050506020604051808303816000876161da5a03f1156100025750506040515192506112e590506106ba565b600160a060020a03166346d88e7d8a858a6040518460e060020a0281526004018084600160a060020a0316815260200183600160a060020a0316815260200182815260200193505050506000604051808303816000876161da5a03f115610002575050506113516106ba565b600160a060020a03166381982a7a8a84866040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f115610002575050506113bd61046c565b600160a060020a0316632b58469689856040518360e060020a028152600401808360001916815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f1156100025750505061141c61046c565b600160a060020a03166308636bdb8984866040518460e060020a028152600401808460001916815260200183815260200182600160a060020a0316815260200193505050506020604051808303816000876161da5a03f11561000257505060408051805160e160020a630a5d50db028252600482018190529151919350600160a060020a03861692506314baa1b6916024828101926000929190829003018183876161da5a03f115610002575050506114d3610639565b6040805160e160020a630566670302815260016004820152602481018b9052600160a060020a0386811660448301528c811660648301526000608483018190529251931692630accce069260a480840193919291829003018183876161da5a03f11561000257505050611544610639565b600160a060020a031663645a3b728961155b610260565b600160a060020a031663f92eb7748c6040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f11561000257505060408051805160e060020a860282526004820194909452602481019
39093525160448084019360009350829003018183876161da5a03f1156100025750939a9950505050505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061164e57610002565b82915061165961046c565b600160a060020a0316630a3b6ede83600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a63051db76f0282526004820152600160a060020a038816602482015290516044808301935060209282900301816000876161da5a03f1156100025750506040515191506116f990506106ba565b600160a060020a031663d5b205ce83600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a636ad902e7028252600160a060020a0390811660048301526024820187905288166044820152905160648281019350600092829003018183876161da5a03f1156100025750505061179b6106ba565b600160a060020a031663d653078983600160a060020a03166336da44686040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517ff1ff78a0000000000000000000000000000000000000000000000000000000008252915191929163f1ff78a09160048181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150866040518460e060020a0281526004018084600160a060020a0316815260200183815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f1156100025750505061189f610260565b600160a060020a031663f4f2821b846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506118f2610118565b600160a060020a031663f4f2821b846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050611945610639565b600160a060020a0316630accce06600484600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d028252915
19192899290916336da44689181870191602091908190038801816000876161da5a03f115610002575050506040518051906020015060006040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f11561000257505050611a48610639565b600160a060020a031663645a3b7283600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519050611a9e610260565b600160a060020a031663f92eb77486600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f11561000257505050505050565b600160a060020a03166381738c59836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f1156100025750506040515115159050611c1757610002565b611c1f610260565b600160a060020a03166338a699a4836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f11561000257505060405151159050611c7457610002565b611c7c610260565b600160a060020a0316632243118a836040518260e060020a02815260040180826000191681526020019150506000604051808303816000876161da5a03f11561000257505050611cca610639565b600160a060020a031663ae5f8080600184846040518460e060020a028152600401808481526020018360001916815260200182600160a060020a0316815260200193505050506000604051808303816000876161da5a03f115610002575050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f1156100025750506040515115159050611d90576
10002565b5081611d9a610260565b600160a060020a031663581d5d6084846040518360e060020a0281526004018083600160a060020a03168152602001828152602001925050506000604051808303816000876161da5a03f11561000257505050611df5610639565b600160a060020a0316630accce06600283600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e160020a630566670302825260048201949094526024810193909352600160a060020a038816604484015260006064840181905260848401819052905160a4808501949293509091829003018183876161da5a03f11561000257505050611eab610639565b600160a060020a031663645a3b7282600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519050611f01610260565b600160a060020a031663f92eb77485600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e260020a633e4baddd028252600482015290516024828101935060209282900301816000876161da5a03f11561000257505060408051805160e060020a86028252600482019490945260248101939093525160448381019360009350829003018183876161da5a03f11561000257505050505050565b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061202857610002565b612030610118565b600160a060020a0316639859387b826040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f1156100025750505050565b600254600160a060020a0316ff5b6040805160025460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f11561000257505060405151151590506106b457610002565b600160a060020a031663d65307898383600160a060020a031663f1ff78a06040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fd6530789000000000000000000000000000000000000000000000000000000008252600160a060020a0394851660048301526024820
15292891660448401525160648381019360009350829003018183876161da5a03f115610002575050506121a5610118565b600160a060020a031663f4f2821b856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506121f8610cf4565b600160a060020a031663f4f2821b856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f1156100025750505061224b610639565b600160a060020a0316630accce06600583600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e360020a6306db488d028252915191928a9290916336da446891600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150886040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f1156100025750505080600160a060020a031663ea71b02d6040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151600160a060020a031660001490506124b25761239f610639565b600160a060020a0316630accce06600583600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750506040805180517fea71b02d000000000000000000000000000000000000000000000000000000008252915191928a92909163ea71b02d91600482810192602092919082900301816000876161da5a03f1156100025750505060405180519060200150886040518660e060020a028152600401808681526020018560001916815260200184600160a060020a0316815260200183600160a060020a03168152602001828152602001955050505050506000604051808303816000876161da5a03f115610002575050505b50505050565b600160a060020a03166338a699a4836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f1156100025750506040515191506108889050565b600160a060020a031663213fe2b7836040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051519150610
8889050565b600160a060020a031663f92eb774836040518260e060020a02815260040180826000191681526020019150506020604051808303816000876161da5a03f115610002575050604051519150610888905056606060405260405160c08061157983396101206040819052825160805160a051935160e0516101005160008054600160a060020a03199081163317909155600180546005805484168817905560048a90556006869055600b8590556008849055909116861760a060020a60ff02191690554360038190556002558686526101408390526101608190529396929594919390929091600160a060020a033016917f76885d242fb71c6f74a7e717416e42eff4d96faf54f6de75c6a0a6bbd8890c6b91a230600160a060020a03167fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff600b600050546040518082815260200191505060405180910390a250505050505061145e8061011b6000396000f3606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b61066560055460016
0a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d9891602481810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610
696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f
419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759
297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876
161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a03918216600485015260248401529251909450918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256", + "nonce": "16", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396" + } + }, + "0x70c9217d814985faef62b124420f8dfbddd96433": { + "balance": "0x4ef436dcbda6cd4a", + "code": "0x", + "nonce": "1634", + "storage": {} + }, + "0x7986bad81f4cbd9317f5a46861437dae58d69113": { + "balance": "0x0", + "code": 
"0x6060604052361561008d5760e060020a600035046302d05d3f811461009557806316c66cc6146100a75780631ab9075a146100d7578063213fe2b7146101125780639859387b1461013f578063988db79c1461015e578063a7f4377914610180578063b9858a281461019e578063c8e40fbf146101c0578063f4f2821b146101e8578063f905c15a14610209575b610212610002565b610214600054600160a060020a031681565b600160a060020a0360043581811660009081526005602052604081205461023193168114610257575060016101e3565b610212600435600254600160a060020a0316600014801590610108575060025433600160a060020a03908116911614155b1561025f57610002565b610214600435600160a060020a03811660009081526004602052604081205460ff16151561027557610002565b610212600435600254600160a060020a03166000141561029b57610002565b610212600435602435600254600160a060020a03166000141561050457610002565b61021260025433600160a060020a0390811691161461056757610002565b610212600435602435600254600160a060020a03166000141561057557610002565b610231600435600160a060020a03811660009081526004602052604090205460ff165b919050565b610212600435600254600090600160a060020a031681141561072057610002565b61024560035481565b005b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060006101e3565b60028054600160a060020a031916821790555b50565b50600160a060020a038181166000908152600460205260409020546101009004166101e3565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506102fe57610002565b600160a060020a03811660009081526004602052604090205460ff161515610272576040516104028061092e833901809050604051809103906000f06004600050600083600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600083600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555050565b600160a060020a03821660009081526004602052604090205460ff1615156104725760405161040280610d308
33901809050604051809103906000f06004600050600084600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff021916908302179055505b600160a060020a03828116600090815260046020819052604080518184205460e060020a630a3b0a4f02825286861693820193909352905161010090920490931692630a3b0a4f926024828101939192829003018183876161da5a03f11561000257505050600160a060020a03811660009081526006602052604090208054600160a060020a031916831790555b5050565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506103b957610002565b600254600160a060020a0316ff5b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f11561000257505060405151151590506105d857610002565b600160a060020a03821660009081526004602052604090205460ff1615156106915760405161040280611132833901809050604051809103906000f06004600050600084600160a060020a0316815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555060016004600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff021916908302179055505b600160a060020a03828116600090815260046020819052604080518184205460e060020a630a3b0a4f02825286861693820193909352905161010090920490931692630a3b0a4f926024828101939192829003018183876161da5a03f11561000257505050600160a060020a031660009081526005602052604090208054600160a060020a0319169091179055565b6002546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602482810192602092919082900301816000876161da5a03f115610002575050604051511515905061078357610002565b50600160a060020a0381811660009081526005602090815260408083205490931680835260049091529190205460ff161561080f576040600081812054825160e260020a632e72bafd028
152600160a060020a03868116600483015293516101009092049093169263b9caebf4926024828101939192829003018183876161da5a03f115610002575050505b600160a060020a03828116600090815260056020526040812054909116146108545760406000908120600160a060020a0384169091528054600160a060020a03191690555b50600160a060020a0381811660009081526006602090815260408083205490931680835260049091529190205460ff16156108e657600160a060020a038181166000908152604080518183205460e260020a632e72bafd028252868516600483015291516101009092049093169263b9caebf4926024828101939192829003018183876161da5a03f115610002575050505b600160a060020a03828116600090815260066020526040812054909116146105005760406000908120600160a060020a0384169091528054600160a060020a0319169055505056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a030219169055600182016
0006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff0219169055600082016
0016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a031681526020019081526020016000206
0006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056", + "nonce": "7", + "storage": { + "0xffc4df2d4f3d2cffad590bed6296406ab7926ca9e74784f74a95191fa069a174": "0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433" + } + }, + "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f": { + "balance": "0x0", + "code": 
"0x606060405236156100ae5760e060020a600035046302d05d3f81146100b65780631ab9075a146100c85780632b68bb2d146101035780634cc927d7146101c557806351a34eb81461028e57806356ccb6f0146103545780635928d37f1461041d578063599efa6b146104e9578063759297bb146105b2578063771d50e11461067e578063a7f4377914610740578063f905c15a1461075e578063f92eb77414610767578063febf661214610836575b610902610002565b610904600054600160a060020a031681565b610902600435600254600160a060020a03166000148015906100f9575060025433600160a060020a03908116911614155b1561092057610002565b60025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b02606452610902916000918291600160a060020a03169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051511515905061094257610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610a0d57610002565b61090260043560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610ae957610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a
060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610bbc57610002565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610c9657610002565b61090260043560243560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610de057610002565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610ebb57610002565b60025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b02606452610902916000918291600160a060020a03169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f9e57610002565b61090260025433600160a060020a0390811691161461106957610002565b61090e60035481565b61090e60043560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750506040805180517ff92eb
774000000000000000000000000000000000000000000000000000000008252600482018790529151919350600160a060020a038416925063f92eb774916024828101926020929190829003018188876161da5a03f11561000257505060405151949350505050565b61090260043560243560443560025460e360020a631c2d8fb302606090815260aa60020a6a18dbdb9d1c9858dd18dd1b026064526000918291600160a060020a039091169063e16c7d989060849060209060248187876161da5a03f1156100025750505060405180519060200150905080600160a060020a03166316c66cc6336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f115610002575050604051511515905061107757610002565b005b6060908152602090f35b60408051918252519081900360200190f35b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5ed61af000000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152925190959286169350635ed61af092602483810193919291829003018183876161da5a03f115610002575050505050565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517fab03fc2600000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015260248301899052808816604484015292519095928616935063ab03fc2692606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f949ae47900000000000000000000000000000000000000000000000000000000825233600160a060020a0390811660048401526024830188905292519095928616935063949ae47992604483810193919291829003018183876161da5a03f11561000257505
050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f46d88e7d000000000000000000000000000000000000000000000000000000008252600160a060020a0380891660048401523381166024840152604483018890529251909592861693506346d88e7d92606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5315cdde00000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152808a16602484015260448301889052925190959286169350635315cdde92606483810193919291829003018183876161da5a03f115610002575050604080517f5928d37f00000000000000000000000000000000000000000000000000000000815233600160a060020a03908116600483015287166024820152604481018690529051635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517fe68e401c00000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015280891660248401526044830188905292519095928616935063e68e401c92606483810193919291829003018183876161da5a03f1156100025750505050505050565b6040805160025460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f5152f381000000000000000000000000000000000000000000000000000000008252600160a060020a03808a1660048401528089166024840152604483018890523381166064840152925190959286169350635152f38192608483810193919291829003018183876161da5a03f115610002575050505050505050565
b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f056d447000000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015292519095928616935063056d447092602483810193919291829003018183876161da5a03f115610002575050505050565b600254600160a060020a0316ff5b6040805160025460e360020a631c2d8fb302825260aa60020a6a18dbdb9d1c9858dd18dd1b0260048301529151600160a060020a03929092169163e16c7d9891602481810192602092909190829003018188876161da5a03f1156100025750506040805180517f3ae1005c00000000000000000000000000000000000000000000000000000000825233600160a060020a039081166004840152808a166024840152808916604484015260648301889052925190959286169350633ae1005c92608483810193919291829003018183876161da5a03f11561000257505050505050505056", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396" + } + }, + "0xc212e03b9e060e36facad5fd8f4435412ca22e6b": { + "balance": "0x0", + "code": 
"0x606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d989160248
1810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546
011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d9891602
48181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169
163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a0391821660048501526024840152925190945
0918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000000000000000000000000000000000000000006195", + "0x0000000000000000000000000000000000000000000000000000000000000004": "0x5842545553440000000000000000000000000000000000000000000000000000", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433", + "0x0000000000000000000000000000000000000000000000000000000000000006": "0x0000000000000000000000000000000000000000000000008ac7230489e80000", + "0x000000000000000000000000000000000000000000000000000000000000000b": "0x0000000000000000000000000000000000000000000000283c7b9181eca20000" + } + }, + "0xcf00ffd997ad14939736f026006498e3f099baaf": { + "balance": "0x0", + "code": 
"0x606060405236156100cf5760e060020a600035046302d05d3f81146100d7578063031e7f5d146100e95780631ab9075a1461010b5780632243118a1461014657806327aad68a1461016557806338a699a4146101da5780635188f996146101f8578063581d5d601461021e57806381738c5914610246578063977da54014610269578063a07421ce14610288578063a7f43779146102be578063bdbdb086146102dc578063e1c7111914610303578063f4f2821b14610325578063f905c15a1461034a578063f92eb77414610353575b610387610002565b610389600054600160a060020a031681565b610387600435602435600254600160a060020a0316600014156103a857610002565b610387600435600254600160a060020a031660001480159061013c575060025433600160a060020a03908116911614155b1561042957610002565b610387600435600254600160a060020a03166000141561044b57610002565b6102ac60043560008181526004602081815260408320547f524d81d3000000000000000000000000000000000000000000000000000000006060908152610100909104600160a060020a031692839263524d81d3926064928188876161da5a03f1156100025750506040515192506103819050565b61039c60043560008181526004602052604090205460ff165b919050565b6103876004356024356002546000908190600160a060020a031681141561079457610002565b61038760043560243560025460009081908190600160a060020a031681141561080457610002565b61038960043560008181526004602052604081205460ff1615156109e357610002565b610387600435600254600160a060020a0316600014156109fb57610002565b600435600090815260096020526040902054670de0b6b3a764000090810360243502045b60408051918252519081900360200190f35b61038760025433600160a060020a03908116911614610a9257610002565b600435600090815260086020526040902054670de0b6b3a7640000602435909102046102ac565b610387600435602435600254600160a060020a031660001415610aa057610002565b61038760043560025460009081908190600160a060020a0316811415610b3657610002565b6102ac60035481565b6102ac600435600081815260076020908152604080832054600690925290912054670de0b6b3a76400000204805b50919050565b005b600160a060020a03166060908152602090f35b15156060908152602090f35b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f
11561000257505060405151151590506103fe57610002565b60008281526004602052604090205460ff16151561041b57610002565b600860205260406000205550565b6002805473ffffffffffffffffffffffffffffffffffffffff19168217905550565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f11561000257505060405151151590506104a157610002565b604080516000838152600460205291909120805460ff1916600117905561040280610de2833901809050604051809103906000f0600460005060008360001916815260200190815260200160002060005060000160016101000a815481600160a060020a030219169083021790555066470de4df8200006008600050600083600019168152602001908152602001600020600050819055506703782dace9d9000060096000506000836000191681526020019081526020016000206000508190555050565b600460005060008560001916815260200190815260200160002060005060000160019054906101000a9004600160a060020a0316915081600160a060020a031663524d81d36040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060405151821415905061060057838152600660209081526040808320839055600790915281208190555b81600160a060020a0316630a3b0a4f846040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f11561000257505050600160a060020a038316808252600560209081526040808420879055805160e160020a6364a81ff102815290518694670de0b6b3a7640000949363c9503fe29360048181019492939183900301908290876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b939160048181019260209290919082900301816000876161da5a03f11561000257505050604051805190602001500204600660005060008660001916815260200190815260200160002060008282825054019250508190555080600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050506040518051906020015060076000506000866000191681526020019081526020016000206000828282505401925050819055505b50505050565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161d
a5a03f11561000257505060405151151590506107e957610002565b8381526004602052604081205460ff16151561056657610002565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161da5a03f115610002575050604051511515905061085957610002565b849250670de0b6b3a764000083600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575060408051805160e160020a6364a81ff102825291519189028590049650600481810192602092909190829003018188876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b9391600481810192602092909190829003018189876161da5a03f115610002575050506040518051906020015002049050806006600050600085600160a060020a0316632e94420f6040518160e060020a0281526004018090506020604051808303816000876161da5a03f1156100025750604080518051855260208681528286208054989098039097557f2e94420f00000000000000000000000000000000000000000000000000000000815290518896600483810193919291829003018187876161da5a03f115610002575050604080515183526020939093525020805490910190555050505050565b60409020546101009004600160a060020a03166101f3565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f1156100025750506040515115159050610a5157610002565b60008181526004602052604090205460ff161515610a6e57610002565b6040600020805474ffffffffffffffffffffffffffffffffffffffffff1916905550565b600254600160a060020a0316ff5b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b906084906020906024816000876161da5a03f1156100025750506040515115159050610af657610002565b60008281526004602052604090205460ff161515610b1357610002565b670de0b6b3a7640000811115610b2857610002565b600960205260406000205550565b60025460e060020a6313bc6d4b02606090815233600160a060020a03908116606452909116906313bc6d4b9060849060209060248187876161da5a03f1156100025750506040515115159050610b8b57610002565b600160a060020a038416815260056020908152604080832054808452600490925282205490935060ff161
515610bc057610002565b600460005060008460001916815260200190815260200160002060005060000160019054906101000a9004600160a060020a0316915081600160a060020a031663b9caebf4856040518260e060020a0281526004018082600160a060020a031681526020019150506000604051808303816000876161da5a03f115610002575050506005600050600085600160a060020a0316815260200190815260200160002060005060009055839050600082600160a060020a031663524d81d36040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050604051519190911115905061078e57670de0b6b3a764000081600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f11561000257505060408051805160e060020a636f265b930282529151919291636f265b939160048181019260209290919082900301816000876161da5a03f11561000257505050604051805190602001500204600660005060008560001916815260200190815260200160002060008282825054039250508190555080600160a060020a031663c9503fe26040518160e060020a0281526004018090506020604051808303816000876161da5a03f115610002575050506040518051906020015060076000506000856000191681526020019081526020016000206000828282505403925050819055505050505056606060405260008054600160a060020a031916331790556103de806100246000396000f3606060405236156100615760e060020a600035046302d05d3f81146100695780630a3b0a4f1461007b5780630d327fa7146100f6578063524d81d314610109578063a7f4377914610114578063b9caebf414610132578063bbec3bae14610296575b6102ce610002565b6102d0600054600160a060020a031681565b6102ce600435600254600090600160a060020a03168114156102ed5760028054600160a060020a03199081168417808355600160a060020a03808616855260036020526040852060018101805493831694909316939093179091559154815461010060a860020a031916921661010002919091179055610372565b6102d0600254600160a060020a03165b90565b6102e3600154610106565b6102ce60005433600160a060020a039081169116146103c657610002565b6102ce600435600160a060020a038116600090815260036020526040812054819060ff16801561016457506001548190115b1561029157506040808220600180820154915461010090819004600160a060020a039081168087528587209093018054600160a
060020a031916948216948517905583865293909420805461010060a860020a03191694820294909417909355600254909190811690841614156101e85760028054600160a060020a031916821790555b600254600160a060020a0390811690841614156102105760028054600160a060020a03191690555b6003600050600084600160a060020a0316815260200190815260200160002060006000820160006101000a81549060ff02191690556000820160016101000a815490600160a060020a0302191690556001820160006101000a815490600160a060020a03021916905550506001600081815054809291906001900391905055505b505050565b600160a060020a036004358181166000908152600360205260408120600101546002546102d09491821691168114156103d4576103d8565b005b600160a060020a03166060908152602090f35b6060908152602090f35b60028054600160a060020a03908116835260036020526040808420805461010060a860020a0319808216610100808a029190911790935590829004841680875283872060019081018054600160a060020a03199081168b179091559654868a168952949097209687018054949095169390951692909217909255835416908202179091555b60016003600050600084600160a060020a0316815260200190815260200160002060005060000160006101000a81548160ff0219169083021790555060016000818150548092919060010191905055505050565b600054600160a060020a0316ff5b8091505b5091905056", + "nonce": "3", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396", + "0x3571d73f14f31a1463bd0a2f92f7fde1653d4e1ead7aedf4b0a5df02f16092ab": "0x0000000000000000000000000000000000000000000007d634e4c55188be0000", + "0x4e64fe2d1b72d95a0a31945cc6e4f4e524ac5ad56d6bd44a85ec7bc9cc0462c0": "0x000000000000000000000000000000000000000000000002b5e3af16b1880000" + } + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "117124093", + "extraData": 
"0xd5830105008650617269747986312e31322e31826d61", + "gasLimit": "4707788", + "hash": "0xad325e4c49145fb7a4058a68ac741cc8607a71114e23fc88083c7e881dd653e7", + "miner": "0x00714b9ac97fd6bd9325a059a70c9b9fa94ce050", + "mixHash": "0x0af918f65cb4af04b608fc1f14a849707696986a0e7049e97ef3981808bcc65f", + "nonce": "0x38dee147326a8d40", + "number": "25000", + "stateRoot": "0xc5d6bbcd46236fcdcc80b332ffaaa5476b980b01608f9708408cfef01b58bd5b", + "timestamp": "1479891517", + "totalDifficulty": "1895410389427" + }, + "input": "0xf88b8206628504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb80000000000000000000000000000000000000000000000280faf689c35ac00002aa0a7ee5b7877811bf671d121b40569462e722657044808dc1d6c4f1e4233ec145ba0417e7543d52b65738d9df419cbe40a708424f4d54b0fc145c0a64545a2bb1065", + "result": { + "calls": [ + { + "from": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "gas": "0x31217", + "gasUsed": "0x334", + "input": "0xe16c7d98636f6e7472616374617069000000000000000000000000000000000000000000", + "output": "0x000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x2a68d", + "gasUsed": "0x334", + "input": "0xe16c7d98636f6e747261637463746c000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x23ac9", + "gasUsed": "0x334", + "input": "0xe16c7d98636f6e7472616374646200000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000007986bad81f4cbd9317f5a46861437dae58d69113", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": 
"0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x23366", + "gasUsed": "0x273", + "input": "0x16c66cc6000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x7986bad81f4cbd9317f5a46861437dae58d69113", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x29f35", + "gasUsed": "0xf8d", + "input": "0x16c66cc6000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x28a9e", + "gasUsed": "0x334", + "input": "0xe16c7d98636f6e747261637463746c000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x21d79", + "gasUsed": "0x24d", + "input": "0x13bc6d4b000000000000000000000000b4fe7aa695b326c9d219158d2ca50db77b39f99f", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x2165b", + "gasUsed": "0x334", + "input": "0xe16c7d986d61726b65746462000000000000000000000000000000000000000000000000", + "output": "0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x1a8e8", + "gasUsed": "0x24d", + "input": 
"0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x1a2c6", + "gasUsed": "0x3cb", + "input": "0xc9503fe2", + "output": "0x0000000000000000000000000000000000000000000000008ac7230489e80000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x19b72", + "gasUsed": "0x3cb", + "input": "0xc9503fe2", + "output": "0x0000000000000000000000000000000000000000000000008ac7230489e80000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x19428", + "gasUsed": "0x305", + "input": "0x6f265b93", + "output": "0x0000000000000000000000000000000000000000000000283c7b9181eca20000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x18d45", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "gas": "0x1734e", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x20ee1", + "gasUsed": "0x5374", + "input": 
"0x581d5d60000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000", + "output": "0x", + "to": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1b6c1", + "gasUsed": "0x334", + "input": "0xe16c7d986c6f676d67720000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1af69", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "gas": "0x143a5", + "gasUsed": "0x24d", + "input": "0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1a91d", + "gasUsed": "0x12fa", + "input": "0x0accce0600000000000000000000000000000000000000000000000000000000000000025842545553440000000000000000000000000000000000000000000000000000000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "output": "0x", + "to": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x19177", + "gasUsed": "0x334", + "input": 
"0xe16c7d986c6f676d67720000000000000000000000000000000000000000000000000000", + "output": "0x0000000000000000000000002a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x18a22", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x18341", + "gasUsed": "0x334", + "input": "0xe16c7d986d61726b65746462000000000000000000000000000000000000000000000000", + "output": "0x000000000000000000000000cf00ffd997ad14939736f026006498e3f099baaf", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x17bec", + "gasUsed": "0x229", + "input": "0x2e94420f", + "output": "0x5842545553440000000000000000000000000000000000000000000000000000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + }, + { + "from": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x1764e", + "gasUsed": "0x45c", + "input": "0xf92eb7745842545553440000000000000000000000000000000000000000000000000000", + "output": "0x00000000000000000000000000000000000000000000002816d180e30c390000", + "to": "0xcf00ffd997ad14939736f026006498e3f099baaf", + "type": "CALL", + "value": "0x0" + }, + { + "calls": [ + { + "from": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "gas": "0x108ba", + "gasUsed": "0x24d", + "input": "0x13bc6d4b0000000000000000000000003e9286eafa2db8101246c2131c09b49080d00690", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x2cccf5e0538493c235d1c5ef6580f77d99e91396", + "type": "CALL", + "value": "0x0" + } + ], + "from": 
"0x3e9286eafa2db8101246c2131c09b49080d00690", + "gas": "0x16e62", + "gasUsed": "0xebb", + "input": "0x645a3b72584254555344000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002816d180e30c390000", + "output": "0x", + "to": "0x2a98c5f40bfa3dee83431103c535f6fae9a8ad38", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "gas": "0x283b9", + "gasUsed": "0xc51c", + "input": "0x949ae479000000000000000000000000c212e03b9e060e36facad5fd8f4435412ca22e6b0000000000000000000000000000000000000000000000280faf689c35ac0000", + "output": "0x", + "to": "0x3e9286eafa2db8101246c2131c09b49080d00690", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "gas": "0x30b4a", + "gasUsed": "0xedb7", + "input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000", + "output": "0x", + "to": "0xb4fe7aa695b326c9d219158d2ca50db77b39f99f", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0x70c9217d814985faef62b124420f8dfbddd96433", + "gas": "0x37b38", + "gasUsed": "0x12bb3", + "input": "0x51a34eb80000000000000000000000000000000000000000000000280faf689c35ac0000", + "output": "0x", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json new file mode 100644 index 00000000..f7ad6df5 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/delegatecall.json @@ -0,0 +1,97 @@ +{ + "context": { + "difficulty": "31927752", + "gasLimit": "4707788", + "miner": "0x5659922ce141eedbc2733678f9806c77b4eebee8", + "number": "11495", + "timestamp": "1479735917" + }, + "genesis": { + "alloc": { + "0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff": { + "balance": "0x0", + "code": 
"0x606060405236156100825760e060020a60003504630a0313a981146100875780630a3b0a4f146101095780630cd40fea1461021257806329092d0e1461021f5780634cd06a5f146103295780635dbe47e8146103395780637a9e5410146103d9578063825db5f7146103e6578063a820b44d146103f3578063efa52fb31461047a575b610002565b34610002576104fc600435600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a26333556e849091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f415610002575050604051519150505b919050565b346100025761051060043560006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a2637d65837a9091336000604051602001526040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515115905061008257604080517f21ce24d4000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a038416602483015291517342b02b5deeb78f34cd5ac896473b63e6c99a71a2926321ce24d49260448082019391829003018186803b156100025760325a03f415610002575050505b50565b3461000257610512600181565b346100025761051060043560006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a2637d65837a9091336000604051602001526040518360e060020a0281526004018083815260200182600160a060020a031681526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515115905061008257604080517f89489a87000000000000000000000000000000000000000000000000000000008152600060048201819052600160a060020a038416602483015291517342b02b5deeb78f34cd5ac896473b63e6c99a71a2926389489a879260448082019391829003018186803b156100025760325a03f4156100025750505061020f565b3461000257610528600435610403565b34610002576104fc600435604080516000602091820181905282517f7d65837a00000000000000000000000000000000000000000000000000000000815260048101829052600160a060020a0385166024820152925190927342b02b5deeb78f34cd5ac896473b63e6c99a71a292637d65837a92604480840193829003018186803b156100025760325a03f4156100025750506040515191506101049050565b3461000257610512600c81565
b3461000257610512600081565b3461000257610528600061055660005b600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a263685a1f3c9091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515191506101049050565b346100025761053a600435600060006000507342b02b5deeb78f34cd5ac896473b63e6c99a71a263f775b6b59091846000604051602001526040518360e060020a028152600401808381526020018281526020019250505060206040518083038186803b156100025760325a03f4156100025750506040515191506101049050565b604080519115158252519081900360200190f35b005b6040805160ff9092168252519081900360200190f35b60408051918252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b90509056", + "nonce": "1", + "storage": { + "0x4d140b25abf3c71052885c66f73ce07cff141c1afabffdaf5cba04d625b7ebcc": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + }, + "0x269296dddce321a6bcbaa2f0181127593d732cba": { + "balance": "0x0", + "code": 
"0x606060405236156101275760e060020a60003504630cd40fea811461012c578063173825d9146101395780631849cb5a146101c7578063285791371461030f5780632a58b3301461033f5780632cb0d48a146103565780632f54bf6e1461036a578063332b9f061461039d5780633ca8b002146103c55780633df4ddf4146103d557806341c0e1b5146103f457806347799da81461040557806362a51eee1461042457806366907d13146104575780637065cb48146104825780637a9e541014610496578063825db5f7146104a3578063949d225d146104b0578063a51687df146104c7578063b4da4e37146104e6578063b4e6850b146104ff578063bd7474ca14610541578063e75623d814610541578063e9938e1114610555578063f5d241d314610643575b610002565b3461000257610682600181565b34610002576106986004356106ff335b60006001600a9054906101000a9004600160a060020a0316600160a060020a0316635dbe47e8836000604051602001526040518260e060020a0281526004018082600160a060020a03168152602001915050602060405180830381600087803b156100025760325a03f1156100025750506040515191506103989050565b3461000257604080516101008082018352600080835260208084018290528385018290526060808501839052608080860184905260a080870185905260c080880186905260e09788018690526001605060020a0360043581168752600586529589902089519788018a528054808816808a52605060020a91829004600160a060020a0316978a01889052600183015463ffffffff8082169d8c018e905264010000000082048116988c01899052604060020a90910416958a018690526002830154948a01859052600390920154808916938a01849052049096169690970186905293969495949293604080516001605060020a03998a16815297891660208901529590971686860152600160a060020a03909316606086015263ffffffff9182166080860152811660a08501521660c083015260e08201929092529051908190036101000190f35b346100025761069a60043560018054600091829160ff60f060020a909104161515141561063d5761072833610376565b34610002576106ae6004546001605060020a031681565b34610002576106986004356108b333610149565b346100025761069a6004355b600160a060020a03811660009081526002602052604090205460ff1615156001145b919050565b34610002576106986001805460ff60f060020a9091041615151415610913576108ed33610376565b346100025761069a600435610149565b34610002576106ae60035460016050600
20a03605060020a9091041681565b346100025761069861091533610149565b34610002576106ae6003546001605060020a0360a060020a9091041681565b346100025761069a60043560243560018054600091829160ff60f060020a909104161515141561095e5761092633610376565b34610002576106986004356001805460ff60f060020a909104161515141561072557610a8b33610376565b3461000257610698600435610aa533610149565b3461000257610682600c81565b3461000257610682600081565b34610002576106ae6003546001605060020a031681565b34610002576106ca600154600160a060020a03605060020a9091041681565b346100025761069a60015460ff60f060020a9091041681565b346100025761069a60043560243560443560643560843560a43560c43560018054600091829160ff60f060020a9091041615151415610b5857610ad233610376565b3461000257610698600435610bd633610149565b34610002576106e6600435604080516101008181018352600080835260208084018290528385018290526060808501839052608080860184905260a080870185905260c080880186905260e09788018690526001605060020a03808b168752600586529589902089519788018a5280548088168952600160a060020a03605060020a918290041696890196909652600181015463ffffffff8082169b8a019b909b5264010000000081048b1695890195909552604060020a90940490981691860182905260028301549086015260039091015480841696850196909652940416918101919091525b50919050565b346100025761069a60043560243560443560643560843560a43560018054600091829160ff60f060020a9091041615151415610c8e57610bfb33610376565b6040805160ff9092168252519081900360200190f35b005b604080519115158252519081900360200190f35b604080516001605060020a039092168252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b6040805163ffffffff9092168252519081900360200190f35b1561012757600160a060020a0381166000908152600260205260409020805460ff191690555b50565b1561063d57506001605060020a0380831660009081526005602052604090208054909116151561075b576000915061063d565b604080516101008101825282546001605060020a038082168352600160a060020a03605060020a92839004166020840152600185015463ffffffff80821695850195909552640100000000810485166060850152604060020a90049093166080830152600284015460a083015260038401548
0841660c08401520490911660e0820152610817905b8051600354600090819060016001605060020a0390911611610c995760038054605060020a60f060020a0319169055610ddf565b600380546001605060020a031981166000196001605060020a03928316011782558416600090815260056020526040812080547fffff000000000000000000000000000000000000000000000000000000000000168155600181810180546bffffffffffffffffffffffff191690556002820192909255909101805473ffffffffffffffffffffffffffffffffffffffff19169055915061063d565b1561012757600180547fff00ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1660f060020a8302179055610725565b1561091357600480546001605060020a031981166001605060020a039091166001011790555b565b156101275733600160a060020a0316ff5b1561095e57506001605060020a03808416600090815260056020526040902080549091161515610965576000915061095e565b600191505b5092915050565b60038101546001605060020a0384811691161415610986576001915061095e565b604080516101008101825282546001605060020a038082168352600160a060020a03605060020a92839004166020840152600185015463ffffffff80821695850195909552640100000000810485166060850152604060020a90049093166080830152600284015460a0830152600384015480841660c08401520490911660e0820152610a12906107e3565b61095983825b80546003546001605060020a0391821691600091161515610de55760038054605060020a60a060020a031916605060020a84021760a060020a69ffffffffffffffffffff02191660a060020a84021781558301805473ffffffffffffffffffffffffffffffffffffffff19169055610ddf565b1561072557600480546001605060020a0319168217905550565b1561012757600160a060020a0381166000908152600260205260409020805460ff19166001179055610725565b15610b5857506001605060020a038088166000908152600560205260409020805490911615610b645760009150610b58565b6004546001605060020a0390811690891610610b3057600480546001605060020a03191660018a011790555b6003805460016001605060020a03821681016001605060020a03199092169190911790915591505b50979650505050505050565b80546001605060020a0319168817605060020a60f060020a031916605060020a880217815560018101805463ffffffff1916871767ffffffff0000000019166401000000008702176bffffffff000000000
00000001916604060020a860217905560028101839055610b048982610a18565b156101275760018054605060020a60f060020a031916605060020a8302179055610725565b15610c8e57506001605060020a03808816600090815260056020526040902080549091161515610c2e5760009150610c8e565b8054605060020a60f060020a031916605060020a88021781556001808201805463ffffffff1916881767ffffffff0000000019166401000000008802176bffffffff00000000000000001916604060020a87021790556002820184905591505b509695505050505050565b6003546001605060020a03848116605060020a909204161415610d095760e084015160038054605060020a928302605060020a60a060020a031990911617808255919091046001605060020a031660009081526005602052604090200180546001605060020a0319169055610ddf565b6003546001605060020a0384811660a060020a909204161415610d825760c08401516003805460a060020a92830260a060020a69ffffffffffffffffffff021990911617808255919091046001605060020a03166000908152600560205260409020018054605060020a60a060020a0319169055610ddf565b505060c082015160e08301516001605060020a0380831660009081526005602052604080822060039081018054605060020a60a060020a031916605060020a8702179055928416825290200180546001605060020a031916831790555b50505050565b6001605060020a0384161515610e6457600380546001605060020a03605060020a9182900481166000908152600560205260409020830180546001605060020a0319908116871790915583548785018054918590049093168402605060020a60a060020a03199182161790911690915582549185029116179055610ddf565b506001605060020a038381166000908152600560205260409020600390810180549185018054605060020a60a060020a0319908116605060020a94859004909516808502959095176001605060020a0319168817909155815416918402919091179055801515610ef4576003805460a060020a69ffffffffffffffffffff02191660a060020a8402179055610ddf565b6003808401546001605060020a03605060020a9091041660009081526005602052604090200180546001605060020a031916831790555050505056", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x000113204f5d64c28326fd7bd05fd4ea855302d7f2ff00000000000000000000" + } + }, + 
"0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2": { + "balance": "0x0", + "code": "0x6504032353da7150606060405236156100695760e060020a60003504631bf7509d811461006e57806321ce24d41461008157806333556e84146100ec578063685a1f3c146101035780637d65837a1461011757806389489a8714610140578063f775b6b5146101fc575b610007565b61023460043560006100fd82600061010d565b610246600435602435600160a060020a03811660009081526020839052604081205415156102cb57826001016000508054806001018281815481835581811511610278576000838152602090206102789181019083015b808211156102d057600081556001016100d8565b610248600435602435600182015481105b92915050565b6102346004356024355b60018101906100fd565b610248600435602435600160a060020a03811660009081526020839052604090205415156100fd565b61024660043560243580600160a060020a031632600160a060020a03161415156101f857600160a060020a038116600090815260208390526040902054156101f857600160a060020a038116600090815260208390526040902054600183018054909160001901908110156100075760009182526020808320909101805473ffffffffffffffffffffffffffffffffffffffff19169055600160a060020a038316825283905260408120556002820180546000190190555b5050565b61025c60043560243560008260010160005082815481101561000757600091825260209091200154600160a060020a03169392505050565b60408051918252519081900360200190f35b005b604080519115158252519081900360200190f35b60408051600160a060020a039092168252519081900360200190f35b50505060009283526020808420909201805473ffffffffffffffffffffffffffffffffffffffff191686179055600160a060020a0385168352908590526040909120819055600284018054600101905590505b505050565b509056", + "nonce": "1", + "storage": {} + }, + "0xa529806c67cc6486d4d62024471772f47f6fd672": { + "balance": "0x67820e39ac8fe9800", + "code": "0x", + "nonce": "68", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + 
"difficulty": "31912170", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712388", + "hash": "0x0855914bdc581bccdc62591fd438498386ffb59ea4d5361ed5c3702e26e2c72f", + "miner": "0x334391aa808257952a462d1475562ee2106a6c90", + "mixHash": "0x64bb70b8ca883cadb8fbbda2c70a861612407864089ed87b98e5de20acceada6", + "nonce": "0x684129f283aaef18", + "number": "11494", + "stateRoot": "0x7057f31fe3dab1d620771adad35224aae43eb70e94861208bc84c557ff5b9d10", + "timestamp": "1479735912", + "totalDifficulty": "90744064339" + }, + "input": "0xf889448504a817c800832dc6c094269296dddce321a6bcbaa2f0181127593d732cba80a47065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e29a080ed81e4c5e9971a730efab4885566e2c868cd80bd4166d0ed8c287fdf181650a069d7c49215e3d4416ad239cd09dbb71b9f04c16b33b385d14f40b618a7a65115", + "result": { + "calls": [ + { + "calls": [ + { + "from": "0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff", + "gas": "0x2bf459", + "gasUsed": "0x2aa", + "input": "0x7d65837a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x42b02b5deeb78f34cd5ac896473b63e6c99a71a2", + "type": "DELEGATECALL" + } + ], + "from": "0x269296dddce321a6bcbaa2f0181127593d732cba", + "gas": "0x2cae73", + "gasUsed": "0xa9d", + "input": "0x5dbe47e8000000000000000000000000a529806c67cc6486d4d62024471772f47f6fd672", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x13204f5d64c28326fd7bd05fd4ea855302d7f2ff", + "type": "CALL", + "value": "0x0" + } + ], + "from": "0xa529806c67cc6486d4d62024471772f47f6fd672", + "gas": "0x2d6e28", + "gasUsed": "0x64bd", + "input": "0x7065cb480000000000000000000000001523e55a1ca4efbae03355775ae89f8d7699ad9e", + "output": "0x", + "to": "0x269296dddce321a6bcbaa2f0181127593d732cba", + "type": "CALL", + "value": "0x0" + } +} diff --git 
a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json new file mode 100644 index 00000000..72152e27 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_create_oog_outer_throw.json @@ -0,0 +1,77 @@ +{ + "context": { + "difficulty": "3451177886", + "gasLimit": "4709286", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2290744", + "timestamp": "1513616439" + }, + "genesis": { + "alloc": { + "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a": { + "balance": "0x0", + "code": "0x606060405263ffffffff60e060020a6000350416633b91f50681146100505780635bb47808146100715780635f51fca01461008c578063bc7647a9146100ad578063f1bd0d7a146100c8575b610000565b346100005761006f600160a060020a03600435811690602435166100e9565b005b346100005761006f600160a060020a0360043516610152565b005b346100005761006f600160a060020a036004358116906024351661019c565b005b346100005761006f600160a060020a03600435166101fa565b005b346100005761006f600160a060020a0360043581169060243516610db8565b005b600160a060020a038083166000908152602081905260408120549091908116903316811461011657610000565b839150600160a060020a038316151561012d573392505b6101378284610e2e565b6101418284610db8565b61014a826101fa565b5b5b50505050565b600154600160a060020a03908116903316811461016e57610000565b6002805473ffffffffffffffffffffffffffffffffffffffff1916600160a060020a0384161790555b5b5050565b600254600160a060020a0390811690331681146101b857610000565b600160a060020a038381166000908152602081905260409020805473ffffffffffffffffffffffffffffffffffffffff19169184169190911790555b5b505050565b6040805160e260020a631a481fc102815260016024820181905260026044830152606482015262093a8060848201819052600060a4830181905260c06004840152601e60c48401527f736574456e7469747953746174757328616464726573732c75696e743829000060e484015292519091600160a060020a038516916369207f04916101048082019287929091908290030181838780
3b156100005760325a03f1156100005750506040805160e260020a63379938570281526000602482018190526001604483015260606004830152602360648301527f626567696e506f6c6c28616464726573732c75696e7436342c626f6f6c2c626f60848301527f6f6c29000000000000000000000000000000000000000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601960c48201527f61646453746f636b28616464726573732c75696e74323536290000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601960c48201527f697373756553746f636b2875696e74382c75696e74323536290000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152602160648301527f6772616e7453746f636b2875696e74382c75696e743235362c61646472657373608483015260f860020a60290260a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f115610000575050604080517f010555b8000000000000000000000000000000000000000000000000000000008152600160a060020a03338116602483015260006044830181905260606004840152603c60648401527f6772616e7456657374656453746f636b2875696e74382c75696e743235362c6160848401527f6464726573732c75696e7436342c75696e7436342c75696e743634290000000060a48401529251908716935063010555b89260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152601260c48201527f626567696e53616c652861646472657373290000000000
00000000000000000060e48201529051600160a060020a03861692506369207f04916101048082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152601a60648301527f7472616e7366657253616c6546756e64732875696e743235362900000000000060848301529151600160a060020a038716935063de64e15c9260a48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260016024820181905260026044830152606482015267ffffffffffffffff8416608482015260ff851660a482015260c06004820152602d60c48201527f7365744163636f756e74696e6753657474696e67732875696e743235362c756960e48201527f6e7436342c75696e7432353629000000000000000000000000000000000000006101048201529051600160a060020a03861692506369207f04916101248082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152603460648301527f637265617465526563757272696e6752657761726428616464726573732c756960848301527f6e743235362c75696e7436342c737472696e672900000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152601b60648301527f72656d6f7665526563757272696e675265776172642875696e7429000000000060848301529151600160a060020a038716935063de64e15c9260a48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a63379938570281526002602482015260006044820181905260606004830152602360648301527f697373756552657761726428616464726573732c75696e743235362c7374726960848301527f6e6729000000000000000000000000000000000000000000000000000000000060a48301529151600160a060020a038716935063de64e15c9260c48084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a6337993857028152600160248201819052604482015260606004820152602260648201527f61737369676e53746f636b2875696e74382c616464726573732c75696e743235
608482015260f060020a6136290260a48201529051600160a060020a038616925063de64e15c9160c48082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a6337993857028152600160248201819052604482015260606004820152602260648201527f72656d6f766553746f636b2875696e74382c616464726573732c75696e743235608482015260f060020a6136290260a48201529051600160a060020a038616925063de64e15c9160c48082019260009290919082900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc102815260026024808301919091526003604483015260006064830181905267ffffffffffffffff8616608484015260ff871660a484015260c0600484015260c48301919091527f7365744164647265737342796c617728737472696e672c616464726573732c6260e48301527f6f6f6c29000000000000000000000000000000000000000000000000000000006101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc1028152600260248201526003604482015260006064820181905267ffffffffffffffff8516608483015260ff861660a483015260c06004830152602160c48301527f73657453746174757342796c617728737472696e672c75696e74382c626f6f6c60e483015260f860020a6029026101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f1156100005750506040805160e260020a631a481fc1028152600260248201526003604482015260006064820181905267ffffffffffffffff8516608483015260ff861660a483015260c06004830152603860c48301527f736574566f74696e6742796c617728737472696e672c75696e743235362c756960e48301527f6e743235362c626f6f6c2c75696e7436342c75696e74382900000000000000006101048301529151600160a060020a03871693506369207f04926101248084019391929182900301818387803b156100005760325a03f115610000575050505b505050565b604080517f225553a4000000000000000000000000000000000000000000000000000000008152600160a060020a0383811660048301526002602483015291519184169163225553a49160448082019260009290919082900301818387803b156100005760325a03f115610000575050505b5050565b600082604051611fd280610f488339600160a060020a0390921691
0190815260405190819003602001906000f0801561000057905082600160a060020a03166308b027418260016040518363ffffffff1660e060020a0281526004018083600160a060020a0316600160a060020a0316815260200182815260200192505050600060405180830381600087803b156100005760325a03f115610000575050604080517fa14e3ee300000000000000000000000000000000000000000000000000000000815260006004820181905260016024830152600160a060020a0386811660448401529251928716935063a14e3ee39260648084019382900301818387803b156100005760325a03f115610000575050505b5050505600606060405234620000005760405160208062001fd283398101604052515b805b600a8054600160a060020a031916600160a060020a0383161790555b506001600d819055600e81905560408051808201909152600c8082527f566f74696e672053746f636b00000000000000000000000000000000000000006020928301908152600b805460008290528251601860ff1990911617825590947f0175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db9600291831615610100026000190190921604601f0193909304830192906200010c565b828001600101855582156200010c579182015b828111156200010c578251825591602001919060010190620000ef565b5b50620001309291505b808211156200012c576000815560010162000116565b5090565b50506040805180820190915260038082527f43565300000000000000000000000000000000000000000000000000000000006020928301908152600c805460008290528251600660ff1990911617825590937fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c760026001841615610100026000190190931692909204601f010481019291620001f7565b82800160010185558215620001f7579182015b82811115620001f7578251825591602001919060010190620001da565b5b506200021b9291505b808211156200012c576000815560010162000116565b5090565b50505b505b611da280620002306000396000f3006060604052361561019a5763ffffffff60e060020a600035041662e1986d811461019f57806302a72a4c146101d657806306eb4e421461020157806306fdde0314610220578063095ea7b3146102ad578063158ccb99146102dd57806318160ddd146102f85780631cf65a781461031757806323b872dd146103365780632c71e60a1461036c57806333148fd6146103ca578063435ebc2c146103f55780635eeb6e451461041e578063600e85b71461043c5780636103d70b14
6104a157806362c1e46a146104b05780636c182e99146104ba578063706dc87c146104f057806370a082311461052557806377174f851461055057806395d89b411461056f578063a7771ee3146105fc578063a9059cbb14610629578063ab377daa14610659578063b25dbb5e14610685578063b89a73cb14610699578063ca5eb5e1146106c6578063cbcf2e5a146106e1578063d21f05ba1461070e578063d347c2051461072d578063d96831e114610765578063dd62ed3e14610777578063df3c211b146107a8578063e2982c21146107d6578063eb944e4c14610801575b610000565b34610000576101d4600160a060020a036004351660243567ffffffffffffffff6044358116906064358116906084351661081f565b005b34610000576101ef600160a060020a0360043516610a30565b60408051918252519081900360200190f35b34610000576101ef610a4f565b60408051918252519081900360200190f35b346100005761022d610a55565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516602435610ae3565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516610b4e565b005b34610000576101ef610b89565b60408051918252519081900360200190f35b34610000576101ef610b8f565b60408051918252519081900360200190f35b34610000576102c9600160a060020a0360043581169060243516604435610b95565b604080519115158252519081900360200190f35b3461000057610388600160a060020a0360043516602435610bb7565b60408051600160a060020a039096168652602086019490945267ffffffffffffffff928316858501529082166060850152166080830152519081900360a00190f35b34610000576101ef600160a060020a0360043516610c21565b60408051918252519081900360200190f35b3461000057610402610c40565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d4600160a060020a0360043516602435610c4f565b005b3461000057610458600160a060020a0360043516602435610cc9565b60408051600160a060020a03909716875260208701959095528585019390935267ffffffffffffffff9182166060860152811660808501521660a0830152519081900360c00190f35b
34610000576101d4610d9e565b005b6101d4610e1e565b005b34610000576104d3600160a060020a0360043516610e21565b6040805167ffffffffffffffff9092168252519081900360200190f35b3461000057610402600160a060020a0360043516610ead565b60408051600160a060020a039092168252519081900360200190f35b34610000576101ef600160a060020a0360043516610ef9565b60408051918252519081900360200190f35b34610000576101ef610f18565b60408051918252519081900360200190f35b346100005761022d610f1e565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516610fac565b604080519115158252519081900360200190f35b34610000576102c9600160a060020a0360043516602435610fc2565b604080519115158252519081900360200190f35b3461000057610402600435610fe2565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d46004351515610ffd565b005b34610000576102c9600160a060020a036004351661104c565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516611062565b005b34610000576102c9600160a060020a0360043516611070565b604080519115158252519081900360200190f35b34610000576101ef6110f4565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351667ffffffffffffffff602435166110fa565b60408051918252519081900360200190f35b34610000576101d4600435611121565b005b34610000576101ef600160a060020a03600435811690602435166111c6565b60408051918252519081900360200190f35b34610000576101ef6004356024356044356064356084356111f3565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351661128c565b60408051918252519081900360200190f35b34610000576101d4600160a060020a036004351660243561129e565b005b6040805160a08101825260008082526020820181905291810182905260608101829052608081019190915267ffffffffffffffff848116908416101561086457610000565b8367ffffffffffffffff168267ffffffffffffffff16101561088557610000565b8267ffff
ffffffffffff168267ffffffffffffffff1610156108a657610000565b506040805160a081018252600160a060020a033381168252602080830188905267ffffffffffffffff80871684860152858116606085015287166080840152908816600090815260039091529190912080546001810180835582818380158290116109615760030281600302836000526020600020918201910161096191905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050916000526020600020906003020160005b5082518154600160a060020a031916600160a060020a03909116178155602083015160018201556040830151600290910180546060850151608086015167ffffffffffffffff1990921667ffffffffffffffff948516176fffffffffffffffff00000000000000001916604060020a918516919091021777ffffffffffffffff000000000000000000000000000000001916608060020a939091169290920291909117905550610a268686610fc2565b505b505050505050565b600160a060020a0381166000908152600360205260409020545b919050565b60055481565b600b805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b600160a060020a03338116600081815260026020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b600a5433600160a060020a03908116911614610b6957610000565b600a8054600160a060020a031916600160a060020a0383161790555b5b50565b60005481565b60005b90565b6000610ba2848484611600565b610bad8484846116e2565b90505b9392505050565b600360205281600052604060002081815481101561000057906000526020600020906003020160005b5080546001820154600290920154600160a060020a03909116935090915067ffffffffffffffff80821691604060020a8104821691608060020a9091041685565b600160a060020a0381166000908152600860205260409020545b919050565b600a54600160a060020a031681565b600a5433600160a060020a03908116911614610c6a57610000565b610c7660005482
611714565b6000908155600160a060020a038316815260016020526040902054610c9b9082611714565b600160a060020a038316600090815260016020526040812091909155610cc390839083611600565b5b5b5050565b6000600060006000600060006000600360008a600160a060020a0316600160a060020a0316815260200190815260200160002088815481101561000057906000526020600020906003020160005b508054600182015460028301546040805160a081018252600160a060020a039094168085526020850184905267ffffffffffffffff808416928601839052604060020a8404811660608701819052608060020a9094041660808601819052909c50929a509197509095509350909150610d90904261172d565b94505b509295509295509295565b33600160a060020a038116600090815260066020526040902054801515610dc457610000565b8030600160a060020a0316311015610ddb57610000565b600160a060020a0382166000818152600660205260408082208290555183156108fc0291849190818181858888f193505050501515610cc357610000565b5b5050565b5b565b600160a060020a03811660009081526003602052604081205442915b81811015610ea557600160a060020a03841660009081526003602052604090208054610e9a9190839081101561000057906000526020600020906003020160005b5060020154604060020a900467ffffffffffffffff168461177d565b92505b600101610e3d565b5b5050919050565b600160a060020a0380821660009081526007602052604081205490911615610eef57600160a060020a0380831660009081526007602052604090205416610ef1565b815b90505b919050565b600160a060020a0381166000908152600160205260409020545b919050565b600d5481565b600c805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b60006000610fb983610c21565b1190505b919050565b6000610fcf338484611600565b610fd983836117ac565b90505b92915050565b600460205260009081526040902054600160a060020a031681565b8015801561101a575061100f33610ef9565b61101833610c21565b115b1561102457610000565b33600160a060020a03166000908152600960205260409020805460ff19168215151790555b50565b60006000610fb983610ef9565b
1190505b919050565b610b8533826117dc565b5b50565b600a54604080516000602091820181905282517fcbcf2e5a000000000000000000000000000000000000000000000000000000008152600160a060020a03868116600483015293519194939093169263cbcf2e5a92602480830193919282900301818787803b156100005760325a03f115610000575050604051519150505b919050565b600e5481565b6000610fd961110984846118b2565b61111385856119b6565b611a05565b90505b92915050565b600a5433600160a060020a0390811691161461113c57610000565b61114860005482611a1f565b600055600554600190101561116c57600a5461116c90600160a060020a0316611a47565b5b600a54600160a060020a03166000908152600160205260409020546111929082611a1f565b600a8054600160a060020a039081166000908152600160205260408120939093559054610b8592911683611600565b5b5b50565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b6000600060008487101561120a5760009250611281565b8387111561121a57879250611281565b61123f6112308961122b888a611714565b611a90565b61123a8689611714565b611abc565b915081925061124e8883611714565b905061127e8361127961126a8461122b8c8b611714565b611a90565b61123a888b611714565b611abc565b611a1f565b92505b505095945050505050565b60066020526000908152604090205481565b600160a060020a03821660009081526003602052604081208054829190849081101561000057906000526020600020906003020160005b50805490925033600160a060020a039081169116146112f357610000565b6040805160a0810182528354600160a060020a0316815260018401546020820152600284015467ffffffffffffffff80821693830193909352604060020a810483166060830152608060020a900490911660808201526113539042611af9565b600160a060020a0385166000908152600360205260409020805491925090849081101561000057906000526020600020906003020160005b508054600160a060020a031916815560006001820181905560029091018054600160c060020a0319169055600160a060020a0385168152600360205260409020805460001981019081101561000057906000526020600020906003020160005b50600160a060020a03851660009081526003602052604090208054859081101561000057906000526020600020906003020160005b5081548154600160a060020a031916600160a060020a0391821617825560018084
0154908301556002928301805493909201805467ffffffffffffffff191667ffffffffffffffff948516178082558354604060020a908190048616026fffffffffffffffff000000000000000019909116178082559254608060020a9081900490941690930277ffffffffffffffff00000000000000000000000000000000199092169190911790915584166000908152600360205260409020805460001981018083559190829080158290116115485760030281600302836000526020600020918201910161154891905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050600160a060020a033316600090815260016020526040902054611570915082611a1f565b600160a060020a03338116600090815260016020526040808220939093559086168152205461159f9082611714565b600160a060020a038086166000818152600160209081526040918290209490945580518581529051339093169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35b50505050565b600160a060020a0383161561166e576116466008600061161f86610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611714565b6008600061165386610ead565b600160a060020a031681526020810191909152604001600020555b600160a060020a038216156116dc576116b46008600061168d85610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611a1f565b600860006116c185610ead565b600160a060020a031681526020810191909152604001600020555b5b505050565b600083826116f082426110fa565b8111156116fc57610000565b611707868686611b1b565b92505b5b50509392505050565b600061172283831115611b4d565b508082035b92915050565b6000610fd983602001518367ffffffffffffffff16856080015167ffffffffffffffff16866040015167ffffffffffffffff16876060015167ffffffffffffffff166111f3565b90505b92915050565b60008167ffffffffffffffff168367ffffffffffffffff1610156117a15781610fd9565b825b90505b92915050565b600033826117ba82426110fa565b8111156117c657610000565b6117d08585611b5d565b92505b5b505092915050565b6117e582610ef9565b6117ee83610c21565b11156117f957610000565b600160a060020a03811660009081526009602052604090205460ff16158015611834575081600160a060020a03168160
0160a060020a031614155b1561183e57610000565b61184782611070565b1561185157610000565b611864828261185f85610ef9565b611600565b600160a060020a0382811660009081526007602052604090208054600160a060020a031916918316918217905561189a82610ead565b600160a060020a031614610cc357610000565b5b5050565b600160a060020a038216600090815260036020526040812054815b818110156119885761197d836112796003600089600160a060020a0316600160a060020a0316815260200190815260200160002084815481101561000057906000526020600020906003020160005b506040805160a0810182528254600160a060020a031681526001830154602082015260029092015467ffffffffffffffff80821692840192909252604060020a810482166060840152608060020a900416608082015287611af9565b611a1f565b92505b6001016118cd565b600160a060020a0385166000908152600160205260409020546117d09084611714565b92505b505092915050565b600060006119c384611070565b80156119d157506000600d54115b90506119fb816119e9576119e485610ef9565b6119ec565b60005b6111138686611b7b565b611a05565b91505b5092915050565b60008183106117a15781610fd9565b825b90505b92915050565b6000828201611a3c848210801590611a375750838210155b611b4d565b8091505b5092915050565b611a508161104c565b15611a5a57610b85565b6005805460009081526004602052604090208054600160a060020a031916600160a060020a038416179055805460010190555b50565b6000828202611a3c841580611a37575083858381156100005704145b611b4d565b8091505b5092915050565b60006000611acc60008411611b4d565b8284811561000057049050611a3c838581156100005706828502018514611b4d565b8091505b5092915050565b6000610fd98360200151611b0d858561172d565b611714565b90505b92915050565b60008382611b2982426110fa565b811115611b3557610000565b611707868686611b8f565b92505b5b50509392505050565b801515610b8557610000565b5b50565b6000611b6883611a47565b610fd98383611c92565b90505b92915050565b6000610fd983610ef9565b90505b92915050565b600160a060020a038084166000908152600260209081526040808320338516845282528083205493861683526001909152812054909190611bd09084611a1f565b600160a060020a038086166000908152600160205260408082209390935590871681522054611bff9084611714565b600160a060020a0386166000908152600160
20526040902055611c228184611714565b600160a060020a038087166000818152600260209081526040808320338616845282529182902094909455805187815290519288169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a3600191505b509392505050565b60003382611ca082426110fa565b811115611cac57610000565b6117d08585611cc2565b92505b5b505092915050565b600160a060020a033316600090815260016020526040812054611ce59083611714565b600160a060020a033381166000908152600160205260408082209390935590851681522054611d149083611a1f565b600160a060020a038085166000818152600160209081526040918290209490945580518681529051919333909316927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef92918290030190a35060015b929150505600a165627a7a72305820bfa5ddd3fecf3f43aed25385ec7ec3ef79638c2e58d99f85d9a3cc494183bf160029a165627a7a723058200e78a5f7e0f91739035d0fbf5eca02f79377210b722f63431f29a22e2880b3bd0029", + "nonce": "789", + "storage": { + "0xfe9ec0542a1c009be8b1f3acf43af97100ffff42eb736850fb038fa1151ad4d9": "0x000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8" + } + }, + "0x5cb4a6b902fcb21588c86c3517e797b07cdaadb9": { + "balance": "0x0", + "code": "0x", + "nonce": "0", + "storage": {} + }, + "0xe4a13bc304682a903e9472f469c33801dd18d9e8": { + "balance": "0x33c763c929f62c4f", + "code": "0x", + "nonce": "14", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3451177886", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4713874", + "hash": "0x5d52a672417cd1269bf4f7095e25dcbf837747bba908cd5ef809dc1bd06144b5", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x01a12845ed546b94a038a7a03e8df8d7952024ed41ccb3db7a7ade4abc290ce1", + "nonce": "0x28c446f1cb9748c1", + 
"number": "2290743", + "stateRoot": "0x4898aceede76739daef76448a367d10015a2c022c9e7909b99a10fbf6fb16708", + "timestamp": "1513616414", + "totalDifficulty": "7146523769022564" + }, + "input": "0xf8aa0e8509502f9000830493e0941d3ddf7caf024f253487e18bc4a15b1a360c170a80b8443b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e829a0524564944fa419f5c189b5074044f89210c6d6b2d77ee8f7f12a927d59b636dfa0015b28986807a424b18b186ee6642d76739df36cad802d20e8c00e79a61d7281", + "result": { + "calls": [ + { + "error": "internal failure", + "from": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a", + "gas": "0x39ff0", + "gasUsed": "0x39ff0", + "input": "0x606060405234620000005760405160208062001fd283398101604052515b805b600a8054600160a060020a031916600160a060020a0383161790555b506001600d819055600e81905560408051808201909152600c8082527f566f74696e672053746f636b00000000000000000000000000000000000000006020928301908152600b805460008290528251601860ff1990911617825590947f0175b7a638427703f0dbe7bb9bbf987a2551717b34e79f33b5b1008d1fa01db9600291831615610100026000190190921604601f0193909304830192906200010c565b828001600101855582156200010c579182015b828111156200010c578251825591602001919060010190620000ef565b5b50620001309291505b808211156200012c576000815560010162000116565b5090565b50506040805180820190915260038082527f43565300000000000000000000000000000000000000000000000000000000006020928301908152600c805460008290528251600660ff1990911617825590937fdf6966c971051c3d54ec59162606531493a51404a002842f56009d7e5cf4a8c760026001841615610100026000190190931692909204601f010481019291620001f7565b82800160010185558215620001f7579182015b82811115620001f7578251825591602001919060010190620001da565b5b506200021b9291505b808211156200012c576000815560010162000116565b5090565b50505b505b611da280620002306000396000f3006060604052361561019a5763ffffffff60e060020a600035041662e1986d811461019f57806302a72a4c146101d657806306eb4e421461020157806306fdde0314610220578063095ea7b3146102ad57806
3158ccb99146102dd57806318160ddd146102f85780631cf65a781461031757806323b872dd146103365780632c71e60a1461036c57806333148fd6146103ca578063435ebc2c146103f55780635eeb6e451461041e578063600e85b71461043c5780636103d70b146104a157806362c1e46a146104b05780636c182e99146104ba578063706dc87c146104f057806370a082311461052557806377174f851461055057806395d89b411461056f578063a7771ee3146105fc578063a9059cbb14610629578063ab377daa14610659578063b25dbb5e14610685578063b89a73cb14610699578063ca5eb5e1146106c6578063cbcf2e5a146106e1578063d21f05ba1461070e578063d347c2051461072d578063d96831e114610765578063dd62ed3e14610777578063df3c211b146107a8578063e2982c21146107d6578063eb944e4c14610801575b610000565b34610000576101d4600160a060020a036004351660243567ffffffffffffffff6044358116906064358116906084351661081f565b005b34610000576101ef600160a060020a0360043516610a30565b60408051918252519081900360200190f35b34610000576101ef610a4f565b60408051918252519081900360200190f35b346100005761022d610a55565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516602435610ae3565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516610b4e565b005b34610000576101ef610b89565b60408051918252519081900360200190f35b34610000576101ef610b8f565b60408051918252519081900360200190f35b34610000576102c9600160a060020a0360043581169060243516604435610b95565b604080519115158252519081900360200190f35b3461000057610388600160a060020a0360043516602435610bb7565b60408051600160a060020a039096168652602086019490945267ffffffffffffffff928316858501529082166060850152166080830152519081900360a00190f35b34610000576101ef600160a060020a0360043516610c21565b60408051918252519081900360200190f35b3461000057610402610c40565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d4600160a060020a0360043516602435610c4f5
65b005b3461000057610458600160a060020a0360043516602435610cc9565b60408051600160a060020a03909716875260208701959095528585019390935267ffffffffffffffff9182166060860152811660808501521660a0830152519081900360c00190f35b34610000576101d4610d9e565b005b6101d4610e1e565b005b34610000576104d3600160a060020a0360043516610e21565b6040805167ffffffffffffffff9092168252519081900360200190f35b3461000057610402600160a060020a0360043516610ead565b60408051600160a060020a039092168252519081900360200190f35b34610000576101ef600160a060020a0360043516610ef9565b60408051918252519081900360200190f35b34610000576101ef610f18565b60408051918252519081900360200190f35b346100005761022d610f1e565b604080516020808252835181830152835191928392908301918501908083838215610273575b80518252602083111561027357601f199092019160209182019101610253565b505050905090810190601f16801561029f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34610000576102c9600160a060020a0360043516610fac565b604080519115158252519081900360200190f35b34610000576102c9600160a060020a0360043516602435610fc2565b604080519115158252519081900360200190f35b3461000057610402600435610fe2565b60408051600160a060020a039092168252519081900360200190f35b34610000576101d46004351515610ffd565b005b34610000576102c9600160a060020a036004351661104c565b604080519115158252519081900360200190f35b34610000576101d4600160a060020a0360043516611062565b005b34610000576102c9600160a060020a0360043516611070565b604080519115158252519081900360200190f35b34610000576101ef6110f4565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351667ffffffffffffffff602435166110fa565b60408051918252519081900360200190f35b34610000576101d4600435611121565b005b34610000576101ef600160a060020a03600435811690602435166111c6565b60408051918252519081900360200190f35b34610000576101ef6004356024356044356064356084356111f3565b60408051918252519081900360200190f35b34610000576101ef600160a060020a036004351661128c565b60408051918252519081900360200190f35b34610000576101d4600160a060020a036004351660243561129e565b005b604
0805160a08101825260008082526020820181905291810182905260608101829052608081019190915267ffffffffffffffff848116908416101561086457610000565b8367ffffffffffffffff168267ffffffffffffffff16101561088557610000565b8267ffffffffffffffff168267ffffffffffffffff1610156108a657610000565b506040805160a081018252600160a060020a033381168252602080830188905267ffffffffffffffff80871684860152858116606085015287166080840152908816600090815260039091529190912080546001810180835582818380158290116109615760030281600302836000526020600020918201910161096191905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050916000526020600020906003020160005b5082518154600160a060020a031916600160a060020a03909116178155602083015160018201556040830151600290910180546060850151608086015167ffffffffffffffff1990921667ffffffffffffffff948516176fffffffffffffffff00000000000000001916604060020a918516919091021777ffffffffffffffff000000000000000000000000000000001916608060020a939091169290920291909117905550610a268686610fc2565b505b505050505050565b600160a060020a0381166000908152600360205260409020545b919050565b60055481565b600b805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b600160a060020a03338116600081815260026020908152604080832094871680845294825280832086905580518681529051929493927f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925929181900390910190a35060015b92915050565b600a5433600160a060020a03908116911614610b6957610000565b600a8054600160a060020a031916600160a060020a0383161790555b5b50565b60005481565b60005b90565b6000610ba2848484611600565b610bad8484846116e2565b90505b9392505050565b600360205281600052604060002081815481101561000057906000526020600020906003020160005b5080546001820154600290920154600160a060020a03909116935090915067ffffffffffffffff808
21691604060020a8104821691608060020a9091041685565b600160a060020a0381166000908152600860205260409020545b919050565b600a54600160a060020a031681565b600a5433600160a060020a03908116911614610c6a57610000565b610c7660005482611714565b6000908155600160a060020a038316815260016020526040902054610c9b9082611714565b600160a060020a038316600090815260016020526040812091909155610cc390839083611600565b5b5b5050565b6000600060006000600060006000600360008a600160a060020a0316600160a060020a0316815260200190815260200160002088815481101561000057906000526020600020906003020160005b508054600182015460028301546040805160a081018252600160a060020a039094168085526020850184905267ffffffffffffffff808416928601839052604060020a8404811660608701819052608060020a9094041660808601819052909c50929a509197509095509350909150610d90904261172d565b94505b509295509295509295565b33600160a060020a038116600090815260066020526040902054801515610dc457610000565b8030600160a060020a0316311015610ddb57610000565b600160a060020a0382166000818152600660205260408082208290555183156108fc0291849190818181858888f193505050501515610cc357610000565b5b5050565b5b565b600160a060020a03811660009081526003602052604081205442915b81811015610ea557600160a060020a03841660009081526003602052604090208054610e9a9190839081101561000057906000526020600020906003020160005b5060020154604060020a900467ffffffffffffffff168461177d565b92505b600101610e3d565b5b5050919050565b600160a060020a0380821660009081526007602052604081205490911615610eef57600160a060020a0380831660009081526007602052604090205416610ef1565b815b90505b919050565b600160a060020a0381166000908152600160205260409020545b919050565b600d5481565b600c805460408051602060026001851615610100026000190190941693909304601f81018490048402820184019092528181529291830182828015610adb5780601f10610ab057610100808354040283529160200191610adb565b820191906000526020600020905b815481529060010190602001808311610abe57829003601f168201915b505050505081565b60006000610fb983610c21565b1190505b919050565b6000610fcf338484611600565b610fd983836117ac565b90505b92915050565b60046020526000908152604090205
4600160a060020a031681565b8015801561101a575061100f33610ef9565b61101833610c21565b115b1561102457610000565b33600160a060020a03166000908152600960205260409020805460ff19168215151790555b50565b60006000610fb983610ef9565b1190505b919050565b610b8533826117dc565b5b50565b600a54604080516000602091820181905282517fcbcf2e5a000000000000000000000000000000000000000000000000000000008152600160a060020a03868116600483015293519194939093169263cbcf2e5a92602480830193919282900301818787803b156100005760325a03f115610000575050604051519150505b919050565b600e5481565b6000610fd961110984846118b2565b61111385856119b6565b611a05565b90505b92915050565b600a5433600160a060020a0390811691161461113c57610000565b61114860005482611a1f565b600055600554600190101561116c57600a5461116c90600160a060020a0316611a47565b5b600a54600160a060020a03166000908152600160205260409020546111929082611a1f565b600a8054600160a060020a039081166000908152600160205260408120939093559054610b8592911683611600565b5b5b50565b600160a060020a038083166000908152600260209081526040808320938516835292905220545b92915050565b6000600060008487101561120a5760009250611281565b8387111561121a57879250611281565b61123f6112308961122b888a611714565b611a90565b61123a8689611714565b611abc565b915081925061124e8883611714565b905061127e8361127961126a8461122b8c8b611714565b611a90565b61123a888b611714565b611abc565b611a1f565b92505b505095945050505050565b60066020526000908152604090205481565b600160a060020a03821660009081526003602052604081208054829190849081101561000057906000526020600020906003020160005b50805490925033600160a060020a039081169116146112f357610000565b6040805160a0810182528354600160a060020a0316815260018401546020820152600284015467ffffffffffffffff80821693830193909352604060020a810483166060830152608060020a900490911660808201526113539042611af9565b600160a060020a0385166000908152600360205260409020805491925090849081101561000057906000526020600020906003020160005b508054600160a060020a031916815560006001820181905560029091018054600160c060020a0319169055600160a060020a0385168152600360205260409020805460001981019081101561000
057906000526020600020906003020160005b50600160a060020a03851660009081526003602052604090208054859081101561000057906000526020600020906003020160005b5081548154600160a060020a031916600160a060020a03918216178255600180840154908301556002928301805493909201805467ffffffffffffffff191667ffffffffffffffff948516178082558354604060020a908190048616026fffffffffffffffff000000000000000019909116178082559254608060020a9081900490941690930277ffffffffffffffff00000000000000000000000000000000199092169190911790915584166000908152600360205260409020805460001981018083559190829080158290116115485760030281600302836000526020600020918201910161154891905b8082111561095d578054600160a060020a031916815560006001820155600281018054600160c060020a0319169055600301610926565b5090565b5b505050600160a060020a033316600090815260016020526040902054611570915082611a1f565b600160a060020a03338116600090815260016020526040808220939093559086168152205461159f9082611714565b600160a060020a038086166000818152600160209081526040918290209490945580518581529051339093169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a35b50505050565b600160a060020a0383161561166e576116466008600061161f86610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611714565b6008600061165386610ead565b600160a060020a031681526020810191909152604001600020555b600160a060020a038216156116dc576116b46008600061168d85610ead565b600160a060020a0316600160a060020a031681526020019081526020016000205482611a1f565b600860006116c185610ead565b600160a060020a031681526020810191909152604001600020555b5b505050565b600083826116f082426110fa565b8111156116fc57610000565b611707868686611b1b565b92505b5b50509392505050565b600061172283831115611b4d565b508082035b92915050565b6000610fd983602001518367ffffffffffffffff16856080015167ffffffffffffffff16866040015167ffffffffffffffff16876060015167ffffffffffffffff166111f3565b90505b92915050565b60008167ffffffffffffffff168367ffffffffffffffff1610156117a15781610fd9565b825b90505b92915050565b600033826117ba82426110fa565b8111156117c65
7610000565b6117d08585611b5d565b92505b5b505092915050565b6117e582610ef9565b6117ee83610c21565b11156117f957610000565b600160a060020a03811660009081526009602052604090205460ff16158015611834575081600160a060020a031681600160a060020a031614155b1561183e57610000565b61184782611070565b1561185157610000565b611864828261185f85610ef9565b611600565b600160a060020a0382811660009081526007602052604090208054600160a060020a031916918316918217905561189a82610ead565b600160a060020a031614610cc357610000565b5b5050565b600160a060020a038216600090815260036020526040812054815b818110156119885761197d836112796003600089600160a060020a0316600160a060020a0316815260200190815260200160002084815481101561000057906000526020600020906003020160005b506040805160a0810182528254600160a060020a031681526001830154602082015260029092015467ffffffffffffffff80821692840192909252604060020a810482166060840152608060020a900416608082015287611af9565b611a1f565b92505b6001016118cd565b600160a060020a0385166000908152600160205260409020546117d09084611714565b92505b505092915050565b600060006119c384611070565b80156119d157506000600d54115b90506119fb816119e9576119e485610ef9565b6119ec565b60005b6111138686611b7b565b611a05565b91505b5092915050565b60008183106117a15781610fd9565b825b90505b92915050565b6000828201611a3c848210801590611a375750838210155b611b4d565b8091505b5092915050565b611a508161104c565b15611a5a57610b85565b6005805460009081526004602052604090208054600160a060020a031916600160a060020a038416179055805460010190555b50565b6000828202611a3c841580611a37575083858381156100005704145b611b4d565b8091505b5092915050565b60006000611acc60008411611b4d565b8284811561000057049050611a3c838581156100005706828502018514611b4d565b8091505b5092915050565b6000610fd98360200151611b0d858561172d565b611714565b90505b92915050565b60008382611b2982426110fa565b811115611b3557610000565b611707868686611b8f565b92505b5b50509392505050565b801515610b8557610000565b5b50565b6000611b6883611a47565b610fd98383611c92565b90505b92915050565b6000610fd983610ef9565b90505b92915050565b600160a060020a0380841660009081526002602090815260408
08320338516845282528083205493861683526001909152812054909190611bd09084611a1f565b600160a060020a038086166000908152600160205260408082209390935590871681522054611bff9084611714565b600160a060020a038616600090815260016020526040902055611c228184611714565b600160a060020a038087166000818152600260209081526040808320338616845282529182902094909455805187815290519288169391927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef929181900390910190a3600191505b509392505050565b60003382611ca082426110fa565b811115611cac57610000565b6117d08585611cc2565b92505b5b505092915050565b600160a060020a033316600090815260016020526040812054611ce59083611714565b600160a060020a033381166000908152600160205260408082209390935590851681522054611d149083611a1f565b600160a060020a038085166000818152600160209081526040918290209490945580518681529051919333909316927fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef92918290030190a35060015b929150505600a165627a7a72305820bfa5ddd3fecf3f43aed25385ec7ec3ef79638c2e58d99f85d9a3cc494183bf160029000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182", + "type": "CREATE", + "value": "0x0" + } + ], + "error": "invalid jump destination", + "from": "0xe4a13bc304682a903e9472f469c33801dd18d9e8", + "gas": "0x435c8", + "gasUsed": "0x435c8", + "input": "0x3b91f506000000000000000000000000a14bdd7e5666d784dcce98ad24d383a6b1cd4182000000000000000000000000e4a13bc304682a903e9472f469c33801dd18d9e8", + "to": "0x1d3ddf7caf024f253487e18bc4a15b1a360c170a", + "type": "CALL", + "value": "0x0" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json new file mode 100644 index 00000000..86070d13 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_instafail.json @@ -0,0 +1,72 @@ +{ + "genesis": { + "difficulty": "117067574", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": 
"4712380", + "hash": "0xe05db05eeb3f288041ecb10a787df121c0ed69499355716e17c307de313a4486", + "miner": "0x0c062b329265c965deef1eede55183b3acb8f611", + "mixHash": "0xb669ae39118a53d2c65fd3b1e1d3850dd3f8c6842030698ed846a2762d68b61d", + "nonce": "0x2b469722b8e28c45", + "number": "24973", + "stateRoot": "0x532a5c3f75453a696428db078e32ae283c85cb97e4d8560dbdf022adac6df369", + "timestamp": "1479891145", + "totalDifficulty": "1892250259406", + "alloc": { + "0x6c06b16512b332e6cd8293a2974872674716ce18": { + "balance": "0x0", + "nonce": "1", + "code": "0x60606040526000357c0100000000000000000000000000000000000000000000000000000000900480632e1a7d4d146036575b6000565b34600057604e60048080359060200190919050506050565b005b3373ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051809050600060405180830381858888f19350505050505b5056", + "storage": {} + }, + "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31": { + "balance": "0x229ebbb36c3e0f20", + "nonce": "3", + "code": "0x", + "storage": {} + } + }, + "config": { + "chainId": 3, + "homesteadBlock": 0, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "byzantiumBlock": 1700000, + "constantinopleBlock": 4230000, + "petersburgBlock": 4939394, + "istanbulBlock": 6485846, + "muirGlacierBlock": 7117117, + "ethash": {} + } + }, + "context": { + "number": "24974", + "difficulty": "117067574", + "timestamp": "1479891162", + "gasLimit": "4712388", + "miner": "0xc822ef32e6d26e170b70cf761e204c1806265914" + }, + "input": "0xf889038504a81557008301f97e946c06b16512b332e6cd8293a2974872674716ce1880a42e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b1600002aa0e2a6558040c5d72bc59f2fb62a38993a314c849cd22fb393018d2c5af3112095a01bdb6d7ba32263ccc2ecc880d38c49d9f0c5a72d8b7908e3122b31356d349745", + "result": { + "type": "CALL", + "from": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "to": 
"0x6c06b16512b332e6cd8293a2974872674716ce18", + "value": "0x0", + "gas": "0x1a466", + "gasUsed": "0x1dc6", + "input": "0x2e1a7d4d00000000000000000000000000000000000000000000000014d1120d7b160000", + "output": "0x", + "calls": [ + { + "type": "CALL", + "from": "0x6c06b16512b332e6cd8293a2974872674716ce18", + "to": "0x66fdfd05e46126a07465ad24e40cc0597bc1ef31", + "value": "0x14d1120d7b160000", + "error":"internal failure", + "input": "0x" + } + ] + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json new file mode 100644 index 00000000..ec2ceb42 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/inner_throw_outer_revert.json @@ -0,0 +1,81 @@ +{ + "context": { + "difficulty": "3956606365", + "gasLimit": "5413248", + "miner": "0x00d8ae40d9a06d0e7a2877b62e32eb959afbe16d", + "number": "2295104", + "timestamp": "1513681256" + }, + "genesis": { + "alloc": { + "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76": { + "balance": "0x0", + "code": 
"0x60606040526004361061015e576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff1680625b4487146101a257806311df9995146101cb578063278ecde11461022057806330adce0e146102435780633197cbb61461026c5780634bb278f3146102955780636103d70b146102aa57806363a599a4146102bf5780636a2d1cb8146102d457806375f12b21146102fd57806378e979251461032a578063801db9cc1461035357806386d1a69f1461037c5780638da5cb5b146103915780638ef26a71146103e65780639890220b1461040f5780639b39caef14610424578063b85dfb801461044d578063be9a6555146104a1578063ccb07cef146104b6578063d06c91e4146104e3578063d669e1d414610538578063df40503c14610561578063e2982c2114610576578063f02e030d146105c3578063f2fde38b146105d8578063f3283fba14610611575b600060149054906101000a900460ff1615151561017a57600080fd5b60075442108061018b575060085442115b15151561019757600080fd5b6101a03361064a565b005b34156101ad57600080fd5b6101b5610925565b6040518082815260200191505060405180910390f35b34156101d657600080fd5b6101de61092b565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561022b57600080fd5b6102416004808035906020019091905050610951565b005b341561024e57600080fd5b610256610c48565b6040518082815260200191505060405180910390f35b341561027757600080fd5b61027f610c4e565b6040518082815260200191505060405180910390f35b34156102a057600080fd5b6102a8610c54565b005b34156102b557600080fd5b6102bd610f3e565b005b34156102ca57600080fd5b6102d261105d565b005b34156102df57600080fd5b6102e76110d5565b6040518082815260200191505060405180910390f35b341561030857600080fd5b6103106110e1565b604051808215151515815260200191505060405180910390f35b341561033557600080fd5b61033d6110f4565b6040518082815260200191505060405180910390f35b341561035e57600080fd5b6103666110fa565b6040518082815260200191505060405180910390f35b341561038757600080fd5b61038f611104565b005b341561039c57600080fd5b6103a4611196565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35
b34156103f157600080fd5b6103f96111bb565b6040518082815260200191505060405180910390f35b341561041a57600080fd5b6104226111c1565b005b341561042f57600080fd5b610437611296565b6040518082815260200191505060405180910390f35b341561045857600080fd5b610484600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190505061129c565b604051808381526020018281526020019250505060405180910390f35b34156104ac57600080fd5b6104b46112c0565b005b34156104c157600080fd5b6104c9611341565b604051808215151515815260200191505060405180910390f35b34156104ee57600080fd5b6104f6611354565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561054357600080fd5b61054b61137a565b6040518082815260200191505060405180910390f35b341561056c57600080fd5b610574611385565b005b341561058157600080fd5b6105ad600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506116c3565b6040518082815260200191505060405180910390f35b34156105ce57600080fd5b6105d66116db565b005b34156105e357600080fd5b61060f600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050611829565b005b341561061c57600080fd5b610648600480803573ffffffffffffffffffffffffffffffffffffffff169060200190919050506118fe565b005b600080670de0b6b3a7640000341015151561066457600080fd5b61069b610696670de0b6b3a7640000610688610258346119d990919063ffffffff16565b611a0c90919063ffffffff16565b611a27565b9150660221b262dd80006106ba60065484611a7e90919063ffffffff16565b111515156106c757600080fd5b600a60008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000209050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a9059cbb84846000604051602001526040518363ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b1515610
7d557600080fd5b6102c65a03f115156107e657600080fd5b5050506040518051905050610808828260010154611a7e90919063ffffffff16565b8160010181905550610827348260000154611a7e90919063ffffffff16565b816000018190555061084434600554611a7e90919063ffffffff16565b60058190555061085f82600654611a7e90919063ffffffff16565b6006819055503373ffffffffffffffffffffffffffffffffffffffff167ff3c1c7c0eb1328ddc834c4c9e579c06d35f443bf1102b034653624a239c7a40c836040518082815260200191505060405180910390a27fd1dc370699ae69fb860ed754789a4327413ec1cd379b93f2cbedf449a26b0e8583600554604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1505050565b60025481565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b600060085442108061096b5750651b48eb57e00060065410155b15151561097757600080fd5b600a60003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010154821415156109c757600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166323b872dd3330856000604051602001526040518463ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019350505050602060405180830381600087803b1515610ac857600080fd5b6102c65a03f11515610ad957600080fd5b5050506040518051905050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166342966c68836000604051602001526040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180828152602001915050602060405180830381600087803b1515610b7d57600080fd5b6102c65a03f11515610b8e57600080fd5b505050604051805190501515610ba357600080fd5b600a60003373fffffffffffffffffffffffff
fffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000015490506000600a60003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600001819055506000811115610c4457610c433382611a9c565b5b5050565b60055481565b60085481565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16141515610cb157600080fd5b600854421015610cd357660221b262dd8000600654141515610cd257600080fd5b5b651b48eb57e000600654108015610cf057506213c6806008540142105b151515610cfc57600080fd5b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051600060405180830381858888f193505050501515610d7557600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166370a08231306000604051602001526040518263ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001915050602060405180830381600087803b1515610e3a57600080fd5b6102c65a03f11515610e4b57600080fd5b5050506040518051905090506000811115610f2057600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166342966c68826000604051602001526040518263ffffffff167c010000000000000000000000000000000000000000000000000000000002815260040180828152602001915050602060405180830381600087803b1515610ef957600080fd5b6102c65a03f11515610f0a57600080fd5b505050604051805190501515610f1f57600080fd5b5b6001600960006101000a81548160ff02191690831515021790555050565b600080339150600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905060008114151515610f9657600080fd5b803073fffffffff
fffffffffffffffffffffffffffffff163110151515610fbc57600080fd5b610fd181600254611b5090919063ffffffff16565b6002819055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508173ffffffffffffffffffffffffffffffffffffffff166108fc829081150290604051600060405180830381858888f19350505050151561105957fe5b5050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156110b857600080fd5b6001600060146101000a81548160ff021916908315150217905550565b670de0b6b3a764000081565b600060149054906101000a900460ff1681565b60075481565b651b48eb57e00081565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561115f57600080fd5b600060149054906101000a900460ff16151561117a57600080fd5b60008060146101000a81548160ff021916908315150217905550565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b60065481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561121c57600080fd5b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff166108fc3073ffffffffffffffffffffffffffffffffffffffff16319081150290604051600060405180830381858888f19350505050151561129457600080fd5b565b61025881565b600a6020528060005260406000206000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561131b57600080fd5b600060075414151561132c57600080fd5b4260078190555062278d004201600881905550565b600960009054906101000a900460ff1681565b600460009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b660221b262dd800081565b6
0008060008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156113e557600080fd5b600654660221b262dd800003925061142b670de0b6b3a764000061141c610258670de0b6b3a76400006119d990919063ffffffff16565b81151561142557fe5b04611a27565b915081831115151561143c57600080fd5b600a60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000209050600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663a9059cbb6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff16856000604051602001526040518363ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200192505050602060405180830381600087803b151561158c57600080fd5b6102c65a03f1151561159d57600080fd5b50505060405180519050506115bf838260010154611a7e90919063ffffffff16565b81600101819055506115dc83600654611a7e90919063ffffffff16565b6006819055503073ffffffffffffffffffffffffffffffffffffffff167ff3c1c7c0eb1328ddc834c4c9e579c06d35f443bf1102b034653624a239c7a40c846040518082815260200191505060405180910390a27fd1dc370699ae69fb860ed754789a4327413ec1cd379b93f2cbedf449a26b0e856000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600554604051808373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020018281526020019250505060405180910390a1505050565b60016020528060005260406000206000915090505481565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561173657600080fd5b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673fffffffffffffffffffffffffffffffffffffff
f1663f2fde38b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff166040518263ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001915050600060405180830381600087803b151561181357600080fd5b6102c65a03f1151561182457600080fd5b505050565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561188457600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415156118fb57806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff1614151561195957600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff161415151561199557600080fd5b80600460006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff16021790555050565b600080828402905060008414806119fa57508284828115156119f757fe5b04145b1515611a0257fe5b8091505092915050565b6000808284811515611a1a57fe5b0490508091505092915050565b6000611a416202a300600754611a7e90919063ffffffff16565b421015611a7557611a6e611a5f600584611a0c90919063ffffffff16565b83611a7e90919063ffffffff16565b9050611a79565b8190505b919050565b6000808284019050838110151515611a9257fe5b8091505092915050565b611aee81600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054611a7e90919063ffffffff16565b600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550611b4681600254611a7e90919063ffffffff16565b6002819055505050565b60008282111
51515611b5e57fe5b8183039050929150505600a165627a7a72305820ec0d82a406896ccf20989b3d6e650abe4dc104e400837f1f58e67ef499493ae90029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x0000000000000000000000008d69d00910d0b2afb2a99ed6c16c8129fa8e1751", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000e819f024b41358d2c08e3a868a5c5dd0566078d4", + "0x0000000000000000000000000000000000000000000000000000000000000007": "0x000000000000000000000000000000000000000000000000000000005a388981", + "0x0000000000000000000000000000000000000000000000000000000000000008": "0x000000000000000000000000000000000000000000000000000000005a3b38e6" + } + }, + "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826": { + "balance": "0x2a2dd979a35cf000", + "code": "0x", + "nonce": "0", + "storage": {} + }, + "0xe819f024b41358d2c08e3a868a5c5dd0566078d4": { + "balance": "0x0", + "code": "0x6060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c681461027257806370a08231146102ad5780638da5cb5b146102fa57806395d89b411461034f578063a9059cbb146103dd578063dd62ed3e14610437578063f2fde38b146104a3575b600080fd5b34156100ca57600080fd5b6100d26104dc565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610515565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba61069c565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffff
ffffffffffffffffffffffffffff169060200190919080359060200190919050506106a2565b604051808215151515815260200191505060405180910390f35b341561025457600080fd5b61025c610952565b6040518082815260200191505060405180910390f35b341561027d57600080fd5b6102936004808035906020019091905050610957565b604051808215151515815260200191505060405180910390f35b34156102b857600080fd5b6102e4600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610abe565b6040518082815260200191505060405180910390f35b341561030557600080fd5b61030d610b07565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b341561035a57600080fd5b610362610b2d565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103a2578082015181840152602081019050610387565b50505050905090810190601f1680156103cf5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b34156103e857600080fd5b61041d600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610b66565b604051808215151515815260200191505060405180910390f35b341561044257600080fd5b61048d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610d01565b6040518082815260200191505060405180910390f35b34156104ae57600080fd5b6104da600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610d88565b005b6040805190810160405280600b81526020017f416c6c436f6465436f696e00000000000000000000000000000000000000000081525081565b6000808214806105a157506000600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054145b15156105ac57600080fd5b81600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ff
ffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925846040518082815260200191505060405180910390a36001905092915050565b60005481565b600080600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905061077683600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e5f90919063ffffffff16565b600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000208190555061080b83600160008873ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506108618382610e7d90919063ffffffff16565b600260008773ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef856040518082815260200191505060405180910390a360019150509392505050565b600681565b6000600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161415156109b557600080fd5b610a0782600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffff
ffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610a5f82600054610e7d90919063ffffffff16565b60008190555060003373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a360019050919050565b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020549050919050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6040805190810160405280600481526020017f414c4c430000000000000000000000000000000000000000000000000000000081525081565b6000610bba82600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e7d90919063ffffffff16565b600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002081905550610c4f82600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054610e5f90919063ffffffff16565b600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055508273ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a36001905092915050565b6000600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054905092915050565b600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16
3373ffffffffffffffffffffffffffffffffffffffff16141515610de457600080fd5b600073ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff16141515610e5c5780600360006101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505b50565b6000808284019050838110151515610e7357fe5b8091505092915050565b6000828211151515610e8b57fe5b8183039050929150505600a165627a7a7230582059f3ea3df0b054e9ab711f37969684ba83fe38f255ffe2c8d850d951121c51100029", + "nonce": "1", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3956606365", + "extraData": "0x566961425443", + "gasLimit": "5418523", + "hash": "0x6f37eb930a25da673ea1bb80fd9e32ddac19cdf7cd4bb2eac62cc13598624077", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "mixHash": "0x10971cde68c587c750c23b8589ae868ce82c2c646636b97e7d9856470c5297c7", + "nonce": "0x810f923ff4b450a1", + "number": "2295103", + "stateRoot": "0xff403612573d76dfdaf4fea2429b77dbe9764021ae0e38dc8ac79a3cf551179e", + "timestamp": "1513681246", + "totalDifficulty": "7162347056825919" + }, + "input": "0xf86d808504e3b292008307dfa69433056b5dcac09a9b4becad0e1dcf92c19bd0af76880e92596fd62900008029a0e5f27bb66431f7081bb7f1f242003056d7f3f35414c352cd3d1848b52716dac2a07d0be78980edb0bd2a0678fc53aa90ea9558ce346b0d947967216918ac74ccea", + "result": { + "calls": [ + { + "error": "invalid opcode: INVALID", + "from": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", + "gas": "0x75fe3", + "gasUsed": "0x75fe3", + "input": "0xa9059cbb000000000000000000000000d4fcab9f0a6dc0493af47c864f6f17a8a5e2e82600000000000000000000000000000000000000000000000000000000000002f4", + "to": "0xe819f024b41358d2c08e3a868a5c5dd0566078d4", + "type": "CALL", + "value": 
"0x0" + } + ], + "error": "execution reverted", + "from": "0xd4fcab9f0a6dc0493af47c864f6f17a8a5e2e826", + "gas": "0x78d9e", + "gasUsed": "0x76fc0", + "input": "0x", + "to": "0x33056b5dcac09a9b4becad0e1dcf92c19bd0af76", + "type": "CALL", + "value": "0xe92596fd6290000" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json new file mode 100644 index 00000000..de4fed6a --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/oog.json @@ -0,0 +1,60 @@ +{ + "context": { + "difficulty": "3699098917", + "gasLimit": "5258985", + "miner": "0xd049bfd667cb46aa3ef5df0da3e57db3be39e511", + "number": "2294631", + "timestamp": "1513675366" + }, + "genesis": { + "alloc": { + "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62": { + "balance": "0x0", + "code": "0x6060604052600436106100ba576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806306fdde03146100bf578063095ea7b31461014d57806318160ddd146101a757806323b872dd146101d0578063313ce5671461024957806342966c68146102785780635a3b7e42146102b357806370a082311461034157806379cc67901461038e57806395d89b41146103e8578063a9059cbb14610476578063dd62ed3e146104b8575b600080fd5b34156100ca57600080fd5b6100d2610524565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156101125780820151818401526020810190506100f7565b50505050905090810190601f16801561013f5780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561015857600080fd5b61018d600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803590602001909190505061055d565b604051808215151515815260200191505060405180910390f35b34156101b257600080fd5b6101ba6105ea565b6040518082815260200191505060405180910390f35b34156101db57600080fd5b61022f600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff169060200190919080359060200190919
050506105f0565b604051808215151515815260200191505060405180910390f35b341561025457600080fd5b61025c610910565b604051808260ff1660ff16815260200191505060405180910390f35b341561028357600080fd5b6102996004808035906020019091905050610915565b604051808215151515815260200191505060405180910390f35b34156102be57600080fd5b6102c6610a18565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156103065780820151818401526020810190506102eb565b50505050905090810190601f1680156103335780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561034c57600080fd5b610378600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610a51565b6040518082815260200191505060405180910390f35b341561039957600080fd5b6103ce600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610a69565b604051808215151515815260200191505060405180910390f35b34156103f357600080fd5b6103fb610bf8565b6040518080602001828103825283818151815260200191508051906020019080838360005b8381101561043b578082015181840152602081019050610420565b50505050905090810190601f1680156104685780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b341561048157600080fd5b6104b6600480803573ffffffffffffffffffffffffffffffffffffffff16906020019091908035906020019091905050610c31565b005b34156104c357600080fd5b61050e600480803573ffffffffffffffffffffffffffffffffffffffff1690602001909190803573ffffffffffffffffffffffffffffffffffffffff16906020019091905050610e34565b6040518082815260200191505060405180910390f35b6040805190810160405280600881526020017f446f70616d696e6500000000000000000000000000000000000000000000000081525081565b600081600260003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020819055506001905092915050565b60005481565b6000808373ffffffffffffffffffffffffffffffffffffffff1614151515610
61757600080fd5b81600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561066557600080fd5b600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205482600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205401101515156106f157fe5b600260008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002054821115151561077c57600080fd5b81600160008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254019250508190555081600260008673ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040518082815260200191505060405180910390a3600190509392505050565b601281565b600081600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020541015151561096557600080fd5b81600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055503373ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397
ca5836040518082815260200191505060405180910390a260019050919050565b6040805190810160405280600981526020017f446f706d6e20302e32000000000000000000000000000000000000000000000081525081565b60016020528060005260406000206000915090505481565b600081600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610ab957600080fd5b600260008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020548211151515610b4457600080fd5b81600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825403925050819055508160008082825403925050819055508273ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca5836040518082815260200191505060405180910390a26001905092915050565b6040805190810160405280600581526020017f444f504d4e00000000000000000000000000000000000000000000000000000081525081565b60008273ffffffffffffffffffffffffffffffffffffffff1614151515610c5757600080fd5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205410151515610ca557600080fd5b600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000205481600160008573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020540110151515610d3157fe5b80600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000828254039250508190555080600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600082825401925050819055508173fffffffffffffffffffffffffffffffffffff
fff163373ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef836040518082815260200191505060405180910390a35050565b60026020528160005260406000206020528060005260406000206000915091505054815600a165627a7a723058206d93424f4e7b11929b8276a269038402c10c0ddf21800e999916ddd9dff4a7630029", + "nonce": "1", + "storage": { + "0x296b66049cc4f9c8bf3d4f14752add261d1a980b39bdd194a7897baf39ac7579": "0x0000000000000000000000000000000000000000033b2e3c9fc9653f9e72b1e0" + } + }, + "0x94194bc2aaf494501d7880b61274a169f6502a54": { + "balance": "0xea8c39a876d19888d", + "code": "0x", + "nonce": "265", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3699098917", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "5263953", + "hash": "0x03a0f62a8106793dafcfae7b75fd2654322062d585a19cea568314d7205790dc", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x15482cc64b7c00a947f5bf015dfc010db1a6a668c74df61974d6a7848c174408", + "nonce": "0xd1bdb150f6fd170e", + "number": "2294630", + "stateRoot": "0x1ab1a534e84cc787cda1db21e0d5920ab06017948075b759166cfea7274657a1", + "timestamp": "1513675347", + "totalDifficulty": "7160543502214733" + }, + "input": "0xf8ab820109855d21dba00082ca1d9443064693d3d38ad6a7cb579e0d6d9718c8aa6b6280b844a9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f90001ba0ce3ad83f5530136467b7c2bb225f406bd170f4ad59c254e5103c34eeabb5bd69a0455154527224a42ab405cacf0fe92918a75641ce4152f8db292019a5527aa956", + "result": { + "error": "out of gas", + "from": "0x94194bc2aaf494501d7880b61274a169f6502a54", + "gas": "0x7045", + "gasUsed": "0x7045", + 
"input": "0xa9059cbb000000000000000000000000e77b1ac803616503510bed0086e3a7be2627a69900000000000000000000000000000000000000000000000000000009502f9000", + "to": "0x43064693d3d38ad6a7cb579e0d6d9718c8aa6b62", + "type": "CALL", + "value": "0x0" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json new file mode 100644 index 00000000..059040a1 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert.json @@ -0,0 +1,58 @@ +{ + "context": { + "difficulty": "3665057456", + "gasLimit": "5232723", + "miner": "0xf4d8e706cfb25c0decbbdd4d2e2cc10c66376a3f", + "number": "2294501", + "timestamp": "1513673601" + }, + "genesis": { + "alloc": { + "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9": { + "balance": "0x2a3fc32bcc019283", + "code": "0x", + "nonce": "10", + "storage": {} + }, + "0xabbcd5b340c80b5f1c0545c04c987b87310296ae": { + "balance": "0x0", + "code": 
"0x606060405236156100755763ffffffff7c01000000000000000000000000000000000000000000000000000000006000350416632d0335ab811461007a578063548db174146100ab5780637f649783146100fc578063b092145e1461014d578063c3f44c0a14610186578063c47cf5de14610203575b600080fd5b341561008557600080fd5b610099600160a060020a0360043516610270565b60405190815260200160405180910390f35b34156100b657600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061028f95505050505050565b005b341561010757600080fd5b6100fa600460248135818101908301358060208181020160405190810160405280939291908181526020018383602002808284375094965061029e95505050505050565b005b341561015857600080fd5b610172600160a060020a03600435811690602435166102ad565b604051901515815260200160405180910390f35b341561019157600080fd5b6100fa6004803560ff1690602480359160443591606435600160a060020a0316919060a49060843590810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965050509235600160a060020a031692506102cd915050565b005b341561020e57600080fd5b61025460046024813581810190830135806020601f8201819004810201604051908101604052818152929190602084018383808284375094965061056a95505050505050565b604051600160a060020a03909116815260200160405180910390f35b600160a060020a0381166000908152602081905260409020545b919050565b61029a816000610594565b5b50565b61029a816001610594565b5b50565b600160209081526000928352604080842090915290825290205460ff1681565b60008080600160a060020a038416158061030d5750600160a060020a038085166000908152600160209081526040808320339094168352929052205460ff165b151561031857600080fd5b6103218561056a565b600160a060020a038116600090815260208190526040808220549295507f19000000000000000000000000000000000000000000000000000000000000009230918891908b908b90517fff000000000000000000000000000000000000000000000000000000000000008089168252871660018201526c01000000000000000000000000600160a060020a038088168202600284015286811682026016840152602a8301869052841602604a820152605e810182805190602001908083835b60208310610
40057805182525b601f1990920191602091820191016103e0565b6001836020036101000a0380198251168184511617909252505050919091019850604097505050505050505051809103902091506001828a8a8a6040516000815260200160405260006040516020015260405193845260ff90921660208085019190915260408085019290925260608401929092526080909201915160208103908084039060008661646e5a03f1151561049957600080fd5b5050602060405103519050600160a060020a03838116908216146104bc57600080fd5b600160a060020a0380841660009081526020819052604090819020805460010190559087169086905180828051906020019080838360005b8381101561050d5780820151818401525b6020016104f4565b50505050905090810190601f16801561053a5780820380516001836020036101000a031916815260200191505b5091505060006040518083038160008661646e5a03f1915050151561055e57600080fd5b5b505050505050505050565b600060248251101561057e5750600061028a565b600160a060020a0360248301511690505b919050565b60005b825181101561060157600160a060020a033316600090815260016020526040812083918584815181106105c657fe5b90602001906020020151600160a060020a031681526020810191909152604001600020805460ff19169115159190911790555b600101610597565b5b5050505600a165627a7a723058200027e8b695e9d2dea9f3629519022a69f3a1d23055ce86406e686ea54f31ee9c0029", + "nonce": "1", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3672229776", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "5227619", + "hash": "0xa07b3d6c6bf63f5f981016db9f2d1d93033833f2c17e8bf7209e85f1faf08076", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0x806e151ce2817be922e93e8d5921fa0f0d0fd213d6b2b9a3fa17458e74a163d0", + "nonce": "0xbc5d43adc2c30c7d", + "number": "2294500", + "stateRoot": "0xca645b335888352ef9d8b1ef083e9019648180b259026572e3139717270de97d", + "timestamp": 
"1513673552", + "totalDifficulty": "7160066586979149" + }, + "input": "0xf9018b0a8505d21dba00832dc6c094abbcd5b340c80b5f1c0545c04c987b87310296ae80b9012473b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988000000000000000000000000000000000000000000000000000000000000000000000000000000001ba0fd659d76a4edbd2a823e324c93f78ad6803b30ff4a9c8bce71ba82798975c70ca06571eecc0b765688ec6c78942c5ee8b585e00988c0141b518287e9be919bc48a", + "result": { + "error": "execution reverted", + "from": "0x0f6cef2b7fbb504782e35aa82a2207e816a2b7a9", + "gas": "0x2d55e8", + "gasUsed": "0xc3", + "input": "0x73b40a5c000000000000000000000000400de2e016bda6577407dfc379faba9899bc73ef0000000000000000000000002cc31912b2b0f3075a87b3640923d45a26cef3ee000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000064d79d8e6c7265636f76657279416464726573730000000000000000000000000000000000000000000000000000000000383e3ec32dc0f66d8fe60dbdc2f6815bdf73a988383e3ec32dc0f66d8fe60dbdc2f6815bdf73a98800000000000000000000000000000000000000000000000000000000000000000000000000000000", + "to": "0xabbcd5b340c80b5f1c0545c04c987b87310296ae", + "type": "CALL", + "value": "0x0" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json new file mode 100644 index 00000000..094b0446 --- /dev/null +++ 
b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/revert_reason.json @@ -0,0 +1,64 @@ +{ + "context": { + "difficulty": "2", + "gasLimit": "8000000", + "miner": "0x0000000000000000000000000000000000000000", + "number": "3212651", + "timestamp": "1597246515" + }, + "genesis": { + "alloc": { + "0xf58833cf0c791881b494eb79d461e08a1f043f52": { + "balance": "0x0", + "code": "0x608060405234801561001057600080fd5b50600436106100a5576000357c010000000000000000000000000000000000000000000000000000000090048063609ff1bd11610078578063609ff1bd146101af5780639e7b8d61146101cd578063a3ec138d14610211578063e2ba53f0146102ae576100a5565b80630121b93f146100aa578063013cf08b146100d85780632e4176cf146101215780635c19a95c1461016b575b600080fd5b6100d6600480360360208110156100c057600080fd5b81019080803590602001909291905050506102cc565b005b610104600480360360208110156100ee57600080fd5b8101908080359060200190929190505050610469565b604051808381526020018281526020019250505060405180910390f35b61012961049a565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6101ad6004803603602081101561018157600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff1690602001909291905050506104bf565b005b6101b76108db565b6040518082815260200191505060405180910390f35b61020f600480360360208110156101e357600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610952565b005b6102536004803603602081101561022757600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610b53565b60405180858152602001841515151581526020018373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200182815260200194505050505060405180910390f35b6102b6610bb0565b6040518082815260200191505060405180910390f35b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020905060008160000154141561038a576040517f08c379
a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260148152602001807f486173206e6f20726967687420746f20766f746500000000000000000000000081525060200191505060405180910390fd5b8060010160009054906101000a900460ff161561040f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252600e8152602001807f416c726561647920766f7465642e00000000000000000000000000000000000081525060200191505060405180910390fd5b60018160010160006101000a81548160ff02191690831515021790555081816002018190555080600001546002838154811061044757fe5b9060005260206000209060020201600101600082825401925050819055505050565b6002818154811061047657fe5b90600052602060002090600202016000915090508060000154908060010154905082565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1681565b6000600160003373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff1615610587576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260128152602001807f596f7520616c726561647920766f7465642e000000000000000000000000000081525060200191505060405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161415610629576040517f08c379a000000000000000000000000000000000000000000000000000000000815260040180806020018281038252601e8152602001807f53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e000081525060200191505060405180910390fd5b5b600073ffffffffffffffffffffffffffffffffffffffff16600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16146107cc57600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160019054906101000a
900473ffffffffffffffffffffffffffffffffffffffff1691503373ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff1614156107c7576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260198152602001807f466f756e64206c6f6f7020696e2064656c65676174696f6e2e0000000000000081525060200191505060405180910390fd5b61062a565b60018160010160006101000a81548160ff021916908315150217905550818160010160016101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055506000600160008473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002090508060010160009054906101000a900460ff16156108bf578160000154600282600201548154811061089c57fe5b9060005260206000209060020201600101600082825401925050819055506108d6565b816000015481600001600082825401925050819055505b505050565b6000806000905060008090505b60028054905081101561094d57816002828154811061090357fe5b9060005260206000209060020201600101541115610940576002818154811061092857fe5b90600052602060002090600202016001015491508092505b80806001019150506108e8565b505090565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109f7576040517f08c379a0000000000000000000000000000000000000000000000000000000008152600401808060200182810382526028815260200180610bde6028913960400191505060405180910390fd5b600160008273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190815260200160002060010160009054906101000a900460ff1615610aba576040517f08c379a00000000000000000000000000000000000000000000000000000000081526004018080602001828103825260188152602001807f54686520766f74657220616c726561647920766f7465642e000000000000000081525060200191505060405180910390fd5b6000600160008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200190
81526020016000206000015414610b0957600080fd5b60018060008373ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1681526020019081526020016000206000018190555050565b60016020528060005260406000206000915090508060000154908060010160009054906101000a900460ff16908060010160019054906101000a900473ffffffffffffffffffffffffffffffffffffffff16908060020154905084565b60006002610bbc6108db565b81548110610bc657fe5b90600052602060002090600202016000015490509056fe4f6e6c79206368616972706572736f6e2063616e206769766520726967687420746f20766f74652ea26469706673582212201d282819f8f06fed792100d60a8b08809b081a34a1ecd225e83a4b41122165ed64736f6c63430006060033", + "nonce": "1", + "storage": { + "0x6200beec95762de01ce05f2a0e58ce3299dbb53c68c9f3254a242121223cdf58": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1": { + "balance": "0x57af9d6b3df812900", + "code": "0x", + "nonce": "6", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 0, + "constantinopleBlock": 0, + "petersburgBlock": 0, + "IstanbulBlock":1561651, + "chainId": 5, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": 
"0xf888068449504f80832dc6c094f58833cf0c791881b494eb79d461e08a1f043f5280a45c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf12da0264664db3e71fae1dbdaf2f53954be149ad3b7ba8a5054b4d89c70febfacc8b1a0212e8398757963f419681839ae8c5a54b411e252473c82d93dda68405ca63294", + "result": { + "error": "execution reverted", + "from": "0xf7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "gas": "0x2d7308", + "gasUsed": "0x588", + "input": "0x5c19a95c000000000000000000000000f7579c3d8a669c89d5ed246a22eb6db8f6fedbf1", + "to": "0xf58833cf0c791881b494eb79d461e08a1f043f52", + "type": "CALL", + "value": "0x0", + "output": "0x08c379a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000001e53656c662d64656c65676174696f6e20697320646973616c6c6f7765642e0000" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json new file mode 100644 index 00000000..132cefa1 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/selfdestruct.json @@ -0,0 +1,73 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": "0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": "0x61deadff", + "nonce": "1", + "storage": {} + }, + "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": 
"0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "result": { + "calls": [ + { + "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "input": "0x", + "to": "0x000000000000000000000000000000000000dEaD", + "type": "SELFDESTRUCT", + "value": "0x4d87094125a369d9bd5" + } + ], + "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", + "gas": "0x10738", + "gasUsed": "0x7533", + "input": "0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "output": "0x", + "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "type": "CALL", + "value": "0x0" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json new file mode 100644 index 00000000..b4643212 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/simple.json @@ -0,0 +1,78 @@ +{ + "context": { + "difficulty": "3502894804", + "gasLimit": "4722976", + "miner": 
"0x1585936b53834b021f68cc13eeefdec2efc8e724", + "number": "2289806", + "timestamp": "1513601314" + }, + "genesis": { + "alloc": { + "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5": { + "balance": "0x0", + "code": "0x", + "nonce": "22", + "storage": {} + }, + "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe": { + "balance": "0x4d87094125a369d9bd5", + "code": "0x606060405236156100935763ffffffff60e060020a60003504166311ee8382811461009c57806313af4035146100be5780631f5e8f4c146100ee57806324daddc5146101125780634921a91a1461013b57806363e4bff414610157578063764978f91461017f578063893d20e8146101a1578063ba40aaa1146101cd578063cebc9a82146101f4578063e177246e14610216575b61009a5b5b565b005b34156100a457fe5b6100ac61023d565b60408051918252519081900360200190f35b34156100c657fe5b6100da600160a060020a0360043516610244565b604080519115158252519081900360200190f35b34156100f657fe5b6100da610307565b604080519115158252519081900360200190f35b341561011a57fe5b6100da6004351515610318565b604080519115158252519081900360200190f35b6100da6103d6565b604080519115158252519081900360200190f35b6100da600160a060020a0360043516610420565b604080519115158252519081900360200190f35b341561018757fe5b6100ac61046c565b60408051918252519081900360200190f35b34156101a957fe5b6101b1610473565b60408051600160a060020a039092168252519081900360200190f35b34156101d557fe5b6100da600435610483565b604080519115158252519081900360200190f35b34156101fc57fe5b6100ac61050d565b60408051918252519081900360200190f35b341561021e57fe5b6100da600435610514565b604080519115158252519081900360200190f35b6003545b90565b60006000610250610473565b600160a060020a031633600160a060020a03161415156102705760006000fd5b600160a060020a03831615156102865760006000fd5b50600054600160a060020a0390811690831681146102fb57604051600160a060020a0380851691908316907ffcf23a92150d56e85e3a3d33b357493246e55783095eb6a733eb8439ffc752c890600090a360008054600160a060020a031916600160a060020a03851617905560019150610300565b600091505b5b50919050565b60005460a060020a900460ff165b90565b60006000610324610473565b600160a060020a031633600160a060020a0
3161415156103445760006000fd5b5060005460a060020a900460ff16801515831515146102fb576000546040805160a060020a90920460ff1615158252841515602083015280517fe6cd46a119083b86efc6884b970bfa30c1708f53ba57b86716f15b2f4551a9539281900390910190a16000805460a060020a60ff02191660a060020a8515150217905560019150610300565b600091505b5b50919050565b60006103e0610307565b801561040557506103ef610473565b600160a060020a031633600160a060020a031614155b156104105760006000fd5b610419336105a0565b90505b5b90565b600061042a610307565b801561044f5750610439610473565b600160a060020a031633600160a060020a031614155b1561045a5760006000fd5b610463826105a0565b90505b5b919050565b6001545b90565b600054600160a060020a03165b90565b6000600061048f610473565b600160a060020a031633600160a060020a03161415156104af5760006000fd5b506001548281146102fb57604080518281526020810185905281517f79a3746dde45672c9e8ab3644b8bb9c399a103da2dc94b56ba09777330a83509929181900390910190a160018381559150610300565b600091505b5b50919050565b6002545b90565b60006000610520610473565b600160a060020a031633600160a060020a03161415156105405760006000fd5b506002548281146102fb57604080518281526020810185905281517ff6991a728965fedd6e927fdf16bdad42d8995970b4b31b8a2bf88767516e2494929181900390910190a1600283905560019150610300565b600091505b5b50919050565b60006000426105ad61023d565b116102fb576105c46105bd61050d565b4201610652565b6105cc61046c565b604051909150600160a060020a038416908290600081818185876187965a03f1925050501561063d57604080518281529051600160a060020a038516917f9bca65ce52fdef8a470977b51f247a2295123a4807dfa9e502edf0d30722da3b919081900360200190a260019150610300565b6102fb42610652565b5b600091505b50919050565b60038190555b505600a165627a7a72305820f3c973c8b7ed1f62000b6701bd5b708469e19d0f1d73fde378a56c07fd0b19090029", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000000": "0x000000000000000000000001b436ba50d378d4bbc8660d312a13df6af6e89dfb", + "0x0000000000000000000000000000000000000000000000000000000000000001": 
"0x00000000000000000000000000000000000000000000000006f05b59d3b20000", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x000000000000000000000000000000000000000000000000000000000000003c", + "0x0000000000000000000000000000000000000000000000000000000000000003": "0x000000000000000000000000000000000000000000000000000000005a37b834" + } + }, + "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb": { + "balance": "0x1780d77678137ac1b775", + "code": "0x", + "nonce": "29072", + "storage": {} + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "3509749784", + "extraData": "0x4554482e45544846414e532e4f52472d4641313738394444", + "gasLimit": "4727564", + "hash": "0x609948ac3bd3c00b7736b933248891d6c901ee28f066241bddb28f4e00a9f440", + "miner": "0xbbf5029fd710d227630c8b7d338051b8e76d50b3", + "mixHash": "0xb131e4507c93c7377de00e7c271bf409ec7492767142ff0f45c882f8068c2ada", + "nonce": "0x4eb12e19c16d43da", + "number": "2289805", + "stateRoot": "0xc7f10f352bff82fac3c2999d3085093d12652e19c7fd32591de49dc5d91b4f1f", + "timestamp": "1513601261", + "totalDifficulty": "7143276353481064" + }, + "input": "0xf88b8271908506fc23ac0083015f90943b873a919aa0512d5a0f09e6dcceaa4a6727fafe80a463e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c52aa0bdce0b59e8761854e857fe64015f06dd08a4fbb7624f6094893a79a72e6ad6bea01d9dde033cff7bb235a3163f348a6d7ab8d6b52bc0963a95b91612e40ca766a4", + "result": { + "calls": [ + { + "from": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "input": "0x", + "to": "0x0024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "type": "CALL", + "value": "0x6f05b59d3b20000" + } + ], + "from": "0xb436ba50d378d4bbc8660d312a13df6af6e89dfb", + "gas": "0x10738", + "gasUsed": "0x3ef9", + "input": 
"0x63e4bff40000000000000000000000000024f658a46fbb89d8ac105e98d7ac7cbbaf27c5", + "output": "0x0000000000000000000000000000000000000000000000000000000000000001", + "to": "0x3b873a919aa0512d5a0f09e6dcceaa4a6727fafe", + "type": "CALL", + "value": "0x0" + } +} diff --git a/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json new file mode 100644 index 00000000..09cf4497 --- /dev/null +++ b/coreth/eth/tracers/internal/tracetest/testdata/call_tracer_legacy/throw.json @@ -0,0 +1,62 @@ +{ + "context": { + "difficulty": "117009631", + "gasLimit": "4712388", + "miner": "0x294e5d6c39a36ce38af1dca70c1060f78dee8070", + "number": "25009", + "timestamp": "1479891666" + }, + "genesis": { + "alloc": { + "0x70c9217d814985faef62b124420f8dfbddd96433": { + "balance": "0x4ecd70668f5d854a", + "code": "0x", + "nonce": "1638", + "storage": {} + }, + "0xc212e03b9e060e36facad5fd8f4435412ca22e6b": { + "balance": "0x0", + "code": 
"0x606060405236156101745760e060020a600035046302d05d3f811461017c57806304a7fdbc1461018e5780630e90f957146101fb5780630fb5a6b41461021257806314baa1b61461021b57806317fc45e21461023a5780632b096926146102435780632e94420f1461025b578063325a19f11461026457806336da44681461026d5780633f81a2c01461027f5780633fc306821461029757806345ecd3d7146102d45780634665096d146102dd5780634e71d92d146102e657806351a34eb8146103085780636111bb951461032d5780636f265b93146103445780637e9014e11461034d57806390ba009114610360578063927df5e014610393578063a7f437791461046c578063ad8f50081461046e578063bc6d909414610477578063bdec3ad114610557578063c19d93fb1461059a578063c9503fe2146105ad578063e0a73a93146105b6578063ea71b02d146105bf578063ea8a1af0146105d1578063ee4a96f9146105f3578063f1ff78a01461065c575b61046c610002565b610665600054600160a060020a031681565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600554600090600160a060020a0390811633909116146106a857610002565b61068260015460a060020a900460ff166000145b90565b61069660085481565b61046c600435600154600160a060020a03166000141561072157610002565b610696600d5481565b610696600435600f8160068110156100025750015481565b61069660045481565b61069660035481565b610665600554600160a060020a031681565b61069660043560158160068110156100025750015481565b6106966004355b600b54600f5460009160028202808203928083039290810191018386101561078357601054840186900394505b50505050919050565b61069660025481565b61069660095481565b61046c600554600090600160a060020a03908116339091161461085857610002565b61046c600435600554600090600160a060020a03908116339091161461092e57610002565b6106826001805460a060020a900460ff161461020f565b610696600b5481565b61068260075460a060020a900460ff1681565b6106966004355b600b54601554600091600282028082039280830392908101910183861015610a6c5760165494506102cb565b61046c6004356024356044356040805160015460e360020a631c2d8fb302825260b260020a691858d8dbdd5b9d18dd1b02600483015291516000928392600160a060020a03919091169163e16c7d989160248
1810192602092909190829003018187876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610b4657610002565b005b610696600a5481565b61046c60006000600060006000600160009054906101000a9004600160a060020a0316600160a060020a031663e16c7d986040518160e060020a028152600401808060b260020a691858d8dbdd5b9d18dd1b0281526020015060200190506020604051808303816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663c4b0c96a336040518260e060020a0281526004018082600160a060020a031681526020019150506020604051808303816000876161da5a03f1156100025750506040515115159050610f1757610002565b61046c5b60015b60058160ff16101561071e57600f6001820160ff166006811015610002578101549060ff83166006811015610002570154101561129057610002565b61069660015460a060020a900460ff1681565b61069660065481565b610696600c5481565b610665600754600160a060020a031681565b61046c600554600090600160a060020a0390811633909116146112c857610002565b6040805160c081810190925261046c9160049160c4918390600690839083908082843760408051808301909152929750909561018495509193509091908390839080828437509095505050505050600154600090600160a060020a03168114156113fb57610002565b610696600e5481565b60408051600160a060020a03929092168252519081900360200190f35b604080519115158252519081900360200190f35b60408051918252519081900360200190f35b5060005b60068160ff16101561070857828160ff166006811015610002576020020151600f60ff831660068110156100025701558160ff82166006811015610002576020020151601560ff831660068110156100025701556001016106ac565b61071061055b565b505050565b600e8054820190555b50565b6040805160015460e060020a6313bc6d4b02825233600160a060020a03908116600484015292519216916313bc6d4b9160248181019260209290919082900301816000876161da5a03f115610002575050604051511515905061071557610002565b83861015801561079257508286105b156107b457600f546010546011548689039082030291909104900394506102cb565b8286101580156107c55750600b5486105b156107e757600f546
011546012548589039082030291909104900394506102cb565b600b5486108015906107f857508186105b1561081d57600b54600f546012546013549289039281039290920204900394506102cb565b81861015801561082c57508086105b1561084e57600f546013546014548489039082030291909104900394506102cb565b60145494506102cb565b60015460a060020a900460ff1660001461087157610002565b600254600a01431161088257610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750505060405180519060200150905080600160a060020a031663771d50e16040518160e060020a0281526004018090506000604051808303816000876161da5a03f1156100025750505050565b60015460a060020a900460ff1660001461094757610002565b600254600a01431161095857610002565b6040805160015460e360020a631c2d8fb302825260a860020a6a636f6e74726163746170690260048301529151600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180517f51a34eb8000000000000000000000000000000000000000000000000000000008252600482018690529151919350600160a060020a03841692506351a34eb8916024808301926000929190829003018183876161da5a03f11561000257505050600b8290554360025560408051838152905130600160a060020a0316917fa609f6bd4ad0b4f419ddad4ac9f0d02c2b9295c5e6891469055cf73c2b568fff919081900360200190a25050565b838610158015610a7b57508286105b15610a9d576015546016546017548689039082900302919091040194506102cb565b828610158015610aae5750600b5486105b15610ad0576015546017546018548589039082900302919091040194506102cb565b600b548610801590610ae157508186105b15610b0657600b546015546018546019549289039281900392909202040194506102cb565b818610158015610b1557508086105b15610b3757601554601954601a548489039082900302919091040194506102cb565b601a54860181900394506102cb565b60015460a060020a900460ff16600014610b5f57610002565b6001805460a060020a60ff02191660a060020a17908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d9891602
48181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919450600160a060020a038516925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604080518051600a556005547ffebf661200000000000000000000000000000000000000000000000000000000825233600160a060020a03908116600484015216602482015260448101879052905163febf661291606480820192600092909190829003018183876161da5a03f115610002575050508215610cc7576007805473ffffffffffffffffffffffffffffffffffffffff191633179055610dbb565b6040805160055460065460e060020a63599efa6b028352600160a060020a039182166004840152602483015291519184169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050604080516006547f56ccb6f000000000000000000000000000000000000000000000000000000000825233600160a060020a03166004830152602482015290516356ccb6f091604480820192600092909190829003018183876161da5a03f115610002575050600580546007805473ffffffffffffffffffffffffffffffffffffffff19908116600160a060020a038416179091551633179055505b6007805460a060020a60ff02191660a060020a87810291909117918290556008544301600955900460ff1615610df757600a54610e039061029e565b600a54610e0b90610367565b600c55610e0f565b600c555b600c54670de0b6b3a7640000850204600d55600754600554604080517f759297bb000000000000000000000000000000000000000000000000000000008152600160a060020a039384166004820152918316602483015260448201879052519184169163759297bb91606481810192600092909190829003018183876161da5a03f11561000257505060408051600754600a54600d54600554600c5460a060020a850460ff161515865260208601929092528486019290925260608401529251600160a060020a0391821694509281169230909116917f3b3d1986083d191be01d28623dc19604728e29ae28bdb9ba52757fdee1a18de2919081900360800190a45050505050565b600954431015610f2657610002565b6001805460a060020a900460ff1614610f3e57610002565b6001805460a060020a60ff0219167402000000000000000000000000000000000000000017908190556040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169
163e16c7d989160248181019260209290919082900301816000876161da5a03f1156100025750506040805180516004805460e260020a633e4baddd028452908301529151919750600160a060020a038816925063f92eb77491602482810192602092919082900301816000876161da5a03f115610002575050604051516007549095506000945060a060020a900460ff1615905061105c57600a5484111561105757600a54600d54670de0b6b3a7640000918603020492505b61107e565b600a5484101561107e57600a54600d54670de0b6b3a764000091869003020492505b60065483111561108e5760065492505b6006548390039150600083111561111857604080516005546007547f5928d37f000000000000000000000000000000000000000000000000000000008352600160a060020a0391821660048401528116602483015260448201869052915191871691635928d37f91606481810192600092909190829003018183876161da5a03f115610002575050505b600082111561117a576040805160055460e060020a63599efa6b028252600160a060020a0390811660048301526024820185905291519187169163599efa6b91604481810192600092909190829003018183876161da5a03f115610002575050505b6040805185815260208101849052808201859052905130600160a060020a0316917f89e690b1d5aaae14f3e85f108dc92d9ab3763a58d45aed8b59daedbbae8fe794919081900360600190a260008311156112285784600160a060020a0316634cc927d785336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f11561000257505050611282565b84600160a060020a0316634cc927d7600a60005054336040518360e060020a0281526004018083815260200182600160a060020a03168152602001925050506000604051808303816000876161da5a03f115610002575050505b600054600160a060020a0316ff5b60156001820160ff166006811015610002578101549060ff8316600681101561000257015411156112c057610002565b60010161055e565b60015460a060020a900460ff166000146112e157610002565b600254600a0143116112f257610002565b6001546040805160e360020a631c2d8fb302815260a860020a6a636f6e74726163746170690260048201529051600160a060020a03929092169163e16c7d989160248181019260209290919082900301816000876161da5a03f11561000257505060408051805160055460065460e060020a63599efa6b028452600160a060020a0391821660048501526024840152925190945
0918416925063599efa6b916044808301926000929190829003018183876161da5a03f1156100025750505080600160a060020a0316632b68bb2d6040518160e060020a0281526004018090506000604051808303816000876161da5a03f115610002575050600054600160a060020a03169050ff5b6001546040805160e060020a6313bc6d4b02815233600160a060020a039081166004830152915191909216916313bc6d4b91602480830192602092919082900301816000876161da5a03f11561000257505060405151151590506106a85761000256", + "nonce": "1", + "storage": { + "0x0000000000000000000000000000000000000000000000000000000000000001": "0x0000000000000000000000002cccf5e0538493c235d1c5ef6580f77d99e91396", + "0x0000000000000000000000000000000000000000000000000000000000000002": "0x00000000000000000000000000000000000000000000000000000000000061a9", + "0x0000000000000000000000000000000000000000000000000000000000000005": "0x00000000000000000000000070c9217d814985faef62b124420f8dfbddd96433" + } + } + }, + "config": { + "byzantiumBlock": 1700000, + "chainId": 3, + "daoForkSupport": true, + "eip150Block": 0, + "eip150Hash": "0x41941023680923e0fe4d74a34bdac8141f2540e3ae90623718e47d66d1ca4a2d", + "eip155Block": 10, + "eip158Block": 10, + "ethash": {}, + "homesteadBlock": 0 + }, + "difficulty": "117066792", + "extraData": "0xd783010502846765746887676f312e372e33856c696e7578", + "gasLimit": "4712388", + "hash": "0xe23e8d4562a1045b70cbc99fefb20c101a8f0fc8559a80d65fea8896e2f1d46e", + "miner": "0x71842f946b98800fe6feb49f0ae4e253259031c9", + "mixHash": "0x0aada9d6e93dd4db0d09c0488dc0a048fca2ccdc1f3fc7b83ba2a8d393a3a4ff", + "nonce": "0x70849d5838dee2e9", + "number": "25008", + "stateRoot": "0x1e01d2161794768c5b917069e73d86e8dca80cd7f3168c0597de420ab93a3b7b", + "timestamp": "1479891641", + "totalDifficulty": "1896347038589" + }, + "input": 
"0xf88b8206668504a817c8008303d09094c212e03b9e060e36facad5fd8f4435412ca22e6b80a451a34eb8000000000000000000000000000000000000000000000027fad02094277c000029a0692a3b4e7b2842f8dd7832e712c21e09f451f416c8976d5b8d02e8c0c2b4bea9a07645e90fc421b63dd755767fd93d3c03b4ec0c4d8fafa059558d08cf11d59750", + "result": { + "error": "invalid jump destination", + "from": "0x70c9217d814985faef62b124420f8dfbddd96433", + "gas": "0x37b38", + "gasUsed": "0x37b38", + "input": "0x51a34eb8000000000000000000000000000000000000000000000027fad02094277c0000", + "to": "0xc212e03b9e060e36facad5fd8f4435412ca22e6b", + "type": "CALL", + "value": "0x0" + } +} diff --git a/coreth/eth/tracers/js/bigint.go b/coreth/eth/tracers/js/bigint.go new file mode 100644 index 00000000..aa3f44e2 --- /dev/null +++ b/coreth/eth/tracers/js/bigint.go @@ -0,0 +1,30 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package js + +// bigIntegerJS is the minified version of https://github.com/peterolson/BigInteger.js. +const bigIntegerJS = `var bigInt=function(undefined){"use strict";var BASE=1e7,LOG_BASE=7,MAX_INT=9007199254740992,MAX_INT_ARR=smallToArray(MAX_INT),LOG_MAX_INT=Math.log(MAX_INT);function Integer(v,radix){if(typeof v==="undefined")return Integer[0];if(typeof radix!=="undefined")return+radix===10?parseValue(v):parseBase(v,radix);return parseValue(v)}function BigInteger(value,sign){this.value=value;this.sign=sign;this.isSmall=false}BigInteger.prototype=Object.create(Integer.prototype);function SmallInteger(value){this.value=value;this.sign=value<0;this.isSmall=true}SmallInteger.prototype=Object.create(Integer.prototype);function isPrecise(n){return-MAX_INT0)return Math.floor(n);return Math.ceil(n)}function add(a,b){var l_a=a.length,l_b=b.length,r=new Array(l_a),carry=0,base=BASE,sum,i;for(i=0;i=base?1:0;r[i]=sum-carry*base}while(i0)r.push(carry);return r}function addAny(a,b){if(a.length>=b.length)return add(a,b);return add(b,a)}function addSmall(a,carry){var l=a.length,r=new Array(l),base=BASE,sum,i;for(i=0;i0){r[i++]=carry%base;carry=Math.floor(carry/base)}return r}BigInteger.prototype.add=function(v){var n=parseValue(v);if(this.sign!==n.sign){return this.subtract(n.negate())}var a=this.value,b=n.value;if(n.isSmall){return new BigInteger(addSmall(a,Math.abs(b)),this.sign)}return new BigInteger(addAny(a,b),this.sign)};BigInteger.prototype.plus=BigInteger.prototype.add;SmallInteger.prototype.add=function(v){var n=parseValue(v);var a=this.value;if(a<0!==n.sign){return this.subtract(n.negate())}var b=n.value;if(n.isSmall){if(isPrecise(a+b))return new SmallInteger(a+b);b=smallToArray(Math.abs(b))}return new BigInteger(addSmall(b,Math.abs(a)),a<0)};SmallInteger.prototype.plus=SmallInteger.prototype.add;function subtract(a,b){var a_l=a.length,b_l=b.length,r=new 
Array(a_l),borrow=0,base=BASE,i,difference;for(i=0;i=0){value=subtract(a,b)}else{value=subtract(b,a);sign=!sign}value=arrayToSmall(value);if(typeof value==="number"){if(sign)value=-value;return new SmallInteger(value)}return new BigInteger(value,sign)}function subtractSmall(a,b,sign){var l=a.length,r=new Array(l),carry=-b,base=BASE,i,difference;for(i=0;i=0)};SmallInteger.prototype.minus=SmallInteger.prototype.subtract;BigInteger.prototype.negate=function(){return new BigInteger(this.value,!this.sign)};SmallInteger.prototype.negate=function(){var sign=this.sign;var small=new SmallInteger(-this.value);small.sign=!sign;return small};BigInteger.prototype.abs=function(){return new BigInteger(this.value,false)};SmallInteger.prototype.abs=function(){return new SmallInteger(Math.abs(this.value))};function multiplyLong(a,b){var a_l=a.length,b_l=b.length,l=a_l+b_l,r=createArray(l),base=BASE,product,carry,i,a_i,b_j;for(i=0;i0){r[i++]=carry%base;carry=Math.floor(carry/base)}return r}function shiftLeft(x,n){var r=[];while(n-- >0)r.push(0);return r.concat(x)}function multiplyKaratsuba(x,y){var n=Math.max(x.length,y.length);if(n<=30)return multiplyLong(x,y);n=Math.ceil(n/2);var b=x.slice(n),a=x.slice(0,n),d=y.slice(n),c=y.slice(0,n);var ac=multiplyKaratsuba(a,c),bd=multiplyKaratsuba(b,d),abcd=multiplyKaratsuba(addAny(a,b),addAny(c,d));var product=addAny(addAny(ac,shiftLeft(subtract(subtract(abcd,ac),bd),n)),shiftLeft(bd,2*n));trim(product);return product}function useKaratsuba(l1,l2){return-.012*l1-.012*l2+15e-6*l1*l2>0}BigInteger.prototype.multiply=function(v){var n=parseValue(v),a=this.value,b=n.value,sign=this.sign!==n.sign,abs;if(n.isSmall){if(b===0)return Integer[0];if(b===1)return this;if(b===-1)return 
this.negate();abs=Math.abs(b);if(abs=0;shift--){quotientDigit=base-1;if(remainder[shift+b_l]!==divisorMostSignificantDigit){quotientDigit=Math.floor((remainder[shift+b_l]*base+remainder[shift+b_l-1])/divisorMostSignificantDigit)}carry=0;borrow=0;l=divisor.length;for(i=0;ib_l){highx=(highx+1)*base}guess=Math.ceil(highx/highy);do{check=multiplySmall(b,guess);if(compareAbs(check,part)<=0)break;guess--}while(guess);result.push(guess);part=subtract(part,check)}result.reverse();return[arrayToSmall(result),arrayToSmall(part)]}function divModSmall(value,lambda){var length=value.length,quotient=createArray(length),base=BASE,i,q,remainder,divisor;remainder=0;for(i=length-1;i>=0;--i){divisor=remainder*base+value[i];q=truncate(divisor/lambda);remainder=divisor-q*lambda;quotient[i]=q|0}return[quotient,remainder|0]}function divModAny(self,v){var value,n=parseValue(v);var a=self.value,b=n.value;var quotient;if(b===0)throw new Error("Cannot divide by zero");if(self.isSmall){if(n.isSmall){return[new SmallInteger(truncate(a/b)),new SmallInteger(a%b)]}return[Integer[0],self]}if(n.isSmall){if(b===1)return[self,Integer[0]];if(b==-1)return[self.negate(),Integer[0]];var abs=Math.abs(b);if(absb.length?1:-1}for(var i=a.length-1;i>=0;i--){if(a[i]!==b[i])return a[i]>b[i]?1:-1}return 0}BigInteger.prototype.compareAbs=function(v){var n=parseValue(v),a=this.value,b=n.value;if(n.isSmall)return 1;return compareAbs(a,b)};SmallInteger.prototype.compareAbs=function(v){var n=parseValue(v),a=Math.abs(this.value),b=n.value;if(n.isSmall){b=Math.abs(b);return a===b?0:a>b?1:-1}return-1};BigInteger.prototype.compare=function(v){if(v===Infinity){return-1}if(v===-Infinity){return 1}var n=parseValue(v),a=this.value,b=n.value;if(this.sign!==n.sign){return n.sign?1:-1}if(n.isSmall){return this.sign?-1:1}return compareAbs(a,b)*(this.sign?-1:1)};BigInteger.prototype.compareTo=BigInteger.prototype.compare;SmallInteger.prototype.compare=function(v){if(v===Infinity){return-1}if(v===-Infinity){return 1}var 
n=parseValue(v),a=this.value,b=n.value;if(n.isSmall){return a==b?0:a>b?1:-1}if(a<0!==n.sign){return a<0?-1:1}return a<0?1:-1};SmallInteger.prototype.compareTo=SmallInteger.prototype.compare;BigInteger.prototype.equals=function(v){return this.compare(v)===0};SmallInteger.prototype.eq=SmallInteger.prototype.equals=BigInteger.prototype.eq=BigInteger.prototype.equals;BigInteger.prototype.notEquals=function(v){return this.compare(v)!==0};SmallInteger.prototype.neq=SmallInteger.prototype.notEquals=BigInteger.prototype.neq=BigInteger.prototype.notEquals;BigInteger.prototype.greater=function(v){return this.compare(v)>0};SmallInteger.prototype.gt=SmallInteger.prototype.greater=BigInteger.prototype.gt=BigInteger.prototype.greater;BigInteger.prototype.lesser=function(v){return this.compare(v)<0};SmallInteger.prototype.lt=SmallInteger.prototype.lesser=BigInteger.prototype.lt=BigInteger.prototype.lesser;BigInteger.prototype.greaterOrEquals=function(v){return this.compare(v)>=0};SmallInteger.prototype.geq=SmallInteger.prototype.greaterOrEquals=BigInteger.prototype.geq=BigInteger.prototype.greaterOrEquals;BigInteger.prototype.lesserOrEquals=function(v){return this.compare(v)<=0};SmallInteger.prototype.leq=SmallInteger.prototype.lesserOrEquals=BigInteger.prototype.leq=BigInteger.prototype.lesserOrEquals;BigInteger.prototype.isEven=function(){return(this.value[0]&1)===0};SmallInteger.prototype.isEven=function(){return(this.value&1)===0};BigInteger.prototype.isOdd=function(){return(this.value[0]&1)===1};SmallInteger.prototype.isOdd=function(){return(this.value&1)===1};BigInteger.prototype.isPositive=function(){return!this.sign};SmallInteger.prototype.isPositive=function(){return this.value>0};BigInteger.prototype.isNegative=function(){return this.sign};SmallInteger.prototype.isNegative=function(){return this.value<0};BigInteger.prototype.isUnit=function(){return false};SmallInteger.prototype.isUnit=function(){return 
Math.abs(this.value)===1};BigInteger.prototype.isZero=function(){return false};SmallInteger.prototype.isZero=function(){return this.value===0};BigInteger.prototype.isDivisibleBy=function(v){var n=parseValue(v);var value=n.value;if(value===0)return false;if(value===1)return true;if(value===2)return this.isEven();return this.mod(n).equals(Integer[0])};SmallInteger.prototype.isDivisibleBy=BigInteger.prototype.isDivisibleBy;function isBasicPrime(v){var n=v.abs();if(n.isUnit())return false;if(n.equals(2)||n.equals(3)||n.equals(5))return true;if(n.isEven()||n.isDivisibleBy(3)||n.isDivisibleBy(5))return false;if(n.lesser(25))return true}BigInteger.prototype.isPrime=function(){var isPrime=isBasicPrime(this);if(isPrime!==undefined)return isPrime;var n=this.abs(),nPrev=n.prev();var a=[2,3,5,7,11,13,17,19],b=nPrev,d,t,i,x;while(b.isEven())b=b.divide(2);for(i=0;i-MAX_INT)return new SmallInteger(value-1);return new BigInteger(MAX_INT_ARR,true)};var powersOfTwo=[1];while(2*powersOfTwo[powersOfTwo.length-1]<=BASE)powersOfTwo.push(2*powersOfTwo[powersOfTwo.length-1]);var powers2Length=powersOfTwo.length,highestPower2=powersOfTwo[powers2Length-1];function shift_isSmall(n){return(typeof n==="number"||typeof n==="string")&&+Math.abs(n)<=BASE||n instanceof BigInteger&&n.value.length<=1}BigInteger.prototype.shiftLeft=function(n){if(!shift_isSmall(n)){throw new Error(String(n)+" is too large for shifting.")}n=+n;if(n<0)return this.shiftRight(-n);var result=this;while(n>=powers2Length){result=result.multiply(highestPower2);n-=powers2Length-1}return result.multiply(powersOfTwo[n])};SmallInteger.prototype.shiftLeft=BigInteger.prototype.shiftLeft;BigInteger.prototype.shiftRight=function(n){var remQuo;if(!shift_isSmall(n)){throw new Error(String(n)+" is too large for shifting.")}n=+n;if(n<0)return this.shiftLeft(-n);var result=this;while(n>=powers2Length){if(result.isZero())return 
result;remQuo=divModAny(result,highestPower2);result=remQuo[1].isNegative()?remQuo[0].prev():remQuo[0];n-=powers2Length-1}remQuo=divModAny(result,powersOfTwo[n]);return remQuo[1].isNegative()?remQuo[0].prev():remQuo[0]};SmallInteger.prototype.shiftRight=BigInteger.prototype.shiftRight;function bitwise(x,y,fn){y=parseValue(y);var xSign=x.isNegative(),ySign=y.isNegative();var xRem=xSign?x.not():x,yRem=ySign?y.not():y;var xDigit=0,yDigit=0;var xDivMod=null,yDivMod=null;var result=[];while(!xRem.isZero()||!yRem.isZero()){xDivMod=divModAny(xRem,highestPower2);xDigit=xDivMod[1].toJSNumber();if(xSign){xDigit=highestPower2-1-xDigit}yDivMod=divModAny(yRem,highestPower2);yDigit=yDivMod[1].toJSNumber();if(ySign){yDigit=highestPower2-1-yDigit}xRem=xDivMod[0];yRem=yDivMod[0];result.push(fn(xDigit,yDigit))}var sum=fn(xSign?1:0,ySign?1:0)!==0?bigInt(-1):bigInt(0);for(var i=result.length-1;i>=0;i-=1){sum=sum.multiply(highestPower2).add(bigInt(result[i]))}return sum}BigInteger.prototype.not=function(){return this.negate().prev()};SmallInteger.prototype.not=BigInteger.prototype.not;BigInteger.prototype.and=function(n){return bitwise(this,n,function(a,b){return a&b})};SmallInteger.prototype.and=BigInteger.prototype.and;BigInteger.prototype.or=function(n){return bitwise(this,n,function(a,b){return a|b})};SmallInteger.prototype.or=BigInteger.prototype.or;BigInteger.prototype.xor=function(n){return bitwise(this,n,function(a,b){return a^b})};SmallInteger.prototype.xor=BigInteger.prototype.xor;var LOBMASK_I=1<<30,LOBMASK_BI=(BASE&-BASE)*(BASE&-BASE)|LOBMASK_I;function roughLOB(n){var v=n.value,x=typeof v==="number"?v|LOBMASK_I:v[0]+v[1]*BASE|LOBMASK_BI;return x&-x}function max(a,b){a=parseValue(a);b=parseValue(b);return a.greater(b)?a:b}function min(a,b){a=parseValue(a);b=parseValue(b);return a.lesser(b)?a:b}function gcd(a,b){a=parseValue(a).abs();b=parseValue(b).abs();if(a.equals(b))return a;if(a.isZero())return b;if(b.isZero())return a;var 
c=Integer[1],d,t;while(a.isEven()&&b.isEven()){d=Math.min(roughLOB(a),roughLOB(b));a=a.divide(d);b=b.divide(d);c=c.multiply(d)}while(a.isEven()){a=a.divide(roughLOB(a))}do{while(b.isEven()){b=b.divide(roughLOB(b))}if(a.greater(b)){t=b;b=a;a=t}b=b.subtract(a)}while(!b.isZero());return c.isUnit()?a:a.multiply(c)}function lcm(a,b){a=parseValue(a).abs();b=parseValue(b).abs();return a.divide(gcd(a,b)).multiply(b)}function randBetween(a,b){a=parseValue(a);b=parseValue(b);var low=min(a,b),high=max(a,b);var range=high.subtract(low).add(1);if(range.isSmall)return low.add(Math.floor(Math.random()*range));var length=range.value.length-1;var result=[],restricted=true;for(var i=length;i>=0;i--){var top=restricted?range.value[i]:BASE;var digit=truncate(Math.random()*top);result.unshift(digit);if(digit=absBase){if(c==="1"&&absBase===1)continue;throw new Error(c+" is not a valid digit in base "+base+".")}else if(c.charCodeAt(0)-87>=absBase){throw new Error(c+" is not a valid digit in base "+base+".")}}}if(2<=base&&base<=36){if(length<=LOG_MAX_INT/Math.log(base)){var result=parseInt(text,base);if(isNaN(result)){throw new Error(c+" is not a valid digit in base "+base+".")}return new SmallInteger(parseInt(text,base))}}base=parseValue(base);var digits=[];var isNegative=text[0]==="-";for(i=isNegative?1:0;i");digits.push(parseValue(text.slice(start+1,i)))}else throw new Error(c+" is not a valid character")}return parseBaseFromArray(digits,base,isNegative)};function parseBaseFromArray(digits,base,isNegative){var val=Integer[0],pow=Integer[1],i;for(i=digits.length-1;i>=0;i--){val=val.add(digits[i].times(pow));pow=pow.times(base)}return isNegative?val.negate():val}function stringify(digit){var v=digit.value;if(typeof v==="number")v=[v];if(v.length===1&&v[0]<=35){return"0123456789abcdefghijklmnopqrstuvwxyz".charAt(v[0])}return"<"+v+">"}function toBase(n,base){base=bigInt(base);if(base.isZero()){if(n.isZero())return"0";throw new Error("Cannot convert nonzero numbers to base 
0.")}if(base.equals(-1)){if(n.isZero())return"0";if(n.isNegative())return new Array(1-n).join("10");return"1"+new Array(+n).join("01")}var minusSign="";if(n.isNegative()&&base.isPositive()){minusSign="-";n=n.abs()}if(base.equals(1)){if(n.isZero())return"0";return minusSign+new Array(+n+1).join(1)}var out=[];var left=n,divmod;while(left.isNegative()||left.compareAbs(base)>=0){divmod=left.divmod(base);left=divmod.quotient;var digit=divmod.remainder;if(digit.isNegative()){digit=base.minus(digit).abs();left=left.next()}out.push(stringify(digit))}out.push(stringify(left));return minusSign+out.reverse().join("")}BigInteger.prototype.toString=function(radix){if(radix===undefined)radix=10;if(radix!==10)return toBase(this,radix);var v=this.value,l=v.length,str=String(v[--l]),zeros="0000000",digit;while(--l>=0){digit=String(v[l]);str+=zeros.slice(digit.length)+digit}var sign=this.sign?"-":"";return sign+str};SmallInteger.prototype.toString=function(radix){if(radix===undefined)radix=10;if(radix!=10)return toBase(this,radix);return String(this.value)};BigInteger.prototype.toJSON=SmallInteger.prototype.toJSON=function(){return this.toString()};BigInteger.prototype.valueOf=function(){return+this.toString()};BigInteger.prototype.toJSNumber=BigInteger.prototype.valueOf;SmallInteger.prototype.valueOf=function(){return this.value};SmallInteger.prototype.toJSNumber=SmallInteger.prototype.valueOf;function parseStringValue(v){if(isPrecise(+v)){var x=+v;if(x===truncate(x))return new SmallInteger(x);throw"Invalid integer: "+v}var sign=v[0]==="-";if(sign)v=v.slice(1);var split=v.split(/e/i);if(split.length>2)throw new Error("Invalid integer: "+split.join("e"));if(split.length===2){var exp=split[1];if(exp[0]==="+")exp=exp.slice(1);exp=+exp;if(exp!==truncate(exp)||!isPrecise(exp))throw new Error("Invalid integer: "+exp+" is not a valid exponent.");var text=split[0];var 
decimalPlace=text.indexOf(".");if(decimalPlace>=0){exp-=text.length-decimalPlace-1;text=text.slice(0,decimalPlace)+text.slice(decimalPlace+1)}if(exp<0)throw new Error("Cannot include negative exponent part for integers");text+=new Array(exp+1).join("0");v=text}var isValid=/^([0-9][0-9]*)$/.test(v);if(!isValid)throw new Error("Invalid integer: "+v);var r=[],max=v.length,l=LOG_BASE,min=max-l;while(max>0){r.push(+v.slice(min,max));min-=l;if(min<0)min=0;max-=l}trim(r);return new BigInteger(r,sign)}function parseNumberValue(v){if(isPrecise(v)){if(v!==truncate(v))throw new Error(v+" is not an integer.");return new SmallInteger(v)}return parseStringValue(v.toString())}function parseValue(v){if(typeof v==="number"){return parseNumberValue(v)}if(typeof v==="string"){return parseStringValue(v)}return v}for(var i=0;i<1e3;i++){Integer[i]=new SmallInteger(i);if(i>0)Integer[-i]=new SmallInteger(-i)}Integer.one=Integer[1];Integer.zero=Integer[0];Integer.minusOne=Integer[-1];Integer.max=max;Integer.min=min;Integer.gcd=gcd;Integer.lcm=lcm;Integer.isInstance=function(x){return x instanceof BigInteger||x instanceof SmallInteger};Integer.randBetween=randBetween;Integer.fromArray=function(digits,base,isNegative){return parseBaseFromArray(digits.map(parseValue),parseValue(base||10),isNegative)};return Integer}();if(typeof module!=="undefined"&&module.hasOwnProperty("exports")){module.exports=bigInt}if(typeof define==="function"&&define.amd){define("big-integer",[],function(){return bigInt})}; bigInt` diff --git a/coreth/eth/tracers/js/goja.go b/coreth/eth/tracers/js/goja.go new file mode 100644 index 00000000..4a55f06d --- /dev/null +++ b/coreth/eth/tracers/js/goja.go @@ -0,0 +1,957 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. 
+// +// Much love to the original authors for their work. +// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package js + +import ( + "encoding/json" + "errors" + "fmt" + "math/big" + "time" + + "github.com/dop251/goja" + + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/eth/tracers" + jsassets "github.com/ava-labs/coreth/eth/tracers/js/internal/tracers" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" +) + +var assetTracers = make(map[string]string) + +// init retrieves the JavaScript transaction tracers included in go-ethereum. +func init() { + var err error + assetTracers, err = jsassets.Load() + if err != nil { + panic(err) + } + tracers.RegisterLookup(true, newJsTracer) +} + +// bigIntProgram is compiled once and the exported function mostly invoked to convert +// hex strings into big ints. 
+var bigIntProgram = goja.MustCompile("bigInt", bigIntegerJS, false) + +type toBigFn = func(vm *goja.Runtime, val string) (goja.Value, error) +type toBufFn = func(vm *goja.Runtime, val []byte) (goja.Value, error) +type fromBufFn = func(vm *goja.Runtime, buf goja.Value, allowString bool) ([]byte, error) + +func toBuf(vm *goja.Runtime, bufType goja.Value, val []byte) (goja.Value, error) { + // bufType is usually Uint8Array. This is equivalent to `new Uint8Array(val)` in JS. + return vm.New(bufType, vm.ToValue(vm.NewArrayBuffer(val))) +} + +func fromBuf(vm *goja.Runtime, bufType goja.Value, buf goja.Value, allowString bool) ([]byte, error) { + obj := buf.ToObject(vm) + switch obj.ClassName() { + case "String": + if !allowString { + break + } + return common.FromHex(obj.String()), nil + + case "Array": + var b []byte + if err := vm.ExportTo(buf, &b); err != nil { + return nil, err + } + return b, nil + + case "Object": + if !obj.Get("constructor").SameAs(bufType) { + break + } + b := obj.Get("buffer").Export().(goja.ArrayBuffer).Bytes() + return b, nil + } + return nil, fmt.Errorf("invalid buffer type") +} + +// jsTracer is an implementation of the Tracer interface which evaluates +// JS functions on the relevant EVM hooks. It uses Goja as its JS engine. 
+type jsTracer struct { + vm *goja.Runtime + env *vm.EVM + toBig toBigFn // Converts a hex string into a JS bigint + toBuf toBufFn // Converts a []byte into a JS buffer + fromBuf fromBufFn // Converts an array, hex string or Uint8Array to a []byte + ctx map[string]goja.Value // KV-bag passed to JS in `result` + activePrecompiles []common.Address // List of active precompiles at current block + traceStep bool // True if tracer object exposes a `step()` method + traceFrame bool // True if tracer object exposes the `enter()` and `exit()` methods + gasLimit uint64 // Amount of gas bought for the whole tx + err error // Any error that should stop tracing + obj *goja.Object // Trace object + + // Methods exposed by tracer + result goja.Callable + fault goja.Callable + step goja.Callable + enter goja.Callable + exit goja.Callable + + // Underlying structs being passed into JS + log *steplog + frame *callframe + frameResult *callframeResult + + // Goja-wrapping of types prepared for JS consumption + logValue goja.Value + dbValue goja.Value + frameValue goja.Value + frameResultValue goja.Value +} + +// newJsTracer instantiates a new JS tracer instance. code is either +// the name of a built-in JS tracer or a Javascript snippet which +// evaluates to an expression returning an object with certain methods. +// The methods `result` and `fault` are required to be present. +// The methods `step`, `enter`, and `exit` are optional, but note that +// `enter` and `exit` always go together. +func newJsTracer(code string, ctx *tracers.Context, cfg json.RawMessage) (tracers.Tracer, error) { + if c, ok := assetTracers[code]; ok { + code = c + } + vm := goja.New() + // By default field names are exported to JS as is, i.e. capitalized. 
+ vm.SetFieldNameMapper(goja.UncapFieldNameMapper()) + t := &jsTracer{ + vm: vm, + ctx: make(map[string]goja.Value), + } + if ctx == nil { + ctx = new(tracers.Context) + } + if ctx.BlockHash != (common.Hash{}) { + t.ctx["blockHash"] = vm.ToValue(ctx.BlockHash.Bytes()) + if ctx.TxHash != (common.Hash{}) { + t.ctx["txIndex"] = vm.ToValue(ctx.TxIndex) + t.ctx["txHash"] = vm.ToValue(ctx.TxHash.Bytes()) + } + } + + t.setTypeConverters() + t.setBuiltinFunctions() + ret, err := vm.RunString("(" + code + ")") + if err != nil { + return nil, err + } + // Check tracer's interface for required and optional methods. + obj := ret.ToObject(vm) + result, ok := goja.AssertFunction(obj.Get("result")) + if !ok { + return nil, errors.New("trace object must expose a function result()") + } + fault, ok := goja.AssertFunction(obj.Get("fault")) + if !ok { + return nil, errors.New("trace object must expose a function fault()") + } + step, ok := goja.AssertFunction(obj.Get("step")) + t.traceStep = ok + enter, hasEnter := goja.AssertFunction(obj.Get("enter")) + exit, hasExit := goja.AssertFunction(obj.Get("exit")) + if hasEnter != hasExit { + return nil, errors.New("trace object must expose either both or none of enter() and exit()") + } + t.traceFrame = hasEnter + t.obj = obj + t.step = step + t.enter = enter + t.exit = exit + t.result = result + t.fault = fault + + // Pass in config + if setup, ok := goja.AssertFunction(obj.Get("setup")); ok { + cfgStr := "{}" + if cfg != nil { + cfgStr = string(cfg) + } + if _, err := setup(obj, vm.ToValue(cfgStr)); err != nil { + return nil, err + } + } + // Setup objects carrying data to JS. These are created once and re-used. 
+ t.log = &steplog{ + vm: vm, + op: &opObj{vm: vm}, + memory: &memoryObj{vm: vm, toBig: t.toBig, toBuf: t.toBuf}, + stack: &stackObj{vm: vm, toBig: t.toBig}, + contract: &contractObj{vm: vm, toBig: t.toBig, toBuf: t.toBuf}, + } + t.frame = &callframe{vm: vm, toBig: t.toBig, toBuf: t.toBuf} + t.frameResult = &callframeResult{vm: vm, toBuf: t.toBuf} + t.frameValue = t.frame.setupObject() + t.frameResultValue = t.frameResult.setupObject() + t.logValue = t.log.setupObject() + return t, nil +} + +// CaptureTxStart implements the Tracer interface and is invoked at the beginning of +// transaction processing. +func (t *jsTracer) CaptureTxStart(gasLimit uint64) { + t.gasLimit = gasLimit +} + +// CaptureTxStart implements the Tracer interface and is invoked at the end of +// transaction processing. +func (t *jsTracer) CaptureTxEnd(restGas uint64) {} + +// CaptureStart implements the Tracer interface to initialize the tracing operation. +func (t *jsTracer) CaptureStart(env *vm.EVM, from common.Address, to common.Address, create bool, input []byte, gas uint64, value *big.Int) { + t.env = env + db := &dbObj{db: env.StateDB, vm: t.vm, toBig: t.toBig, toBuf: t.toBuf, fromBuf: t.fromBuf} + t.dbValue = db.setupObject() + if create { + t.ctx["type"] = t.vm.ToValue("CREATE") + } else { + t.ctx["type"] = t.vm.ToValue("CALL") + } + t.ctx["from"] = t.vm.ToValue(from.Bytes()) + t.ctx["to"] = t.vm.ToValue(to.Bytes()) + t.ctx["input"] = t.vm.ToValue(input) + t.ctx["gas"] = t.vm.ToValue(gas) + t.ctx["gasPrice"] = t.vm.ToValue(env.TxContext.GasPrice) + valueBig, err := t.toBig(t.vm, value.String()) + if err != nil { + t.err = err + return + } + t.ctx["value"] = valueBig + t.ctx["block"] = t.vm.ToValue(env.Context.BlockNumber.Uint64()) + // Update list of precompiles based on current block + rules := env.ChainConfig().AvalancheRules(env.Context.BlockNumber, env.Context.Time) + t.activePrecompiles = vm.ActivePrecompiles(rules) + t.ctx["intrinsicGas"] = t.vm.ToValue(t.gasLimit - gas) +} + +// 
CaptureState implements the Tracer interface to trace a single step of VM execution. +func (t *jsTracer) CaptureState(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, rData []byte, depth int, err error) { + if !t.traceStep { + return + } + if t.err != nil { + return + } + + log := t.log + log.op.op = op + log.memory.memory = scope.Memory + log.stack.stack = scope.Stack + log.contract.contract = scope.Contract + log.pc = pc + log.gas = gas + log.cost = cost + log.refund = t.env.StateDB.GetRefund() + log.depth = depth + log.err = err + if _, err := t.step(t.obj, t.logValue, t.dbValue); err != nil { + t.onError("step", err) + } +} + +// CaptureFault implements the Tracer interface to trace an execution fault +func (t *jsTracer) CaptureFault(pc uint64, op vm.OpCode, gas, cost uint64, scope *vm.ScopeContext, depth int, err error) { + if t.err != nil { + return + } + // Other log fields have been already set as part of the last CaptureState. + t.log.err = err + if _, err := t.fault(t.obj, t.logValue, t.dbValue); err != nil { + t.onError("fault", err) + } +} + +// CaptureEnd is called after the call finishes to finalize the tracing. +func (t *jsTracer) CaptureEnd(output []byte, gasUsed uint64, duration time.Duration, err error) { + t.ctx["output"] = t.vm.ToValue(output) + t.ctx["time"] = t.vm.ToValue(duration.String()) + t.ctx["gasUsed"] = t.vm.ToValue(gasUsed) + if err != nil { + t.ctx["error"] = t.vm.ToValue(err.Error()) + } +} + +// CaptureEnter is called when EVM enters a new scope (via call, create or selfdestruct). 
+func (t *jsTracer) CaptureEnter(typ vm.OpCode, from common.Address, to common.Address, input []byte, gas uint64, value *big.Int) { + if !t.traceFrame { + return + } + if t.err != nil { + return + } + + t.frame.typ = typ.String() + t.frame.from = from + t.frame.to = to + t.frame.input = common.CopyBytes(input) + t.frame.gas = uint(gas) + t.frame.value = nil + if value != nil { + t.frame.value = new(big.Int).SetBytes(value.Bytes()) + } + + if _, err := t.enter(t.obj, t.frameValue); err != nil { + t.onError("enter", err) + } +} + +// CaptureExit is called when EVM exits a scope, even if the scope didn't +// execute any code. +func (t *jsTracer) CaptureExit(output []byte, gasUsed uint64, err error) { + if !t.traceFrame { + return + } + + t.frameResult.gasUsed = uint(gasUsed) + t.frameResult.output = common.CopyBytes(output) + t.frameResult.err = err + + if _, err := t.exit(t.obj, t.frameResultValue); err != nil { + t.onError("exit", err) + } +} + +// GetResult calls the Javascript 'result' function and returns its value, or any accumulated error +func (t *jsTracer) GetResult() (json.RawMessage, error) { + ctx := t.vm.ToValue(t.ctx) + res, err := t.result(t.obj, ctx, t.dbValue) + if err != nil { + return nil, wrapError("result", err) + } + encoded, err := json.Marshal(res) + if err != nil { + return nil, err + } + return json.RawMessage(encoded), t.err +} + +// Stop terminates execution of the tracer at the first opportune moment. +func (t *jsTracer) Stop(err error) { + t.vm.Interrupt(err) +} + +// onError is called anytime the running JS code is interrupted +// and returns an error. It in turn pings the EVM to cancel its +// execution. +func (t *jsTracer) onError(context string, err error) { + t.err = wrapError(context, err) + // `env` is set on CaptureStart which comes before any JS execution. + // So it should be non-nil. 
+ t.env.Cancel() +} + +func wrapError(context string, err error) error { + return fmt.Errorf("%v in server-side tracer function '%v'", err, context) +} + +// setBuiltinFunctions injects Go functions which are available to tracers into the environment. +// It depends on type converters having been set up. +func (t *jsTracer) setBuiltinFunctions() { + vm := t.vm + // TODO: load console from goja-nodejs + vm.Set("toHex", func(v goja.Value) string { + b, err := t.fromBuf(vm, v, false) + if err != nil { + vm.Interrupt(err) + return "" + } + return hexutil.Encode(b) + }) + vm.Set("toWord", func(v goja.Value) goja.Value { + // TODO: add test with []byte len < 32 or > 32 + b, err := t.fromBuf(vm, v, true) + if err != nil { + vm.Interrupt(err) + return nil + } + b = common.BytesToHash(b).Bytes() + res, err := t.toBuf(vm, b) + if err != nil { + vm.Interrupt(err) + return nil + } + return res + }) + vm.Set("toAddress", func(v goja.Value) goja.Value { + a, err := t.fromBuf(vm, v, true) + if err != nil { + vm.Interrupt(err) + return nil + } + a = common.BytesToAddress(a).Bytes() + res, err := t.toBuf(vm, a) + if err != nil { + vm.Interrupt(err) + return nil + } + return res + }) + vm.Set("toContract", func(from goja.Value, nonce uint) goja.Value { + a, err := t.fromBuf(vm, from, true) + if err != nil { + vm.Interrupt(err) + return nil + } + addr := common.BytesToAddress(a) + b := crypto.CreateAddress(addr, uint64(nonce)).Bytes() + res, err := t.toBuf(vm, b) + if err != nil { + vm.Interrupt(err) + return nil + } + return res + }) + vm.Set("toContract2", func(from goja.Value, salt string, initcode goja.Value) goja.Value { + a, err := t.fromBuf(vm, from, true) + if err != nil { + vm.Interrupt(err) + return nil + } + addr := common.BytesToAddress(a) + code, err := t.fromBuf(vm, initcode, true) + if err != nil { + vm.Interrupt(err) + return nil + } + code = common.CopyBytes(code) + codeHash := crypto.Keccak256(code) + b := crypto.CreateAddress2(addr, common.HexToHash(salt), 
codeHash).Bytes() + res, err := t.toBuf(vm, b) + if err != nil { + vm.Interrupt(err) + return nil + } + return res + }) + vm.Set("isPrecompiled", func(v goja.Value) bool { + a, err := t.fromBuf(vm, v, true) + if err != nil { + vm.Interrupt(err) + return false + } + addr := common.BytesToAddress(a) + for _, p := range t.activePrecompiles { + if p == addr { + return true + } + } + return false + }) + vm.Set("slice", func(slice goja.Value, start, end int) goja.Value { + b, err := t.fromBuf(vm, slice, false) + if err != nil { + vm.Interrupt(err) + return nil + } + if start < 0 || start > end || end > len(b) { + vm.Interrupt(fmt.Sprintf("Tracer accessed out of bound memory: available %d, offset %d, size %d", len(b), start, end-start)) + return nil + } + res, err := t.toBuf(vm, b[start:end]) + if err != nil { + vm.Interrupt(err) + return nil + } + return res + }) +} + +// setTypeConverters sets up utilities for converting Go types into those +// suitable for JS consumption. +func (t *jsTracer) setTypeConverters() error { + // Inject bigint logic. + // TODO: To be replaced after goja adds support for native JS bigint. + toBigCode, err := t.vm.RunProgram(bigIntProgram) + if err != nil { + return err + } + // Used to create JS bigint objects from go. + toBigFn, ok := goja.AssertFunction(toBigCode) + if !ok { + return errors.New("failed to bind bigInt func") + } + toBigWrapper := func(vm *goja.Runtime, val string) (goja.Value, error) { + return toBigFn(goja.Undefined(), vm.ToValue(val)) + } + t.toBig = toBigWrapper + // NOTE: We need this workaround to create JS buffers because + // goja doesn't at the moment expose constructors for typed arrays. + // + // Cache uint8ArrayType once to be used every time for less overhead. 
+ uint8ArrayType := t.vm.Get("Uint8Array") + toBufWrapper := func(vm *goja.Runtime, val []byte) (goja.Value, error) { + return toBuf(vm, uint8ArrayType, val) + } + t.toBuf = toBufWrapper + fromBufWrapper := func(vm *goja.Runtime, buf goja.Value, allowString bool) ([]byte, error) { + return fromBuf(vm, uint8ArrayType, buf, allowString) + } + t.fromBuf = fromBufWrapper + return nil +} + +type opObj struct { + vm *goja.Runtime + op vm.OpCode +} + +func (o *opObj) ToNumber() int { + return int(o.op) +} + +func (o *opObj) ToString() string { + return o.op.String() +} + +func (o *opObj) IsPush() bool { + return o.op.IsPush() +} + +func (o *opObj) setupObject() *goja.Object { + obj := o.vm.NewObject() + obj.Set("toNumber", o.vm.ToValue(o.ToNumber)) + obj.Set("toString", o.vm.ToValue(o.ToString)) + obj.Set("isPush", o.vm.ToValue(o.IsPush)) + return obj +} + +type memoryObj struct { + memory *vm.Memory + vm *goja.Runtime + toBig toBigFn + toBuf toBufFn +} + +func (mo *memoryObj) Slice(begin, end int64) goja.Value { + b, err := mo.slice(begin, end) + if err != nil { + mo.vm.Interrupt(err) + return nil + } + res, err := mo.toBuf(mo.vm, b) + if err != nil { + mo.vm.Interrupt(err) + return nil + } + return res +} + +// slice returns the requested range of memory as a byte slice. 
+func (mo *memoryObj) slice(begin, end int64) ([]byte, error) { + if end == begin { + return []byte{}, nil + } + if end < begin || begin < 0 { + return nil, fmt.Errorf("tracer accessed out of bound memory: offset %d, end %d", begin, end) + } + if mo.memory.Len() < int(end) { + return nil, fmt.Errorf("tracer accessed out of bound memory: available %d, offset %d, size %d", mo.memory.Len(), begin, end-begin) + } + return mo.memory.GetCopy(begin, end-begin), nil +} + +func (mo *memoryObj) GetUint(addr int64) goja.Value { + value, err := mo.getUint(addr) + if err != nil { + mo.vm.Interrupt(err) + return nil + } + res, err := mo.toBig(mo.vm, value.String()) + if err != nil { + mo.vm.Interrupt(err) + return nil + } + return res +} + +// getUint returns the 32 bytes at the specified address interpreted as a uint. +func (mo *memoryObj) getUint(addr int64) (*big.Int, error) { + if mo.memory.Len() < int(addr)+32 || addr < 0 { + return nil, fmt.Errorf("tracer accessed out of bound memory: available %d, offset %d, size %d", mo.memory.Len(), addr, 32) + } + return new(big.Int).SetBytes(mo.memory.GetPtr(addr, 32)), nil +} + +func (mo *memoryObj) Length() int { + return mo.memory.Len() +} + +func (m *memoryObj) setupObject() *goja.Object { + o := m.vm.NewObject() + o.Set("slice", m.vm.ToValue(m.Slice)) + o.Set("getUint", m.vm.ToValue(m.GetUint)) + o.Set("length", m.vm.ToValue(m.Length)) + return o +} + +type stackObj struct { + stack *vm.Stack + vm *goja.Runtime + toBig toBigFn +} + +func (s *stackObj) Peek(idx int) goja.Value { + value, err := s.peek(idx) + if err != nil { + s.vm.Interrupt(err) + return nil + } + res, err := s.toBig(s.vm, value.String()) + if err != nil { + s.vm.Interrupt(err) + return nil + } + return res +} + +// peek returns the nth-from-the-top element of the stack. 
+func (s *stackObj) peek(idx int) (*big.Int, error) { + if len(s.stack.Data()) <= idx || idx < 0 { + return nil, fmt.Errorf("tracer accessed out of bound stack: size %d, index %d", len(s.stack.Data()), idx) + } + return s.stack.Back(idx).ToBig(), nil +} + +func (s *stackObj) Length() int { + return len(s.stack.Data()) +} + +func (s *stackObj) setupObject() *goja.Object { + o := s.vm.NewObject() + o.Set("peek", s.vm.ToValue(s.Peek)) + o.Set("length", s.vm.ToValue(s.Length)) + return o +} + +type dbObj struct { + db vm.StateDB + vm *goja.Runtime + toBig toBigFn + toBuf toBufFn + fromBuf fromBufFn +} + +func (do *dbObj) GetBalance(addrSlice goja.Value) goja.Value { + a, err := do.fromBuf(do.vm, addrSlice, false) + if err != nil { + do.vm.Interrupt(err) + return nil + } + addr := common.BytesToAddress(a) + value := do.db.GetBalance(addr) + res, err := do.toBig(do.vm, value.String()) + if err != nil { + do.vm.Interrupt(err) + return nil + } + return res +} + +func (do *dbObj) GetNonce(addrSlice goja.Value) uint64 { + a, err := do.fromBuf(do.vm, addrSlice, false) + if err != nil { + do.vm.Interrupt(err) + return 0 + } + addr := common.BytesToAddress(a) + return do.db.GetNonce(addr) +} + +func (do *dbObj) GetCode(addrSlice goja.Value) goja.Value { + a, err := do.fromBuf(do.vm, addrSlice, false) + if err != nil { + do.vm.Interrupt(err) + return nil + } + addr := common.BytesToAddress(a) + code := do.db.GetCode(addr) + res, err := do.toBuf(do.vm, code) + if err != nil { + do.vm.Interrupt(err) + return nil + } + return res +} + +func (do *dbObj) GetState(addrSlice goja.Value, hashSlice goja.Value) goja.Value { + a, err := do.fromBuf(do.vm, addrSlice, false) + if err != nil { + do.vm.Interrupt(err) + return nil + } + addr := common.BytesToAddress(a) + h, err := do.fromBuf(do.vm, hashSlice, false) + if err != nil { + do.vm.Interrupt(err) + return nil + } + hash := common.BytesToHash(h) + state := do.db.GetState(addr, hash).Bytes() + res, err := do.toBuf(do.vm, state) + if err 
!= nil { + do.vm.Interrupt(err) + return nil + } + return res +} + +func (do *dbObj) Exists(addrSlice goja.Value) bool { + a, err := do.fromBuf(do.vm, addrSlice, false) + if err != nil { + do.vm.Interrupt(err) + return false + } + addr := common.BytesToAddress(a) + return do.db.Exist(addr) +} + +func (do *dbObj) setupObject() *goja.Object { + o := do.vm.NewObject() + o.Set("getBalance", do.vm.ToValue(do.GetBalance)) + o.Set("getNonce", do.vm.ToValue(do.GetNonce)) + o.Set("getCode", do.vm.ToValue(do.GetCode)) + o.Set("getState", do.vm.ToValue(do.GetState)) + o.Set("exists", do.vm.ToValue(do.Exists)) + return o +} + +type contractObj struct { + contract *vm.Contract + vm *goja.Runtime + toBig toBigFn + toBuf toBufFn +} + +func (co *contractObj) GetCaller() goja.Value { + caller := co.contract.Caller().Bytes() + res, err := co.toBuf(co.vm, caller) + if err != nil { + co.vm.Interrupt(err) + return nil + } + return res +} + +func (co *contractObj) GetAddress() goja.Value { + addr := co.contract.Address().Bytes() + res, err := co.toBuf(co.vm, addr) + if err != nil { + co.vm.Interrupt(err) + return nil + } + return res +} + +func (co *contractObj) GetValue() goja.Value { + value := co.contract.Value() + res, err := co.toBig(co.vm, value.String()) + if err != nil { + co.vm.Interrupt(err) + return nil + } + return res +} + +func (co *contractObj) GetInput() goja.Value { + input := common.CopyBytes(co.contract.Input) + res, err := co.toBuf(co.vm, input) + if err != nil { + co.vm.Interrupt(err) + return nil + } + return res +} + +func (c *contractObj) setupObject() *goja.Object { + o := c.vm.NewObject() + o.Set("getCaller", c.vm.ToValue(c.GetCaller)) + o.Set("getAddress", c.vm.ToValue(c.GetAddress)) + o.Set("getValue", c.vm.ToValue(c.GetValue)) + o.Set("getInput", c.vm.ToValue(c.GetInput)) + return o +} + +type callframe struct { + vm *goja.Runtime + toBig toBigFn + toBuf toBufFn + + typ string + from common.Address + to common.Address + input []byte + gas uint + value 
*big.Int +} + +func (f *callframe) GetType() string { + return f.typ +} + +func (f *callframe) GetFrom() goja.Value { + from := f.from.Bytes() + res, err := f.toBuf(f.vm, from) + if err != nil { + f.vm.Interrupt(err) + return nil + } + return res +} + +func (f *callframe) GetTo() goja.Value { + to := f.to.Bytes() + res, err := f.toBuf(f.vm, to) + if err != nil { + f.vm.Interrupt(err) + return nil + } + return res +} + +func (f *callframe) GetInput() goja.Value { + input := f.input + res, err := f.toBuf(f.vm, input) + if err != nil { + f.vm.Interrupt(err) + return nil + } + return res +} + +func (f *callframe) GetGas() uint { + return f.gas +} + +func (f *callframe) GetValue() goja.Value { + if f.value == nil { + return goja.Undefined() + } + res, err := f.toBig(f.vm, f.value.String()) + if err != nil { + f.vm.Interrupt(err) + return nil + } + return res +} + +func (f *callframe) setupObject() *goja.Object { + o := f.vm.NewObject() + o.Set("getType", f.vm.ToValue(f.GetType)) + o.Set("getFrom", f.vm.ToValue(f.GetFrom)) + o.Set("getTo", f.vm.ToValue(f.GetTo)) + o.Set("getInput", f.vm.ToValue(f.GetInput)) + o.Set("getGas", f.vm.ToValue(f.GetGas)) + o.Set("getValue", f.vm.ToValue(f.GetValue)) + return o +} + +type callframeResult struct { + vm *goja.Runtime + toBuf toBufFn + + gasUsed uint + output []byte + err error +} + +func (r *callframeResult) GetGasUsed() uint { + return r.gasUsed +} + +func (r *callframeResult) GetOutput() goja.Value { + res, err := r.toBuf(r.vm, r.output) + if err != nil { + r.vm.Interrupt(err) + return nil + } + return res +} + +func (r *callframeResult) GetError() goja.Value { + if r.err != nil { + return r.vm.ToValue(r.err.Error()) + } + return goja.Undefined() +} + +func (r *callframeResult) setupObject() *goja.Object { + o := r.vm.NewObject() + o.Set("getGasUsed", r.vm.ToValue(r.GetGasUsed)) + o.Set("getOutput", r.vm.ToValue(r.GetOutput)) + o.Set("getError", r.vm.ToValue(r.GetError)) + return o +} + +type steplog struct { + vm *goja.Runtime 
+ + op *opObj + memory *memoryObj + stack *stackObj + contract *contractObj + + pc uint64 + gas uint64 + cost uint64 + depth int + refund uint64 + err error +} + +func (l *steplog) GetPC() uint64 { return l.pc } +func (l *steplog) GetGas() uint64 { return l.gas } +func (l *steplog) GetCost() uint64 { return l.cost } +func (l *steplog) GetDepth() int { return l.depth } +func (l *steplog) GetRefund() uint64 { return l.refund } + +func (l *steplog) GetError() goja.Value { + if l.err != nil { + return l.vm.ToValue(l.err.Error()) + } + return goja.Undefined() +} + +func (l *steplog) setupObject() *goja.Object { + o := l.vm.NewObject() + // Setup basic fields. + o.Set("getPC", l.vm.ToValue(l.GetPC)) + o.Set("getGas", l.vm.ToValue(l.GetGas)) + o.Set("getCost", l.vm.ToValue(l.GetCost)) + o.Set("getDepth", l.vm.ToValue(l.GetDepth)) + o.Set("getRefund", l.vm.ToValue(l.GetRefund)) + o.Set("getError", l.vm.ToValue(l.GetError)) + // Setup nested objects. + o.Set("op", l.op.setupObject()) + o.Set("stack", l.stack.setupObject()) + o.Set("memory", l.memory.setupObject()) + o.Set("contract", l.contract.setupObject()) + return o +} diff --git a/coreth/eth/tracers/js/internal/tracers/4byte_tracer_legacy.js b/coreth/eth/tracers/js/internal/tracers/4byte_tracer_legacy.js new file mode 100644 index 00000000..e4714b8b --- /dev/null +++ b/coreth/eth/tracers/js/internal/tracers/4byte_tracer_legacy.js @@ -0,0 +1,86 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// 4byteTracer searches for 4byte-identifiers, and collects them for post-processing. +// It collects the methods identifiers along with the size of the supplied data, so +// a reversed signature can be matched against the size of the data. +// +// Example: +// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"}) +// { +// 0x27dc297e-128: 1, +// 0x38cc4831-0: 2, +// 0x524f3889-96: 1, +// 0xadf59f99-288: 1, +// 0xc281d19e-0: 1 +// } +{ + // ids aggregates the 4byte ids found. + ids : {}, + + // callType returns 'false' for non-calls, or the peek-index for the first param + // after 'value', i.e. meminstart. + callType: function(opstr){ + switch(opstr){ + case "CALL": case "CALLCODE": + // gas, addr, val, memin, meminsz, memout, memoutsz + return 3; // stack ptr to memin + + case "DELEGATECALL": case "STATICCALL": + // gas, addr, memin, meminsz, memout, memoutsz + return 2; // stack ptr to memin + } + return false; + }, + + // store save the given identifier and datasize. + store: function(id, size){ + var key = "" + toHex(id) + "-" + size; + this.ids[key] = this.ids[key] + 1 || 1; + }, + + // step is invoked for every opcode that the VM executes. 
+ step: function(log, db) { + // Skip any opcodes that are not internal calls + var ct = this.callType(log.op.toString()); + if (!ct) { + return; + } + // Skip any pre-compile invocations, those are just fancy opcodes + if (isPrecompiled(toAddress(log.stack.peek(1).toString(16)))) { + return; + } + // Gather internal call details + var inSz = log.stack.peek(ct + 1).valueOf(); + if (inSz >= 4) { + var inOff = log.stack.peek(ct).valueOf(); + this.store(log.memory.slice(inOff, inOff + 4), inSz-4); + } + }, + + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) { }, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + // Save the outer calldata also + if (ctx.input.length >= 4) { + this.store(slice(ctx.input, 0, 4), ctx.input.length-4) + } + return this.ids; + }, +} diff --git a/coreth/eth/tracers/js/internal/tracers/bigram_tracer.js b/coreth/eth/tracers/js/internal/tracers/bigram_tracer.js new file mode 100644 index 00000000..421c360a --- /dev/null +++ b/coreth/eth/tracers/js/internal/tracers/bigram_tracer.js @@ -0,0 +1,47 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +{ + // hist is the counters of opcode bigrams + hist: {}, + // lastOp is last operation + lastOp: '', + // execution depth of last op + lastDepth: 0, + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + var op = log.op.toString(); + var depth = log.getDepth(); + if (depth == this.lastDepth){ + var key = this.lastOp+'-'+op; + if (this.hist[key]){ + this.hist[key]++; + } + else { + this.hist[key] = 1; + } + } + this.lastOp = op; + this.lastDepth = depth; + }, + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {}, + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + return this.hist; + }, +} diff --git a/coreth/eth/tracers/js/internal/tracers/call_tracer_legacy.js b/coreth/eth/tracers/js/internal/tracers/call_tracer_legacy.js new file mode 100644 index 00000000..3ca73777 --- /dev/null +++ b/coreth/eth/tracers/js/internal/tracers/call_tracer_legacy.js @@ -0,0 +1,252 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// callTracer is a full blown transaction tracer that extracts and reports all +// the internal calls made by a transaction, along with any useful information. 
+{ + // callstack is the current recursive call stack of the EVM execution. + callstack: [{}], + + // descended tracks whether we've just descended from an outer transaction into + // an inner call. + descended: false, + + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + // Capture any errors immediately + var error = log.getError(); + if (error !== undefined) { + this.fault(log, db); + return; + } + // We only care about system opcodes, faster if we pre-check once + var syscall = (log.op.toNumber() & 0xf0) == 0xf0; + if (syscall) { + var op = log.op.toString(); + } + // If a new contract is being created, add to the call stack + if (syscall && (op == 'CREATE' || op == "CREATE2")) { + var inOff = log.stack.peek(1).valueOf(); + var inEnd = inOff + log.stack.peek(2).valueOf(); + + // Assemble the internal call report and store for completion + var call = { + type: op, + from: toHex(log.contract.getAddress()), + input: toHex(log.memory.slice(inOff, inEnd)), + gasIn: log.getGas(), + gasCost: log.getCost(), + value: '0x' + log.stack.peek(0).toString(16) + }; + this.callstack.push(call); + this.descended = true + return; + } + // If a contract is being self destructed, gather that as a subcall too + if (syscall && op == 'SELFDESTRUCT') { + var left = this.callstack.length; + if (this.callstack[left-1].calls === undefined) { + this.callstack[left-1].calls = []; + } + this.callstack[left-1].calls.push({ + type: op, + from: toHex(log.contract.getAddress()), + to: toHex(toAddress(log.stack.peek(0).toString(16))), + gasIn: log.getGas(), + gasCost: log.getCost(), + value: '0x' + db.getBalance(log.contract.getAddress()).toString(16) + }); + return + } + // If a new method invocation is being done, add to the call stack + if (syscall && (op == 'CALL' || op == 'CALLCODE' || op == 'DELEGATECALL' || op == 'STATICCALL')) { + // Skip any pre-compile invocations, those are just fancy opcodes + var to = toAddress(log.stack.peek(1).toString(16)); + 
if (isPrecompiled(to)) { + return + } + var off = (op == 'DELEGATECALL' || op == 'STATICCALL' ? 0 : 1); + + var inOff = log.stack.peek(2 + off).valueOf(); + var inEnd = inOff + log.stack.peek(3 + off).valueOf(); + + // Assemble the internal call report and store for completion + var call = { + type: op, + from: toHex(log.contract.getAddress()), + to: toHex(to), + input: toHex(log.memory.slice(inOff, inEnd)), + gasIn: log.getGas(), + gasCost: log.getCost(), + outOff: log.stack.peek(4 + off).valueOf(), + outLen: log.stack.peek(5 + off).valueOf() + }; + if (op != 'DELEGATECALL' && op != 'STATICCALL') { + call.value = '0x' + log.stack.peek(2).toString(16); + } + this.callstack.push(call); + this.descended = true + return; + } + // If we've just descended into an inner call, retrieve it's true allowance. We + // need to extract if from within the call as there may be funky gas dynamics + // with regard to requested and actually given gas (2300 stipend, 63/64 rule). + if (this.descended) { + if (log.getDepth() >= this.callstack.length) { + this.callstack[this.callstack.length - 1].gas = log.getGas(); + } else { + // TODO(karalabe): The call was made to a plain account. We currently don't + // have access to the true gas amount inside the call and so any amount will + // mostly be wrong since it depends on a lot of input args. Skip gas for now. 
+ } + this.descended = false; + } + // If an existing call is returning, pop off the call stack + if (syscall && op == 'REVERT') { + this.callstack[this.callstack.length - 1].error = "execution reverted"; + return; + } + if (log.getDepth() == this.callstack.length - 1) { + // Pop off the last call and get the execution results + var call = this.callstack.pop(); + + if (call.type == 'CREATE' || call.type == "CREATE2") { + // If the call was a CREATE, retrieve the contract address and output code + call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost - log.getGas()).toString(16); + delete call.gasIn; delete call.gasCost; + + var ret = log.stack.peek(0); + if (!ret.equals(0)) { + call.to = toHex(toAddress(ret.toString(16))); + call.output = toHex(db.getCode(toAddress(ret.toString(16)))); + } else if (call.error === undefined) { + call.error = "internal failure"; // TODO(karalabe): surface these faults somehow + } + } else { + // If the call was a contract call, retrieve the gas usage and output + if (call.gas !== undefined) { + call.gasUsed = '0x' + bigInt(call.gasIn - call.gasCost + call.gas - log.getGas()).toString(16); + } + var ret = log.stack.peek(0); + if (!ret.equals(0)) { + call.output = toHex(log.memory.slice(call.outOff, call.outOff + call.outLen)); + } else if (call.error === undefined) { + call.error = "internal failure"; // TODO(karalabe): surface these faults somehow + } + delete call.gasIn; delete call.gasCost; + delete call.outOff; delete call.outLen; + } + if (call.gas !== undefined) { + call.gas = '0x' + bigInt(call.gas).toString(16); + } + // Inject the call into the previous one + var left = this.callstack.length; + if (this.callstack[left-1].calls === undefined) { + this.callstack[left-1].calls = []; + } + this.callstack[left-1].calls.push(call); + } + }, + + // fault is invoked when the actual execution of an opcode fails. 
+ fault: function(log, db) { + // If the topmost call already reverted, don't handle the additional fault again + if (this.callstack[this.callstack.length - 1].error !== undefined) { + return; + } + // Pop off the just failed call + var call = this.callstack.pop(); + call.error = log.getError(); + + // Consume all available gas and clean any leftovers + if (call.gas !== undefined) { + call.gas = '0x' + bigInt(call.gas).toString(16); + call.gasUsed = call.gas + } + delete call.gasIn; delete call.gasCost; + delete call.outOff; delete call.outLen; + + // Flatten the failed call into its parent + var left = this.callstack.length; + if (left > 0) { + if (this.callstack[left-1].calls === undefined) { + this.callstack[left-1].calls = []; + } + this.callstack[left-1].calls.push(call); + return; + } + // Last call failed too, leave it in the stack + this.callstack.push(call); + }, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx, db) { + var result = { + type: ctx.type, + from: toHex(ctx.from), + to: toHex(ctx.to), + value: '0x' + ctx.value.toString(16), + gas: '0x' + bigInt(ctx.gas).toString(16), + gasUsed: '0x' + bigInt(ctx.gasUsed).toString(16), + input: toHex(ctx.input), + output: toHex(ctx.output), + time: ctx.time, + }; + if (this.callstack[0].calls !== undefined) { + result.calls = this.callstack[0].calls; + } + if (this.callstack[0].error !== undefined) { + result.error = this.callstack[0].error; + } else if (ctx.error !== undefined) { + result.error = ctx.error; + } + if (result.error !== undefined && (result.error !== "execution reverted" || result.output ==="0x")) { + delete result.output; + } + return this.finalize(result); + }, + + // finalize recreates a call object using the final desired field oder for json + // serialization. This is a nicety feature to pass meaningfully ordered results + // to users who don't interpret it, just display it. 
+ finalize: function(call) { + var sorted = { + type: call.type, + from: call.from, + to: call.to, + value: call.value, + gas: call.gas, + gasUsed: call.gasUsed, + input: call.input, + output: call.output, + error: call.error, + time: call.time, + calls: call.calls, + } + for (var key in sorted) { + if (sorted[key] === undefined) { + delete sorted[key]; + } + } + if (sorted.calls !== undefined) { + for (var i=0; i. + +// evmdisTracer returns sufficient information from a trace to perform evmdis-style +// disassembly. +{ + stack: [{ops: []}], + + npushes: {0: 0, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 16: 1, 17: 1, 18: 1, 19: 1, 20: 1, 21: 1, 22: 1, 23: 1, 24: 1, 25: 1, 26: 1, 32: 1, 48: 1, 49: 1, 50: 1, 51: 1, 52: 1, 53: 1, 54: 1, 55: 0, 56: 1, 57: 0, 58: 1, 59: 1, 60: 0, 64: 1, 65: 1, 66: 1, 67: 1, 68: 1, 69: 1, 80: 0, 81: 1, 82: 0, 83: 0, 84: 1, 85: 0, 86: 0, 87: 0, 88: 1, 89: 1, 90: 1, 91: 0, 96: 1, 97: 1, 98: 1, 99: 1, 100: 1, 101: 1, 102: 1, 103: 1, 104: 1, 105: 1, 106: 1, 107: 1, 108: 1, 109: 1, 110: 1, 111: 1, 112: 1, 113: 1, 114: 1, 115: 1, 116: 1, 117: 1, 118: 1, 119: 1, 120: 1, 121: 1, 122: 1, 123: 1, 124: 1, 125: 1, 126: 1, 127: 1, 128: 2, 129: 3, 130: 4, 131: 5, 132: 6, 133: 7, 134: 8, 135: 9, 136: 10, 137: 11, 138: 12, 139: 13, 140: 14, 141: 15, 142: 16, 143: 17, 144: 2, 145: 3, 146: 4, 147: 5, 148: 6, 149: 7, 150: 8, 151: 9, 152: 10, 153: 11, 154: 12, 155: 13, 156: 14, 157: 15, 158: 16, 159: 17, 160: 0, 161: 0, 162: 0, 163: 0, 164: 0, 240: 1, 241: 1, 242: 1, 243: 0, 244: 0, 255: 0}, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function() { return this.stack[0].ops; }, + + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) { }, + + // step is invoked for every opcode that the VM executes. 
+ step: function(log, db) { + var frame = this.stack[this.stack.length - 1]; + + var error = log.getError(); + if (error) { + frame["error"] = error; + } else if (log.getDepth() == this.stack.length) { + opinfo = { + op: log.op.toNumber(), + depth : log.getDepth(), + result: [], + }; + if (frame.ops.length > 0) { + var prevop = frame.ops[frame.ops.length - 1]; + for(var i = 0; i < this.npushes[prevop.op]; i++) + prevop.result.push(log.stack.peek(i).toString(16)); + } + switch(log.op.toString()) { + case "CALL": case "CALLCODE": + var instart = log.stack.peek(3).valueOf(); + var insize = log.stack.peek(4).valueOf(); + opinfo["gas"] = log.stack.peek(0).valueOf(); + opinfo["to"] = log.stack.peek(1).toString(16); + opinfo["value"] = log.stack.peek(2).toString(); + opinfo["input"] = log.memory.slice(instart, instart + insize); + opinfo["error"] = null; + opinfo["return"] = null; + opinfo["ops"] = []; + this.stack.push(opinfo); + break; + case "DELEGATECALL": case "STATICCALL": + var instart = log.stack.peek(2).valueOf(); + var insize = log.stack.peek(3).valueOf(); + opinfo["op"] = log.op.toString(); + opinfo["gas"] = log.stack.peek(0).valueOf(); + opinfo["to"] = log.stack.peek(1).toString(16); + opinfo["input"] = log.memory.slice(instart, instart + insize); + opinfo["error"] = null; + opinfo["return"] = null; + opinfo["ops"] = []; + this.stack.push(opinfo); + break; + case "RETURN": case "REVERT": + var out = log.stack.peek(0).valueOf(); + var outsize = log.stack.peek(1).valueOf(); + frame.return = log.memory.slice(out, out + outsize); + break; + case "STOP": case "SELFDESTRUCT": + frame.return = log.memory.slice(0, 0); + break; + case "JUMPDEST": + opinfo["pc"] = log.getPC(); + } + if(log.op.isPush()) { + opinfo["len"] = log.op.toNumber() - 0x5e; + } + frame.ops.push(opinfo); + } else { + this.stack = this.stack.slice(0, log.getDepth()); + } + } +} diff --git a/coreth/eth/tracers/js/internal/tracers/noop_tracer_legacy.js 
b/coreth/eth/tracers/js/internal/tracers/noop_tracer_legacy.js new file mode 100644 index 00000000..fe7ddc85 --- /dev/null +++ b/coreth/eth/tracers/js/internal/tracers/noop_tracer_legacy.js @@ -0,0 +1,29 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// noopTracer is just the barebone boilerplate code required from a JavaScript +// object to be usable as a transaction tracer. +{ + // step is invoked for every opcode that the VM executes. + step: function(log, db) { }, + + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) { }, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx, db) { return {}; } +} diff --git a/coreth/eth/tracers/js/internal/tracers/opcount_tracer.js b/coreth/eth/tracers/js/internal/tracers/opcount_tracer.js new file mode 100644 index 00000000..f7984c74 --- /dev/null +++ b/coreth/eth/tracers/js/internal/tracers/opcount_tracer.js @@ -0,0 +1,32 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. 
+// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// opcountTracer is a sample tracer that just counts the number of instructions +// executed by the EVM before the transaction terminated. +{ + // count tracks the number of EVM instructions executed. + count: 0, + + // step is invoked for every opcode that the VM executes. + step: function(log, db) { this.count++ }, + + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) { }, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx, db) { return this.count } +} diff --git a/coreth/eth/tracers/js/internal/tracers/prestate_tracer_legacy.js b/coreth/eth/tracers/js/internal/tracers/prestate_tracer_legacy.js new file mode 100644 index 00000000..77f25209 --- /dev/null +++ b/coreth/eth/tracers/js/internal/tracers/prestate_tracer_legacy.js @@ -0,0 +1,115 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// prestateTracer outputs sufficient information to create a local execution of +// the transaction from a custom assembled genesis block. +{ + // prestate is the genesis that we're building. + prestate: null, + + // lookupAccount injects the specified account into the prestate object. + lookupAccount: function(addr, db){ + var acc = toHex(addr); + if (this.prestate[acc] === undefined) { + this.prestate[acc] = { + balance: '0x' + db.getBalance(addr).toString(16), + nonce: db.getNonce(addr), + code: toHex(db.getCode(addr)), + storage: {} + }; + } + }, + + // lookupStorage injects the specified storage entry of the given account into + // the prestate object. + lookupStorage: function(addr, key, db){ + var acc = toHex(addr); + var idx = toHex(key); + + if (this.prestate[acc].storage[idx] === undefined) { + this.prestate[acc].storage[idx] = toHex(db.getState(addr, key)); + } + }, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx, db) { + if (this.prestate === null) { + this.prestate = {}; + // If tx is transfer-only, the recipient account + // hasn't been populated. 
+ this.lookupAccount(ctx.to, db); + } + + // At this point, we need to deduct the 'value' from the + // outer transaction, and move it back to the origin + this.lookupAccount(ctx.from, db); + + var fromBal = bigInt(this.prestate[toHex(ctx.from)].balance.slice(2), 16); + var toBal = bigInt(this.prestate[toHex(ctx.to)].balance.slice(2), 16); + + this.prestate[toHex(ctx.to)].balance = '0x'+toBal.subtract(ctx.value).toString(16); + this.prestate[toHex(ctx.from)].balance = '0x'+fromBal.add(ctx.value).add((ctx.gasUsed + ctx.intrinsicGas) * ctx.gasPrice).toString(16); + + // Decrement the caller's nonce, and remove empty create targets + this.prestate[toHex(ctx.from)].nonce--; + if (ctx.type == 'CREATE') { + // We can blibdly delete the contract prestate, as any existing state would + // have caused the transaction to be rejected as invalid in the first place. + delete this.prestate[toHex(ctx.to)]; + } + // Return the assembled allocations (prestate) + return this.prestate; + }, + + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + // Add the current account if we just started tracing + if (this.prestate === null){ + this.prestate = {}; + // Balance will potentially be wrong here, since this will include the value + // sent along with the message. We fix that in 'result()'. 
+ this.lookupAccount(log.contract.getAddress(), db); + } + // Whenever new state is accessed, add it to the prestate + switch (log.op.toString()) { + case "EXTCODECOPY": case "EXTCODESIZE": case "EXTCODEHASH": case "BALANCE": + this.lookupAccount(toAddress(log.stack.peek(0).toString(16)), db); + break; + case "CREATE": + var from = log.contract.getAddress(); + this.lookupAccount(toContract(from, db.getNonce(from)), db); + break; + case "CREATE2": + var from = log.contract.getAddress(); + // stack: salt, size, offset, endowment + var offset = log.stack.peek(1).valueOf() + var size = log.stack.peek(2).valueOf() + var end = offset + size + this.lookupAccount(toContract2(from, log.stack.peek(3).toString(16), log.memory.slice(offset, end)), db); + break; + case "CALL": case "CALLCODE": case "DELEGATECALL": case "STATICCALL": + this.lookupAccount(toAddress(log.stack.peek(1).toString(16)), db); + break; + case 'SSTORE':case 'SLOAD': + this.lookupStorage(log.contract.getAddress(), toWord(log.stack.peek(0).toString(16)), db); + break; + } + }, + + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {} +} diff --git a/coreth/eth/tracers/js/internal/tracers/tracers.go b/coreth/eth/tracers/js/internal/tracers/tracers.go new file mode 100644 index 00000000..6547f1b0 --- /dev/null +++ b/coreth/eth/tracers/js/internal/tracers/tracers.go @@ -0,0 +1,59 @@ +// Copyright 2017 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +// Package tracers contains the actual JavaScript tracer assets. +package tracers + +import ( + "embed" + "io/fs" + "strings" + "unicode" +) + +//go:embed *.js +var files embed.FS + +// Load reads the built-in JS tracer files embedded in the binary and +// returns a mapping of tracer name to source. +func Load() (map[string]string, error) { + var assetTracers = make(map[string]string) + err := fs.WalkDir(files, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + b, err := fs.ReadFile(files, path) + if err != nil { + return err + } + name := camel(strings.TrimSuffix(path, ".js")) + assetTracers[name] = string(b) + return nil + }) + return assetTracers, err +} + +// camel converts a snake cased input string into a camel cased output. +func camel(str string) string { + pieces := strings.Split(str, "_") + for i := 1; i < len(pieces); i++ { + pieces[i] = string(unicode.ToUpper(rune(pieces[i][0]))) + pieces[i][1:] + } + return strings.Join(pieces, "") +} diff --git a/coreth/eth/tracers/js/internal/tracers/trigram_tracer.js b/coreth/eth/tracers/js/internal/tracers/trigram_tracer.js new file mode 100644 index 00000000..8756490d --- /dev/null +++ b/coreth/eth/tracers/js/internal/tracers/trigram_tracer.js @@ -0,0 +1,49 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +{ + // hist is the map of trigram counters + hist: {}, + // lastOp is last operation + lastOps: ['',''], + lastDepth: 0, + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + var depth = log.getDepth(); + if (depth != this.lastDepth){ + this.lastOps = ['','']; + this.lastDepth = depth; + return; + } + var op = log.op.toString(); + var key = this.lastOps[0]+'-'+this.lastOps[1]+'-'+op; + if (this.hist[key]){ + this.hist[key]++; + } + else { + this.hist[key] = 1; + } + this.lastOps[0] = this.lastOps[1]; + this.lastOps[1] = op; + }, + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {}, + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + return this.hist; + }, +} diff --git a/coreth/eth/tracers/js/internal/tracers/unigram_tracer.js b/coreth/eth/tracers/js/internal/tracers/unigram_tracer.js new file mode 100644 index 00000000..51107d8f --- /dev/null +++ b/coreth/eth/tracers/js/internal/tracers/unigram_tracer.js @@ -0,0 +1,41 @@ +// Copyright 2018 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +{ + // hist is the map of opcodes to counters + hist: {}, + // nops counts number of ops + nops: 0, + // step is invoked for every opcode that the VM executes. + step: function(log, db) { + var op = log.op.toString(); + if (this.hist[op]){ + this.hist[op]++; + } + else { + this.hist[op] = 1; + } + this.nops++; + }, + // fault is invoked when the actual execution of an opcode fails. + fault: function(log, db) {}, + + // result is invoked when all the opcodes have been iterated over and returns + // the final result of the tracing. + result: function(ctx) { + return this.hist; + }, +} diff --git a/coreth/eth/tracers/js/tracer_test.go b/coreth/eth/tracers/js/tracer_test.go new file mode 100644 index 00000000..435fcf07 --- /dev/null +++ b/coreth/eth/tracers/js/tracer_test.go @@ -0,0 +1,315 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. +// ********** +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package js + +import ( + "encoding/json" + "errors" + "math/big" + "strings" + "testing" + "time" + + "github.com/ava-labs/coreth/core/state" + "github.com/ava-labs/coreth/core/vm" + "github.com/ava-labs/coreth/eth/tracers" + "github.com/ava-labs/coreth/params" + "github.com/ethereum/go-ethereum/common" +) + +type account struct{} + +func (account) SubBalance(amount *big.Int) {} +func (account) AddBalance(amount *big.Int) {} +func (account) SetAddress(common.Address) {} +func (account) Value() *big.Int { return nil } +func (account) SetBalance(*big.Int) {} +func (account) SetNonce(uint64) {} +func (account) Balance() *big.Int { return nil } +func (account) Address() common.Address { return common.Address{} } +func (account) SetCode(common.Hash, []byte) {} +func (account) ForEachStorage(cb func(key, value common.Hash) bool) {} + +type dummyStatedb struct { + state.StateDB +} + +func (*dummyStatedb) GetRefund() uint64 { return 1337 } +func (*dummyStatedb) GetBalance(addr common.Address) *big.Int { return new(big.Int) } + +type vmContext struct { + blockCtx vm.BlockContext + txCtx vm.TxContext +} + +func testCtx() *vmContext { + return &vmContext{blockCtx: vm.BlockContext{BlockNumber: big.NewInt(1)}, txCtx: vm.TxContext{GasPrice: big.NewInt(100000)}} +} + +func runTrace(tracer tracers.Tracer, vmctx *vmContext, chaincfg *params.ChainConfig) (json.RawMessage, error) { + var ( + env = vm.NewEVM(vmctx.blockCtx, vmctx.txCtx, &dummyStatedb{}, chaincfg, vm.Config{Debug: true, Tracer: tracer}) + gasLimit uint64 = 31000 + startGas uint64 = 10000 + value = big.NewInt(0) + 
contract = vm.NewContract(account{}, account{}, value, startGas) + ) + contract.Code = []byte{byte(vm.PUSH1), 0x1, byte(vm.PUSH1), 0x1, 0x0} + + tracer.CaptureTxStart(gasLimit) + tracer.CaptureStart(env, contract.Caller(), contract.Address(), false, []byte{}, startGas, value) + ret, err := env.Interpreter().Run(contract, []byte{}, false) + tracer.CaptureEnd(ret, startGas-contract.Gas, 1, err) + // Rest gas assumes no refund + tracer.CaptureTxEnd(startGas - contract.Gas) + if err != nil { + return nil, err + } + return tracer.GetResult() +} + +func TestTracer(t *testing.T) { + execTracer := func(code string) ([]byte, string) { + t.Helper() + tracer, err := newJsTracer(code, nil, nil) + if err != nil { + t.Fatal(err) + } + ret, err := runTrace(tracer, testCtx(), params.TestChainConfig) + if err != nil { + return nil, err.Error() // Stringify to allow comparison without nil checks + } + return ret, "" + } + for i, tt := range []struct { + code string + want string + fail string + }{ + { // tests that we don't panic on bad arguments to memory access + code: "{depths: [], step: function(log) { this.depths.push(log.memory.slice(-1,-2)); }, fault: function() {}, result: function() { return this.depths; }}", + want: ``, + fail: "tracer accessed out of bound memory: offset -1, end -2 at step (:1:53(15)) in server-side tracer function 'step'", + }, { // tests that we don't panic on bad arguments to stack peeks + code: "{depths: [], step: function(log) { this.depths.push(log.stack.peek(-1)); }, fault: function() {}, result: function() { return this.depths; }}", + want: ``, + fail: "tracer accessed out of bound stack: size 0, index -1 at step (:1:53(13)) in server-side tracer function 'step'", + }, { // tests that we don't panic on bad arguments to memory getUint + code: "{ depths: [], step: function(log, db) { this.depths.push(log.memory.getUint(-64));}, fault: function() {}, result: function() { return this.depths; }}", + want: ``, + fail: "tracer accessed out of bound 
memory: available 0, offset -64, size 32 at step (:1:58(13)) in server-side tracer function 'step'", + }, { // tests some general counting + code: "{count: 0, step: function() { this.count += 1; }, fault: function() {}, result: function() { return this.count; }}", + want: `3`, + }, { // tests that depth is reported correctly + code: "{depths: [], step: function(log) { this.depths.push(log.stack.length()); }, fault: function() {}, result: function() { return this.depths; }}", + want: `[0,1,2]`, + }, { // tests memory length + code: "{lengths: [], step: function(log) { this.lengths.push(log.memory.length()); }, fault: function() {}, result: function() { return this.lengths; }}", + want: `[0,0,0]`, + }, { // tests to-string of opcodes + code: "{opcodes: [], step: function(log) { this.opcodes.push(log.op.toString()); }, fault: function() {}, result: function() { return this.opcodes; }}", + want: `["PUSH1","PUSH1","STOP"]`, + }, { // tests intrinsic gas + code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx) { return ctx.gasPrice+'.'+ctx.gasUsed+'.'+ctx.intrinsicGas; }}", + want: `"100000.6.21000"`, + }, { + code: "{res: null, step: function(log) {}, fault: function() {}, result: function() { return toWord('0xffaa') }}", + want: `{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0,"20":0,"21":0,"22":0,"23":0,"24":0,"25":0,"26":0,"27":0,"28":0,"29":0,"30":255,"31":170}`, + }, { // test feeding a buffer back into go + code: "{res: null, step: function(log) { var address = log.contract.getAddress(); this.res = toAddress(address); }, fault: function() {}, result: function() { return this.res }}", + want: `{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0}`, + }, { + code: "{res: null, step: function(log) { var address = '0x0000000000000000000000000000000000000000'; this.res = 
toAddress(address); }, fault: function() {}, result: function() { return this.res }}", + want: `{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0}`, + }, { + code: "{res: null, step: function(log) { var address = Array.prototype.slice.call(log.contract.getAddress()); this.res = toAddress(address); }, fault: function() {}, result: function() { return this.res }}", + want: `{"0":0,"1":0,"2":0,"3":0,"4":0,"5":0,"6":0,"7":0,"8":0,"9":0,"10":0,"11":0,"12":0,"13":0,"14":0,"15":0,"16":0,"17":0,"18":0,"19":0}`, + }, + } { + if have, err := execTracer(tt.code); tt.want != string(have) || tt.fail != err { + t.Errorf("testcase %d: expected return value to be '%s' got '%s', error to be '%s' got '%s'\n\tcode: %v", i, tt.want, string(have), tt.fail, err, tt.code) + } + } +} + +func TestHalt(t *testing.T) { + timeout := errors.New("stahp") + tracer, err := newJsTracer("{step: function() { while(1); }, result: function() { return null; }, fault: function(){}}", nil, nil) + if err != nil { + t.Fatal(err) + } + go func() { + time.Sleep(1 * time.Second) + tracer.Stop(timeout) + }() + if _, err = runTrace(tracer, testCtx(), params.TestChainConfig); !strings.Contains(err.Error(), "stahp") { + t.Errorf("Expected timeout error, got %v", err) + } +} + +func TestHaltBetweenSteps(t *testing.T) { + tracer, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }}", nil, nil) + if err != nil { + t.Fatal(err) + } + env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(1)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + scope := &vm.ScopeContext{ + Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0), + } + tracer.CaptureStart(env, common.Address{}, common.Address{}, false, []byte{}, 0, big.NewInt(0)) + tracer.CaptureState(0, 0, 0, 0, scope, nil, 0, nil) + timeout := 
errors.New("stahp") + tracer.Stop(timeout) + tracer.CaptureState(0, 0, 0, 0, scope, nil, 0, nil) + + if _, err := tracer.GetResult(); !strings.Contains(err.Error(), timeout.Error()) { + t.Errorf("Expected timeout error, got %v", err) + } +} + +// testNoStepExec tests a regular value transfer (no exec), and accessing the statedb +// in 'result' +func TestNoStepExec(t *testing.T) { + execTracer := func(code string) []byte { + t.Helper() + tracer, err := newJsTracer(code, nil, nil) + if err != nil { + t.Fatal(err) + } + env := vm.NewEVM(vm.BlockContext{BlockNumber: big.NewInt(1)}, vm.TxContext{GasPrice: big.NewInt(100)}, &dummyStatedb{}, params.TestChainConfig, vm.Config{Debug: true, Tracer: tracer}) + tracer.CaptureStart(env, common.Address{}, common.Address{}, false, []byte{}, 1000, big.NewInt(0)) + tracer.CaptureEnd(nil, 0, 1, nil) + ret, err := tracer.GetResult() + if err != nil { + t.Fatal(err) + } + return ret + } + for i, tt := range []struct { + code string + want string + }{ + { // tests that we don't panic on accessing the db methods + code: "{depths: [], step: function() {}, fault: function() {}, result: function(ctx, db){ return db.getBalance(ctx.to)} }", + want: `"0"`, + }, + } { + if have := execTracer(tt.code); tt.want != string(have) { + t.Errorf("testcase %d: expected return value to be %s got %s\n\tcode: %v", i, tt.want, string(have), tt.code) + } + } +} + +func TestIsPrecompile(t *testing.T) { + chaincfg := ¶ms.ChainConfig{ChainID: big.NewInt(1), HomesteadBlock: big.NewInt(0), DAOForkBlock: nil, DAOForkSupport: false, EIP150Block: big.NewInt(0), EIP150Hash: common.Hash{}, EIP155Block: big.NewInt(0), EIP158Block: big.NewInt(0), ByzantiumBlock: big.NewInt(100), ConstantinopleBlock: big.NewInt(0), PetersburgBlock: big.NewInt(0), IstanbulBlock: big.NewInt(200), MuirGlacierBlock: big.NewInt(0)} + chaincfg.ByzantiumBlock = big.NewInt(100) + chaincfg.IstanbulBlock = big.NewInt(200) + txCtx := vm.TxContext{GasPrice: big.NewInt(100000)} + tracer, err := 
newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil) + if err != nil { + t.Fatal(err) + } + + blockCtx := vm.BlockContext{BlockNumber: big.NewInt(150)} + res, err := runTrace(tracer, &vmContext{blockCtx, txCtx}, chaincfg) + if err != nil { + t.Error(err) + } + if string(res) != "false" { + t.Errorf("tracer should not consider blake2f as precompile in byzantium") + } + + tracer, _ = newJsTracer("{addr: toAddress('0000000000000000000000000000000000000009'), res: null, step: function() { this.res = isPrecompiled(this.addr); }, fault: function() {}, result: function() { return this.res; }}", nil, nil) + blockCtx = vm.BlockContext{BlockNumber: big.NewInt(250)} + res, err = runTrace(tracer, &vmContext{blockCtx, txCtx}, chaincfg) + if err != nil { + t.Error(err) + } + if string(res) != "true" { + t.Errorf("tracer should consider blake2f as precompile in istanbul") + } +} + +func TestEnterExit(t *testing.T) { + // test that either both or none of enter() and exit() are defined + if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}}", new(tracers.Context), nil); err == nil { + t.Fatal("tracer creation should've failed without exit() definition") + } + if _, err := newJsTracer("{step: function() {}, fault: function() {}, result: function() { return null; }, enter: function() {}, exit: function() {}}", new(tracers.Context), nil); err != nil { + t.Fatal(err) + } + // test that the enter and exit method are correctly invoked and the values passed + tracer, err := newJsTracer("{enters: 0, exits: 0, enterGas: 0, gasUsed: 0, step: function() {}, fault: function() {}, result: function() { return {enters: this.enters, exits: this.exits, enterGas: this.enterGas, gasUsed: this.gasUsed} }, enter: function(frame) { this.enters++; this.enterGas = 
frame.getGas(); }, exit: function(res) { this.exits++; this.gasUsed = res.getGasUsed(); }}", new(tracers.Context), nil) + if err != nil { + t.Fatal(err) + } + scope := &vm.ScopeContext{ + Contract: vm.NewContract(&account{}, &account{}, big.NewInt(0), 0), + } + tracer.CaptureEnter(vm.CALL, scope.Contract.Caller(), scope.Contract.Address(), []byte{}, 1000, new(big.Int)) + tracer.CaptureExit([]byte{}, 400, nil) + + have, err := tracer.GetResult() + if err != nil { + t.Fatal(err) + } + want := `{"enters":1,"exits":1,"enterGas":1000,"gasUsed":400}` + if string(have) != want { + t.Errorf("Number of invocations of enter() and exit() is wrong. Have %s, want %s\n", have, want) + } +} + +func TestSetup(t *testing.T) { + // Test empty config + _, err := newJsTracer(`{setup: function(cfg) { if (cfg !== "{}") { throw("invalid empty config") } }, fault: function() {}, result: function() {}}`, new(tracers.Context), nil) + if err != nil { + t.Error(err) + } + + cfg, err := json.Marshal(map[string]string{"foo": "bar"}) + if err != nil { + t.Fatal(err) + } + // Test no setup func + _, err = newJsTracer(`{fault: function() {}, result: function() {}}`, new(tracers.Context), cfg) + if err != nil { + t.Fatal(err) + } + // Test config value + tracer, err := newJsTracer("{config: null, setup: function(cfg) { this.config = JSON.parse(cfg) }, step: function() {}, fault: function() {}, result: function() { return this.config.foo }}", new(tracers.Context), cfg) + if err != nil { + t.Fatal(err) + } + have, err := tracer.GetResult() + if err != nil { + t.Fatal(err) + } + if string(have) != `"bar"` { + t.Errorf("tracer returned wrong result. have: %s, want: \"bar\"\n", string(have)) + } +} diff --git a/coreth/eth/tracers/native/4byte.go b/coreth/eth/tracers/native/4byte.go index 1b7eced7..5ea51f2d 100644 --- a/coreth/eth/tracers/native/4byte.go +++ b/coreth/eth/tracers/native/4byte.go @@ -47,14 +47,15 @@ func init() { // a reversed signature can be matched against the size of the data. 
// // Example: -// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"}) -// { -// 0x27dc297e-128: 1, -// 0x38cc4831-0: 2, -// 0x524f3889-96: 1, -// 0xadf59f99-288: 1, -// 0xc281d19e-0: 1 -// } +// +// > debug.traceTransaction( "0x214e597e35da083692f5386141e69f47e973b2c56e7a8073b1ea08fd7571e9de", {tracer: "4byteTracer"}) +// { +// 0x27dc297e-128: 1, +// 0x38cc4831-0: 2, +// 0x524f3889-96: 1, +// 0xadf59f99-288: 1, +// 0xc281d19e-0: 1 +// } type fourByteTracer struct { env *vm.EVM ids map[string]int // ids aggregates the 4byte ids found diff --git a/coreth/eth/tracers/native/tracer.go b/coreth/eth/tracers/native/tracer.go index 102e6885..ed6f62df 100644 --- a/coreth/eth/tracers/native/tracer.go +++ b/coreth/eth/tracers/native/tracer.go @@ -24,24 +24,20 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . -/* -Package native is a collection of tracers written in go. - -In order to add a native tracer and have it compiled into the binary, a new -file needs to be added to this folder, containing an implementation of the -`eth.tracers.Tracer` interface. - -Aside from implementing the tracer, it also needs to register itself, using the -`register` method -- and this needs to be done in the package initialization. - -Example: - -```golang -func init() { - register("noopTracerNative", newNoopTracer) -} -``` -*/ +// Package native is a collection of tracers written in go. +// +// In order to add a native tracer and have it compiled into the binary, a new +// file needs to be added to this folder, containing an implementation of the +// `eth.tracers.Tracer` interface. +// +// Aside from implementing the tracer, it also needs to register itself, using the +// `register` method -- and this needs to be done in the package initialization. 
+// +// Example: +// +// func init() { +// register("noopTracerNative", newNoopTracer) +// } package native import ( diff --git a/coreth/ethclient/ethclient.go b/coreth/ethclient/ethclient.go index f74948ba..ee7a7b58 100644 --- a/coreth/ethclient/ethclient.go +++ b/coreth/ethclient/ethclient.go @@ -632,12 +632,12 @@ func ToBlockNumArg(number *big.Int) string { // negative numbers to special strings (latest, pending) then is // used on its server side. See rpc/types.go for the comparison. // In Coreth, latest, pending, and accepted are all treated the same - // therefore, if [number] is nil or a negative number in [-3, -1] + // therefore, if [number] is nil or a negative number in [-4, -1] // we want the latest accepted block if number == nil { return "latest" } - low := big.NewInt(-3) + low := big.NewInt(-4) high := big.NewInt(-1) if number.Cmp(low) >= 0 && number.Cmp(high) <= 0 { return "latest" diff --git a/coreth/ethdb/leveldb/leveldb.go b/coreth/ethdb/leveldb/leveldb.go index 0cbf407a..7876ae60 100644 --- a/coreth/ethdb/leveldb/leveldb.go +++ b/coreth/ethdb/leveldb/leveldb.go @@ -263,13 +263,14 @@ func (db *Database) Path() string { // the metrics subsystem. 
// // This is how a LevelDB stats table looks like (currently): -// Compactions -// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB) -// -------+------------+---------------+---------------+---------------+--------------- -// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098 -// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294 -// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884 -// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000 +// +// Compactions +// Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB) +// -------+------------+---------------+---------------+---------------+--------------- +// 0 | 0 | 0.00000 | 1.27969 | 0.00000 | 12.31098 +// 1 | 85 | 109.27913 | 28.09293 | 213.92493 | 214.26294 +// 2 | 523 | 1000.37159 | 7.26059 | 66.86342 | 66.77884 +// 3 | 570 | 1113.18458 | 0.00000 | 0.00000 | 0.00000 // // This is how the write delay look like (currently): // DelayN:5 Delay:406.604657ms Paused: false diff --git a/coreth/go.mod b/coreth/go.mod index 6eb397ea..5285877b 100644 --- a/coreth/go.mod +++ b/coreth/go.mod @@ -4,65 +4,61 @@ go 1.21 require ( github.com/VictoriaMetrics/fastcache v1.10.0 - github.com/ava-labs/avalanchego v1.8.5 + github.com/ava-labs/avalanchego v1.9.16 github.com/cespare/cp v0.1.0 github.com/davecgh/go-spew v1.1.1 github.com/deckarep/golang-set v1.8.0 - github.com/ethereum/go-ethereum v1.10.25 + github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf + github.com/ethereum/go-ethereum v1.10.26 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 + github.com/fsnotify/fsnotify v1.6.0 github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 github.com/google/uuid v1.2.0 github.com/gorilla/rpc v1.2.0 github.com/gorilla/websocket v1.4.2 github.com/hashicorp/go-bexpr v0.1.10 github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d + github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e github.com/holiman/bloomfilter/v2 v2.0.3 github.com/holiman/uint256 v1.2.0 
github.com/mattn/go-colorable v0.1.12 github.com/mattn/go-isatty v0.0.14 github.com/olekukonko/tablewriter v0.0.5 - github.com/prometheus/client_golang v1.12.2 + github.com/prometheus/client_golang v1.13.0 github.com/prometheus/client_model v0.2.0 - github.com/rjeczalik/notify v0.9.2 + github.com/rjeczalik/notify v0.9.3 github.com/shirou/gopsutil v3.21.11+incompatible github.com/spf13/cast v1.5.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.12.0 github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.8.1 github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a github.com/tyler-smith/go-bip39 v1.0.2 github.com/urfave/cli/v2 v2.10.2 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f - golang.org/x/text v0.3.7 + golang.org/x/sync v0.1.0 + golang.org/x/sys v0.5.0 + golang.org/x/text v0.7.0 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac gopkg.in/urfave/cli.v1 v1.20.0 ) require ( - github.com/aead/siphash v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/btcsuite/btcd v0.23.1 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.2.0 // indirect - github.com/btcsuite/btcd/btcutil v1.1.1 // indirect - github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 // indirect - github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect - github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect - github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect - github.com/btcsuite/winsvc v1.0.0 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/btcsuite/btcd/btcutil v1.1.3 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/decred/dcrd/crypto/blake256 
v1.0.0 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837 // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/decred/dcrd/lru v1.1.1 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect + github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 // indirect github.com/edsrzf/mmap-go v1.0.0 // indirect - github.com/fatih/color v1.13.0 // indirect - github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.0 // indirect github.com/golang-jwt/jwt/v4 v4.3.0 // indirect github.com/golang/mock v1.6.0 // indirect @@ -70,54 +66,57 @@ require ( github.com/golang/snappy v0.0.4 // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/hashicorp/go-hclog v1.2.2 // indirect - github.com/hashicorp/go-plugin v1.4.4 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce // indirect github.com/huin/goupnp v1.0.3 // indirect github.com/influxdata/influxdb v1.8.3 // indirect github.com/influxdata/influxdb-client-go/v2 v2.4.0 // indirect github.com/influxdata/line-protocol v0.0.0-20210311194329-9aa0e372d097 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect - github.com/jessevdk/go-flags v1.5.0 // indirect - github.com/jrick/logrotate v1.0.0 // indirect - github.com/kkdai/bstream v1.0.0 // indirect github.com/magiconair/properties v1.8.6 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect 
- github.com/mitchellh/go-testing-interface v1.14.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d // indirect - github.com/oklog/run v1.1.0 // indirect github.com/opentracing/opentracing-go v1.1.0 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.1 // indirect github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/tsdb v0.10.0 // indirect github.com/rs/cors v1.7.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/subosito/gotenv v1.3.0 // indirect + github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295 // indirect github.com/tklauser/go-sysconf v0.3.5 // indirect github.com/tklauser/numcpus v0.2.2 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - go.uber.org/atomic v1.9.0 // indirect + go.opentelemetry.io/otel v1.11.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0 // indirect + go.opentelemetry.io/otel/sdk v1.11.0 // indirect + go.opentelemetry.io/otel/trace v1.11.0 // indirect + go.opentelemetry.io/proto/otlp v0.19.0 // 
indirect + go.uber.org/atomic v1.10.0 // indirect go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.21.0 // indirect - golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + go.uber.org/zap v1.24.0 // indirect + golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/term v0.5.0 // indirect gonum.org/v1/gonum v0.11.0 // indirect - google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d // indirect - google.golang.org/grpc v1.49.0 // indirect - google.golang.org/protobuf v1.28.0 // indirect + google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c // indirect + google.golang.org/grpc v1.50.1 // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect diff --git a/coreth/go.sum b/coreth/go.sum index 25bb562a..9712f713 100644 --- a/coreth/go.sum +++ b/coreth/go.sum @@ -47,7 +47,6 @@ github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/VictoriaMetrics/fastcache v1.10.0 h1:5hDJnLsKLpnUEToub7ETuRu8RCkb40woBZAUiKonXzY= github.com/VictoriaMetrics/fastcache v1.10.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8= -github.com/aead/siphash v1.0.1 h1:FwHfE/T45KPKYuuSAKyyvE+oPWcaQ+CUmFW0bPlM+kg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -60,10 +59,9 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax 
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0= -github.com/ava-labs/avalanchego v1.8.5 h1:HhNJUsvwk+DxxncutN/4rlnUmnQvtF/73qNbpW3BFJc= -github.com/ava-labs/avalanchego v1.8.5/go.mod h1:GAo3tDCj5Ulx9xIY78ad5SmGFqtdmFRjJGQ2gJk//Vk= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/ava-labs/avalanchego v1.9.16 h1:JarxIn7gy4V9f1dBgUxubRRO6CrqY2MprOLGqEmk+Vg= +github.com/ava-labs/avalanchego v1.9.16/go.mod h1:Unm7ruhAvLSRP+7gIfwyHNf+wEehWLsFhY9yp10nDbw= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -72,31 +70,31 @@ github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+Wji github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= -github.com/btcsuite/btcd v0.23.1 h1:IB8cVQcC2X5mHbnfirLG5IZnkWYNTPlLZVrxUYSotbE= -github.com/btcsuite/btcd v0.23.1/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= -github.com/btcsuite/btcd/btcec/v2 v2.1.1/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= -github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= 
-github.com/btcsuite/btcd/btcec/v2 v2.2.0/go.mod h1:U7MHm051Al6XmscBQ0BoNydpOTsFAn707034b5nY8zU= +github.com/btcsuite/btcd v0.23.0 h1:V2/ZgjfDFIygAX3ZapeigkVBoVUtOJKSwrhZdlpSvaA= +github.com/btcsuite/btcd v0.23.0/go.mod h1:0QJIIN1wwIXF/3G/m87gIwGniDMDQqjVn4SZgnFpsYY= +github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= +github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9UrLwaeswfjfdF0A= -github.com/btcsuite/btcd/btcutil v1.1.1 h1:hDcDaXiP0uEzR8Biqo2weECKqEw0uHDZ9ixIWevVQqY= -github.com/btcsuite/btcd/btcutil v1.1.1/go.mod h1:nbKlBMNm9FGsdvKvu0essceubPiAcI57pYBNnsLAa34= +github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= +github.com/btcsuite/btcd/btcutil v1.1.3 h1:xfbtw8lwpp0G6NwSHb+UE67ryTFHJAiNuipusjXSohQ= +github.com/btcsuite/btcd/btcutil v1.1.3/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw= 
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0 h1:J9B4L7e3oqhXOcm+2IuNApwzQec85lE+QaikUcCs+dk= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= @@ -112,11 +110,13 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go 
v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -125,24 +125,24 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= -github.com/decred/dcrd/chaincfg/chainhash v1.0.2 h1:rt5Vlq/jM3ZawwiacWjPa+smINyLRN07EO0cNBV6DGU= -github.com/decred/dcrd/chaincfg/chainhash v1.0.2/go.mod h1:BpbrGgrPTr3YJYRN3Bm+D9NuaFd+zGyNeIKgrhCXK60= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837 
h1:g2cyFTu5FKWhCo7L4hVJ797Q506B4EywA7L9I6OebgA= -github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200627015759-01fd2de07837/go.mod h1:J70FGZSbzsjecRTiTzER+3f1KZLNaXkuv+yeFTKoxM8= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/decred/dcrd/lru v1.1.1 h1:kWFDaW0OWx6AD6Ki342c+JPmHbiVdE6rK81pT3fuo/Y= -github.com/decred/dcrd/lru v1.1.1/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE9iAUlWl9Al3M= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91 h1:Izz0+t1Z5nI16/II7vuEo/nHjodOg0p7+OiDpjX5t1E= +github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/docker/docker v1.6.2 h1:HlFGsy+9/xrgMmhmN+NGhCc5SHGJ7I+kHosRR1xc/aI= +github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf h1:Yt+4K30SdjOkRoRRm3vYNQgR+/ZIy0RmeUDZo7Y8zeQ= +github.com/dop251/goja v0.0.0-20220405120441-9037c2b61cbf/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk= 
+github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= @@ -151,20 +151,20 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.10.25 h1:5dFrKJDnYf8L6/5o42abCE6a9yJm9cs4EJVRyYMr55s= -github.com/ethereum/go-ethereum v1.10.25/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s= +github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize 
v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= @@ -180,14 +180,23 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 
h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU= +github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -200,6 +209,8 @@ github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= 
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -248,7 +259,7 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -272,7 +283,6 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk= github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= @@ -283,20 +293,19 @@ github.com/graph-gophers/graphql-go 
v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLt github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 h1:kr3j8iIMR4ywO/O0rvksXaJvauGGCMg2zAZIiNZ9uIQ= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0/go.mod h1:ummNFgdgLhhX7aIiy35vVmQNS0rWXknfPE0qe6fmFXg= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= -github.com/hashicorp/go-hclog v1.2.2 h1:ihRI7YFwcZdiSD7SIenIhHfQH3OuDvWerAUBZbeQS3M= -github.com/hashicorp/go-hclog v1.2.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-plugin v1.4.4 h1:NVdrSdFRt3SkZtNckJ6tog7gbpRrcbOjQi/rgF7JYWQ= -github.com/hashicorp/go-plugin v1.4.4/go.mod h1:viDMjcLJuDui6pXb8U4HVfb8AamCWhHGUjr2IrTF67s= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce h1:7UnVY3T/ZnHUrfviiAgIUjg2PXxsQfs5bphsG8F7Keo= -github.com/hashicorp/yamux v0.0.0-20200609203250-aecfd211c9ce/go.mod 
h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e h1:pIYdhNkDh+YENVNi3gto8n9hAmRxKxoar0iE6BLucjw= +github.com/holiman/big v0.0.0-20221017200358-a027dc42d04e/go.mod h1:j9cQbcqHQujT0oKJ38PylVfqohClLr3CvDC+Qcg+lhU= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM= @@ -326,11 +335,7 @@ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7Bd github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jrick/logrotate v1.0.0 h1:lQ1bL/n9mBNeIXoTUoYRlK4dHuNJVofX9oWqBtPnSzI= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -348,8 +353,6 @@ github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod 
h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/kkdai/bstream v1.0.0 h1:Se5gHwgp2VT2uHfDrkbbgbgEvV9cimLELwrPJctSjg8= -github.com/kkdai/bstream v1.0.0/go.mod h1:FDnDOHt5Yx4p3FaHcioFT0QjDOtgUpvjeZqAs+NVZZA= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= @@ -359,10 +362,12 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -375,7 +380,6 @@ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaO github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -392,8 +396,6 @@ github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4f github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= -github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ -414,8 +416,6 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96d github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= -github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= 
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= @@ -432,8 +432,8 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= @@ -460,8 +460,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= 
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -471,19 +472,21 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38ic= github.com/prometheus/tsdb v0.10.0/go.mod 
h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc= -github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8= -github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM= +github.com/rjeczalik/notify v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZeY= +github.com/rjeczalik/notify v0.9.3/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= @@ -519,6 +522,8 @@ github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 h1:Oo2KZNP70K github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -526,10 +531,15 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295 h1:rVKS9JjtqE4/PscoIsP46sRnJhfq8YFbjlk0fUJTRnY= +github.com/supranational/blst v0.3.11-0.20220920110316-f72618070295/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a h1:1ur3QoCqvE5fl+nylMaIr9PVV1w343YRDtsy+Rwu7XI= github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= @@ -561,20 +571,34 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/otel v1.11.0 h1:kfToEGMDq6TrVrJ9Vht84Y8y9enykSZzDDZglV0kIEk= +go.opentelemetry.io/otel v1.11.0/go.mod h1:H2KtuEphyMvlhZ+F7tg9GRhAOe60moNx61Ex+WmiKkk= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0 h1:0dly5et1i/6Th3WHn0M6kYiJfFNzhhxanrJ0bOfnjEo= 
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.0/go.mod h1:+Lq4/WkdCkjbGcBMVHHg2apTbv8oMBf29QCnyCCJjNQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0 h1:eyJ6njZmH16h9dOKCi7lMswAnGsSOwgTqWzfxqcuNr8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.0/go.mod h1:FnDp7XemjN3oZ3xGunnfOUTVwd2XcvLbtRAuOSU3oc8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0 h1:j2RFV0Qdt38XQ2Jvi4WIsQ56w8T7eSirYbMw19VXRDg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.0/go.mod h1:pILgiTEtrqvZpoiuGdblDgS5dbIaTgDrkIuKfEFkt+A= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0 h1:v29I/NbVp7LXQYMFZhU6q17D0jSEbYOAVONlrO1oH5s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.0/go.mod h1:/RpLsmbQLDO1XCbWAM4S6TSwj8FKwwgyKKyqtvVfAnw= +go.opentelemetry.io/otel/sdk v1.11.0 h1:ZnKIL9V9Ztaq+ME43IUi/eo22mNsb6a7tGfzaOWB5fo= +go.opentelemetry.io/otel/sdk v1.11.0/go.mod h1:REusa8RsyKaq0OlyangWXaw97t2VogoO4SSEeKkSTAk= +go.opentelemetry.io/otel/trace v1.11.0 h1:20U/Vj42SX+mASlXLmSGBg6jpI1jQtv682lZtTAOVFI= +go.opentelemetry.io/otel/trace v1.11.0/go.mod h1:nyYjis9jy0gytE9LXGU+/m1sHTKbRY0fX0hulNNDP1U= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/atomic v1.10.0 
h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -602,6 +626,7 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5 h1:rxKZ2gOnYxjfmakvUUqh9Gyb6KXfrj7JWTxORTYqb0E= +golang.org/x/exp v0.0.0-20220426173459-3bcf042a4bf5/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= 
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -668,10 +693,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0= -golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -682,6 +708,8 @@ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -692,8 +720,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -749,7 +778,6 @@ golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210316164454-77fc1eacc6aa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -762,14 +790,15 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term 
v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -778,8 +807,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -841,7 +871,6 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -920,8 +949,9 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d h1:YbuF5+kdiC516xIP60RvlHeFbY9sRDR73QsAGHpkeVw= -google.golang.org/genproto v0.0.0-20220712132514-bdd2acd4974d/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c h1:QgY/XxIAIeccR+Ca/rDdKubLIU9rcJ3xfy1DC/Wd2Oo= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -940,9 +970,10 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.47.0/go.mod 
h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= -google.golang.org/grpc v1.49.0 h1:WTLtQzmQori5FUH25Pq4WT22oCsv8USpQ+F6rqtsmxw= -google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.50.1 h1:DS/BukOZWp8s6p4Dt/tOaJaTQyPyOoCcrjroHuCeLzY= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -956,13 +987,14 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 
h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= @@ -980,7 +1012,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/coreth/internal/ethapi/api.go b/coreth/internal/ethapi/api.go index 2be321b6..79d8ddf5 100644 --- a/coreth/internal/ethapi/api.go +++ b/coreth/internal/ethapi/api.go @@ -138,11 +138,6 @@ func (s *EthereumAPI) Syncing() (interface{}, error) { return false, nil } -// GetChainConfig returns the chain config. -func (s *EthereumAPI) GetChainConfig(ctx context.Context) *params.ChainConfig { - return s.b.ChainConfig() -} - // TxPoolAPI offers and API for the transaction pool. It only operates on data that is non confidential. type TxPoolAPI struct { b Backend @@ -616,6 +611,11 @@ func (api *BlockChainAPI) ChainId() *hexutil.Big { return (*hexutil.Big)(api.b.ChainConfig().ChainID) } +// GetChainConfig returns the chain config. 
+func (api *BlockChainAPI) GetChainConfig(ctx context.Context) *params.ChainConfig { + return api.b.ChainConfig() +} + // BlockNumber returns the block number of the chain head. func (s *BlockChainAPI) BlockNumber() hexutil.Uint64 { header, _ := s.b.HeaderByNumber(context.Background(), rpc.LatestBlockNumber) // latest header should always be available @@ -1243,9 +1243,6 @@ func RPCMarshalHeader(head *types.Header) map[string]interface{} { if head.BlockGasCost != nil { result["blockGasCost"] = (*hexutil.Big)(head.BlockGasCost) } - if head.ExtraStateRoot != (common.Hash{}) { - result["extraStateRoot"] = head.ExtraStateRoot - } return result } @@ -1773,7 +1770,7 @@ func SubmitTransaction(ctx context.Context, b Backend, tx *types.Transaction) (c if err := checkTxFee(tx.GasPrice(), tx.Gas(), b.RPCTxFeeCap()); err != nil { return common.Hash{}, err } - if !b.UnprotectedAllowed() && !tx.Protected() { + if !b.UnprotectedAllowed(tx) && !tx.Protected() { // Ensure only eip155 signed transactions are submitted if EIP155Required is set. return common.Hash{}, errors.New("only replay-protected (EIP-155) transactions allowed over RPC") } @@ -2007,25 +2004,45 @@ func NewDebugAPI(b Backend) *DebugAPI { return &DebugAPI{b: b} } -// GetHeaderRlp retrieves the RLP encoded for of a single header. -func (api *DebugAPI) GetHeaderRlp(ctx context.Context, number uint64) (hexutil.Bytes, error) { - header, _ := api.b.HeaderByNumber(ctx, rpc.BlockNumber(number)) +// GetRawHeader retrieves the RLP encoding for a single header. 
+func (api *DebugAPI) GetRawHeader(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { + var hash common.Hash + if h, ok := blockNrOrHash.Hash(); ok { + hash = h + } else { + block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash) + if err != nil { + return nil, err + } + hash = block.Hash() + } + header, _ := api.b.HeaderByHash(ctx, hash) if header == nil { - return nil, fmt.Errorf("header #%d not found", number) + return nil, fmt.Errorf("header #%d not found", hash) } return rlp.EncodeToBytes(header) } -// GetBlockRlp retrieves the RLP encoded for of a single block. -func (api *DebugAPI) GetBlockRlp(ctx context.Context, number uint64) (hexutil.Bytes, error) { - block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number)) +// GetRawBlock retrieves the RLP encoded for a single block. +func (api *DebugAPI) GetRawBlock(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) (hexutil.Bytes, error) { + var hash common.Hash + if h, ok := blockNrOrHash.Hash(); ok { + hash = h + } else { + block, err := api.b.BlockByNumberOrHash(ctx, blockNrOrHash) + if err != nil { + return nil, err + } + hash = block.Hash() + } + block, _ := api.b.BlockByHash(ctx, hash) if block == nil { - return nil, fmt.Errorf("block #%d not found", number) + return nil, fmt.Errorf("block #%d not found", hash) } return rlp.EncodeToBytes(block) } -// GetRawReceipts retrieves the binary-encoded raw receipts of a single block. +// GetRawReceipts retrieves the binary-encoded receipts of a single block. func (api *DebugAPI) GetRawReceipts(ctx context.Context, blockNrOrHash rpc.BlockNumberOrHash) ([]hexutil.Bytes, error) { var hash common.Hash if h, ok := blockNrOrHash.Hash(); ok { @@ -2052,6 +2069,22 @@ func (api *DebugAPI) GetRawReceipts(ctx context.Context, blockNrOrHash rpc.Block return result, nil } +// GetRawTransaction returns the bytes of the transaction for the given hash. 
+func (s *DebugAPI) GetRawTransaction(ctx context.Context, hash common.Hash) (hexutil.Bytes, error) { + // Retrieve a finalized transaction, or a pooled otherwise + tx, _, _, _, err := s.b.GetTransaction(ctx, hash) + if err != nil { + return nil, err + } + if tx == nil { + if tx = s.b.GetPoolTransaction(hash); tx == nil { + // Transaction not found anywhere, abort + return nil, nil + } + } + return tx.MarshalBinary() +} + // PrintBlock retrieves a block and returns its pretty printed form. func (api *DebugAPI) PrintBlock(ctx context.Context, number uint64) (string, error) { block, _ := api.b.BlockByNumber(ctx, rpc.BlockNumber(number)) diff --git a/coreth/internal/ethapi/backend.go b/coreth/internal/ethapi/backend.go index 75b6af60..98a28def 100644 --- a/coreth/internal/ethapi/backend.go +++ b/coreth/internal/ethapi/backend.go @@ -57,10 +57,10 @@ type Backend interface { ChainDb() ethdb.Database AccountManager() *accounts.Manager ExtRPCEnabled() bool - RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection - RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection - RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs - UnprotectedAllowed() bool // allows only for EIP155 transactions. + RPCGasCap() uint64 // global gas cap for eth_call over rpc: DoS protection + RPCEVMTimeout() time.Duration // global timeout for eth_call over rpc: DoS protection + RPCTxFeeCap() float64 // global tx fee cap for all transaction related APIs + UnprotectedAllowed(tx *types.Transaction) bool // allows only for EIP155 transactions. 
// Blockchain API HeaderByNumber(ctx context.Context, number rpc.BlockNumber) (*types.Header, error) diff --git a/coreth/internal/flags/helpers.go b/coreth/internal/flags/helpers.go index 8bbc5601..cc935c0f 100644 --- a/coreth/internal/flags/helpers.go +++ b/coreth/internal/flags/helpers.go @@ -64,11 +64,11 @@ var migrationApplied = map[*cli.Command]struct{}{} // // Example: // -// geth account new --keystore /tmp/mykeystore --lightkdf +// geth account new --keystore /tmp/mykeystore --lightkdf // // is equivalent after calling this method with: // -// geth --keystore /tmp/mykeystore --lightkdf account new +// geth --keystore /tmp/mykeystore --lightkdf account new // // i.e. in the subcommand Action function of 'account new', ctx.Bool("lightkdf) // will return true even if --lightkdf is set as a global option. diff --git a/coreth/metrics/runtime_test.go b/coreth/metrics/runtime_test.go index ddc49f03..e011bf59 100644 --- a/coreth/metrics/runtime_test.go +++ b/coreth/metrics/runtime_test.go @@ -1,6 +1,7 @@ package metrics import ( + "os" "runtime" "testing" "time" @@ -16,7 +17,9 @@ func BenchmarkRuntimeMemStats(b *testing.B) { } func TestRuntimeMemStats(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } r := NewRegistry() RegisterRuntimeMemStats(r) CaptureRuntimeMemStatsOnce(r) @@ -49,7 +52,9 @@ func TestRuntimeMemStats(t *testing.T) { } func TestRuntimeMemStatsNumThread(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } r := NewRegistry() RegisterRuntimeMemStats(r) CaptureRuntimeMemStatsOnce(r) @@ -60,7 +65,9 @@ func TestRuntimeMemStatsNumThread(t *testing.T) { } func TestRuntimeMemStatsBlocking(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } if g := runtime.GOMAXPROCS(0); g < 2 { t.Skipf("skipping TestRuntimeMemStatsBlocking with GOMAXPROCS=%d\n", g) } diff --git a/coreth/miner/worker.go b/coreth/miner/worker.go 
index da3f85d8..5c7013bf 100644 --- a/coreth/miner/worker.go +++ b/coreth/miner/worker.go @@ -40,19 +40,19 @@ import ( "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/coreth/consensus" "github.com/ava-labs/coreth/consensus/dummy" - "github.com/ava-labs/coreth/consensus/misc" "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/params" - "github.com/ava-labs/coreth/vmerrs" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/log" ) const ( - targetTxsSize = 192 * units.KiB + // Leaves 256 KBs for other sections of the block (limit is 2MB). + // This should suffice for atomic txs, proposervm header, and serialization overhead. + targetTxsSize = 1792 * units.KiB ) // environment is the worker's current environment and holds all of the current state information. @@ -129,23 +129,27 @@ func (w *worker) commitNewWork() (*types.Block, error) { } var gasLimit uint64 - if w.chainConfig.IsSongbirdCode() { - if w.chainConfig.IsSongbirdTransition(big.NewInt(timestamp)) { - gasLimit = params.SgbTransitionGasLimit - } else if w.chainConfig.IsApricotPhase5(big.NewInt(timestamp)) { - gasLimit = params.SgbApricotPhase5GasLimit - } else if w.chainConfig.IsApricotPhase1(big.NewInt(timestamp)) { - gasLimit = params.ApricotPhase1GasLimit - } else { - gasLimit = core.CalcGasLimit(parent.GasUsed(), parent.GasLimit(), params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) - } + if w.chainConfig.IsCortina(big.NewInt(timestamp)) { + gasLimit = params.CortinaGasLimit } else { - if w.chainConfig.IsApricotPhase1(big.NewInt(timestamp)) { - gasLimit = params.ApricotPhase1GasLimit + if w.chainConfig.IsSongbirdCode() { + if w.chainConfig.IsSongbirdTransition(big.NewInt(timestamp)) { + gasLimit = params.SgbTransitionGasLimit + } else if w.chainConfig.IsApricotPhase5(big.NewInt(timestamp)) { + gasLimit = 
params.SgbApricotPhase5GasLimit + } else if w.chainConfig.IsApricotPhase1(big.NewInt(timestamp)) { + gasLimit = params.ApricotPhase1GasLimit + } else { + gasLimit = core.CalcGasLimit(parent.GasUsed(), parent.GasLimit(), params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) + } } else { - // The gas limit is set in phase1 to ApricotPhase1GasLimit because the ceiling and floor were set to the same value - // such that the gas limit converged to it. Since this is hardbaked now, we remove the ability to configure it. - gasLimit = core.CalcGasLimit(parent.GasUsed(), parent.GasLimit(), params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) + if w.chainConfig.IsApricotPhase1(big.NewInt(timestamp)) { + gasLimit = params.ApricotPhase1GasLimit + } else { + // The gas limit is set in phase1 to ApricotPhase1GasLimit because the ceiling and floor were set to the same value + // such that the gas limit converged to it. Since this is hardbaked now, we remove the ability to configure it. + gasLimit = core.CalcGasLimit(parent.GasUsed(), parent.GasLimit(), params.ApricotPhase1GasLimit, params.ApricotPhase1GasLimit) + } } } num := parent.Number() @@ -177,9 +181,6 @@ func (w *worker) commitNewWork() (*types.Block, error) { if err != nil { return nil, fmt.Errorf("failed to create new current environment: %w", err) } - if w.chainConfig.DAOForkSupport && w.chainConfig.DAOForkBlock != nil && w.chainConfig.DAOForkBlock.Cmp(header.Number) == 0 { - misc.ApplyDAOHardFork(env.state) - } // Configure any stateful precompiles that should go into effect during this block. 
w.chainConfig.CheckConfigurePrecompiles(new(big.Int).SetUint64(parent.Time()), types.NewBlockWithHeader(header), env.state) @@ -300,10 +301,6 @@ func (w *worker) commitTransactions(env *environment, txs *types.TransactionsByP log.Trace("Skipping unsupported transaction type", "sender", from, "type", tx.Type()) txs.Pop() - case errors.Is(err, vmerrs.ErrToAddrProhibitedSoft): - log.Warn("Tx dropped: failed verification", "tx", tx.Hash(), "sender", from, "data", tx.Data(), "err", err) - w.eth.TxPool().RemoveTx(tx.Hash()) - txs.Pop() default: // Strange error, discard the transaction and get the next in line (note, the // nonce-too-high clause will prevent us from executing in vain). diff --git a/coreth/node/node.go b/coreth/node/node.go index da935bc9..2cc4551c 100644 --- a/coreth/node/node.go +++ b/coreth/node/node.go @@ -35,8 +35,6 @@ import ( type Node struct { config *Config accman *accounts.Manager - - corethVersion string } // New creates a new P2P node, ready for protocol registration. 
diff --git a/coreth/params/avalanche_params.go b/coreth/params/avalanche_params.go index e39f5bbe..61a71ff0 100644 --- a/coreth/params/avalanche_params.go +++ b/coreth/params/avalanche_params.go @@ -19,6 +19,7 @@ const ( AvalancheAtomicTxFee = units.MilliAvax ApricotPhase1GasLimit uint64 = 8_000_000 + CortinaGasLimit uint64 = 15_000_000 // Revert to the default gas limit after the Songbird transition due to re-enabled transfer between chains SgbTransitionGasLimit uint64 = 8_000_000 diff --git a/coreth/params/config.go b/coreth/params/config.go index 90312019..09b84cae 100644 --- a/coreth/params/config.go +++ b/coreth/params/config.go @@ -83,7 +83,8 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC).Unix()), ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 7, 3, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC).Unix()), - // TODO Add Banff and Cortina timestamps + CortinaBlockTimestamp: big.NewInt(time.Date(2023, time.April, 25, 15, 0, 0, 0, time.UTC).Unix()), + // TODO Add DUpgrade timestamp } // AvalancheFujiChainConfig is the configuration for the Fuji Test Network @@ -110,7 +111,8 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC).Unix()), ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2022, time.September, 7, 6, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2022, time.October, 3, 14, 0, 0, 0, time.UTC).Unix()), - // TODO add Cortina timestamp + CortinaBlockTimestamp: big.NewInt(time.Date(2023, time.April, 6, 15, 0, 0, 0, time.UTC).Unix()), + // TODO Add DUpgrade timestamp } // AvalancheLocalChainConfig is the configuration for the Avalanche Local Network @@ -137,6 +139,7 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(0), ApricotPhasePost6BlockTimestamp: big.NewInt(0), BanffBlockTimestamp: big.NewInt(0), + 
CortinaBlockTimestamp: big.NewInt(0), } FlareChainConfig = &ChainConfig{ @@ -162,6 +165,7 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2024, time.December, 17, 13, 0, 0, 0, time.UTC).Unix()), ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2024, time.December, 17, 14, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2024, time.December, 17, 15, 0, 0, 0, time.UTC).Unix()), + CortinaBlockTimestamp: big.NewInt(time.Date(10000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()), } CostwoChainConfig = &ChainConfig{ @@ -187,6 +191,7 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2024, time.November, 26, 13, 0, 0, 0, time.UTC).Unix()), ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2024, time.November, 26, 14, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2024, time.November, 26, 15, 0, 0, 0, time.UTC).Unix()), + CortinaBlockTimestamp: big.NewInt(time.Date(10000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()), } StagingChainConfig = &ChainConfig{ @@ -212,6 +217,7 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(0), ApricotPhasePost6BlockTimestamp: big.NewInt(0), BanffBlockTimestamp: big.NewInt(0), + CortinaBlockTimestamp: big.NewInt(0), } LocalFlareChainConfig = &ChainConfig{ @@ -237,6 +243,7 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(0), ApricotPhasePost6BlockTimestamp: big.NewInt(0), BanffBlockTimestamp: big.NewInt(0), + CortinaBlockTimestamp: big.NewInt(0), } // CostonChainConfig is the configuration for the Coston test network. 
@@ -264,6 +271,7 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 7, 13, 0, 0, 0, time.UTC).Unix()), ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 7, 14, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2025, time.January, 7, 15, 0, 0, 0, time.UTC).Unix()), + CortinaBlockTimestamp: big.NewInt(time.Date(10000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()), } // LocalChainConfig is the configuration for the Songbird Local network. @@ -291,7 +299,7 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(0), ApricotPhasePost6BlockTimestamp: big.NewInt(0), BanffBlockTimestamp: big.NewInt(0), - // TODO add Cortina timestamp + CortinaBlockTimestamp: big.NewInt(0), } // SongbirdChainConfig is the configuration for the Songbird canary network. @@ -319,21 +327,23 @@ var ( ApricotPhase6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 28, 13, 0, 0, 0, time.UTC).Unix()), ApricotPhasePost6BlockTimestamp: big.NewInt(time.Date(2025, time.January, 28, 14, 0, 0, 0, time.UTC).Unix()), BanffBlockTimestamp: big.NewInt(time.Date(2025, time.January, 28, 15, 0, 0, 0, time.UTC).Unix()), - } - - TestChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} - TestLaunchConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase1Config = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), 
big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase2Config = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase3Config = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase4Config = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil} - TestApricotPhase5Config = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil} - TestApricotPhasePre6Config = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil} - TestApricotPhase6Config = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, 
nil, nil} - TestApricotPhasePost6Config = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil} - TestSgbTransitionChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil} - TestBanffChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil} - TestCortinaChainConfig = &ChainConfig{big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} + CortinaBlockTimestamp: big.NewInt(time.Date(10000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()), + } + + TestChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), 
big.NewInt(0), big.NewInt(0)} + TestLaunchConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase1Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase2Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase3Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase4Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil, nil, nil} + TestApricotPhase5Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), 
big.NewInt(0), nil, nil, nil, nil, nil, nil, nil} + TestSgbTransitionChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil, nil} + TestApricotPhasePre6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil, nil} + TestApricotPhase6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil, nil} + TestApricotPhasePost6Config = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil, nil, nil} + TestBanffChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), 
big.NewInt(0), nil, nil} + TestCortinaChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), nil} + TestDUpgradeChainConfig = &ChainConfig{AvalancheContext{common.Hash{1}}, big.NewInt(1), big.NewInt(0), nil, false, big.NewInt(0), common.Hash{}, big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0), big.NewInt(0)} TestRules = TestChainConfig.AvalancheRules(new(big.Int), new(big.Int)) ) @@ -343,6 +353,8 @@ var ( // that any network, identified by its genesis block, can have its own // set of configuration options. type ChainConfig struct { + AvalancheContext `json:"-"` // Avalanche specific context set during VM initialization. Not serialized. + ChainID *big.Int `json:"chainId"` // chainId identifies the current chain and is used for replay protection HomesteadBlock *big.Int `json:"homesteadBlock,omitempty"` // Homestead switch block (nil = no fork, 0 = already homestead) @@ -382,10 +394,17 @@ type ChainConfig struct { ApricotPhase6BlockTimestamp *big.Int `json:"apricotPhase6BlockTimestamp,omitempty"` // Apricot Phase Post-6 deprecates the NativeAssetCall precompile (soft). (nil = no fork, 0 = already activated) ApricotPhasePost6BlockTimestamp *big.Int `json:"apricotPhasePost6BlockTimestamp,omitempty"` - // Banff TODO comment. (nil = no fork, 0 = already activated) + // Banff restricts import/export transactions to AVAX. 
(nil = no fork, 0 = already activated) BanffBlockTimestamp *big.Int `json:"banffBlockTimestamp,omitempty"` - // Cortina TODO comment. (nil = no fork, 0 = already activated) + // Cortina increases the block gas limit to 15M. (nil = no fork, 0 = already activated) CortinaBlockTimestamp *big.Int `json:"cortinaBlockTimestamp,omitempty"` + // DUpgrade activates the Shanghai upgrade from Ethereum. (nil = no fork, 0 = already activated) + DUpgradeBlockTimestamp *big.Int `json:"dUpgradeBlockTimestamp,omitempty"` +} + +// AvalancheContext provides Avalanche specific context directly into the EVM. +type AvalancheContext struct { + BlockchainID common.Hash } // String implements the fmt.Stringer interface. @@ -424,6 +443,7 @@ func (c *ChainConfig) String() string { banner += fmt.Sprintf(" - Apricot Phase Post-6 Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.8.0\n", c.ApricotPhasePost6BlockTimestamp) banner += fmt.Sprintf(" - Banff Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.9.0)\n", c.BanffBlockTimestamp) banner += fmt.Sprintf(" - Cortina Timestamp: %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0)\n", c.CortinaBlockTimestamp) + banner += fmt.Sprintf(" - DUpgrade Timestamp %-8v (https://github.com/ava-labs/avalanchego/releases/tag/v1.11.0)\n", c.DUpgradeBlockTimestamp) banner += "\n" return banner } @@ -553,6 +573,12 @@ func (c *ChainConfig) IsCortina(blockTimestamp *big.Int) bool { return utils.IsForked(c.CortinaBlockTimestamp, blockTimestamp) } +// IsDUpgrade returns whether [blockTimestamp] represents a block +// with a timestamp after the DUpgrade upgrade time. +func (c *ChainConfig) IsDUpgrade(blockTimestamp *big.Int) bool { + return utils.IsForked(c.DUpgradeBlockTimestamp, blockTimestamp) +} + // CheckCompatible checks whether scheduled fork transitions have been imported // with a mismatching chain configuration. 
func (c *ChainConfig) CheckCompatible(newcfg *ChainConfig, height uint64, timestamp uint64) *ConfigCompatError { @@ -633,6 +659,7 @@ func (c *ChainConfig) CheckConfigForkOrder() error { {name: "apricotPhasePost6BlockTimestamp", block: c.ApricotPhasePost6BlockTimestamp}, {name: "banffBlockTimestamp", block: c.BanffBlockTimestamp}, {name: "cortinaBlockTimestamp", block: c.CortinaBlockTimestamp}, + {name: "dUpgradeBlockTimestamp", block: c.DUpgradeBlockTimestamp}, } { if lastFork.name != "" { // Next one must be higher number @@ -715,15 +742,24 @@ func (c *ChainConfig) checkCompatible(newcfg *ChainConfig, lastHeight *big.Int, if isForkIncompatible(c.ApricotPhase5BlockTimestamp, newcfg.ApricotPhase5BlockTimestamp, lastTimestamp) { return newCompatError("ApricotPhase5 fork block timestamp", c.ApricotPhase5BlockTimestamp, newcfg.ApricotPhase5BlockTimestamp) } - // TODO: add Phase 6 checks - - // TODO activate isForkIncompatible checks - // if isForkIncompatible(c.BanffBlockTimestamp, newcfg.BanffBlockTimestamp, lastTimestamp) { - // return newCompatError("Banff fork block timestamp", c.BanffBlockTimestamp, newcfg.BanffBlockTimestamp) - // } - // if isForkIncompatible(c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp, lastTimestamp) { - // return newCompatError("Cortina fork block timestamp", c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp) - // } + if isForkIncompatible(c.ApricotPhasePre6BlockTimestamp, newcfg.ApricotPhasePre6BlockTimestamp, lastTimestamp) { + return newCompatError("ApricotPhasePre6 fork block timestamp", c.ApricotPhasePre6BlockTimestamp, newcfg.ApricotPhasePre6BlockTimestamp) + } + if isForkIncompatible(c.ApricotPhase6BlockTimestamp, newcfg.ApricotPhase6BlockTimestamp, lastTimestamp) { + return newCompatError("ApricotPhase6 fork block timestamp", c.ApricotPhase6BlockTimestamp, newcfg.ApricotPhase6BlockTimestamp) + } + if isForkIncompatible(c.ApricotPhasePost6BlockTimestamp, newcfg.ApricotPhasePost6BlockTimestamp, lastTimestamp) { + return 
newCompatError("ApricotPhasePost6 fork block timestamp", c.ApricotPhasePost6BlockTimestamp, newcfg.ApricotPhasePost6BlockTimestamp) + } + if isForkIncompatible(c.BanffBlockTimestamp, newcfg.BanffBlockTimestamp, lastTimestamp) { + return newCompatError("Banff fork block timestamp", c.BanffBlockTimestamp, newcfg.BanffBlockTimestamp) + } + if isForkIncompatible(c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp, lastTimestamp) { + return newCompatError("Cortina fork block timestamp", c.CortinaBlockTimestamp, newcfg.CortinaBlockTimestamp) + } + if isForkIncompatible(c.DUpgradeBlockTimestamp, newcfg.DUpgradeBlockTimestamp, lastTimestamp) { + return newCompatError("DUpgrade fork block timestamp", c.DUpgradeBlockTimestamp, newcfg.DUpgradeBlockTimestamp) + } return nil } @@ -789,7 +825,7 @@ type Rules struct { IsApricotPhasePre6, IsApricotPhase6, IsApricotPhasePost6 bool IsBanff bool IsCortina bool - IsClementine bool + IsDUpgrade bool // Songbird (coston, local) IsSongbirdCode bool @@ -838,6 +874,7 @@ func (c *ChainConfig) AvalancheRules(blockNum, blockTimestamp *big.Int) Rules { rules.IsSongbirdTransition = c.IsSongbirdTransition(blockTimestamp) rules.IsBanff = c.IsBanff(blockTimestamp) rules.IsCortina = c.IsCortina(blockTimestamp) + rules.IsDUpgrade = c.IsDUpgrade(blockTimestamp) // Initialize the stateful precompiles that should be enabled at [blockTimestamp]. rules.Precompiles = make(map[common.Address]precompile.StatefulPrecompiledContract) diff --git a/coreth/params/dao.go b/coreth/params/dao.go deleted file mode 100644 index 3d76d34e..00000000 --- a/coreth/params/dao.go +++ /dev/null @@ -1,168 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. 
-// ********** -// Copyright 2016 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package params - -import ( - "math/big" - - "github.com/ethereum/go-ethereum/common" -) - -// DAOForkBlockExtra is the block header extra-data field to set for the DAO fork -// point and a number of consecutive blocks to allow fast/light syncers to correctly -// pick the side they want ("dao-hard-fork"). -var DAOForkBlockExtra = common.FromHex("0x64616f2d686172642d666f726b") - -// DAOForkExtraRange is the number of consecutive blocks from the DAO fork point -// to override the extra-data in to prevent no-fork attacks. -var DAOForkExtraRange = big.NewInt(10) - -// DAORefundContract is the address of the refund contract to send DAO balances to. -var DAORefundContract = common.HexToAddress("0xbf4ed7b27f1d666546e30d74d50d173d20bca754") - -// DAODrainList is the list of accounts whose full balances will be moved into a -// refund contract at the beginning of the dao-fork block. 
-func DAODrainList() []common.Address { - return []common.Address{ - common.HexToAddress("0xd4fe7bc31cedb7bfb8a345f31e668033056b2728"), - common.HexToAddress("0xb3fb0e5aba0e20e5c49d252dfd30e102b171a425"), - common.HexToAddress("0x2c19c7f9ae8b751e37aeb2d93a699722395ae18f"), - common.HexToAddress("0xecd135fa4f61a655311e86238c92adcd779555d2"), - common.HexToAddress("0x1975bd06d486162d5dc297798dfc41edd5d160a7"), - common.HexToAddress("0xa3acf3a1e16b1d7c315e23510fdd7847b48234f6"), - common.HexToAddress("0x319f70bab6845585f412ec7724b744fec6095c85"), - common.HexToAddress("0x06706dd3f2c9abf0a21ddcc6941d9b86f0596936"), - common.HexToAddress("0x5c8536898fbb74fc7445814902fd08422eac56d0"), - common.HexToAddress("0x6966ab0d485353095148a2155858910e0965b6f9"), - common.HexToAddress("0x779543a0491a837ca36ce8c635d6154e3c4911a6"), - common.HexToAddress("0x2a5ed960395e2a49b1c758cef4aa15213cfd874c"), - common.HexToAddress("0x5c6e67ccd5849c0d29219c4f95f1a7a93b3f5dc5"), - common.HexToAddress("0x9c50426be05db97f5d64fc54bf89eff947f0a321"), - common.HexToAddress("0x200450f06520bdd6c527622a273333384d870efb"), - common.HexToAddress("0xbe8539bfe837b67d1282b2b1d61c3f723966f049"), - common.HexToAddress("0x6b0c4d41ba9ab8d8cfb5d379c69a612f2ced8ecb"), - common.HexToAddress("0xf1385fb24aad0cd7432824085e42aff90886fef5"), - common.HexToAddress("0xd1ac8b1ef1b69ff51d1d401a476e7e612414f091"), - common.HexToAddress("0x8163e7fb499e90f8544ea62bbf80d21cd26d9efd"), - common.HexToAddress("0x51e0ddd9998364a2eb38588679f0d2c42653e4a6"), - common.HexToAddress("0x627a0a960c079c21c34f7612d5d230e01b4ad4c7"), - common.HexToAddress("0xf0b1aa0eb660754448a7937c022e30aa692fe0c5"), - common.HexToAddress("0x24c4d950dfd4dd1902bbed3508144a54542bba94"), - common.HexToAddress("0x9f27daea7aca0aa0446220b98d028715e3bc803d"), - common.HexToAddress("0xa5dc5acd6a7968a4554d89d65e59b7fd3bff0f90"), - common.HexToAddress("0xd9aef3a1e38a39c16b31d1ace71bca8ef58d315b"), - common.HexToAddress("0x63ed5a272de2f6d968408b4acb9024f4cc208ebf"), 
- common.HexToAddress("0x6f6704e5a10332af6672e50b3d9754dc460dfa4d"), - common.HexToAddress("0x77ca7b50b6cd7e2f3fa008e24ab793fd56cb15f6"), - common.HexToAddress("0x492ea3bb0f3315521c31f273e565b868fc090f17"), - common.HexToAddress("0x0ff30d6de14a8224aa97b78aea5388d1c51c1f00"), - common.HexToAddress("0x9ea779f907f0b315b364b0cfc39a0fde5b02a416"), - common.HexToAddress("0xceaeb481747ca6c540a000c1f3641f8cef161fa7"), - common.HexToAddress("0xcc34673c6c40e791051898567a1222daf90be287"), - common.HexToAddress("0x579a80d909f346fbfb1189493f521d7f48d52238"), - common.HexToAddress("0xe308bd1ac5fda103967359b2712dd89deffb7973"), - common.HexToAddress("0x4cb31628079fb14e4bc3cd5e30c2f7489b00960c"), - common.HexToAddress("0xac1ecab32727358dba8962a0f3b261731aad9723"), - common.HexToAddress("0x4fd6ace747f06ece9c49699c7cabc62d02211f75"), - common.HexToAddress("0x440c59b325d2997a134c2c7c60a8c61611212bad"), - common.HexToAddress("0x4486a3d68fac6967006d7a517b889fd3f98c102b"), - common.HexToAddress("0x9c15b54878ba618f494b38f0ae7443db6af648ba"), - common.HexToAddress("0x27b137a85656544b1ccb5a0f2e561a5703c6a68f"), - common.HexToAddress("0x21c7fdb9ed8d291d79ffd82eb2c4356ec0d81241"), - common.HexToAddress("0x23b75c2f6791eef49c69684db4c6c1f93bf49a50"), - common.HexToAddress("0x1ca6abd14d30affe533b24d7a21bff4c2d5e1f3b"), - common.HexToAddress("0xb9637156d330c0d605a791f1c31ba5890582fe1c"), - common.HexToAddress("0x6131c42fa982e56929107413a9d526fd99405560"), - common.HexToAddress("0x1591fc0f688c81fbeb17f5426a162a7024d430c2"), - common.HexToAddress("0x542a9515200d14b68e934e9830d91645a980dd7a"), - common.HexToAddress("0xc4bbd073882dd2add2424cf47d35213405b01324"), - common.HexToAddress("0x782495b7b3355efb2833d56ecb34dc22ad7dfcc4"), - common.HexToAddress("0x58b95c9a9d5d26825e70a82b6adb139d3fd829eb"), - common.HexToAddress("0x3ba4d81db016dc2890c81f3acec2454bff5aada5"), - common.HexToAddress("0xb52042c8ca3f8aa246fa79c3feaa3d959347c0ab"), - 
common.HexToAddress("0xe4ae1efdfc53b73893af49113d8694a057b9c0d1"), - common.HexToAddress("0x3c02a7bc0391e86d91b7d144e61c2c01a25a79c5"), - common.HexToAddress("0x0737a6b837f97f46ebade41b9bc3e1c509c85c53"), - common.HexToAddress("0x97f43a37f595ab5dd318fb46e7a155eae057317a"), - common.HexToAddress("0x52c5317c848ba20c7504cb2c8052abd1fde29d03"), - common.HexToAddress("0x4863226780fe7c0356454236d3b1c8792785748d"), - common.HexToAddress("0x5d2b2e6fcbe3b11d26b525e085ff818dae332479"), - common.HexToAddress("0x5f9f3392e9f62f63b8eac0beb55541fc8627f42c"), - common.HexToAddress("0x057b56736d32b86616a10f619859c6cd6f59092a"), - common.HexToAddress("0x9aa008f65de0b923a2a4f02012ad034a5e2e2192"), - common.HexToAddress("0x304a554a310c7e546dfe434669c62820b7d83490"), - common.HexToAddress("0x914d1b8b43e92723e64fd0a06f5bdb8dd9b10c79"), - common.HexToAddress("0x4deb0033bb26bc534b197e61d19e0733e5679784"), - common.HexToAddress("0x07f5c1e1bc2c93e0402f23341973a0e043f7bf8a"), - common.HexToAddress("0x35a051a0010aba705c9008d7a7eff6fb88f6ea7b"), - common.HexToAddress("0x4fa802324e929786dbda3b8820dc7834e9134a2a"), - common.HexToAddress("0x9da397b9e80755301a3b32173283a91c0ef6c87e"), - common.HexToAddress("0x8d9edb3054ce5c5774a420ac37ebae0ac02343c6"), - common.HexToAddress("0x0101f3be8ebb4bbd39a2e3b9a3639d4259832fd9"), - common.HexToAddress("0x5dc28b15dffed94048d73806ce4b7a4612a1d48f"), - common.HexToAddress("0xbcf899e6c7d9d5a215ab1e3444c86806fa854c76"), - common.HexToAddress("0x12e626b0eebfe86a56d633b9864e389b45dcb260"), - common.HexToAddress("0xa2f1ccba9395d7fcb155bba8bc92db9bafaeade7"), - common.HexToAddress("0xec8e57756626fdc07c63ad2eafbd28d08e7b0ca5"), - common.HexToAddress("0xd164b088bd9108b60d0ca3751da4bceb207b0782"), - common.HexToAddress("0x6231b6d0d5e77fe001c2a460bd9584fee60d409b"), - common.HexToAddress("0x1cba23d343a983e9b5cfd19496b9a9701ada385f"), - common.HexToAddress("0xa82f360a8d3455c5c41366975bde739c37bfeb8a"), - common.HexToAddress("0x9fcd2deaff372a39cc679d5c5e4de7bafb0b1339"), 
- common.HexToAddress("0x005f5cee7a43331d5a3d3eec71305925a62f34b6"), - common.HexToAddress("0x0e0da70933f4c7849fc0d203f5d1d43b9ae4532d"), - common.HexToAddress("0xd131637d5275fd1a68a3200f4ad25c71a2a9522e"), - common.HexToAddress("0xbc07118b9ac290e4622f5e77a0853539789effbe"), - common.HexToAddress("0x47e7aa56d6bdf3f36be34619660de61275420af8"), - common.HexToAddress("0xacd87e28b0c9d1254e868b81cba4cc20d9a32225"), - common.HexToAddress("0xadf80daec7ba8dcf15392f1ac611fff65d94f880"), - common.HexToAddress("0x5524c55fb03cf21f549444ccbecb664d0acad706"), - common.HexToAddress("0x40b803a9abce16f50f36a77ba41180eb90023925"), - common.HexToAddress("0xfe24cdd8648121a43a7c86d289be4dd2951ed49f"), - common.HexToAddress("0x17802f43a0137c506ba92291391a8a8f207f487d"), - common.HexToAddress("0x253488078a4edf4d6f42f113d1e62836a942cf1a"), - common.HexToAddress("0x86af3e9626fce1957c82e88cbf04ddf3a2ed7915"), - common.HexToAddress("0xb136707642a4ea12fb4bae820f03d2562ebff487"), - common.HexToAddress("0xdbe9b615a3ae8709af8b93336ce9b477e4ac0940"), - common.HexToAddress("0xf14c14075d6c4ed84b86798af0956deef67365b5"), - common.HexToAddress("0xca544e5c4687d109611d0f8f928b53a25af72448"), - common.HexToAddress("0xaeeb8ff27288bdabc0fa5ebb731b6f409507516c"), - common.HexToAddress("0xcbb9d3703e651b0d496cdefb8b92c25aeb2171f7"), - common.HexToAddress("0x6d87578288b6cb5549d5076a207456a1f6a63dc0"), - common.HexToAddress("0xb2c6f0dfbb716ac562e2d85d6cb2f8d5ee87603e"), - common.HexToAddress("0xaccc230e8a6e5be9160b8cdf2864dd2a001c28b6"), - common.HexToAddress("0x2b3455ec7fedf16e646268bf88846bd7a2319bb2"), - common.HexToAddress("0x4613f3bca5c44ea06337a9e439fbc6d42e501d0a"), - common.HexToAddress("0xd343b217de44030afaa275f54d31a9317c7f441e"), - common.HexToAddress("0x84ef4b2357079cd7a7c69fd7a37cd0609a679106"), - common.HexToAddress("0xda2fef9e4a3230988ff17df2165440f37e8b1708"), - common.HexToAddress("0xf4c64518ea10f995918a454158c6b61407ea345c"), - 
common.HexToAddress("0x7602b46df5390e432ef1c307d4f2c9ff6d65cc97"), - common.HexToAddress("0xbb9bc244d798123fde783fcc1c72d3bb8c189413"), - common.HexToAddress("0x807640a13483f8ac783c557fcdf27be11ea4ac7a"), - } -} diff --git a/coreth/params/denomination.go b/coreth/params/denomination.go index dfaff524..f70b342a 100644 --- a/coreth/params/denomination.go +++ b/coreth/params/denomination.go @@ -29,8 +29,7 @@ package params // These are the multipliers for ether denominations. // Example: To get the wei value of an amount in 'gwei', use // -// new(big.Int).Mul(value, big.NewInt(params.GWei)) -// +// new(big.Int).Mul(value, big.NewInt(params.GWei)) const ( Wei = 1 GWei = 1e9 diff --git a/coreth/params/protocol_params.go b/coreth/params/protocol_params.go index 2137387a..44839d6d 100644 --- a/coreth/params/protocol_params.go +++ b/coreth/params/protocol_params.go @@ -50,6 +50,7 @@ const ( Keccak256Gas uint64 = 30 // Once per KECCAK256 operation. Keccak256WordGas uint64 = 6 // Once per word of the KECCAK256 operation's data. + InitCodeWordGas uint64 = 2 // Once per word of the init code when creating a contract. SstoreSetGas uint64 = 20000 // Once per SSTORE operation. SstoreResetGas uint64 = 5000 // Once per SSTORE operation if the zeroness changes from zero. 
@@ -131,7 +132,8 @@ const ( // Introduced in Tangerine Whistle (Eip 150) CreateBySelfdestructGas uint64 = 25000 - MaxCodeSize = 24576 // Maximum bytecode to permit for a contract + MaxCodeSize = 24576 // Maximum bytecode to permit for a contract + MaxInitCodeSize = 2 * MaxCodeSize // Maximum initcode to permit in a creation transaction and create instructions // Precompiled contract gas prices diff --git a/coreth/params/version.go b/coreth/params/version.go index ff5c122a..dbc62d1c 100644 --- a/coreth/params/version.go +++ b/coreth/params/version.go @@ -31,10 +31,10 @@ import ( ) const ( - VersionMajor = 1 // Major version component of the current release - VersionMinor = 10 // Minor version component of the current release - VersionPatch = 25 // Patch version component of the current release - VersionMeta = "stable" // Version metadata to append to the version string + VersionMajor = 1 // Major version component of the current release + VersionMinor = 11 // Minor version component of the current release + VersionPatch = 0 // Patch version component of the current release + VersionMeta = "unstable" // Version metadata to append to the version string ) // Version holds the textual version string. diff --git a/coreth/peer/client.go b/coreth/peer/client.go index 31fe2175..6a002d0e 100644 --- a/coreth/peer/client.go +++ b/coreth/peer/client.go @@ -19,15 +19,19 @@ var ( // NetworkClient defines ability to send request / response through the Network type NetworkClient interface { - // RequestAny synchronously sends request to a randomly chosen peer with a + // SendAppRequestAny synchronously sends request to an arbitrary peer with a // node version greater than or equal to minVersion. // Returns response bytes, the ID of the chosen peer, and ErrRequestFailed if // the request should be retried. 
- RequestAny(minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) + SendAppRequestAny(minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) - // Request synchronously sends request to the selected nodeID + // SendAppRequest synchronously sends request to the selected nodeID // Returns response bytes, and ErrRequestFailed if the request should be retried. - Request(nodeID ids.NodeID, request []byte) ([]byte, error) + SendAppRequest(nodeID ids.NodeID, request []byte) ([]byte, error) + + // SendCrossChainRequest sends a request to a specific blockchain running on this node. + // Returns response bytes, and ErrRequestFailed if the request failed. + SendCrossChainRequest(chainID ids.ID, request []byte) ([]byte, error) // Gossip sends given gossip message to peers Gossip(gossip []byte) error @@ -51,13 +55,13 @@ func NewNetworkClient(network Network) NetworkClient { } } -// RequestAny synchronously sends request to a randomly chosen peer with a +// SendAppRequestAny synchronously sends request to an arbitrary peer with a // node version greater than or equal to minVersion. // Returns response bytes, the ID of the chosen peer, and ErrRequestFailed if // the request should be retried. 
-func (c *client) RequestAny(minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) { +func (c *client) SendAppRequestAny(minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) { waitingHandler := newWaitingResponseHandler() - nodeID, err := c.network.RequestAny(minVersion, request, waitingHandler) + nodeID, err := c.network.SendAppRequestAny(minVersion, request, waitingHandler) if err != nil { return nil, nodeID, err } @@ -68,11 +72,25 @@ func (c *client) RequestAny(minVersion *version.Application, request []byte) ([] return response, nodeID, nil } -// Request synchronously sends request to the specified nodeID +// SendAppRequest synchronously sends request to the specified nodeID +// Returns response bytes and ErrRequestFailed if the request should be retried. +func (c *client) SendAppRequest(nodeID ids.NodeID, request []byte) ([]byte, error) { + waitingHandler := newWaitingResponseHandler() + if err := c.network.SendAppRequest(nodeID, request, waitingHandler); err != nil { + return nil, err + } + response := <-waitingHandler.responseChan + if waitingHandler.failed { + return nil, ErrRequestFailed + } + return response, nil +} + +// SendCrossChainRequest synchronously sends request to the specified chainID // Returns response bytes and ErrRequestFailed if the request should be retried. 
-func (c *client) Request(nodeID ids.NodeID, request []byte) ([]byte, error) { +func (c *client) SendCrossChainRequest(chainID ids.ID, request []byte) ([]byte, error) { waitingHandler := newWaitingResponseHandler() - if err := c.network.Request(nodeID, request, waitingHandler); err != nil { + if err := c.network.SendCrossChainRequest(chainID, request, waitingHandler); err != nil { return nil, err } response := <-waitingHandler.responseChan diff --git a/coreth/peer/network.go b/coreth/peer/network.go index d9674d7d..4a88da28 100644 --- a/coreth/peer/network.go +++ b/coreth/peer/network.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/coreth/peer/stats" @@ -29,6 +30,7 @@ const minRequestHandlingDuration = 100 * time.Millisecond var ( errAcquiringSemaphore = errors.New("error acquiring semaphore") + errExpiredRequest = errors.New("expired request") _ Network = &network{} _ validators.Connector = &network{} _ common.AppHandler = &network{} @@ -38,18 +40,21 @@ type Network interface { validators.Connector common.AppHandler - // RequestAny synchronously sends request to a randomly chosen peer with a + // SendAppRequestAny synchronously sends request to an arbitrary peer with a // node version greater than or equal to minVersion. // Returns the ID of the chosen peer, and an error if the request could not // be sent to a peer with the desired [minVersion]. 
- RequestAny(minVersion *version.Application, message []byte, handler message.ResponseHandler) (ids.NodeID, error) + SendAppRequestAny(minVersion *version.Application, message []byte, handler message.ResponseHandler) (ids.NodeID, error) - // Request sends message to given nodeID, notifying handler when there's a response or timeout - Request(nodeID ids.NodeID, message []byte, handler message.ResponseHandler) error + // SendAppRequest sends message to given nodeID, notifying handler when there's a response or timeout + SendAppRequest(nodeID ids.NodeID, message []byte, handler message.ResponseHandler) error // Gossip sends given gossip message to peers Gossip(gossip []byte) error + // SendCrossChainRequest sends a message to given chainID notifying handler when there's a response or timeout + SendCrossChainRequest(chainID ids.ID, message []byte, handler message.ResponseHandler) error + // Shutdown stops all peer channel listeners and marks the node to have stopped // n.Start() can be called again but the peers will have to be reconnected // by calling OnPeerConnected for each peer @@ -61,6 +66,9 @@ type Network interface { // SetRequestHandler sets the provided request handler as the request handler SetRequestHandler(handler message.RequestHandler) + // SetCrossChainHandler sets the provided cross chain request handler as the cross chain request handler + SetCrossChainRequestHandler(handler message.CrossChainRequestHandler) + // Size returns the size of the network in number of connected peers Size() uint32 @@ -76,74 +84,82 @@ type network struct { self ids.NodeID // NodeID of this node requestIDGen uint32 // requestID counter used to track outbound requests outstandingRequestHandlers map[uint32]message.ResponseHandler // maps avalanchego requestID => message.ResponseHandler - activeRequests *semaphore.Weighted // controls maximum number of active outbound requests + activeAppRequests *semaphore.Weighted // controls maximum number of active outbound requests + 
activeCrossChainRequests *semaphore.Weighted // controls maximum number of active outbound cross chain requests appSender common.AppSender // avalanchego AppSender for sending messages codec codec.Manager // Codec used for parsing messages - requestHandler message.RequestHandler // maps request type => handler + crossChainCodec codec.Manager // Codec used for parsing cross chain messages + appRequestHandler message.RequestHandler // maps request type => handler + crossChainRequestHandler message.CrossChainRequestHandler // maps cross chain request type => handler gossipHandler message.GossipHandler // maps gossip type => handler peers *peerTracker // tracking of peers & bandwidth - stats stats.RequestHandlerStats // Provide request handler metrics + appStats stats.RequestHandlerStats // Provide request handler metrics + crossChainStats stats.RequestHandlerStats // Provide cross chain request handler metrics } -func NewNetwork(appSender common.AppSender, codec codec.Manager, self ids.NodeID, maxActiveRequests int64) Network { +func NewNetwork(appSender common.AppSender, codec codec.Manager, crossChainCodec codec.Manager, self ids.NodeID, maxActiveAppRequests int64, maxActiveCrossChainRequests int64) Network { return &network{ appSender: appSender, codec: codec, + crossChainCodec: crossChainCodec, self: self, outstandingRequestHandlers: make(map[uint32]message.ResponseHandler), - activeRequests: semaphore.NewWeighted(maxActiveRequests), + activeAppRequests: semaphore.NewWeighted(maxActiveAppRequests), + activeCrossChainRequests: semaphore.NewWeighted(maxActiveCrossChainRequests), gossipHandler: message.NoopMempoolGossipHandler{}, - requestHandler: message.NoopRequestHandler{}, + appRequestHandler: message.NoopRequestHandler{}, + crossChainRequestHandler: message.NoopCrossChainRequestHandler{}, peers: NewPeerTracker(), - stats: stats.NewRequestHandlerStats(), + appStats: stats.NewRequestHandlerStats(), + crossChainStats: stats.NewCrossChainRequestHandlerStats(), } } 
-// RequestAny synchronously sends request to a randomly chosen peer with a +// SendAppRequestAny synchronously sends request to an arbitrary peer with a // node version greater than or equal to minVersion. If minVersion is nil, // the request will be sent to any peer regardless of their version. // Returns the ID of the chosen peer, and an error if the request could not // be sent to a peer with the desired [minVersion]. -func (n *network) RequestAny(minVersion *version.Application, request []byte, handler message.ResponseHandler) (ids.NodeID, error) { - // Take a slot from total [activeRequests] and block until a slot becomes available. - if err := n.activeRequests.Acquire(context.Background(), 1); err != nil { +func (n *network) SendAppRequestAny(minVersion *version.Application, request []byte, handler message.ResponseHandler) (ids.NodeID, error) { + // Take a slot from total [activeAppRequests] and block until a slot becomes available. + if err := n.activeAppRequests.Acquire(context.Background(), 1); err != nil { return ids.EmptyNodeID, errAcquiringSemaphore } n.lock.Lock() defer n.lock.Unlock() if nodeID, ok := n.peers.GetAnyPeer(minVersion); ok { - return nodeID, n.request(nodeID, request, handler) + return nodeID, n.sendAppRequest(nodeID, request, handler) } - n.activeRequests.Release(1) + n.activeAppRequests.Release(1) return ids.EmptyNodeID, fmt.Errorf("no peers found matching version %s out of %d peers", minVersion, n.peers.Size()) } -// Request sends request message bytes to specified nodeID, notifying the responseHandler on response or failure -func (n *network) Request(nodeID ids.NodeID, request []byte, responseHandler message.ResponseHandler) error { +// SendAppRequest sends request message bytes to specified nodeID, notifying the responseHandler on response or failure +func (n *network) SendAppRequest(nodeID ids.NodeID, request []byte, responseHandler message.ResponseHandler) error { if nodeID == ids.EmptyNodeID { return fmt.Errorf("cannot send 
request to empty nodeID, nodeID=%s, requestLen=%d", nodeID, len(request)) } - // Take a slot from total [activeRequests] and block until a slot becomes available. - if err := n.activeRequests.Acquire(context.Background(), 1); err != nil { + // Take a slot from total [activeAppRequests] and block until a slot becomes available. + if err := n.activeAppRequests.Acquire(context.Background(), 1); err != nil { return errAcquiringSemaphore } n.lock.Lock() defer n.lock.Unlock() - return n.request(nodeID, request, responseHandler) + return n.sendAppRequest(nodeID, request, responseHandler) } -// request sends request message bytes to specified nodeID and adds [responseHandler] to [outstandingRequestHandlers] +// sendAppRequest sends request message bytes to specified nodeID and adds [responseHandler] to [outstandingRequestHandlers] // so that it can be invoked when the network receives either a response or failure message. // Assumes [nodeID] is never [self] since we guarantee [self] will not be added to the [peers] map. // Releases active requests semaphore if there was an error in sending the request // Returns an error if [appSender] is unable to make the request. // Assumes write lock is held -func (n *network) request(nodeID ids.NodeID, request []byte, responseHandler message.ResponseHandler) error { +func (n *network) sendAppRequest(nodeID ids.NodeID, request []byte, responseHandler message.ResponseHandler) error { log.Debug("sending request to peer", "nodeID", nodeID, "requestLen", len(request)) n.peers.TrackPeer(nodeID) @@ -153,15 +169,13 @@ func (n *network) request(nodeID ids.NodeID, request []byte, responseHandler mes n.outstandingRequestHandlers[requestID] = responseHandler - nodeIDs := ids.NewNodeIDSet(1) + nodeIDs := set.NewSet[ids.NodeID](1) nodeIDs.Add(nodeID) - // send app request to the peer - // on failure: release the activeRequests slot, mark message as processed and return fatal error // Send app request to [nodeID]. 
- // On failure, release the slot from active requests and [outstandingRequestHandlers]. - if err := n.appSender.SendAppRequest(nodeIDs, requestID, request); err != nil { - n.activeRequests.Release(1) + // On failure, release the slot from [activeAppRequests] and delete request from [outstandingRequestHandlers] + if err := n.appSender.SendAppRequest(context.TODO(), nodeIDs, requestID, request); err != nil { + n.activeAppRequests.Release(1) delete(n.outstandingRequestHandlers, requestID) return err } @@ -170,15 +184,124 @@ func (n *network) request(nodeID ids.NodeID, request []byte, responseHandler mes return nil } +// SendCrossChainRequest sends request message bytes to specified chainID and adds [handler] to [outstandingRequestHandlers] +// so that it can be invoked when the network receives either a response or failure message. +// Returns an error if [appSender] is unable to make the request. +func (n *network) SendCrossChainRequest(chainID ids.ID, request []byte, handler message.ResponseHandler) error { + // Take a slot from total [activeCrossChainRequests] and block until a slot becomes available. + if err := n.activeCrossChainRequests.Acquire(context.Background(), 1); err != nil { + return errAcquiringSemaphore + } + + n.lock.Lock() + defer n.lock.Unlock() + + // generate requestID + requestID := n.requestIDGen + n.requestIDGen++ + + n.outstandingRequestHandlers[requestID] = handler + + // Send cross chain request to [chainID]. + // On failure, release the slot from [activeCrossChainRequests] and delete request from [outstandingRequestHandlers]. 
+ if err := n.appSender.SendCrossChainAppRequest(context.TODO(), chainID, requestID, request); err != nil { + n.activeCrossChainRequests.Release(1) + delete(n.outstandingRequestHandlers, requestID) + return err + } + + log.Debug("sent request message to chain", "chainID", chainID, "crossChainRequestID", requestID) + return nil +} + +// CrossChainAppRequest notifies the VM when another chain in the network requests for data. +// Send a CrossChainAppResponse to [chainID] in response to a valid message using the same +// [requestID] before the deadline. +func (n *network) CrossChainAppRequest(ctx context.Context, requestingChainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + log.Debug("received CrossChainAppRequest from chain", "requestingChainID", requestingChainID, "requestID", requestID, "requestLen", len(request)) + + var req message.CrossChainRequest + if _, err := n.crossChainCodec.Unmarshal(request, &req); err != nil { + log.Debug("failed to unmarshal CrossChainAppRequest", "requestingChainID", requestingChainID, "requestID", requestID, "requestLen", len(request), "err", err) + return nil + } + + bufferedDeadline, err := calculateTimeUntilDeadline(deadline, n.crossChainStats) + if err != nil { + log.Debug("deadline to process CrossChainAppRequest has expired, skipping", "requestingChainID", requestingChainID, "requestID", requestID, "err", err) + return nil + } + + log.Debug("processing incoming CrossChainAppRequest", "requestingChainID", requestingChainID, "requestID", requestID, "req", req) + handleCtx, cancel := context.WithDeadline(context.Background(), bufferedDeadline) + defer cancel() + + responseBytes, err := req.Handle(handleCtx, requestingChainID, requestID, n.crossChainRequestHandler) + switch { + case err != nil && err != context.DeadlineExceeded: + return err // Return a fatal error + case responseBytes != nil: + return n.appSender.SendCrossChainAppResponse(ctx, requestingChainID, requestID, responseBytes) // Propagate 
fatal error + default: + return nil + } +} + +// CrossChainAppRequestFailed can be called by the avalanchego -> VM in following cases: +// - respondingChain doesn't exist +// - invalid CrossChainAppResponse from respondingChain +// - invalid CrossChainRequest was sent to respondingChain +// - request times out before a response is provided +// If [requestID] is not known, this function will emit a log and return a nil error. +// If the response handler returns an error it is propagated as a fatal error. +func (n *network) CrossChainAppRequestFailed(ctx context.Context, respondingChainID ids.ID, requestID uint32) error { + n.lock.Lock() + defer n.lock.Unlock() + + log.Debug("received CrossChainAppRequestFailed from chain", "respondingChainID", respondingChainID, "requestID", requestID) + + handler, exists := n.markRequestFulfilled(requestID) + if !exists { + // Should never happen since the engine should be managing outstanding requests + log.Error("received CrossChainAppRequestFailed to unknown request", "respondingChainID", respondingChainID, "requestID", requestID) + return nil + } + + // We must release the slot + n.activeCrossChainRequests.Release(1) + + return handler.OnFailure() +} + +// CrossChainAppResponse is invoked when there is a +// response received from [respondingChainID] regarding a request the VM sent out +// If [requestID] is not known, this function will emit a log and return a nil error. +// If the response handler returns an error it is propagated as a fatal error. 
+func (n *network) CrossChainAppResponse(ctx context.Context, respondingChainID ids.ID, requestID uint32, response []byte) error { + n.lock.Lock() + defer n.lock.Unlock() + + log.Debug("received CrossChainAppResponse from responding chain", "respondingChainID", respondingChainID, "requestID", requestID) + + handler, exists := n.markRequestFulfilled(requestID) + if !exists { + // Should never happen since the engine should be managing outstanding requests + log.Error("received CrossChainAppResponse to unknown request", "respondingChainID", respondingChainID, "requestID", requestID, "responseLen", len(response)) + return nil + } + + // We must release the slot + n.activeCrossChainRequests.Release(1) + + return handler.OnResponse(response) +} + // AppRequest is called by avalanchego -> VM when there is an incoming AppRequest from a peer // error returned by this function is expected to be treated as fatal by the engine // returns error if the requestHandler returns an error // sends a response back to the sender if length of response returned by the handler is >0 // expects the deadline to not have been passed -func (n *network) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { - n.lock.RLock() - defer n.lock.RUnlock() - +func (n *network) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { log.Debug("received AppRequest from node", "nodeID", nodeID, "requestID", requestID, "requestLen", len(request)) var req message.Request @@ -187,33 +310,24 @@ func (n *network) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time. return nil } - // calculate how much time is left until the deadline - timeTillDeadline := time.Until(deadline) - n.stats.UpdateTimeUntilDeadline(timeTillDeadline) - - // bufferedDeadline is half the time till actual deadline so that the message has a reasonable chance - // of completing its processing and sending the response to the peer. 
- timeTillDeadline = time.Duration(timeTillDeadline.Nanoseconds() / 2) - bufferedDeadline := time.Now().Add(timeTillDeadline) - - // check if we have enough time to handle this request - if time.Until(bufferedDeadline) < minRequestHandlingDuration { - // Drop the request if we already missed the deadline to respond. - log.Debug("deadline to process AppRequest has expired, skipping", "nodeID", nodeID, "requestID", requestID, "req", req) - n.stats.IncDeadlineDroppedRequest() + bufferedDeadline, err := calculateTimeUntilDeadline(deadline, n.appStats) + if err != nil { + log.Debug("deadline to process AppRequest has expired, skipping", "nodeID", nodeID, "requestID", requestID, "err", err) return nil } log.Debug("processing incoming request", "nodeID", nodeID, "requestID", requestID, "req", req) - ctx, cancel := context.WithDeadline(context.Background(), bufferedDeadline) + // We make a new context here because we don't want to cancel the context + // passed into n.AppSender.SendAppResponse below + handleCtx, cancel := context.WithDeadline(context.Background(), bufferedDeadline) defer cancel() - responseBytes, err := req.Handle(ctx, nodeID, requestID, n.requestHandler) + responseBytes, err := req.Handle(handleCtx, nodeID, requestID, n.appRequestHandler) switch { case err != nil && err != context.DeadlineExceeded: return err // Return a fatal error case responseBytes != nil: - return n.appSender.SendAppResponse(nodeID, requestID, responseBytes) // Propagate fatal error + return n.appSender.SendAppResponse(ctx, nodeID, requestID, responseBytes) // Propagate fatal error default: return nil } @@ -223,66 +337,95 @@ func (n *network) AppRequest(nodeID ids.NodeID, requestID uint32, deadline time. // Error returned by this function is expected to be treated as fatal by the engine // If [requestID] is not known, this function will emit a log and return a nil error. // If the response handler returns an error it is propagated as a fatal error. 
-func (n *network) AppResponse(nodeID ids.NodeID, requestID uint32, response []byte) error { +func (n *network) AppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { n.lock.Lock() defer n.lock.Unlock() log.Debug("received AppResponse from peer", "nodeID", nodeID, "requestID", requestID) - handler, exists := n.getRequestHandler(requestID) + handler, exists := n.markRequestFulfilled(requestID) if !exists { // Should never happen since the engine should be managing outstanding requests - log.Error("received response to unknown request", "nodeID", nodeID, "requestID", requestID, "responseLen", len(response)) + log.Error("received AppResponse to unknown request", "nodeID", nodeID, "requestID", requestID, "responseLen", len(response)) return nil } - return handler.OnResponse(nodeID, requestID, response) + // We must release the slot + n.activeAppRequests.Release(1) + + return handler.OnResponse(response) } // AppRequestFailed can be called by the avalanchego -> VM in following cases: // - node is benched // - failed to send message to [nodeID] due to a network issue -// - timeout +// - request times out before a response is provided // error returned by this function is expected to be treated as fatal by the engine // returns error only when the response handler returns an error -func (n *network) AppRequestFailed(nodeID ids.NodeID, requestID uint32) error { +func (n *network) AppRequestFailed(_ context.Context, nodeID ids.NodeID, requestID uint32) error { n.lock.Lock() defer n.lock.Unlock() + log.Debug("received AppRequestFailed from peer", "nodeID", nodeID, "requestID", requestID) - handler, exists := n.getRequestHandler(requestID) + handler, exists := n.markRequestFulfilled(requestID) if !exists { // Should never happen since the engine should be managing outstanding requests - log.Error("received request failed to unknown request", "nodeID", nodeID, "requestID", requestID) + log.Error("received AppRequestFailed to unknown 
request", "nodeID", nodeID, "requestID", requestID) return nil } - return handler.OnFailure(nodeID, requestID) + // We must release the slot + n.activeAppRequests.Release(1) + + return handler.OnFailure() +} + +// calculateTimeUntilDeadline calculates the time until deadline and drops it if we missed he deadline to response. +// This function updates metrics for both app requests and cross chain requests. +// This is called by either [AppRequest] or [CrossChainAppRequest]. +func calculateTimeUntilDeadline(deadline time.Time, stats stats.RequestHandlerStats) (time.Time, error) { + // calculate how much time is left until the deadline + timeTillDeadline := time.Until(deadline) + stats.UpdateTimeUntilDeadline(timeTillDeadline) + + // bufferedDeadline is half the time till actual deadline so that the message has a reasonable chance + // of completing its processing and sending the response to the peer. + bufferedDeadline := time.Now().Add(timeTillDeadline / 2) + + // check if we have enough time to handle this request + if time.Until(bufferedDeadline) < minRequestHandlingDuration { + // Drop the request if we already missed the deadline to respond. + stats.IncDeadlineDroppedRequest() + return time.Time{}, errExpiredRequest + } + + return bufferedDeadline, nil } -// getRequestHandler fetches the handler for [requestID] and marks the request with [requestID] as having been fulfilled. +// markRequestFulfilled fetches the handler for [requestID] and marks the request with [requestID] as having been fulfilled. // This is called by either [AppResponse] or [AppRequestFailed]. -// assumes that the write lock is held. -func (n *network) getRequestHandler(requestID uint32) (message.ResponseHandler, bool) { +// Assumes that the write lock is held. 
+func (n *network) markRequestFulfilled(requestID uint32) (message.ResponseHandler, bool) { handler, exists := n.outstandingRequestHandlers[requestID] if !exists { return nil, false } - // mark message as processed, release activeRequests slot + // mark message as processed delete(n.outstandingRequestHandlers, requestID) - n.activeRequests.Release(1) + return handler, true } // Gossip sends given gossip message to peers func (n *network) Gossip(gossip []byte) error { - return n.appSender.SendAppGossip(gossip) + return n.appSender.SendAppGossip(context.TODO(), gossip) } // AppGossip is called by avalanchego -> VM when there is an incoming AppGossip from a peer // error returned by this function is expected to be treated as fatal by the engine // returns error if request could not be parsed as message.Request or when the requestHandler returns an error -func (n *network) AppGossip(nodeID ids.NodeID, gossipBytes []byte) error { +func (n *network) AppGossip(_ context.Context, nodeID ids.NodeID, gossipBytes []byte) error { var gossipMsg message.GossipMessage if _, err := n.codec.Unmarshal(gossipBytes, &gossipMsg); err != nil { log.Debug("could not parse app gossip", "nodeID", nodeID, "gossipLen", len(gossipBytes), "err", err) @@ -294,7 +437,7 @@ func (n *network) AppGossip(nodeID ids.NodeID, gossipBytes []byte) error { } // Connected adds the given nodeID to the peer list so that it can receive messages -func (n *network) Connected(nodeID ids.NodeID, nodeVersion *version.Application) error { +func (n *network) Connected(_ context.Context, nodeID ids.NodeID, nodeVersion *version.Application) error { log.Debug("adding new peer", "nodeID", nodeID) n.lock.Lock() @@ -310,7 +453,7 @@ func (n *network) Connected(nodeID ids.NodeID, nodeVersion *version.Application) } // Disconnected removes given [nodeID] from the peer list -func (n *network) Disconnected(nodeID ids.NodeID) error { +func (n *network) Disconnected(_ context.Context, nodeID ids.NodeID) error { 
log.Debug("disconnecting peer", "nodeID", nodeID) n.lock.Lock() defer n.lock.Unlock() @@ -324,6 +467,11 @@ func (n *network) Shutdown() { n.lock.Lock() defer n.lock.Unlock() + // clean up any pending requests + for requestID := range n.outstandingRequestHandlers { + delete(n.outstandingRequestHandlers, requestID) + } + // reset peers n.peers = NewPeerTracker() } @@ -339,7 +487,14 @@ func (n *network) SetRequestHandler(handler message.RequestHandler) { n.lock.Lock() defer n.lock.Unlock() - n.requestHandler = handler + n.appRequestHandler = handler +} + +func (n *network) SetCrossChainRequestHandler(handler message.CrossChainRequestHandler) { + n.lock.Lock() + defer n.lock.Unlock() + + n.crossChainRequestHandler = handler } func (n *network) Size() uint32 { diff --git a/coreth/peer/network_test.go b/coreth/peer/network_test.go index 650defcd..3c173115 100644 --- a/coreth/peer/network_test.go +++ b/coreth/peer/network_test.go @@ -13,14 +13,17 @@ import ( "time" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/utils/set" + ethcommon "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/stretchr/testify/assert" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/version" - "github.com/stretchr/testify/assert" ) var ( @@ -42,12 +45,15 @@ var ( _ common.AppSender = testAppSender{} _ message.GossipMessage = HelloGossip{} _ message.GossipHandler = &testGossipHandler{} + + _ message.CrossChainRequest = &ExampleCrossChainRequest{} + _ message.CrossChainRequestHandler = &testCrossChainHandler{} ) func TestNetworkDoesNotConnectToItself(t *testing.T) { selfNodeID := ids.GenerateTestNodeID() - n := NewNetwork(nil, nil, selfNodeID, 1) - assert.NoError(t, n.Connected(selfNodeID, defaultPeerVersion)) + n := NewNetwork(nil, nil, nil, selfNodeID, 1, 1) + assert.NoError(t, 
n.Connected(context.Background(), selfNodeID, defaultPeerVersion)) assert.EqualValues(t, 0, n.Size()) } @@ -56,12 +62,12 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { senderWg := &sync.WaitGroup{} var net Network sender := testAppSender{ - sendAppRequestFn: func(nodes ids.NodeIDSet, requestID uint32, requestBytes []byte) error { + sendAppRequestFn: func(nodes set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { nodeID, _ := nodes.Pop() senderWg.Add(1) go func() { defer senderWg.Done() - if err := net.AppRequest(nodeID, requestID, time.Now().Add(5*time.Second), requestBytes); err != nil { + if err := net.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(5*time.Second), requestBytes); err != nil { panic(err) } }() @@ -71,7 +77,7 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { senderWg.Add(1) go func() { defer senderWg.Done() - if err := net.AppResponse(nodeID, requestID, responseBytes); err != nil { + if err := net.AppResponse(context.Background(), nodeID, requestID, responseBytes); err != nil { panic(err) } atomic.AddUint32(&callNum, 1) @@ -81,16 +87,17 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { } codecManager := buildCodec(t, HelloRequest{}, HelloResponse{}) - net = NewNetwork(sender, codecManager, ids.EmptyNodeID, 16) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) + net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) client := NewNetworkClient(net) nodeID := ids.GenerateTestNodeID() - assert.NoError(t, net.Connected(nodeID, defaultPeerVersion)) + assert.NoError(t, net.Connected(context.Background(), nodeID, defaultPeerVersion)) requestMessage := HelloRequest{Message: "this is a request"} defer net.Shutdown() - assert.NoError(t, net.Connected(nodeID, defaultPeerVersion)) + assert.NoError(t, 
net.Connected(context.Background(), nodeID, defaultPeerVersion)) totalRequests := 5000 numCallsPerRequest := 1 // on sending response @@ -103,7 +110,7 @@ func TestRequestAnyRequestsRoutingAndResponse(t *testing.T) { defer wg.Done() requestBytes, err := message.RequestToBytes(codecManager, requestMessage) assert.NoError(t, err) - responseBytes, _, err := client.RequestAny(defaultPeerVersion, requestBytes) + responseBytes, _, err := client.SendAppRequestAny(defaultPeerVersion, requestBytes) assert.NoError(t, err) assert.NotNil(t, responseBytes) @@ -127,7 +134,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { var lock sync.Mutex contactedNodes := make(map[ids.NodeID]struct{}) sender := testAppSender{ - sendAppRequestFn: func(nodes ids.NodeIDSet, requestID uint32, requestBytes []byte) error { + sendAppRequestFn: func(nodes set.Set[ids.NodeID], requestID uint32, requestBytes []byte) error { nodeID, _ := nodes.Pop() lock.Lock() contactedNodes[nodeID] = struct{}{} @@ -135,7 +142,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { senderWg.Add(1) go func() { defer senderWg.Done() - if err := net.AppRequest(nodeID, requestID, time.Now().Add(5*time.Second), requestBytes); err != nil { + if err := net.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(5*time.Second), requestBytes); err != nil { panic(err) } }() @@ -145,7 +152,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { senderWg.Add(1) go func() { defer senderWg.Done() - if err := net.AppResponse(nodeID, requestID, responseBytes); err != nil { + if err := net.AppResponse(context.Background(), nodeID, requestID, responseBytes); err != nil { panic(err) } atomic.AddUint32(&callNum, 1) @@ -155,7 +162,8 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { } codecManager := buildCodec(t, HelloRequest{}, HelloResponse{}) - net = NewNetwork(sender, codecManager, ids.EmptyNodeID, 16) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, 
ExampleCrossChainResponse{}) + net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 16, 16) net.SetRequestHandler(&HelloGreetingRequestHandler{codec: codecManager}) client := NewNetworkClient(net) @@ -167,7 +175,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { ids.GenerateTestNodeID(), } for _, nodeID := range nodes { - assert.NoError(t, net.Connected(nodeID, defaultPeerVersion)) + assert.NoError(t, net.Connected(context.Background(), nodeID, defaultPeerVersion)) } requestMessage := HelloRequest{Message: "this is a request"} @@ -187,7 +195,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { defer wg.Done() requestBytes, err := message.RequestToBytes(codecManager, requestMessage) assert.NoError(t, err) - responseBytes, err := client.Request(nodeID, requestBytes) + responseBytes, err := client.SendAppRequest(nodeID, requestBytes) assert.NoError(t, err) assert.NotNil(t, responseBytes) @@ -209,7 +217,7 @@ func TestRequestRequestsRoutingAndResponse(t *testing.T) { } // ensure empty nodeID is not allowed - _, err := client.Request(ids.EmptyNodeID, []byte("hello there")) + _, err := client.SendAppRequest(ids.EmptyNodeID, []byte("hello there")) assert.Error(t, err) assert.Contains(t, err.Error(), "cannot send request to empty nodeID") } @@ -218,10 +226,11 @@ func TestRequestMinVersion(t *testing.T) { callNum := uint32(0) nodeID := ids.GenerateTestNodeID() codecManager := buildCodec(t, TestMessage{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) var net Network sender := testAppSender{ - sendAppRequestFn: func(nodes ids.NodeIDSet, reqID uint32, messageBytes []byte) error { + sendAppRequestFn: func(nodes set.Set[ids.NodeID], reqID uint32, messageBytes []byte) error { atomic.AddUint32(&callNum, 1) assert.True(t, nodes.Contains(nodeID), "request nodes should contain expected nodeID") assert.Len(t, nodes, 1, "request nodes should contain exactly one node") @@ -233,7 +242,7 
@@ func TestRequestMinVersion(t *testing.T) { if err != nil { panic(err) } - err = net.AppResponse(nodeID, reqID, responseBytes) + err = net.AppResponse(context.Background(), nodeID, reqID, responseBytes) assert.NoError(t, err) }() return nil @@ -241,13 +250,14 @@ func TestRequestMinVersion(t *testing.T) { } // passing nil as codec works because the net.AppRequest is never called - net = NewNetwork(sender, codecManager, ids.EmptyNodeID, 1) + net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 16) client := NewNetworkClient(net) requestMessage := TestMessage{Message: "this is a request"} requestBytes, err := message.RequestToBytes(codecManager, requestMessage) assert.NoError(t, err) assert.NoError(t, net.Connected( + context.Background(), nodeID, &version.Application{ Major: 1, @@ -258,7 +268,7 @@ func TestRequestMinVersion(t *testing.T) { ) // ensure version does not match - responseBytes, _, err := client.RequestAny( + responseBytes, _, err := client.SendAppRequestAny( &version.Application{ Major: 2, Minor: 0, @@ -270,7 +280,7 @@ func TestRequestMinVersion(t *testing.T) { assert.Nil(t, responseBytes) // ensure version matches and the request goes through - responseBytes, _, err = client.RequestAny(defaultPeerVersion, requestBytes) + responseBytes, _, err = client.SendAppRequestAny(defaultPeerVersion, requestBytes) assert.NoError(t, err) var response TestMessage @@ -284,7 +294,7 @@ func TestOnRequestHonoursDeadline(t *testing.T) { var net Network responded := false sender := testAppSender{ - sendAppRequestFn: func(nodes ids.NodeIDSet, reqID uint32, message []byte) error { + sendAppRequestFn: func(nodes set.Set[ids.NodeID], reqID uint32, message []byte) error { return nil }, sendAppResponseFn: func(nodeID ids.NodeID, reqID uint32, message []byte) error { @@ -294,6 +304,7 @@ func TestOnRequestHonoursDeadline(t *testing.T) { } codecManager := buildCodec(t, TestMessage{}) + crossChainCodecManager := buildCodec(t, 
ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) requestBytes, err := marshalStruct(codecManager, TestMessage{Message: "hello there"}) assert.NoError(t, err) @@ -301,19 +312,20 @@ func TestOnRequestHonoursDeadline(t *testing.T) { requestHandler := &testRequestHandler{ processingDuration: 500 * time.Millisecond, } - net = NewNetwork(sender, codecManager, ids.EmptyNodeID, 1) + + net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) net.SetRequestHandler(requestHandler) nodeID := ids.GenerateTestNodeID() requestHandler.response, err = marshalStruct(codecManager, TestMessage{Message: "hi there"}) assert.NoError(t, err) - err = net.AppRequest(nodeID, 1, time.Now().Add(1*time.Millisecond), requestBytes) + err = net.AppRequest(context.Background(), nodeID, 1, time.Now().Add(1*time.Millisecond), requestBytes) assert.NoError(t, err) // ensure the handler didn't get called (as peer.Network would've dropped the request) assert.EqualValues(t, requestHandler.calls, 0) requestHandler.processingDuration = 0 - err = net.AppRequest(nodeID, 2, time.Now().Add(250*time.Millisecond), requestBytes) + err = net.AppRequest(context.Background(), nodeID, 2, time.Now().Add(250*time.Millisecond), requestBytes) assert.NoError(t, err) assert.True(t, responded) assert.EqualValues(t, requestHandler.calls, 1) @@ -321,6 +333,7 @@ func TestOnRequestHonoursDeadline(t *testing.T) { func TestGossip(t *testing.T) { codecManager := buildCodec(t, HelloGossip{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) nodeID := ids.GenerateTestNodeID() var clientNetwork Network @@ -331,7 +344,7 @@ func TestGossip(t *testing.T) { sendAppGossipFn: func(msg []byte) error { go func() { defer wg.Done() - err := clientNetwork.AppGossip(nodeID, msg) + err := clientNetwork.AppGossip(context.Background(), nodeID, msg) assert.NoError(t, err) }() sentGossip = true @@ -340,10 +353,10 @@ func TestGossip(t *testing.T) { } gossipHandler := 
&testGossipHandler{} - clientNetwork = NewNetwork(sender, codecManager, ids.EmptyNodeID, 1) + clientNetwork = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) clientNetwork.SetGossipHandler(gossipHandler) - assert.NoError(t, clientNetwork.Connected(nodeID, defaultPeerVersion)) + assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) client := NewNetworkClient(clientNetwork) defer clientNetwork.Shutdown() @@ -361,16 +374,17 @@ func TestGossip(t *testing.T) { func TestHandleInvalidMessages(t *testing.T) { codecManager := buildCodec(t, HelloGossip{}, TestMessage{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) nodeID := ids.GenerateTestNodeID() requestID := uint32(1) sender := testAppSender{} - clientNetwork := NewNetwork(sender, codecManager, ids.EmptyNodeID, 1) + clientNetwork := NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{}) - assert.NoError(t, clientNetwork.Connected(nodeID, defaultPeerVersion)) + assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) defer clientNetwork.Shutdown() @@ -390,36 +404,37 @@ func TestHandleInvalidMessages(t *testing.T) { var nilResponse []byte // Check for edge cases - assert.NoError(t, clientNetwork.AppGossip(nodeID, gossipMsg)) - assert.NoError(t, clientNetwork.AppGossip(nodeID, requestMessage)) - assert.NoError(t, clientNetwork.AppGossip(nodeID, garbageResponse)) - assert.NoError(t, clientNetwork.AppGossip(nodeID, emptyResponse)) - assert.NoError(t, clientNetwork.AppGossip(nodeID, nilResponse)) - assert.NoError(t, clientNetwork.AppRequest(nodeID, requestID, time.Now().Add(time.Second), gossipMsg)) - assert.NoError(t, clientNetwork.AppRequest(nodeID, requestID, time.Now().Add(time.Second), requestMessage)) - 
assert.NoError(t, clientNetwork.AppRequest(nodeID, requestID, time.Now().Add(time.Second), garbageResponse)) - assert.NoError(t, clientNetwork.AppRequest(nodeID, requestID, time.Now().Add(time.Second), emptyResponse)) - assert.NoError(t, clientNetwork.AppRequest(nodeID, requestID, time.Now().Add(time.Second), nilResponse)) - assert.NoError(t, clientNetwork.AppResponse(nodeID, requestID, gossipMsg)) - assert.NoError(t, clientNetwork.AppResponse(nodeID, requestID, requestMessage)) - assert.NoError(t, clientNetwork.AppResponse(nodeID, requestID, garbageResponse)) - assert.NoError(t, clientNetwork.AppResponse(nodeID, requestID, emptyResponse)) - assert.NoError(t, clientNetwork.AppResponse(nodeID, requestID, nilResponse)) - assert.NoError(t, clientNetwork.AppRequestFailed(nodeID, requestID)) + assert.NoError(t, clientNetwork.AppGossip(context.Background(), nodeID, gossipMsg)) + assert.NoError(t, clientNetwork.AppGossip(context.Background(), nodeID, requestMessage)) + assert.NoError(t, clientNetwork.AppGossip(context.Background(), nodeID, garbageResponse)) + assert.NoError(t, clientNetwork.AppGossip(context.Background(), nodeID, emptyResponse)) + assert.NoError(t, clientNetwork.AppGossip(context.Background(), nodeID, nilResponse)) + assert.NoError(t, clientNetwork.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(time.Second), gossipMsg)) + assert.NoError(t, clientNetwork.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(time.Second), requestMessage)) + assert.NoError(t, clientNetwork.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(time.Second), garbageResponse)) + assert.NoError(t, clientNetwork.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(time.Second), emptyResponse)) + assert.NoError(t, clientNetwork.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(time.Second), nilResponse)) + assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, gossipMsg)) + 
assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, requestMessage)) + assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, garbageResponse)) + assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, emptyResponse)) + assert.NoError(t, clientNetwork.AppResponse(context.Background(), nodeID, requestID, nilResponse)) + assert.NoError(t, clientNetwork.AppRequestFailed(context.Background(), nodeID, requestID)) } func TestNetworkPropagatesRequestHandlerError(t *testing.T) { codecManager := buildCodec(t, TestMessage{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) nodeID := ids.GenerateTestNodeID() requestID := uint32(1) sender := testAppSender{} - clientNetwork := NewNetwork(sender, codecManager, ids.EmptyNodeID, 1) + clientNetwork := NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) clientNetwork.SetGossipHandler(message.NoopMempoolGossipHandler{}) clientNetwork.SetRequestHandler(&testRequestHandler{err: errors.New("fail")}) // Return an error from the request handler - assert.NoError(t, clientNetwork.Connected(nodeID, defaultPeerVersion)) + assert.NoError(t, clientNetwork.Connected(context.Background(), nodeID, defaultPeerVersion)) defer clientNetwork.Shutdown() @@ -428,7 +443,126 @@ func TestNetworkPropagatesRequestHandlerError(t *testing.T) { assert.NoError(t, err) // Check that if the request handler returns an error, it is propagated as a fatal error. 
- assert.Error(t, clientNetwork.AppRequest(nodeID, requestID, time.Now().Add(time.Second), requestMessage)) + assert.Error(t, clientNetwork.AppRequest(context.Background(), nodeID, requestID, time.Now().Add(time.Second), requestMessage)) +} + +func TestCrossChainAppRequest(t *testing.T) { + var net Network + codecManager := buildCodec(t, TestMessage{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) + + sender := testAppSender{ + sendCrossChainAppRequestFn: func(requestingChainID ids.ID, requestID uint32, requestBytes []byte) error { + go func() { + if err := net.CrossChainAppRequest(context.Background(), requestingChainID, requestID, time.Now().Add(5*time.Second), requestBytes); err != nil { + panic(err) + } + }() + return nil + }, + sendCrossChainAppResponseFn: func(respondingChainID ids.ID, requestID uint32, responseBytes []byte) error { + go func() { + if err := net.CrossChainAppResponse(context.Background(), respondingChainID, requestID, responseBytes); err != nil { + panic(err) + } + }() + return nil + }, + } + + net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + net.SetCrossChainRequestHandler(&testCrossChainHandler{codec: crossChainCodecManager}) + client := NewNetworkClient(net) + + exampleCrossChainRequest := ExampleCrossChainRequest{ + Message: "hello this is an example request", + } + + crossChainRequest, err := buildCrossChainRequest(crossChainCodecManager, exampleCrossChainRequest) + assert.NoError(t, err) + + chainID := ids.ID(ethcommon.BytesToHash([]byte{1, 2, 3, 4, 5})) + responseBytes, err := client.SendCrossChainRequest(chainID, crossChainRequest) + assert.NoError(t, err) + + var response ExampleCrossChainResponse + if _, err = crossChainCodecManager.Unmarshal(responseBytes, &response); err != nil { + t.Fatal("unexpected error during unmarshal", err) + } + assert.Equal(t, "this is an example response", response.Response) +} + +func 
TestCrossChainRequestRequestsRoutingAndResponse(t *testing.T) { + var ( + callNum uint32 + senderWg sync.WaitGroup + net Network + ) + + sender := testAppSender{ + sendCrossChainAppRequestFn: func(requestingChainID ids.ID, requestID uint32, requestBytes []byte) error { + senderWg.Add(1) + go func() { + defer senderWg.Done() + if err := net.CrossChainAppRequest(context.Background(), requestingChainID, requestID, time.Now().Add(5*time.Second), requestBytes); err != nil { + panic(err) + } + }() + return nil + }, + sendCrossChainAppResponseFn: func(respondingChainID ids.ID, requestID uint32, responseBytes []byte) error { + senderWg.Add(1) + go func() { + defer senderWg.Done() + if err := net.CrossChainAppResponse(context.Background(), respondingChainID, requestID, responseBytes); err != nil { + panic(err) + } + atomic.AddUint32(&callNum, 1) + }() + return nil + }, + } + + codecManager := buildCodec(t, TestMessage{}) + crossChainCodecManager := buildCodec(t, ExampleCrossChainRequest{}, ExampleCrossChainResponse{}) + net = NewNetwork(sender, codecManager, crossChainCodecManager, ids.EmptyNodeID, 1, 1) + net.SetCrossChainRequestHandler(&testCrossChainHandler{codec: crossChainCodecManager}) + client := NewNetworkClient(net) + + exampleCrossChainRequest := ExampleCrossChainRequest{ + Message: "hello this is an example request", + } + + chainID := ids.ID(ethcommon.BytesToHash([]byte{1, 2, 3, 4, 5})) + defer net.Shutdown() + + totalRequests := 500 + numCallsPerRequest := 1 // on sending response + totalCalls := totalRequests * numCallsPerRequest + + var requestWg sync.WaitGroup + requestWg.Add(totalCalls) + + for i := 0; i < totalCalls; i++ { + go func() { + defer requestWg.Done() + crossChainRequest, err := buildCrossChainRequest(crossChainCodecManager, exampleCrossChainRequest) + assert.NoError(t, err) + responseBytes, err := client.SendCrossChainRequest(chainID, crossChainRequest) + assert.NoError(t, err) + assert.NotNil(t, responseBytes) + + var response 
ExampleCrossChainResponse + if _, err = crossChainCodecManager.Unmarshal(responseBytes, &response); err != nil { + panic(fmt.Errorf("unexpected error during unmarshal: %w", err)) + } + assert.Equal(t, "this is an example response", response.Response) + }() + } + + requestWg.Wait() + senderWg.Wait() + assert.Equal(t, totalCalls, int(atomic.LoadUint32(&callNum))) } func buildCodec(t *testing.T, types ...interface{}) codec.Manager { @@ -451,25 +585,39 @@ func buildGossip(codec codec.Manager, msg message.GossipMessage) ([]byte, error) return codec.Marshal(message.Version, &msg) } +func buildCrossChainRequest(codec codec.Manager, msg message.CrossChainRequest) ([]byte, error) { + return codec.Marshal(message.Version, &msg) +} + type testAppSender struct { - sendAppRequestFn func(ids.NodeIDSet, uint32, []byte) error - sendAppResponseFn func(ids.NodeID, uint32, []byte) error - sendAppGossipFn func([]byte) error + sendCrossChainAppRequestFn func(ids.ID, uint32, []byte) error + sendCrossChainAppResponseFn func(ids.ID, uint32, []byte) error + sendAppRequestFn func(set.Set[ids.NodeID], uint32, []byte) error + sendAppResponseFn func(ids.NodeID, uint32, []byte) error + sendAppGossipFn func([]byte) error +} + +func (t testAppSender) SendCrossChainAppRequest(_ context.Context, chainID ids.ID, requestID uint32, appRequestBytes []byte) error { + return t.sendCrossChainAppRequestFn(chainID, requestID, appRequestBytes) +} + +func (t testAppSender) SendCrossChainAppResponse(_ context.Context, chainID ids.ID, requestID uint32, appResponseBytes []byte) error { + return t.sendCrossChainAppResponseFn(chainID, requestID, appResponseBytes) } -func (t testAppSender) SendAppGossipSpecific(ids.NodeIDSet, []byte) error { +func (t testAppSender) SendAppGossipSpecific(context.Context, set.Set[ids.NodeID], []byte) error { panic("not implemented") } -func (t testAppSender) SendAppRequest(nodeIDs ids.NodeIDSet, requestID uint32, message []byte) error { +func (t testAppSender) SendAppRequest(_ 
context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, message []byte) error { return t.sendAppRequestFn(nodeIDs, requestID, message) } -func (t testAppSender) SendAppResponse(nodeID ids.NodeID, requestID uint32, message []byte) error { +func (t testAppSender) SendAppResponse(_ context.Context, nodeID ids.NodeID, requestID uint32, message []byte) error { return t.sendAppResponseFn(nodeID, requestID, message) } -func (t testAppSender) SendAppGossip(message []byte) error { +func (t testAppSender) SendAppGossip(_ context.Context, message []byte) error { return t.sendAppGossipFn(message) } @@ -549,10 +697,6 @@ func (h HelloGossip) String() string { return fmt.Sprintf("HelloGossip(%s)", h.Msg) } -func (h HelloGossip) initialize(_ []byte) { - // no op -} - func (h HelloGossip) Bytes() []byte { // no op return nil @@ -561,7 +705,6 @@ func (h HelloGossip) Bytes() []byte { type testGossipHandler struct { received bool nodeID ids.NodeID - msg []byte } func (t *testGossipHandler) HandleAtomicTx(nodeID ids.NodeID, msg message.AtomicTxGossip) error { @@ -594,3 +737,32 @@ func (r *testRequestHandler) handleTestRequest(ctx context.Context, _ ids.NodeID } return r.response, r.err } + +type ExampleCrossChainRequest struct { + Message string `serialize:"true"` +} + +func (e ExampleCrossChainRequest) Handle(ctx context.Context, requestingChainID ids.ID, requestID uint32, handler message.CrossChainRequestHandler) ([]byte, error) { + return handler.(*testCrossChainHandler).HandleCrossChainRequest(ctx, requestingChainID, requestID, e) +} + +func (e ExampleCrossChainRequest) String() string { + return fmt.Sprintf("TestMessage(%s)", e.Message) +} + +type ExampleCrossChainResponse struct { + Response string `serialize:"true"` +} + +type TestCrossChainRequestHandler interface { + HandleCrossChainRequest(ctx context.Context, requestingchainID ids.ID, requestID uint32, exampleRequest message.CrossChainRequest) ([]byte, error) +} + +type testCrossChainHandler struct { + 
message.CrossChainRequestHandler + codec codec.Manager +} + +func (t *testCrossChainHandler) HandleCrossChainRequest(ctx context.Context, requestingChainID ids.ID, requestID uint32, exampleRequest message.CrossChainRequest) ([]byte, error) { + return t.codec.Marshal(message.Version, ExampleCrossChainResponse{Response: "this is an example response"}) +} diff --git a/coreth/peer/peer_tracker.go b/coreth/peer/peer_tracker.go index 05eeb3c1..8070005c 100644 --- a/coreth/peer/peer_tracker.go +++ b/coreth/peer/peer_tracker.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/ids" utils_math "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/version" "github.com/ethereum/go-ethereum/log" @@ -43,9 +44,9 @@ type peerInfo struct { type peerTracker struct { peers map[ids.NodeID]*peerInfo // all peers we are connected to numTrackedPeers metrics.Gauge - trackedPeers ids.NodeIDSet // peers that we have sent a request to + trackedPeers set.Set[ids.NodeID] // peers that we have sent a request to numResponsivePeers metrics.Gauge - responsivePeers ids.NodeIDSet // peers that responded to the last request they were sent + responsivePeers set.Set[ids.NodeID] // peers that responded to the last request they were sent bandwidthHeap utils_math.AveragerHeap // tracks bandwidth peers are responding with averageBandwidthMetric metrics.GaugeFloat64 averageBandwidth utils_math.Averager @@ -55,9 +56,9 @@ func NewPeerTracker() *peerTracker { return &peerTracker{ peers: make(map[ids.NodeID]*peerInfo), numTrackedPeers: metrics.GetOrRegisterGauge("net_tracked_peers", nil), - trackedPeers: make(ids.NodeIDSet), + trackedPeers: make(set.Set[ids.NodeID]), numResponsivePeers: metrics.GetOrRegisterGauge("net_responsive_peers", nil), - responsivePeers: make(ids.NodeIDSet), + responsivePeers: make(set.Set[ids.NodeID]), bandwidthHeap: utils_math.NewMaxAveragerHeap(), averageBandwidthMetric: 
metrics.GetOrRegisterGaugeFloat64("net_average_bandwidth", nil), averageBandwidth: utils_math.NewAverager(0, bandwidthHalflife, time.Now()), diff --git a/coreth/peer/stats/stats.go b/coreth/peer/stats/stats.go index b17ef234..4f4cdb6f 100644 --- a/coreth/peer/stats/stats.go +++ b/coreth/peer/stats/stats.go @@ -9,8 +9,7 @@ import ( "github.com/ava-labs/coreth/metrics" ) -// RequestHandlerStats provides the interface for metrics on request handling. -// Since we drop +// RequestHandlerStats provides the interface for metrics for both app requests and cross chain requests. type RequestHandlerStats interface { UpdateTimeUntilDeadline(duration time.Duration) IncDeadlineDroppedRequest() @@ -35,3 +34,10 @@ func NewRequestHandlerStats() RequestHandlerStats { droppedRequests: metrics.GetOrRegisterCounter("net_req_deadline_dropped", nil), } } + +func NewCrossChainRequestHandlerStats() RequestHandlerStats { + return &requestHandlerStats{ + timeUntilDeadline: metrics.GetOrRegisterTimer("net_cross_chain_req_time_until_deadline", nil), + droppedRequests: metrics.GetOrRegisterCounter("net_cross_chain_req_deadline_dropped", nil), + } +} diff --git a/coreth/peer/waiting_handler.go b/coreth/peer/waiting_handler.go index 53ac48bf..64c209d1 100644 --- a/coreth/peer/waiting_handler.go +++ b/coreth/peer/waiting_handler.go @@ -4,7 +4,6 @@ package peer import ( - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/plugin/evm/message" ) @@ -20,14 +19,14 @@ type waitingResponseHandler struct { } // OnResponse passes the response bytes to the responseChan and closes the channel -func (w *waitingResponseHandler) OnResponse(_ ids.NodeID, _ uint32, response []byte) error { +func (w *waitingResponseHandler) OnResponse(response []byte) error { w.responseChan <- response close(w.responseChan) return nil } // OnFailure sets the failed flag to true and closes the channel -func (w *waitingResponseHandler) OnFailure(ids.NodeID, uint32) error { +func (w *waitingResponseHandler) 
OnFailure() error { w.failed = true close(w.responseChan) return nil diff --git a/coreth/plugin/evm/atomic_syncer.go b/coreth/plugin/evm/atomic_syncer.go index 5c70d551..54430587 100644 --- a/coreth/plugin/evm/atomic_syncer.go +++ b/coreth/plugin/evm/atomic_syncer.go @@ -12,10 +12,11 @@ import ( "github.com/ava-labs/avalanchego/database/versiondb" "github.com/ava-labs/avalanchego/utils/wrappers" + "github.com/ethereum/go-ethereum/common" + "github.com/ava-labs/coreth/plugin/evm/message" syncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/trie" - "github.com/ethereum/go-ethereum/common" ) var ( @@ -160,13 +161,13 @@ type atomicSyncerLeafTask struct { atomicSyncer *atomicSyncer } -func (a *atomicSyncerLeafTask) Start() []byte { return addZeroes(a.atomicSyncer.nextHeight) } -func (a *atomicSyncerLeafTask) End() []byte { return nil } -func (a *atomicSyncerLeafTask) NodeType() message.NodeType { return message.AtomicTrieNode } -func (a *atomicSyncerLeafTask) OnFinish() error { return a.atomicSyncer.onFinish() } -func (a *atomicSyncerLeafTask) OnStart() (bool, error) { return false, nil } -func (a *atomicSyncerLeafTask) Root() common.Hash { return a.atomicSyncer.targetRoot } -func (a *atomicSyncerLeafTask) Account() common.Hash { return common.Hash{} } +func (a *atomicSyncerLeafTask) Start() []byte { return addZeroes(a.atomicSyncer.nextHeight) } +func (a *atomicSyncerLeafTask) End() []byte { return nil } +func (a *atomicSyncerLeafTask) NodeType() message.NodeType { return message.AtomicTrieNode } +func (a *atomicSyncerLeafTask) OnFinish(context.Context) error { return a.atomicSyncer.onFinish() } +func (a *atomicSyncerLeafTask) OnStart() (bool, error) { return false, nil } +func (a *atomicSyncerLeafTask) Root() common.Hash { return a.atomicSyncer.targetRoot } +func (a *atomicSyncerLeafTask) Account() common.Hash { return common.Hash{} } func (a *atomicSyncerLeafTask) OnLeafs(keys, vals [][]byte) error { return a.atomicSyncer.onLeafs(keys, vals) 
} diff --git a/coreth/plugin/evm/atomic_trie.go b/coreth/plugin/evm/atomic_trie.go index 88ef7811..3e67a682 100644 --- a/coreth/plugin/evm/atomic_trie.go +++ b/coreth/plugin/evm/atomic_trie.go @@ -118,7 +118,7 @@ type atomicTrie struct { lastAcceptedRoot common.Hash // most recent trie root passed to accept trie or the root of the atomic trie on intialization. codec codec.Manager memoryCap common.StorageSize - tipBuffer *core.BoundedBuffer + tipBuffer *core.BoundedBuffer[common.Hash] } // newAtomicTrie returns a new instance of a atomicTrie with a configurable commitHeightInterval, used in testing. diff --git a/coreth/plugin/evm/atomic_tx_repository_test.go b/coreth/plugin/evm/atomic_tx_repository_test.go index 8bdeaacd..614d9c9e 100644 --- a/coreth/plugin/evm/atomic_tx_repository_test.go +++ b/coreth/plugin/evm/atomic_tx_repository_test.go @@ -16,6 +16,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/stretchr/testify/assert" @@ -109,7 +110,7 @@ func verifyTxs(t testing.TB, repo AtomicTxRepository, txMap map[uint64][]*Tx) { // txs should be stored in order of txID sort.Slice(expectedTxs, getComparator(expectedTxs)) - txIDs := ids.Set{} + txIDs := set.Set[ids.ID]{} for i := 0; i < len(txs); i++ { assert.Equalf(t, expectedTxs[i].ID().Hex(), txs[i].ID().Hex(), "wrong txID at height=%d idx=%d", height, i) txIDs.Add(txs[i].ID()) diff --git a/coreth/plugin/evm/block.go b/coreth/plugin/evm/block.go index e65776ba..f4b53081 100644 --- a/coreth/plugin/evm/block.go +++ b/coreth/plugin/evm/block.go @@ -4,6 +4,8 @@ package evm import ( + "context" + "errors" "fmt" "math/big" "time" @@ -29,6 +31,8 @@ var ( 103546, 103571, 103572, 103619, 103287, 103624, 103591, } + + errMissingUTXOs = errors.New("missing UTXOs") ) func init() { @@ -130,7 +134,7 @@ func (vm *VM) newBlock(ethBlock *types.Block) (*Block, error) { func (b *Block) 
ID() ids.ID { return b.id } // Accept implements the snowman.Block interface -func (b *Block) Accept() error { +func (b *Block) Accept(context.Context) error { vm := b.vm // Although returning an error from Accept is considered fatal, it is good @@ -167,7 +171,7 @@ func (b *Block) Accept() error { // Reject implements the snowman.Block interface // If [b] contains an atomic transaction, attempt to re-issue it -func (b *Block) Reject() error { +func (b *Block) Reject(context.Context) error { b.status = choices.Rejected log.Debug(fmt.Sprintf("Rejecting block %s (%s) at height %d", b.ID().Hex(), b.ID(), b.Height())) for _, tx := range b.atomicTxs { @@ -223,7 +227,7 @@ func (b *Block) syntacticVerify() error { } // Verify implements the snowman.Block interface -func (b *Block) Verify() error { +func (b *Block) Verify(context.Context) error { return b.verify(true) } @@ -232,6 +236,11 @@ func (b *Block) verify(writes bool) error { return fmt.Errorf("syntactic block verification failed: %w", err) } + // verify UTXOs named in import txs are present in shared memory. + if err := b.verifyUTXOsPresent(); err != nil { + return err + } + err := b.vm.blockChain.InsertBlockManual(b.ethBlock, writes) if err != nil || !writes { // if an error occurred inserting the block into the chain @@ -244,6 +253,33 @@ func (b *Block) verify(writes bool) error { return err } +// verifyUTXOsPresent returns an error if any of the atomic transactions name UTXOs that +// are not present in shared memory. +func (b *Block) verifyUTXOsPresent() error { + blockHash := common.Hash(b.ID()) + if b.vm.atomicBackend.IsBonus(b.Height(), blockHash) { + log.Info("skipping atomic tx verification on bonus block", "block", blockHash) + return nil + } + + if !b.vm.bootstrapped { + return nil + } + + // verify UTXOs named in import txs are present in shared memory. 
+ for _, atomicTx := range b.atomicTxs { + utx := atomicTx.UnsignedAtomicTx + chainID, requests, err := utx.AtomicOps() + if err != nil { + return err + } + if _, err := b.vm.ctx.SharedMemory.Get(chainID, requests.RemoveRequests); err != nil { + return fmt.Errorf("%w: %s", errMissingUTXOs, err) + } + } + return nil +} + // Bytes implements the snowman.Block interface func (b *Block) Bytes() []byte { res, err := rlp.EncodeToBytes(b.ethBlock) diff --git a/coreth/plugin/evm/block_builder.go b/coreth/plugin/evm/block_builder.go index 333ea88b..381c3e7a 100644 --- a/coreth/plugin/evm/block_builder.go +++ b/coreth/plugin/evm/block_builder.go @@ -7,23 +7,15 @@ import ( "sync" "time" + "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/coreth/params" "github.com/ava-labs/avalanchego/snow" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/coreth/core" "github.com/ethereum/go-ethereum/log" ) -// buildingBlkStatus denotes the current status of the VM in block production. -type buildingBlkStatus uint8 - -var ( - // AP4 Params - minBlockTimeAP4 = 500 * time.Millisecond -) - const ( // waitBlockTime is the amount of time to wait for BuildBlock to be // called by the engine before deciding whether or not to gossip the @@ -35,9 +27,9 @@ const ( // whatever the peer makes. waitBlockTime = 100 * time.Millisecond - dontBuild buildingBlkStatus = iota - mayBuild - building + // Minimum amount of time to wait after building a block before attempting to build a block + // a second time without changing the contents of the mempool. + minBlockBuildingRetryDelay = 500 * time.Millisecond ) type blockBuilder struct { @@ -55,17 +47,18 @@ type blockBuilder struct { // is ready to be build. This notifies the consensus engine. 
notifyBuildBlockChan chan<- commonEng.Message - // [buildBlockLock] must be held when accessing [buildStatus] + // [buildBlockLock] must be held when accessing [buildSent] buildBlockLock sync.Mutex - // [buildBlockTimer] is a timer handling block production. - buildBlockTimer *timer.Timer + // buildSent is true iff we have sent a PendingTxs message to the consensus message and + // are still waiting for buildBlock to be called. + buildSent bool - // buildStatus signals the phase of block building the VM is currently in. - // [dontBuild] indicates there's no need to build a block. - // [mayBuild] indicates the VM should proceed to build a block. - // [building] indicates the VM has sent a request to the engine to build a block. - buildStatus buildingBlkStatus + // buildBlockTimer is a timer used to delay retrying block building a minimum amount of time + // with the same contents of the mempool. + // If the mempool receives a new transaction, the block builder will send a new notification to + // the engine and cancel the timer. + buildBlockTimer *timer.Timer } func (vm *VM) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message) *blockBuilder { @@ -78,38 +71,43 @@ func (vm *VM) NewBlockBuilder(notifyBuildBlockChan chan<- commonEng.Message) *bl shutdownChan: vm.shutdownChan, shutdownWg: &vm.shutdownWg, notifyBuildBlockChan: notifyBuildBlockChan, - buildStatus: dontBuild, } - b.handleBlockBuilding() return b } +// handleBlockBuilding dispatches a timer used to delay block building retry attempts when the contents +// of the mempool has not been changed since the last attempt. func (b *blockBuilder) handleBlockBuilding() { b.buildBlockTimer = timer.NewTimer(b.buildBlockTimerCallback) go b.ctx.Log.RecoverAndPanic(b.buildBlockTimer.Dispatch) } -// handleGenerateBlock should be called immediately after [BuildBlock]. -// [handleGenerateBlock] invocation could lead to quiesence, building a block with -// some delay, or attempting to build another block immediately. 
-func (b *blockBuilder) handleGenerateBlock() { +// buildBlockTimerCallback is the timer callback that will send a PendingTxs notification +// to the consensus engine if there are transactions in the mempool. +func (b *blockBuilder) buildBlockTimerCallback() { b.buildBlockLock.Lock() defer b.buildBlockLock.Unlock() - // If we still need to build a block immediately after building, we let the - // engine know it [mayBuild] in [minBlockTimeAP4]. - // - // It is often the case in AP4 that a block (with the same txs) could be built - // after a few seconds of delay as the [baseFee] and/or [blockGasCost] decrease. + // If there are still transactions in the mempool, send another notification to + // the engine to retry BuildBlock. if b.needToBuild() { - b.buildStatus = mayBuild - b.buildBlockTimer.SetTimeoutIn(minBlockTimeAP4) - } else { - b.buildStatus = dontBuild + b.markBuilding() } } +// handleGenerateBlock is called from the VM immediately after BuildBlock. +func (b *blockBuilder) handleGenerateBlock() { + b.buildBlockLock.Lock() + defer b.buildBlockLock.Unlock() + + // Reset buildSent now that the engine has called BuildBlock. + b.buildSent = false + + // Set a timer to check if calling build block a second time is needed. + b.buildBlockTimer.SetTimeoutIn(minBlockBuildingRetryDelay) +} + // needToBuild returns true if there are outstanding transactions to be issued // into a block. func (b *blockBuilder) needToBuild() bool { @@ -117,49 +115,32 @@ func (b *blockBuilder) needToBuild() bool { return size > 0 || b.mempool.Len() > 0 } -// buildBlockTimerCallback is the timer callback that sends a notification -// to the engine when the VM is ready to build a block. 
-func (b *blockBuilder) buildBlockTimerCallback() { - b.buildBlockLock.Lock() - defer b.buildBlockLock.Unlock() - - switch b.buildStatus { - case dontBuild: - case mayBuild: - b.markBuilding() - case building: - // If the status has already been set to building, there is no need - // to send an additional request to the consensus engine until the call - // to BuildBlock resets the block status. - default: - // Log an error if an invalid status is found. - log.Error("Found invalid build status in build block timer", "buildStatus", b.buildStatus) - } -} - +// markBuilding adds a PendingTxs message to the toEngine channel. // markBuilding assumes the [buildBlockLock] is held. func (b *blockBuilder) markBuilding() { + // If the engine has not called BuildBlock, no need to send another message. + if b.buildSent { + return + } + b.buildBlockTimer.Cancel() // Cancel any future attempt from the timer to send a PendingTxs message + select { case b.notifyBuildBlockChan <- commonEng.PendingTxs: - b.buildStatus = building + b.buildSent = true default: log.Error("Failed to push PendingTxs notification to the consensus engine.") } } -// signalTxsReady notifies the engine and sets the status to [building] if the -// status is [dontBuild]. Otherwise, the attempt has already begun and this notification -// can be safely skipped. +// signalTxsReady sends a PendingTxs notification to the consensus engine. +// If BuildBlock has not been called since the last PendingTxs message was sent, +// signalTxsReady will not send a duplicate. func (b *blockBuilder) signalTxsReady() { b.buildBlockLock.Lock() defer b.buildBlockLock.Unlock() - if b.buildStatus != dontBuild { - return - } - // We take a naive approach here and signal the engine that we should build - // a block as soon as we receive at least one transaction. + // a block as soon as we receive at least one new transaction. 
// // In the future, we may wish to add optimization here to only signal the // engine if the sum of the projected tips in the mempool satisfies the diff --git a/coreth/plugin/evm/block_builder_test.go b/coreth/plugin/evm/block_builder_test.go index 00a15d2d..ce65c0c0 100644 --- a/coreth/plugin/evm/block_builder_test.go +++ b/coreth/plugin/evm/block_builder_test.go @@ -4,7 +4,6 @@ package evm import ( - "math/big" "sync" "testing" "time" @@ -17,14 +16,9 @@ import ( func TestBlockBuilderShutsDown(t *testing.T) { shutdownChan := make(chan struct{}) wg := &sync.WaitGroup{} - config := *params.TestChainConfig - // Set ApricotPhase4BlockTime one hour in the future so that it will - // create a goroutine waiting for an hour before shutting down the - // buildBlocktimer. - config.ApricotPhase4BlockTimestamp = big.NewInt(time.Now().Add(time.Hour).Unix()) builder := &blockBuilder{ ctx: snow.DefaultContextTest(), - chainConfig: &config, + chainConfig: params.TestChainConfig, shutdownChan: shutdownChan, shutdownWg: wg, } diff --git a/coreth/plugin/evm/block_verification.go b/coreth/plugin/evm/block_verification.go index 9202018f..a868225f 100644 --- a/coreth/plugin/evm/block_verification.go +++ b/coreth/plugin/evm/block_verification.go @@ -98,31 +98,39 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { } // Enforce static gas limit after ApricotPhase1 (prior to ApricotPhase1 it's handled in processing). 
- - if rules.IsSongbirdCode { - if rules.IsSongbirdTransition { - if ethHeader.GasLimit != params.SgbTransitionGasLimit { - return fmt.Errorf( - "expected gas limit to be %d in sgb transition but got %d", - params.SgbTransitionGasLimit, ethHeader.GasLimit, - ) - } - } else if rules.IsApricotPhase5 { - if ethHeader.GasLimit != params.SgbApricotPhase5GasLimit { - return fmt.Errorf( - "expected gas limit to be %d in apricot phase 5 but got %d", - params.SgbApricotPhase5GasLimit, ethHeader.GasLimit, - ) - - } + if rules.IsCortina { + if ethHeader.GasLimit != params.CortinaGasLimit { + return fmt.Errorf( + "expected gas limit to be %d after cortina but got %d", + params.CortinaGasLimit, ethHeader.GasLimit, + ) } } else { - if rules.IsApricotPhase1 { - if ethHeader.GasLimit != params.ApricotPhase1GasLimit { - return fmt.Errorf( - "expected gas limit to be %d after apricot phase 1 but got %d", - params.ApricotPhase1GasLimit, ethHeader.GasLimit, - ) + if rules.IsSongbirdCode { + if rules.IsSongbirdTransition { + if ethHeader.GasLimit != params.SgbTransitionGasLimit { + return fmt.Errorf( + "expected gas limit to be %d in sgb transition but got %d", + params.SgbTransitionGasLimit, ethHeader.GasLimit, + ) + } + } else if rules.IsApricotPhase5 { + if ethHeader.GasLimit != params.SgbApricotPhase5GasLimit { + return fmt.Errorf( + "expected gas limit to be %d in apricot phase 5 but got %d", + params.SgbApricotPhase5GasLimit, ethHeader.GasLimit, + ) + + } + } + } else { + if rules.IsApricotPhase1 { + if ethHeader.GasLimit != params.ApricotPhase1GasLimit { + return fmt.Errorf( + "expected gas limit to be %d after apricot phase 1 but got %d", + params.ApricotPhase1GasLimit, ethHeader.GasLimit, + ) + } } } } @@ -261,16 +269,5 @@ func (v blockValidator) SyntacticVerify(b *Block, rules params.Rules) error { } } - if rules.IsCortina { - // In Cortina, ExtraStateRoot must not be empty (should contain the root of the atomic trie). 
- if ethHeader.ExtraStateRoot == (common.Hash{}) { - return fmt.Errorf("%w: ExtraStateRoot must not be empty", errInvalidExtraStateRoot) - } - } else { - // Before Cortina, ExtraStateRoot must be empty. - if ethHeader.ExtraStateRoot != (common.Hash{}) { - return fmt.Errorf("%w: ExtraStateRoot must be empty", errInvalidExtraStateRoot) - } - } return nil } diff --git a/coreth/plugin/evm/client.go b/coreth/plugin/evm/client.go index 7477fd7b..4e0163a2 100644 --- a/coreth/plugin/evm/client.go +++ b/coreth/plugin/evm/client.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/rpc" @@ -28,8 +28,8 @@ type Client interface { GetAtomicTx(ctx context.Context, txID ids.ID) ([]byte, error) GetAtomicUTXOs(ctx context.Context, addrs []string, sourceChain string, limit uint32, startAddress, startUTXOID string) ([][]byte, api.Index, error) ListAddresses(ctx context.Context, userPass api.UserPass) ([]string, error) - ExportKey(ctx context.Context, userPass api.UserPass, addr string) (*crypto.PrivateKeySECP256K1R, string, error) - ImportKey(ctx context.Context, userPass api.UserPass, privateKey *crypto.PrivateKeySECP256K1R) (string, error) + ExportKey(ctx context.Context, userPass api.UserPass, addr string) (*secp256k1.PrivateKey, string, error) + ImportKey(ctx context.Context, userPass api.UserPass, privateKey *secp256k1.PrivateKey) (string, error) Import(ctx context.Context, userPass api.UserPass, to string, sourceChain string) (ids.ID, error) ExportAVAX(ctx context.Context, userPass api.UserPass, amount uint64, to string) (ids.ID, error) Export(ctx context.Context, userPass api.UserPass, amount uint64, to string, assetID string) (ids.ID, error) @@ -50,8 +50,8 @@ type client struct { // NewClient returns a Client for interacting with 
EVM [chain] func NewClient(uri, chain string) Client { return &client{ - requester: rpc.NewEndpointRequester(fmt.Sprintf("%s/ext/bc/%s/avax", uri, chain), "avax"), - adminRequester: rpc.NewEndpointRequester(fmt.Sprintf("%s/ext/bc/%s/admin", uri, chain), "admin"), + requester: rpc.NewEndpointRequester(fmt.Sprintf("%s/ext/bc/%s/avax", uri, chain)), + adminRequester: rpc.NewEndpointRequester(fmt.Sprintf("%s/ext/bc/%s/admin", uri, chain)), } } @@ -67,7 +67,7 @@ func (c *client) IssueTx(ctx context.Context, txBytes []byte) (ids.ID, error) { if err != nil { return res.TxID, fmt.Errorf("problem hex encoding bytes: %w", err) } - err = c.requester.SendRequest(ctx, "issueTx", &api.FormattedTx{ + err = c.requester.SendRequest(ctx, "avax.issueTx", &api.FormattedTx{ Tx: txStr, Encoding: formatting.Hex, }, res) @@ -77,7 +77,7 @@ func (c *client) IssueTx(ctx context.Context, txBytes []byte) (ids.ID, error) { // GetAtomicTxStatus returns the status of [txID] func (c *client) GetAtomicTxStatus(ctx context.Context, txID ids.ID) (Status, error) { res := &GetAtomicTxStatusReply{} - err := c.requester.SendRequest(ctx, "getAtomicTxStatus", &api.JSONTxID{ + err := c.requester.SendRequest(ctx, "avax.getAtomicTxStatus", &api.JSONTxID{ TxID: txID, }, res) return res.Status, err @@ -86,7 +86,7 @@ func (c *client) GetAtomicTxStatus(ctx context.Context, txID ids.ID) (Status, er // GetAtomicTx returns the byte representation of [txID] func (c *client) GetAtomicTx(ctx context.Context, txID ids.ID) ([]byte, error) { res := &api.FormattedTx{} - err := c.requester.SendRequest(ctx, "getAtomicTx", &api.GetTxArgs{ + err := c.requester.SendRequest(ctx, "avax.getAtomicTx", &api.GetTxArgs{ TxID: txID, Encoding: formatting.Hex, }, res) @@ -101,7 +101,7 @@ func (c *client) GetAtomicTx(ctx context.Context, txID ids.ID) ([]byte, error) { // from [sourceChain] func (c *client) GetAtomicUTXOs(ctx context.Context, addrs []string, sourceChain string, limit uint32, startAddress, startUTXOID string) ([][]byte, 
api.Index, error) { res := &api.GetUTXOsReply{} - err := c.requester.SendRequest(ctx, "getUTXOs", &api.GetUTXOsArgs{ + err := c.requester.SendRequest(ctx, "avax.getUTXOs", &api.GetUTXOsArgs{ Addresses: addrs, SourceChain: sourceChain, Limit: cjson.Uint32(limit), @@ -129,15 +129,15 @@ func (c *client) GetAtomicUTXOs(ctx context.Context, addrs []string, sourceChain // ListAddresses returns all addresses on this chain controlled by [user] func (c *client) ListAddresses(ctx context.Context, user api.UserPass) ([]string, error) { res := &api.JSONAddresses{} - err := c.requester.SendRequest(ctx, "listAddresses", &user, res) + err := c.requester.SendRequest(ctx, "avax.listAddresses", &user, res) return res.Addresses, err } // ExportKey returns the private key corresponding to [addr] controlled by [user] // in both Avalanche standard format and hex format -func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr string) (*crypto.PrivateKeySECP256K1R, string, error) { +func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr string) (*secp256k1.PrivateKey, string, error) { res := &ExportKeyReply{} - err := c.requester.SendRequest(ctx, "exportKey", &ExportKeyArgs{ + err := c.requester.SendRequest(ctx, "avax.exportKey", &ExportKeyArgs{ UserPass: user, Address: addr, }, res) @@ -145,9 +145,9 @@ func (c *client) ExportKey(ctx context.Context, user api.UserPass, addr string) } // ImportKey imports [privateKey] to [user] -func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *crypto.PrivateKeySECP256K1R) (string, error) { +func (c *client) ImportKey(ctx context.Context, user api.UserPass, privateKey *secp256k1.PrivateKey) (string, error) { res := &api.JSONAddress{} - err := c.requester.SendRequest(ctx, "importKey", &ImportKeyArgs{ + err := c.requester.SendRequest(ctx, "avax.importKey", &ImportKeyArgs{ UserPass: user, PrivateKey: privateKey, }, res) @@ -158,7 +158,7 @@ func (c *client) ImportKey(ctx context.Context, user 
api.UserPass, privateKey *c // returns the ID of the newly created transaction func (c *client) Import(ctx context.Context, user api.UserPass, to, sourceChain string) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "import", &ImportArgs{ + err := c.requester.SendRequest(ctx, "avax.import", &ImportArgs{ UserPass: user, To: to, SourceChain: sourceChain, @@ -188,7 +188,7 @@ func (c *client) Export( assetID string, ) (ids.ID, error) { res := &api.JSONTxID{} - err := c.requester.SendRequest(ctx, "export", &ExportArgs{ + err := c.requester.SendRequest(ctx, "avax.export", &ExportArgs{ ExportAVAXArgs: ExportAVAXArgs{ UserPass: user, Amount: cjson.Uint64(amount), @@ -200,24 +200,24 @@ func (c *client) Export( } func (c *client) StartCPUProfiler(ctx context.Context) error { - return c.adminRequester.SendRequest(ctx, "startCPUProfiler", struct{}{}, &api.EmptyReply{}) + return c.adminRequester.SendRequest(ctx, "admin.startCPUProfiler", struct{}{}, &api.EmptyReply{}) } func (c *client) StopCPUProfiler(ctx context.Context) error { - return c.adminRequester.SendRequest(ctx, "stopCPUProfiler", struct{}{}, &api.EmptyReply{}) + return c.adminRequester.SendRequest(ctx, "admin.stopCPUProfiler", struct{}{}, &api.EmptyReply{}) } func (c *client) MemoryProfile(ctx context.Context) error { - return c.adminRequester.SendRequest(ctx, "memoryProfile", struct{}{}, &api.EmptyReply{}) + return c.adminRequester.SendRequest(ctx, "admin.memoryProfile", struct{}{}, &api.EmptyReply{}) } func (c *client) LockProfile(ctx context.Context) error { - return c.adminRequester.SendRequest(ctx, "lockProfile", struct{}{}, &api.EmptyReply{}) + return c.adminRequester.SendRequest(ctx, "admin.lockProfile", struct{}{}, &api.EmptyReply{}) } // SetLogLevel dynamically sets the log level for the C Chain func (c *client) SetLogLevel(ctx context.Context, level log.Lvl) error { - return c.adminRequester.SendRequest(ctx, "setLogLevel", &SetLogLevelArgs{ + return 
c.adminRequester.SendRequest(ctx, "admin.setLogLevel", &SetLogLevelArgs{ Level: level.String(), }, &api.EmptyReply{}) } @@ -225,6 +225,6 @@ func (c *client) SetLogLevel(ctx context.Context, level log.Lvl) error { // GetVMConfig returns the current config of the VM func (c *client) GetVMConfig(ctx context.Context) (*Config, error) { res := &ConfigReply{} - err := c.adminRequester.SendRequest(ctx, "getVMConfig", struct{}{}, res) + err := c.adminRequester.SendRequest(ctx, "admin.getVMConfig", struct{}{}, res) return res.Config, err } diff --git a/coreth/plugin/evm/config.go b/coreth/plugin/evm/config.go index 198c19d0..99425ad6 100644 --- a/coreth/plugin/evm/config.go +++ b/coreth/plugin/evm/config.go @@ -8,33 +8,41 @@ import ( "fmt" "time" + "github.com/ava-labs/coreth/core" "github.com/ava-labs/coreth/eth" + "github.com/ethereum/go-ethereum/common" "github.com/spf13/cast" ) const ( - defaultAcceptorQueueLimit = 64 // Provides 2 minutes of buffer (2s block target) for a commit delay - defaultPruningEnabled = true - defaultCommitInterval = 4096 - defaultSyncableCommitInterval = defaultCommitInterval * 4 - defaultSnapshotAsync = true - defaultRpcGasCap = 50_000_000 // Default to 50M Gas Limit - defaultRpcTxFeeCap = 100 // 100 AVAX - defaultMetricsExpensiveEnabled = true - defaultApiMaxDuration = 0 // Default to no maximum API call duration - defaultWsCpuRefillRate = 0 // Default to no maximum WS CPU usage - defaultWsCpuMaxStored = 0 // Default to no maximum WS CPU usage - defaultMaxBlocksPerRequest = 0 // Default to no maximum on the number of blocks per getLogs request - defaultContinuousProfilerFrequency = 15 * time.Minute - defaultContinuousProfilerMaxFiles = 5 - defaultTxRegossipFrequency = 1 * time.Minute - defaultTxRegossipMaxSize = 15 - defaultOfflinePruningBloomFilterSize uint64 = 512 // Default size (MB) for the offline pruner to use - defaultLogLevel = "info" - defaultLogJSONFormat = false - defaultPopulateMissingTriesParallelism = 1024 - 
defaultMaxOutboundActiveRequests = 16 - defaultStateSyncServerTrieCache = 64 // MB + defaultAcceptorQueueLimit = 64 // Provides 2 minutes of buffer (2s block target) for a commit delay + defaultPruningEnabled = true + defaultCommitInterval = 4096 + defaultTrieCleanCache = 512 + defaultTrieDirtyCache = 256 + defaultTrieDirtyCommitTarget = 20 + defaultSnapshotCache = 256 + defaultSyncableCommitInterval = defaultCommitInterval * 4 + defaultSnapshotAsync = true + defaultRpcGasCap = 50_000_000 // Default to 50M Gas Limit + defaultRpcTxFeeCap = 100 // 100 AVAX + defaultMetricsExpensiveEnabled = true + defaultApiMaxDuration = 0 // Default to no maximum API call duration + defaultWsCpuRefillRate = 0 // Default to no maximum WS CPU usage + defaultWsCpuMaxStored = 0 // Default to no maximum WS CPU usage + defaultMaxBlocksPerRequest = 0 // Default to no maximum on the number of blocks per getLogs request + defaultContinuousProfilerFrequency = 15 * time.Minute + defaultContinuousProfilerMaxFiles = 5 + defaultTxRegossipFrequency = 1 * time.Minute + defaultTxRegossipMaxSize = 15 + defaultOfflinePruningBloomFilterSize uint64 = 512 // Default size (MB) for the offline pruner to use + defaultLogLevel = "info" + defaultLogJSONFormat = false + defaultPopulateMissingTriesParallelism = 1024 + defaultMaxOutboundActiveRequests = 16 + defaultMaxOutboundActiveCrossChainRequests = 64 + defaultStateSyncServerTrieCache = 64 // MB + defaultAcceptedCacheSize = 32 // blocks // defaultStateSyncMinBlocks is the minimum number of blocks the blockchain // should be ahead of local last accepted to perform state sync. 
@@ -46,15 +54,20 @@ const ( defaultStateSyncMinBlocks = 300_000 ) -var defaultEnabledAPIs = []string{ - "eth", - "eth-filter", - "net", - "web3", - "internal-eth", - "internal-blockchain", - "internal-transaction", -} +var ( + defaultEnabledAPIs = []string{ + "eth", + "eth-filter", + "net", + "web3", + "internal-eth", + "internal-blockchain", + "internal-transaction", + } + defaultAllowUnprotectedTxHashes = []common.Hash{ + common.HexToHash("0xfefb2da535e927b85fe68eb81cb2e4a5827c905f78381a01ef2322aa9b0aee8e"), // EIP-1820: https://eips.ethereum.org/EIPS/eip-1820 + } +) type Duration struct { time.Duration @@ -80,6 +93,14 @@ type Config struct { RPCGasCap uint64 `json:"rpc-gas-cap"` RPCTxFeeCap float64 `json:"rpc-tx-fee-cap"` + // Cache settings + TrieCleanCache int `json:"trie-clean-cache"` // Size of the trie clean cache (MB) + TrieCleanJournal string `json:"trie-clean-journal"` // Directory to use to save the trie clean cache (must be populated to enable journaling the trie clean cache) + TrieCleanRejournal Duration `json:"trie-clean-rejournal"` // Frequency to re-journal the trie clean cache to disk (minimum 1 minute, must be populated to enable journaling the trie clean cache) + TrieDirtyCache int `json:"trie-dirty-cache"` // Size of the trie dirty cache (MB) + TrieDirtyCommitTarget int `json:"trie-dirty-commit-target"` // Memory limit to target in the dirty cache before performing a commit (MB) + SnapshotCache int `json:"snapshot-cache"` // Size of the snapshot disk layer clean cache (MB) + // Eth Settings Preimages bool `json:"preimages-enabled"` SnapshotAsync bool `json:"snapshot-async"` @@ -97,13 +118,24 @@ type Config struct { MetricsExpensiveEnabled bool `json:"metrics-expensive-enabled"` // Debug-level metrics that might impact runtime performance // API Settings - LocalTxsEnabled bool `json:"local-txs-enabled"` - APIMaxDuration Duration `json:"api-max-duration"` - WSCPURefillRate Duration `json:"ws-cpu-refill-rate"` - WSCPUMaxStored Duration 
`json:"ws-cpu-max-stored"` - MaxBlocksPerRequest int64 `json:"api-max-blocks-per-request"` - AllowUnfinalizedQueries bool `json:"allow-unfinalized-queries"` - AllowUnprotectedTxs bool `json:"allow-unprotected-txs"` + LocalTxsEnabled bool `json:"local-txs-enabled"` + + TxPoolJournal string `json:"tx-pool-journal"` + TxPoolRejournal Duration `json:"tx-pool-rejournal"` + TxPoolPriceLimit uint64 `json:"tx-pool-price-limit"` + TxPoolPriceBump uint64 `json:"tx-pool-price-bump"` + TxPoolAccountSlots uint64 `json:"tx-pool-account-slots"` + TxPoolGlobalSlots uint64 `json:"tx-pool-global-slots"` + TxPoolAccountQueue uint64 `json:"tx-pool-account-queue"` + TxPoolGlobalQueue uint64 `json:"tx-pool-global-queue"` + + APIMaxDuration Duration `json:"api-max-duration"` + WSCPURefillRate Duration `json:"ws-cpu-refill-rate"` + WSCPUMaxStored Duration `json:"ws-cpu-max-stored"` + MaxBlocksPerRequest int64 `json:"api-max-blocks-per-request"` + AllowUnfinalizedQueries bool `json:"allow-unfinalized-queries"` + AllowUnprotectedTxs bool `json:"allow-unprotected-txs"` + AllowUnprotectedTxHashes []common.Hash `json:"allow-unprotected-tx-hashes"` // Keystore Settings KeystoreDirectory string `json:"keystore-directory"` // both absolute and relative supported @@ -125,15 +157,38 @@ type Config struct { OfflinePruningDataDirectory string `json:"offline-pruning-data-directory"` // VM2VM network - MaxOutboundActiveRequests int64 `json:"max-outbound-active-requests"` + MaxOutboundActiveRequests int64 `json:"max-outbound-active-requests"` + MaxOutboundActiveCrossChainRequests int64 `json:"max-outbound-active-cross-chain-requests"` // Sync settings - StateSyncEnabled bool `json:"state-sync-enabled"` + StateSyncEnabled *bool `json:"state-sync-enabled"` // Pointer distinguishes false (no state sync) and not set (state sync only at genesis). 
StateSyncSkipResume bool `json:"state-sync-skip-resume"` // Forces state sync to use the highest available summary block StateSyncServerTrieCache int `json:"state-sync-server-trie-cache"` StateSyncIDs string `json:"state-sync-ids"` StateSyncCommitInterval uint64 `json:"state-sync-commit-interval"` StateSyncMinBlocks uint64 `json:"state-sync-min-blocks"` + + // Database Settings + InspectDatabase bool `json:"inspect-database"` // Inspects the database on startup if enabled. + + // SkipUpgradeCheck disables checking that upgrades must take place before the last + // accepted block. Skipping this check is useful when a node operator does not update + // their node before the network upgrade and their node accepts blocks that have + // identical state with the pre-upgrade ruleset. + SkipUpgradeCheck bool `json:"skip-upgrade-check"` + + // AcceptedCacheSize is the depth to keep in the accepted headers cache and the + // accepted logs cache at the accepted tip. + // + // This is particularly useful for improving the performance of eth_getLogs + // on RPC nodes. 
+ AcceptedCacheSize int `json:"accepted-cache-size"` + + // TxLookupLimit is the maximum number of blocks from head whose tx indices + // are reserved: + // * 0: means no limit + // * N: means N block limit [HEAD-N+1, HEAD] and delete extra indexes + TxLookupLimit uint64 `json:"tx-lookup-limit"` } // EthAPIs returns an array of strings representing the Eth APIs that should be enabled @@ -150,6 +205,16 @@ func (c *Config) SetDefaults() { c.RPCGasCap = defaultRpcGasCap c.RPCTxFeeCap = defaultRpcTxFeeCap c.MetricsExpensiveEnabled = defaultMetricsExpensiveEnabled + + c.TxPoolJournal = core.DefaultTxPoolConfig.Journal + c.TxPoolRejournal = Duration{core.DefaultTxPoolConfig.Rejournal} + c.TxPoolPriceLimit = core.DefaultTxPoolConfig.PriceLimit + c.TxPoolPriceBump = core.DefaultTxPoolConfig.PriceBump + c.TxPoolAccountSlots = core.DefaultTxPoolConfig.AccountSlots + c.TxPoolGlobalSlots = core.DefaultTxPoolConfig.GlobalSlots + c.TxPoolAccountQueue = core.DefaultTxPoolConfig.AccountQueue + c.TxPoolGlobalQueue = core.DefaultTxPoolConfig.GlobalQueue + c.APIMaxDuration.Duration = defaultApiMaxDuration c.WSCPURefillRate.Duration = defaultWsCpuRefillRate c.WSCPUMaxStored.Duration = defaultWsCpuMaxStored @@ -157,6 +222,10 @@ func (c *Config) SetDefaults() { c.ContinuousProfilerFrequency.Duration = defaultContinuousProfilerFrequency c.ContinuousProfilerMaxFiles = defaultContinuousProfilerMaxFiles c.Pruning = defaultPruningEnabled + c.TrieCleanCache = defaultTrieCleanCache + c.TrieDirtyCache = defaultTrieDirtyCache + c.TrieDirtyCommitTarget = defaultTrieDirtyCommitTarget + c.SnapshotCache = defaultSnapshotCache c.AcceptorQueueLimit = defaultAcceptorQueueLimit c.SnapshotAsync = defaultSnapshotAsync c.TxRegossipFrequency.Duration = defaultTxRegossipFrequency @@ -166,10 +235,13 @@ func (c *Config) SetDefaults() { c.PopulateMissingTriesParallelism = defaultPopulateMissingTriesParallelism c.LogJSONFormat = defaultLogJSONFormat c.MaxOutboundActiveRequests = defaultMaxOutboundActiveRequests 
+ c.MaxOutboundActiveCrossChainRequests = defaultMaxOutboundActiveCrossChainRequests c.StateSyncServerTrieCache = defaultStateSyncServerTrieCache c.CommitInterval = defaultCommitInterval c.StateSyncCommitInterval = defaultSyncableCommitInterval c.StateSyncMinBlocks = defaultStateSyncMinBlocks + c.AllowUnprotectedTxHashes = defaultAllowUnprotectedTxHashes + c.AcceptedCacheSize = defaultAcceptedCacheSize } func (d *Duration) UnmarshalJSON(data []byte) (err error) { diff --git a/coreth/plugin/evm/config_test.go b/coreth/plugin/evm/config_test.go index e81aa97b..809c4f90 100644 --- a/coreth/plugin/evm/config_test.go +++ b/coreth/plugin/evm/config_test.go @@ -9,9 +9,16 @@ import ( "testing" "time" + "github.com/ethereum/go-ethereum/common" "github.com/stretchr/testify/assert" ) +// newTrue returns a pointer to a bool that is true +func newTrue() *bool { + b := true + return &b +} + func TestUnmarshalConfig(t *testing.T) { tests := []struct { name string @@ -21,8 +28,8 @@ func TestUnmarshalConfig(t *testing.T) { }{ { "string durations parsed", - []byte(`{"api-max-duration": "1m", "continuous-profiler-frequency": "2m"}`), - Config{APIMaxDuration: Duration{1 * time.Minute}, ContinuousProfilerFrequency: Duration{2 * time.Minute}}, + []byte(`{"api-max-duration": "1m", "continuous-profiler-frequency": "2m", "tx-pool-rejournal": "3m30s"}`), + Config{APIMaxDuration: Duration{1 * time.Minute}, ContinuousProfilerFrequency: Duration{2 * time.Minute}, TxPoolRejournal: Duration{3*time.Minute + 30*time.Second}}, false, }, { @@ -33,8 +40,8 @@ func TestUnmarshalConfig(t *testing.T) { }, { "nanosecond durations parsed", - []byte(`{"api-max-duration": 5000000000, "continuous-profiler-frequency": 5000000000}`), - Config{APIMaxDuration: Duration{5 * time.Second}, ContinuousProfilerFrequency: Duration{5 * time.Second}}, + []byte(`{"api-max-duration": 5000000000, "continuous-profiler-frequency": 5000000000, "tx-pool-rejournal": 9000000000}`), + Config{APIMaxDuration: Duration{5 * 
time.Second}, ContinuousProfilerFrequency: Duration{5 * time.Second}, TxPoolRejournal: Duration{9 * time.Second}}, false, }, { @@ -44,10 +51,25 @@ func TestUnmarshalConfig(t *testing.T) { true, }, + { + "tx pool configurations", + []byte(`{"tx-pool-journal": "hello", "tx-pool-price-limit": 1, "tx-pool-price-bump": 2, "tx-pool-account-slots": 3, "tx-pool-global-slots": 4, "tx-pool-account-queue": 5, "tx-pool-global-queue": 6}`), + Config{ + TxPoolJournal: "hello", + TxPoolPriceLimit: 1, + TxPoolPriceBump: 2, + TxPoolAccountSlots: 3, + TxPoolGlobalSlots: 4, + TxPoolAccountQueue: 5, + TxPoolGlobalQueue: 6, + }, + false, + }, + { "state sync enabled", []byte(`{"state-sync-enabled":true}`), - Config{StateSyncEnabled: true}, + Config{StateSyncEnabled: newTrue()}, false, }, { @@ -56,6 +78,40 @@ func TestUnmarshalConfig(t *testing.T) { Config{StateSyncIDs: "NodeID-CaBYJ9kzHvrQFiYWowMkJGAQKGMJqZoat"}, false, }, + { + "empty tx lookup limit", + []byte(`{}`), + Config{TxLookupLimit: 0}, + false, + }, + { + "zero tx lookup limit", + []byte(`{"tx-lookup-limit": 0}`), + func() Config { + return Config{TxLookupLimit: 0} + }(), + false, + }, + { + "1 tx lookup limit", + []byte(`{"tx-lookup-limit": 1}`), + func() Config { + return Config{TxLookupLimit: 1} + }(), + false, + }, + { + "-1 tx lookup limit", + []byte(`{"tx-lookup-limit": -1}`), + Config{}, + true, + }, + { + "allow unprotected tx hashes", + []byte(`{"allow-unprotected-tx-hashes": ["0x803351deb6d745e91545a6a3e1c0ea3e9a6a02a1a4193b70edfcd2f40f71a01c"]}`), + Config{AllowUnprotectedTxHashes: []common.Hash{common.HexToHash("0x803351deb6d745e91545a6a3e1c0ea3e9a6a02a1a4193b70edfcd2f40f71a01c")}}, + false, + }, } for _, tt := range tests { diff --git a/coreth/plugin/evm/export_tx.go b/coreth/plugin/evm/export_tx.go index 2d56dd08..fe83483d 100644 --- a/coreth/plugin/evm/export_tx.go +++ b/coreth/plugin/evm/export_tx.go @@ -4,6 +4,7 @@ package evm import ( + "context" "encoding/hex" "errors" "fmt" @@ -17,9 +18,10 @@ import ( 
"github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -51,8 +53,8 @@ type UnsignedExportTx struct { } // InputUTXOs returns a set of all the hash(address:nonce) exporting funds. -func (utx *UnsignedExportTx) InputUTXOs() ids.Set { - set := ids.NewSet(len(utx.Ins)) +func (utx *UnsignedExportTx) InputUTXOs() set.Set[ids.ID] { + set := set.NewSet[ids.ID](len(utx.Ins)) for _, in := range utx.Ins { // Total populated bytes is exactly 32 bytes. // 8 (Nonce) + 4 (Address Length) + 20 (Address) @@ -89,7 +91,7 @@ func (utx *UnsignedExportTx) Verify( if rules.IsApricotPhase5 { // Note that SameSubnet verifies that [tx.DestinationChain] isn't this // chain's ID - if err := verify.SameSubnet(ctx, utx.DestinationChain); err != nil { + if err := verify.SameSubnet(context.TODO(), ctx, utx.DestinationChain); err != nil { return errWrongChainID } } else { @@ -174,7 +176,7 @@ func (utx *UnsignedExportTx) Burned(assetID ids.ID) (uint64, error) { } } - return math.Sub64(input, spent) + return math.Sub(input, spent) } // SemanticVerify this transaction is valid. 
@@ -309,9 +311,9 @@ func (vm *VM) newExportTx( chainID ids.ID, // Chain to send the UTXOs to to ids.ShortID, // Address of chain recipient baseFee *big.Int, // fee to use post-AP3 - keys []*crypto.PrivateKeySECP256K1R, // Pay the fee and provide the tokens + keys []*secp256k1.PrivateKey, // Pay the fee and provide the tokens ) (*Tx, error) { - outs := []*avax.TransferableOutput{{ // Exported to X-Chain + outs := []*avax.TransferableOutput{{ Asset: avax.Asset{ID: assetID}, Out: &secp256k1fx.TransferOutput{ Amt: amount, @@ -326,7 +328,7 @@ func (vm *VM) newExportTx( var ( avaxNeeded uint64 = 0 ins, avaxIns []EVMInput - signers, avaxSigners [][]*crypto.PrivateKeySECP256K1R + signers, avaxSigners [][]*secp256k1.PrivateKey err error ) @@ -429,14 +431,9 @@ func (utx *UnsignedExportTx) EVMStateTransfer(ctx *snow.Context, state *state.St // Recover the address from the signature of the transaction hash func recoverAddress(vm *VM, txHash []byte, sig []byte) (common.Address, error) { - pubKeyIntf, err := vm.secpFactory.RecoverHashPublicKey(txHash, sig) + pubKey, err := vm.secpFactory.RecoverHashPublicKey(txHash, sig) if err != nil { return common.Address{}, err } - pubKey, ok := pubKeyIntf.(*crypto.PublicKeySECP256K1R) - if !ok { - // This should never happen - return common.Address{}, fmt.Errorf("expected *crypto.PublicKeySECP256K1R but got %T", pubKeyIntf) - } return PublicKeyToEthAddress(pubKey), nil } diff --git a/coreth/plugin/evm/export_tx_test.go b/coreth/plugin/evm/export_tx_test.go index 5784472e..50b782e1 100644 --- a/coreth/plugin/evm/export_tx_test.go +++ b/coreth/plugin/evm/export_tx_test.go @@ -5,6 +5,7 @@ package evm import ( "bytes" + "context" "math/big" "testing" @@ -12,7 +13,7 @@ import ( "github.com/ava-labs/avalanchego/ids" engCommon "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" 
"github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -53,7 +54,7 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, } // Import the funds - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -64,27 +65,27 @@ func createExportTxOptions(t *testing.T, vm *VM, issuer chan engCommon.Message, <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } // Use the funds to create 3 conflicting export transactions sending the funds to each of the test addresses exportTxs := make([]*Tx, 0, 3) for _, addr := range testShortIDAddrs { - exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, uint64(5000000), vm.ctx.XChainID, addr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -318,7 +319,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { t.Run(test.name, func(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -365,7 
+366,7 @@ func TestExportTxEVMStateTransfer(t *testing.T) { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -376,20 +377,20 @@ func TestExportTxEVMStateTransfer(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -436,7 +437,7 @@ func TestExportTxSemanticVerify(t *testing.T) { _, vm, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -522,7 +523,7 @@ func TestExportTxSemanticVerify(t *testing.T) { tests := []struct { name string tx *Tx - signers [][]*crypto.PrivateKeySECP256K1R + signers [][]*secp256k1.PrivateKey baseFee *big.Int rules params.Rules shouldErr bool @@ -530,7 +531,7 @@ func TestExportTxSemanticVerify(t *testing.T) { { name: "valid", tx: &Tx{UnsignedAtomicTx: validExportTx}, - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -546,7 +547,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.DestinationChain = constants.PlatformChainID return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, }, baseFee: initialBaseFee, @@ -560,7 +561,7 @@ func TestExportTxSemanticVerify(t 
*testing.T) { validExportTx.DestinationChain = constants.PlatformChainID return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, }, baseFee: initialBaseFee, @@ -574,7 +575,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.DestinationChain = ids.GenerateTestID() return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, }, baseFee: initialBaseFee, @@ -588,7 +589,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.DestinationChain = constants.PlatformChainID return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -604,7 +605,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.DestinationChain = constants.PlatformChainID return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -620,7 +621,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.DestinationChain = ids.GenerateTestID() return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -636,7 +637,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.ExportedOutputs = nil return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -652,7 +653,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.NetworkID++ return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -668,7 +669,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.BlockchainID = ids.GenerateTestID() return 
&Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -685,7 +686,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.Ins[2].Amount = 0 return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -710,7 +711,7 @@ func TestExportTxSemanticVerify(t *testing.T) { }} return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -751,7 +752,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.ExportedOutputs = exportOutputs return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -768,7 +769,7 @@ func TestExportTxSemanticVerify(t *testing.T) { validExportTx.Ins[2] = validExportTx.Ins[1] return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -795,7 +796,7 @@ func TestExportTxSemanticVerify(t *testing.T) { } return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -822,7 +823,7 @@ func TestExportTxSemanticVerify(t *testing.T) { } return &Tx{UnsignedAtomicTx: &validExportTx} }(), - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -834,7 +835,7 @@ func TestExportTxSemanticVerify(t *testing.T) { { name: "too many signatures", tx: &Tx{UnsignedAtomicTx: validExportTx}, - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -847,7 +848,7 @@ func TestExportTxSemanticVerify(t *testing.T) { { name: "too few signatures", tx: &Tx{UnsignedAtomicTx: validExportTx}, - 
signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key}, {key}, }, @@ -858,7 +859,7 @@ func TestExportTxSemanticVerify(t *testing.T) { { name: "too many signatures on credential", tx: &Tx{UnsignedAtomicTx: validExportTx}, - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {key, testKeys[1]}, {key}, {key}, @@ -870,7 +871,7 @@ func TestExportTxSemanticVerify(t *testing.T) { { name: "too few signatures on credential", tx: &Tx{UnsignedAtomicTx: validExportTx}, - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {}, {key}, {key}, @@ -882,7 +883,7 @@ func TestExportTxSemanticVerify(t *testing.T) { { name: "wrong signature on credential", tx: &Tx{UnsignedAtomicTx: validExportTx}, - signers: [][]*crypto.PrivateKeySECP256K1R{ + signers: [][]*secp256k1.PrivateKey{ {testKeys[1]}, {key}, {key}, @@ -894,7 +895,7 @@ func TestExportTxSemanticVerify(t *testing.T) { { name: "no signatures", tx: &Tx{UnsignedAtomicTx: validExportTx}, - signers: [][]*crypto.PrivateKeySECP256K1R{}, + signers: [][]*secp256k1.PrivateKey{}, baseFee: initialBaseFee, rules: apricotRulesPhase3, shouldErr: true, @@ -926,7 +927,7 @@ func TestExportTxAccept(t *testing.T) { xChainSharedMemory := sharedMemory.NewSharedMemory(vm.ctx.XChainID) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -985,7 +986,7 @@ func TestExportTxAccept(t *testing.T) { tx := &Tx{UnsignedAtomicTx: exportTx} - signers := [][]*crypto.PrivateKeySECP256K1R{ + signers := [][]*secp256k1.PrivateKey{ {key}, {key}, {key}, @@ -1119,7 +1120,7 @@ func TestExportTxVerify(t *testing.T) { avax.SortTransferableOutputs(exportTx.ExportedOutputs, Codec) // Pass in a list of signers here with the appropriate length // to avoid causing a nil-pointer error in the helper method - emptySigners := make([][]*crypto.PrivateKeySECP256K1R, 2) + emptySigners := 
make([][]*secp256k1.PrivateKey, 2) SortEVMInputsAndSigners(exportTx.Ins, emptySigners) ctx := NewContext() @@ -1373,7 +1374,7 @@ func TestExportTxGasCost(t *testing.T) { tests := map[string]struct { UnsignedExportTx *UnsignedExportTx - Keys [][]*crypto.PrivateKeySECP256K1R + Keys [][]*secp256k1.PrivateKey BaseFee *big.Int ExpectedGasUsed uint64 @@ -1407,7 +1408,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 1, BaseFee: big.NewInt(1), @@ -1439,7 +1440,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, ExpectedGasUsed: 11230, ExpectedFee: 1, BaseFee: big.NewInt(1), @@ -1472,7 +1473,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 30750, BaseFee: big.NewInt(25 * params.GWei), @@ -1504,7 +1505,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 276750, BaseFee: big.NewInt(225 * params.GWei), @@ -1548,7 +1549,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0], testKeys[0], testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0], testKeys[0], testKeys[0]}}, ExpectedGasUsed: 3366, ExpectedFee: 84150, BaseFee: big.NewInt(25 * params.GWei), @@ -1592,7 +1593,7 @@ func TestExportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0], testKeys[0], testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0], testKeys[0], testKeys[0]}}, ExpectedGasUsed: 3366, ExpectedFee: 757350, BaseFee: big.NewInt(225 * params.GWei), @@ -1683,7 
+1684,7 @@ func TestNewExportTx(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, test.genesis, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -1720,7 +1721,7 @@ func TestNewExportTx(t *testing.T) { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1731,27 +1732,27 @@ func TestNewExportTx(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } parent = vm.LastAcceptedBlockInternal().(*Block) exportAmount := uint64(5000000) - tx, err = vm.newExportTx(vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + tx, err = vm.newExportTx(vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1843,7 +1844,7 @@ func TestNewExportTxMulticoin(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, test.genesis, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -1910,7 +1911,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], 
initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1921,20 +1922,20 @@ func TestNewExportTxMulticoin(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -1947,7 +1948,7 @@ func TestNewExportTxMulticoin(t *testing.T) { t.Fatal(err) } - tx, err = vm.newExportTx(tid, exportAmount, vm.ctx.XChainID, exportId, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + tx, err = vm.newExportTx(tid, exportAmount, vm.ctx.XChainID, exportId, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } diff --git a/coreth/plugin/evm/factory.go b/coreth/plugin/evm/factory.go index 34ecc893..a08fbc2a 100644 --- a/coreth/plugin/evm/factory.go +++ b/coreth/plugin/evm/factory.go @@ -5,7 +5,7 @@ package evm import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms" ) @@ -18,6 +18,6 @@ var ( type Factory struct{} -func (f *Factory) New(*snow.Context) (interface{}, error) { +func (*Factory) New(logging.Logger) (interface{}, error) { return &VM{}, nil } diff --git a/coreth/plugin/evm/formatting.go b/coreth/plugin/evm/formatting.go index 505d9875..a586a455 100644 --- a/coreth/plugin/evm/formatting.go +++ b/coreth/plugin/evm/formatting.go @@ -8,10 +8,10 @@ import ( "github.com/ava-labs/avalanchego/ids" 
"github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ethereum/go-ethereum/common" - ethcrypto "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/crypto" ) // ParseLocalAddress takes in an address for this chain and produces the ID @@ -52,11 +52,11 @@ func ParseEthAddress(addrStr string) (common.Address, error) { } // GetEthAddress returns the ethereum address derived from [privKey] -func GetEthAddress(privKey *crypto.PrivateKeySECP256K1R) common.Address { - return PublicKeyToEthAddress(privKey.PublicKey().(*crypto.PublicKeySECP256K1R)) +func GetEthAddress(privKey *secp256k1.PrivateKey) common.Address { + return PublicKeyToEthAddress(privKey.PublicKey()) } // PublicKeyToEthAddress returns the ethereum address derived from [pubKey] -func PublicKeyToEthAddress(pubKey *crypto.PublicKeySECP256K1R) common.Address { - return ethcrypto.PubkeyToAddress(*(pubKey.ToECDSA())) +func PublicKeyToEthAddress(pubKey *secp256k1.PublicKey) common.Address { + return crypto.PubkeyToAddress(*(pubKey.ToECDSA())) } diff --git a/coreth/plugin/evm/gossiper.go b/coreth/plugin/evm/gossiper.go index 4779737e..e1bae8e1 100644 --- a/coreth/plugin/evm/gossiper.go +++ b/coreth/plugin/evm/gossiper.go @@ -67,8 +67,8 @@ type pushGossiper struct { // [recentAtomicTxs] and [recentEthTxs] prevent us from over-gossiping the // same transaction in a short period of time. 
- recentAtomicTxs *cache.LRU - recentEthTxs *cache.LRU + recentAtomicTxs *cache.LRU[ids.ID, interface{}] + recentEthTxs *cache.LRU[common.Hash, interface{}] codec codec.Manager stats GossipSentStats @@ -93,8 +93,8 @@ func (vm *VM) createGossiper(stats GossipStats) Gossiper { ethTxsToGossip: make(map[common.Hash]*types.Transaction), shutdownChan: vm.shutdownChan, shutdownWg: &vm.shutdownWg, - recentAtomicTxs: &cache.LRU{Size: recentCacheSize}, - recentEthTxs: &cache.LRU{Size: recentCacheSize}, + recentAtomicTxs: &cache.LRU[ids.ID, interface{}]{Size: recentCacheSize}, + recentEthTxs: &cache.LRU[common.Hash, interface{}]{Size: recentCacheSize}, codec: vm.networkCodec, stats: stats, } @@ -219,12 +219,15 @@ func (n *pushGossiper) queueRegossipTxs() types.Transactions { func (n *pushGossiper) awaitEthTxGossip() { n.shutdownWg.Add(1) go n.ctx.Log.RecoverAndPanic(func() { - defer n.shutdownWg.Done() - var ( gossipTicker = time.NewTicker(ethTxsGossipInterval) regossipTicker = time.NewTicker(n.config.TxRegossipFrequency.Duration) ) + defer func() { + gossipTicker.Stop() + regossipTicker.Stop() + n.shutdownWg.Done() + }() for { select { diff --git a/coreth/plugin/evm/gossiper_atomic_gossiping_test.go b/coreth/plugin/evm/gossiper_atomic_gossiping_test.go index 99ff3fe1..3f3b70c5 100644 --- a/coreth/plugin/evm/gossiper_atomic_gossiping_test.go +++ b/coreth/plugin/evm/gossiper_atomic_gossiping_test.go @@ -4,11 +4,14 @@ package evm import ( + "context" + "os" "sync" "testing" "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/stretchr/testify/assert" @@ -21,7 +24,7 @@ func TestMempoolAtmTxsIssueTxAndGossiping(t *testing.T) { _, vm, _, sharedMemory, sender := GenesisVM(t, true, "", "", "") defer func() { - assert.NoError(vm.Shutdown()) + assert.NoError(vm.Shutdown(context.Background())) }() // Create conflicting transactions @@ -31,7 +34,7 @@ func TestMempoolAtmTxsIssueTxAndGossiping(t *testing.T) { var gossiped int var 
gossipedLock sync.Mutex // needed to prevent race sender.CantSendAppGossip = false - sender.SendAppGossipF = func(gossipedBytes []byte) error { + sender.SendAppGossipF = func(_ context.Context, gossipedBytes []byte) error { gossipedLock.Lock() defer gossipedLock.Unlock() @@ -80,7 +83,7 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { _, vm, _, sharedMemory, sender := GenesisVM(t, true, "", "", "") defer func() { - assert.NoError(vm.Shutdown()) + assert.NoError(vm.Shutdown(context.Background())) }() nodeID := ids.GenerateTestNodeID() @@ -91,14 +94,14 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { txRequested bool ) sender.CantSendAppGossip = false - sender.SendAppGossipF = func(_ []byte) error { + sender.SendAppGossipF = func(context.Context, []byte) error { txGossipedLock.Lock() defer txGossipedLock.Unlock() txGossiped++ return nil } - sender.SendAppRequestF = func(_ ids.NodeIDSet, _ uint32, _ []byte) error { + sender.SendAppRequestF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) error { txRequested = true return nil } @@ -115,7 +118,7 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { assert.NoError(err) // show that no txID is requested - assert.NoError(vm.AppGossip(nodeID, msgBytes)) + assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) time.Sleep(waitBlockTime * 3) assert.False(txRequested, "tx should not have been requested") @@ -125,7 +128,7 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { assert.True(vm.mempool.has(tx.ID())) // show that tx is not re-gossiped - assert.NoError(vm.AppGossip(nodeID, msgBytes)) + assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) txGossipedLock.Lock() assert.Equal(1, txGossiped, "tx should have only been gossiped once") txGossipedLock.Unlock() @@ -136,7 +139,7 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { } msgBytes, err = message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) - assert.NoError(vm.AppGossip(nodeID, 
msgBytes)) + assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) assert.False(txRequested, "tx should not have been requested") txGossipedLock.Lock() assert.Equal(1, txGossiped, "tx should not have been gossiped") @@ -146,12 +149,14 @@ func TestMempoolAtmTxsAppGossipHandling(t *testing.T) { // show that txs already marked as invalid are not re-requested on gossiping func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } assert := assert.New(t) _, vm, _, sharedMemory, sender := GenesisVM(t, true, "", "", "") defer func() { - assert.NoError(vm.Shutdown()) + assert.NoError(vm.Shutdown(context.Background())) }() mempool := vm.mempool @@ -161,14 +166,14 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { txRequested bool ) sender.CantSendAppGossip = false - sender.SendAppGossipF = func(_ []byte) error { + sender.SendAppGossipF = func(context.Context, []byte) error { txGossipedLock.Lock() defer txGossipedLock.Unlock() txGossiped++ return nil } - sender.SendAppRequestF = func(ids.NodeIDSet, uint32, []byte) error { + sender.SendAppRequestF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) error { txRequested = true return nil } @@ -194,7 +199,7 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { msgBytes, err := message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) - assert.NoError(vm.AppGossip(nodeID, msgBytes)) + assert.NoError(vm.AppGossip(context.Background(), nodeID, msgBytes)) assert.False(txRequested, "tx shouldn't be requested") txGossipedLock.Lock() assert.Zero(txGossiped, "tx should not have been gossiped") @@ -212,7 +217,7 @@ func TestMempoolAtmTxsAppGossipHandlingDiscardedTx(t *testing.T) { msgBytes, err = message.BuildGossipMessage(vm.networkCodec, msg) assert.NoError(err) - assert.NoError(vm.AppGossip(nodeID, msgBytes)) + assert.NoError(vm.AppGossip(context.Background(), nodeID, 
msgBytes)) time.Sleep(waitBlockTime * 3) assert.False(txRequested, "tx shouldn't be requested") txGossipedLock.Lock() diff --git a/coreth/plugin/evm/gossiper_eth_gossiping_test.go b/coreth/plugin/evm/gossiper_eth_gossiping_test.go index fb96153d..4a2a3f6d 100644 --- a/coreth/plugin/evm/gossiper_eth_gossiping_test.go +++ b/coreth/plugin/evm/gossiper_eth_gossiping_test.go @@ -4,15 +4,18 @@ package evm import ( + "context" "crypto/ecdsa" "encoding/json" "math/big" + "os" "strings" "sync" "testing" "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -80,7 +83,9 @@ func getValidEthTxs(key *ecdsa.PrivateKey, count int, gasPrice *big.Int) []*type // to ease up UT, which target only VM behaviors in response to coreth mempool // signals func TestMempoolEthTxsAddedTxsGossipedAfterActivation(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } assert := assert.New(t) key, err := crypto.GenerateKey() @@ -93,7 +98,7 @@ func TestMempoolEthTxsAddedTxsGossipedAfterActivation(t *testing.T) { _, vm, _, _, sender := GenesisVM(t, true, genesisJSON, "", "") defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) assert.NoError(err) }() vm.txPool.SetGasPrice(common.Big1) @@ -107,7 +112,7 @@ func TestMempoolEthTxsAddedTxsGossipedAfterActivation(t *testing.T) { sender.CantSendAppGossip = false signal1 := make(chan struct{}) seen := 0 - sender.SendAppGossipF = func(gossipedBytes []byte) error { + sender.SendAppGossipF = func(_ context.Context, gossipedBytes []byte) error { if seen == 0 { notifyMsgIntf, err := message.ParseGossipMessage(vm.networkCodec, gossipedBytes) assert.NoError(err) @@ -166,7 +171,9 @@ func TestMempoolEthTxsAddedTxsGossipedAfterActivation(t *testing.T) { // show that locally issued eth txs are chunked correctly func TestMempoolEthTxsAddedTxsGossipedAfterActivationChunking(t 
*testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } assert := assert.New(t) key, err := crypto.GenerateKey() @@ -179,7 +186,7 @@ func TestMempoolEthTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { _, vm, _, _, sender := GenesisVM(t, true, genesisJSON, "", "") defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) assert.NoError(err) }() vm.txPool.SetGasPrice(common.Big1) @@ -192,7 +199,7 @@ func TestMempoolEthTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { wg.Add(2) sender.CantSendAppGossip = false seen := map[common.Hash]struct{}{} - sender.SendAppGossipF = func(gossipedBytes []byte) error { + sender.SendAppGossipF = func(_ context.Context, gossipedBytes []byte) error { notifyMsgIntf, err := message.ParseGossipMessage(vm.networkCodec, gossipedBytes) assert.NoError(err) @@ -226,7 +233,9 @@ func TestMempoolEthTxsAddedTxsGossipedAfterActivationChunking(t *testing.T) { // show that a geth tx discovered from gossip is requested to the same node that // gossiped it func TestMempoolEthTxsAppGossipHandling(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } assert := assert.New(t) key, err := crypto.GenerateKey() @@ -239,7 +248,7 @@ func TestMempoolEthTxsAppGossipHandling(t *testing.T) { _, vm, _, _, sender := GenesisVM(t, true, genesisJSON, "", "") defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) assert.NoError(err) }() vm.txPool.SetGasPrice(common.Big1) @@ -250,12 +259,12 @@ func TestMempoolEthTxsAppGossipHandling(t *testing.T) { txRequested bool ) sender.CantSendAppGossip = false - sender.SendAppRequestF = func(_ ids.NodeIDSet, _ uint32, _ []byte) error { + sender.SendAppRequestF = func(context.Context, set.Set[ids.NodeID], uint32, []byte) error { txRequested = true return nil } wg.Add(1) - sender.SendAppGossipF = func(_ []byte) error { + sender.SendAppGossipF = func(context.Context, []byte) 
error { wg.Done() return nil } @@ -273,7 +282,7 @@ func TestMempoolEthTxsAppGossipHandling(t *testing.T) { assert.NoError(err) nodeID := ids.GenerateTestNodeID() - err = vm.AppGossip(nodeID, msgBytes) + err = vm.AppGossip(context.Background(), nodeID, msgBytes) assert.NoError(err) assert.False(txRequested, "tx should not be requested") @@ -294,7 +303,7 @@ func TestMempoolEthTxsRegossipSingleAccount(t *testing.T) { _, vm, _, _, _ := GenesisVM(t, true, genesisJSON, `{"local-txs-enabled":true}`, "") defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) assert.NoError(err) }() vm.txPool.SetGasPrice(common.Big1) @@ -334,7 +343,7 @@ func TestMempoolEthTxsRegossip(t *testing.T) { _, vm, _, _, _ := GenesisVM(t, true, genesisJSON, `{"local-txs-enabled":true}`, "") defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) assert.NoError(err) }() vm.txPool.SetGasPrice(common.Big1) diff --git a/coreth/plugin/evm/health.go b/coreth/plugin/evm/health.go index 35b49f5e..116f820e 100644 --- a/coreth/plugin/evm/health.go +++ b/coreth/plugin/evm/health.go @@ -3,10 +3,12 @@ package evm +import "context" + // Health returns nil if this chain is healthy. 
// Also returns details, which should be one of: // string, []byte, map[string]string -func (vm *VM) HealthCheck() (interface{}, error) { +func (vm *VM) HealthCheck(context.Context) (interface{}, error) { // TODO perform actual health check return nil, nil } diff --git a/coreth/plugin/evm/import_tx.go b/coreth/plugin/evm/import_tx.go index b641ec52..8f02e906 100644 --- a/coreth/plugin/evm/import_tx.go +++ b/coreth/plugin/evm/import_tx.go @@ -4,6 +4,7 @@ package evm import ( + "context" "errors" "fmt" "math/big" @@ -14,8 +15,10 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -46,8 +49,8 @@ type UnsignedImportTx struct { } // InputUTXOs returns the UTXOIDs of the imported funds -func (utx *UnsignedImportTx) InputUTXOs() ids.Set { - set := ids.NewSet(len(utx.ImportedInputs)) +func (utx *UnsignedImportTx) InputUTXOs() set.Set[ids.ID] { + set := set.NewSet[ids.ID](len(utx.ImportedInputs)) for _, in := range utx.ImportedInputs { set.Add(in.InputID()) } @@ -80,7 +83,7 @@ func (utx *UnsignedImportTx) Verify( if rules.IsApricotPhase5 { // Note that SameSubnet verifies that [tx.SourceChain] isn't this // chain's ID - if err := verify.SameSubnet(ctx, utx.SourceChain); err != nil { + if err := verify.SameSubnet(context.TODO(), ctx, utx.SourceChain); err != nil { return errWrongChainID } } else { @@ -106,7 +109,7 @@ func (utx *UnsignedImportTx) Verify( return errImportNonAVAXInputBanff } } - if !avax.IsSortedAndUniqueTransferableInputs(utx.ImportedInputs) { + if 
!utils.IsSortedAndUniqueSortable(utx.ImportedInputs) { return errInputsNotSortedUnique } @@ -171,7 +174,7 @@ func (utx *UnsignedImportTx) Burned(assetID ids.ID) (uint64, error) { } } - return math.Sub64(input, spent) + return math.Sub(input, spent) } // SemanticVerify this transaction is valid. @@ -279,7 +282,7 @@ func (vm *VM) newImportTx( chainID ids.ID, // chain to import from to common.Address, // Address of recipient baseFee *big.Int, // fee to use post-AP3 - keys []*crypto.PrivateKeySECP256K1R, // Keys to import the funds + keys []*secp256k1.PrivateKey, // Keys to import the funds ) (*Tx, error) { kc := secp256k1fx.NewKeychain() for _, key := range keys { @@ -303,7 +306,7 @@ func (vm *VM) newImportTxWithUTXOs( atomicUTXOs []*avax.UTXO, // UTXOs to spend ) (*Tx, error) { importedInputs := []*avax.TransferableInput{} - signers := [][]*crypto.PrivateKeySECP256K1R{} + signers := [][]*secp256k1.PrivateKey{} importedAmount := make(map[ids.ID]uint64) now := vm.clock.Unix() diff --git a/coreth/plugin/evm/import_tx_test.go b/coreth/plugin/evm/import_tx_test.go index 861636de..d292ef95 100644 --- a/coreth/plugin/evm/import_tx_test.go +++ b/coreth/plugin/evm/import_tx_test.go @@ -12,8 +12,10 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -51,7 +53,7 @@ func createImportTxOptions(t *testing.T, vm *VM, sharedMemory *atomic.Memory) [] importTxs := make([]*Tx, 0, 3) for _, ethAddr := range testEthAddrs { - importTx, err := vm.newImportTx(vm.ctx.XChainID, ethAddr, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, 
ethAddr, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -112,8 +114,8 @@ func TestImportTxVerify(t *testing.T) { }, } - // // Sort the inputs and outputs to ensure the transaction is canonical - avax.SortTransferableInputs(importTx.ImportedInputs) + // Sort the inputs and outputs to ensure the transaction is canonical + utils.Sort(importTx.ImportedInputs) SortEVMOutputs(importTx.Outs) tests := map[string]atomicTxVerifyTest{ @@ -430,7 +432,7 @@ func TestNewImportTx(t *testing.T) { t.Fatal(err) } - tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + tx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -476,7 +478,7 @@ func TestNewImportTx(t *testing.T) { } // Ensure that the UTXO has been removed from shared memory within Accept - addrSet := ids.ShortSet{} + addrSet := set.Set[ids.ShortID]{} addrSet.Add(testShortIDAddrs[0]) utxos, _, _, err := vm.GetAtomicUTXOs(vm.ctx.XChainID, addrSet, ids.ShortEmpty, ids.Empty, -1) if err != nil { @@ -540,7 +542,7 @@ func TestImportTxGasCost(t *testing.T) { tests := map[string]struct { UnsignedImportTx *UnsignedImportTx - Keys [][]*crypto.PrivateKeySECP256K1R + Keys [][]*secp256k1.PrivateKey ExpectedGasUsed uint64 ExpectedFee uint64 @@ -566,7 +568,7 @@ func TestImportTxGasCost(t *testing.T) { AssetID: avaxAssetID, }}, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 30750, BaseFee: big.NewInt(25 * params.GWei), @@ -590,7 +592,7 @@ func TestImportTxGasCost(t *testing.T) { AssetID: avaxAssetID, }}, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, ExpectedGasUsed: 1230, ExpectedFee: 1, BaseFee: big.NewInt(1), @@ -614,7 +616,7 @@ func TestImportTxGasCost(t *testing.T) { 
AssetID: avaxAssetID, }}, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}}, ExpectedGasUsed: 11230, ExpectedFee: 1, BaseFee: big.NewInt(1), @@ -651,7 +653,7 @@ func TestImportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}, {testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}, {testKeys[0]}}, ExpectedGasUsed: 2318, ExpectedFee: 57950, BaseFee: big.NewInt(25 * params.GWei), @@ -692,7 +694,7 @@ func TestImportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}, {testKeys[0]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0]}, {testKeys[0]}}, ExpectedGasUsed: 2378, ExpectedFee: 59450, BaseFee: big.NewInt(25 * params.GWei), @@ -716,7 +718,7 @@ func TestImportTxGasCost(t *testing.T) { AssetID: avaxAssetID, }}, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{{testKeys[0], testKeys[1]}}, + Keys: [][]*secp256k1.PrivateKey{{testKeys[0], testKeys[1]}}, ExpectedGasUsed: 2234, ExpectedFee: 55850, BaseFee: big.NewInt(25 * params.GWei), @@ -816,7 +818,7 @@ func TestImportTxGasCost(t *testing.T) { }, }, }, - Keys: [][]*crypto.PrivateKeySECP256K1R{ + Keys: [][]*secp256k1.PrivateKey{ {testKeys[0]}, {testKeys[0]}, {testKeys[0]}, @@ -886,7 +888,7 @@ func TestImportTxSemanticVerify(t *testing.T) { AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -915,7 +917,7 @@ func TestImportTxSemanticVerify(t *testing.T) { AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -955,7 +957,7 @@ func TestImportTxSemanticVerify(t *testing.T) { AssetID: vm.ctx.AVAXAssetID, }}, }} - if 
err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -989,7 +991,7 @@ func TestImportTxSemanticVerify(t *testing.T) { AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1022,7 +1024,7 @@ func TestImportTxSemanticVerify(t *testing.T) { AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1056,7 +1058,7 @@ func TestImportTxSemanticVerify(t *testing.T) { AssetID: assetID, }}, }} - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1123,7 +1125,7 @@ func TestImportTxSemanticVerify(t *testing.T) { }}, }} // Sign the transaction with the incorrect key - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[1]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[1]}}); err != nil { t.Fatal(err) } return tx @@ -1163,7 +1165,7 @@ func TestImportTxSemanticVerify(t *testing.T) { }, }, }} - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1209,7 +1211,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { AssetID: vm.ctx.AVAXAssetID, }}, }} - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, 
[][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx @@ -1254,7 +1256,7 @@ func TestImportTxEVMStateTransfer(t *testing.T) { AssetID: assetID, }}, }} - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } return tx diff --git a/coreth/plugin/evm/mempool.go b/coreth/plugin/evm/mempool.go index b409262d..25b67298 100644 --- a/coreth/plugin/evm/mempool.go +++ b/coreth/plugin/evm/mempool.go @@ -58,7 +58,7 @@ type Mempool struct { issuedTxs map[ids.ID]*Tx // discardedTxs is an LRU Cache of transactions that have been discarded after failing // verification. - discardedTxs *cache.LRU + discardedTxs *cache.LRU[ids.ID, *Tx] // Pending is a channel of length one, which the mempool ensures has an item on // it as long as there is an unissued transaction remaining in [txs] Pending chan struct{} @@ -78,7 +78,7 @@ func NewMempool(AVAXAssetID ids.ID, maxSize int) *Mempool { return &Mempool{ AVAXAssetID: AVAXAssetID, issuedTxs: make(map[ids.ID]*Tx), - discardedTxs: &cache.LRU{Size: discardedTxsCacheSize}, + discardedTxs: &cache.LRU[ids.ID, *Tx]{Size: discardedTxsCacheSize}, currentTxs: make(map[ids.ID]*Tx), Pending: make(chan struct{}, 1), txHeap: newTxHeap(maxSize), @@ -314,7 +314,7 @@ func (m *Mempool) GetTx(txID ids.ID) (*Tx, bool, bool) { return tx, false, true } if tx, exists := m.discardedTxs.Get(txID); exists { - return tx.(*Tx), true, true + return tx, true, true } return nil, false, false diff --git a/coreth/plugin/evm/mempool_atomic_gossiping_test.go b/coreth/plugin/evm/mempool_atomic_gossiping_test.go index 9d15f364..84a9dd10 100644 --- a/coreth/plugin/evm/mempool_atomic_gossiping_test.go +++ b/coreth/plugin/evm/mempool_atomic_gossiping_test.go @@ -4,12 +4,14 @@ package evm import ( + "context" "testing" "github.com/ava-labs/coreth/params" "github.com/ava-labs/avalanchego/ids" - 
"github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/chain" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -27,7 +29,7 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { // we use AP3 genesis here to not trip any block fees issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase3, "", "") defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) assert.NoError(err) }() mempool := vm.mempool @@ -65,7 +67,7 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { // Show that BuildBlock generates a block containing [txID] and that it is // still present in the mempool. - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) assert.NoError(err, "could not build block out of mempool") evmBlk, ok := blk.(*chain.BlockWrapper).Block.(*Block) @@ -76,10 +78,10 @@ func TestMempoolAddLocallyCreateAtomicTx(t *testing.T) { has = mempool.has(txID) assert.True(has, "tx should stay in mempool until block is accepted") - err = blk.Verify() + err = blk.Verify(context.Background()) assert.NoError(err) - err = blk.Accept() + err = blk.Accept(context.Background()) assert.NoError(err) has = mempool.has(txID) @@ -95,7 +97,7 @@ func TestMempoolMaxMempoolSizeHandling(t *testing.T) { _, vm, _, sharedMemory, _ := GenesisVM(t, true, "", "", "") defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) assert.NoError(err) }() mempool := vm.mempool @@ -165,12 +167,12 @@ func createImportTx(t *testing.T, vm *VM, txID ids.ID, feeAmount uint64) *Tx { } // Sort the inputs and outputs to ensure the transaction is canonical - avax.SortTransferableInputs(importTx.ImportedInputs) + utils.Sort(importTx.ImportedInputs) SortEVMOutputs(importTx.Outs) tx := &Tx{UnsignedAtomicTx: importTx} // Sign with the 
correct key - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{testKeys[0]}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{testKeys[0]}}); err != nil { t.Fatal(err) } @@ -184,7 +186,7 @@ func TestMempoolPriorityDrop(t *testing.T) { // we use AP3 genesis here to not trip any block fees _, vm, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase3, "", "") defer func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) assert.NoError(err) }() mempool := vm.mempool diff --git a/coreth/plugin/evm/message/codec.go b/coreth/plugin/evm/message/codec.go index 33f70e9c..a698e821 100644 --- a/coreth/plugin/evm/message/codec.go +++ b/coreth/plugin/evm/message/codec.go @@ -15,7 +15,10 @@ const ( maxMessageSize = 1 * units.MiB ) -var Codec codec.Manager +var ( + Codec codec.Manager + CrossChainCodec codec.Manager +) func init() { Codec = codec.NewManager(maxMessageSize) @@ -44,4 +47,20 @@ func init() { if errs.Errored() { panic(errs.Err) } + + CrossChainCodec = codec.NewManager(maxMessageSize) + ccc := linearcodec.NewDefault() + + errs = wrappers.Errs{} + errs.Add( + // CrossChainRequest Types + ccc.RegisterType(EthCallRequest{}), + ccc.RegisterType(EthCallResponse{}), + + CrossChainCodec.RegisterCodec(Version, ccc), + ) + + if errs.Errored() { + panic(errs.Err) + } } diff --git a/coreth/plugin/evm/message/cross_chain_handler.go b/coreth/plugin/evm/message/cross_chain_handler.go new file mode 100644 index 00000000..5a810d34 --- /dev/null +++ b/coreth/plugin/evm/message/cross_chain_handler.go @@ -0,0 +1,71 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package message + +import ( + "context" + "encoding/json" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/coreth/internal/ethapi" + "github.com/ava-labs/coreth/rpc" + "github.com/ethereum/go-ethereum/log" +) + +var _ CrossChainRequestHandler = &crossChainHandler{} + +// crossChainHandler implements the CrossChainRequestHandler interface +type crossChainHandler struct { + backend ethapi.Backend + crossChainCodec codec.Manager +} + +// NewCrossChainHandler creates and returns a new instance of CrossChainRequestHandler +func NewCrossChainHandler(b ethapi.Backend, codec codec.Manager) CrossChainRequestHandler { + return &crossChainHandler{ + backend: b, + crossChainCodec: codec, + } +} + +// HandleEthCallRequests returns an encoded EthCallResponse to the given [ethCallRequest] +// This function executes EVM Call against the state associated with [rpc.AcceptedBlockNumber] with the given +// transaction call object [ethCallRequest]. +// This function does not return an error as errors are treated as FATAL to the node. 
+func (c *crossChainHandler) HandleEthCallRequest(ctx context.Context, requestingChainID ids.ID, requestID uint32, ethCallRequest EthCallRequest) ([]byte, error) { + lastAcceptedBlockNumber := rpc.BlockNumber(c.backend.LastAcceptedBlock().NumberU64()) + lastAcceptedBlockNumberOrHash := rpc.BlockNumberOrHash{BlockNumber: &lastAcceptedBlockNumber} + + transactionArgs := ethapi.TransactionArgs{} + err := json.Unmarshal(ethCallRequest.RequestArgs, &transactionArgs) + if err != nil { + log.Debug("error occurred with JSON unmarshalling ethCallRequest.RequestArgs", "err", err) + return nil, nil + } + + result, err := ethapi.DoCall(ctx, c.backend, transactionArgs, lastAcceptedBlockNumberOrHash, nil, c.backend.RPCEVMTimeout(), c.backend.RPCGasCap()) + if err != nil { + log.Debug("error occurred with EthCall", "err", err, "transactionArgs", ethCallRequest.RequestArgs, "blockNumberOrHash", lastAcceptedBlockNumberOrHash) + return nil, nil + } + + executionResult, err := json.Marshal(&result) + if err != nil { + log.Debug("error occurred with JSON marshalling result", "err", err) + return nil, nil + } + + response := EthCallResponse{ + ExecutionResult: executionResult, + } + + responseBytes, err := c.crossChainCodec.Marshal(Version, response) + if err != nil { + log.Warn("error occurred with marshalling EthCallResponse", "err", err, "EthCallResponse", response) + return nil, nil + } + + return responseBytes, nil +} diff --git a/coreth/plugin/evm/message/eth_call_request.go b/coreth/plugin/evm/message/eth_call_request.go new file mode 100644 index 00000000..69d1139a --- /dev/null +++ b/coreth/plugin/evm/message/eth_call_request.go @@ -0,0 +1,33 @@ +// (c) 2021-2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package message + +import ( + "context" + "fmt" + + "github.com/ava-labs/avalanchego/ids" +) + +var _ CrossChainRequest = EthCallRequest{} + +// EthCallRequest has the JSON Data necessary to execute a new EVM call on the blockchain +type EthCallRequest struct { + RequestArgs []byte `serialize:"true"` +} + +// EthCallResponse represents the JSON return value of the executed EVM call +type EthCallResponse struct { + ExecutionResult []byte `serialize:"true"` +} + +// String converts EthCallRequest to a string +func (e EthCallRequest) String() string { + return fmt.Sprintf("%#v", e) +} + +// Handle returns the encoded EthCallResponse by executing EVM call with the given EthCallRequest +func (e EthCallRequest) Handle(ctx context.Context, requestingChainID ids.ID, requestID uint32, handler CrossChainRequestHandler) ([]byte, error) { + return handler.HandleEthCallRequest(ctx, requestingChainID, requestID, e) +} diff --git a/coreth/plugin/evm/message/handler.go b/coreth/plugin/evm/message/handler.go index cfa97e7e..ad8f7665 100644 --- a/coreth/plugin/evm/message/handler.go +++ b/coreth/plugin/evm/message/handler.go @@ -12,8 +12,9 @@ import ( ) var ( - _ GossipHandler = NoopMempoolGossipHandler{} - _ RequestHandler = NoopRequestHandler{} + _ GossipHandler = NoopMempoolGossipHandler{} + _ RequestHandler = NoopRequestHandler{} + _ CrossChainRequestHandler = NoopCrossChainRequestHandler{} ) // GossipHandler handles incoming gossip messages @@ -50,9 +51,9 @@ type RequestHandler interface { // Only one of OnResponse or OnFailure is called for a given requestID, not both type ResponseHandler interface { // OnResponse is invoked when the peer responded to a request - OnResponse(nodeID ids.NodeID, requestID uint32, response []byte) error + OnResponse(response []byte) error // OnFailure is invoked when there was a failure in processing a request - OnFailure(nodeID ids.NodeID, requestID uint32) error + OnFailure() error } type NoopRequestHandler struct{} @@ -72,3 +73,14 @@ func 
(NoopRequestHandler) HandleBlockRequest(ctx context.Context, nodeID ids.Nod func (NoopRequestHandler) HandleCodeRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, codeRequest CodeRequest) ([]byte, error) { return nil, nil } + +// CrossChainRequestHandler interface handles incoming requests from another chain +type CrossChainRequestHandler interface { + HandleEthCallRequest(ctx context.Context, requestingchainID ids.ID, requestID uint32, ethCallRequest EthCallRequest) ([]byte, error) +} + +type NoopCrossChainRequestHandler struct{} + +func (NoopCrossChainRequestHandler) HandleEthCallRequest(ctx context.Context, requestingchainID ids.ID, requestID uint32, ethCallRequest EthCallRequest) ([]byte, error) { + return nil, nil +} diff --git a/coreth/plugin/evm/message/request.go b/coreth/plugin/evm/message/request.go index 2aadf5a9..6b5831f9 100644 --- a/coreth/plugin/evm/message/request.go +++ b/coreth/plugin/evm/message/request.go @@ -34,3 +34,13 @@ func BytesToRequest(codec codec.Manager, requestBytes []byte) (Request, error) { func RequestToBytes(codec codec.Manager, request Request) ([]byte, error) { return codec.Marshal(Version, &request) } + +// CrossChainRequest represents the interface a cross chain request should implement +type CrossChainRequest interface { + // CrossChainRequest should implement String() for logging. 
+ fmt.Stringer + + // Handle allows [CrossChainRequest] to call respective methods on handler to handle + // this particular request type + Handle(ctx context.Context, requestingChainID ids.ID, requestID uint32, handler CrossChainRequestHandler) ([]byte, error) +} diff --git a/coreth/plugin/evm/message/syncable.go b/coreth/plugin/evm/message/syncable.go index 4ca4a607..c8631bbb 100644 --- a/coreth/plugin/evm/message/syncable.go +++ b/coreth/plugin/evm/message/syncable.go @@ -4,6 +4,7 @@ package message import ( + "context" "fmt" "github.com/ava-labs/avalanchego/ids" @@ -25,10 +26,10 @@ type SyncSummary struct { summaryID ids.ID bytes []byte - acceptImpl func(SyncSummary) (bool, error) + acceptImpl func(SyncSummary) (block.StateSyncMode, error) } -func NewSyncSummaryFromBytes(summaryBytes []byte, acceptImpl func(SyncSummary) (bool, error)) (SyncSummary, error) { +func NewSyncSummaryFromBytes(summaryBytes []byte, acceptImpl func(SyncSummary) (block.StateSyncMode, error)) (SyncSummary, error) { summary := SyncSummary{} if codecVersion, err := Codec.Unmarshal(summaryBytes, &summary); err != nil { return SyncSummary{}, err @@ -84,9 +85,9 @@ func (s SyncSummary) String() string { return fmt.Sprintf("SyncSummary(BlockHash=%s, BlockNumber=%d, BlockRoot=%s, AtomicRoot=%s)", s.BlockHash, s.BlockNumber, s.BlockRoot, s.AtomicRoot) } -func (s SyncSummary) Accept() (bool, error) { +func (s SyncSummary) Accept(context.Context) (block.StateSyncMode, error) { if s.acceptImpl == nil { - return false, fmt.Errorf("accept implementation not specified for summary: %s", s) + return block.StateSyncSkipped, fmt.Errorf("accept implementation not specified for summary: %s", s) } return s.acceptImpl(s) } diff --git a/coreth/plugin/evm/service.go b/coreth/plugin/evm/service.go index bf3d15c1..5d5af423 100644 --- a/coreth/plugin/evm/service.go +++ b/coreth/plugin/evm/service.go @@ -12,9 +12,10 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/ids" - 
"github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/json" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -99,8 +100,8 @@ type ExportKeyArgs struct { // ExportKeyReply is the response for ExportKey type ExportKeyReply struct { // The decrypted PrivateKey for the Address provided in the arguments - PrivateKey *crypto.PrivateKeySECP256K1R `json:"privateKey"` - PrivateKeyHex string `json:"privateKeyHex"` + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` + PrivateKeyHex string `json:"privateKeyHex"` } // ExportKey returns a private key from the provided user @@ -133,7 +134,7 @@ func (service *AvaxAPI) ExportKey(r *http.Request, args *ExportKeyArgs, reply *E // ImportKeyArgs are arguments for ImportKey type ImportKeyArgs struct { api.UserPass - PrivateKey *crypto.PrivateKeySECP256K1R `json:"privateKey"` + PrivateKey *secp256k1.PrivateKey `json:"privateKey"` } // ImportKey adds a private key to the provided user @@ -347,7 +348,7 @@ func (service *AvaxAPI) GetUTXOs(r *http.Request, args *api.GetUTXOsArgs, reply } sourceChain := chainID - addrSet := ids.ShortSet{} + addrSet := set.Set[ids.ShortID]{} for _, addrStr := range args.Addresses { addr, err := service.vm.ParseLocalAddress(addrStr) if err != nil { diff --git a/coreth/plugin/evm/syncervm_client.go b/coreth/plugin/evm/syncervm_client.go index 0cc36591..41f076e6 100644 --- a/coreth/plugin/evm/syncervm_client.go +++ b/coreth/plugin/evm/syncervm_client.go @@ -6,6 +6,7 @@ package evm import ( "context" "fmt" + "sync" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/versiondb" @@ -64,6 +65,7 @@ type stateSyncerClient struct { resumableSummary message.SyncSummary cancel context.CancelFunc + wg sync.WaitGroup // State 
Sync results syncSummary message.SyncSummary @@ -78,9 +80,9 @@ func NewStateSyncClient(config *stateSyncClientConfig) StateSyncClient { type StateSyncClient interface { // methods that implement the client side of [block.StateSyncableVM] - StateSyncEnabled() (bool, error) - GetOngoingSyncStateSummary() (block.StateSummary, error) - ParseStateSummary(summaryBytes []byte) (block.StateSummary, error) + StateSyncEnabled(context.Context) (bool, error) + GetOngoingSyncStateSummary(context.Context) (block.StateSummary, error) + ParseStateSummary(ctx context.Context, summaryBytes []byte) (block.StateSummary, error) // additional methods required by the evm package StateSyncClearOngoingSummary() error @@ -98,12 +100,14 @@ type Syncer interface { } // StateSyncEnabled returns [client.enabled], which is set in the chain's config file. -func (client *stateSyncerClient) StateSyncEnabled() (bool, error) { return client.enabled, nil } +func (client *stateSyncerClient) StateSyncEnabled(context.Context) (bool, error) { + return client.enabled, nil +} // GetOngoingSyncStateSummary returns a state summary that was previously started // and not finished, and sets [resumableSummary] if one was found. // Returns [database.ErrNotFound] if no ongoing summary is found or if [client.skipResume] is true. 
-func (client *stateSyncerClient) GetOngoingSyncStateSummary() (block.StateSummary, error) { +func (client *stateSyncerClient) GetOngoingSyncStateSummary(context.Context) (block.StateSummary, error) { if client.skipResume { return nil, database.ErrNotFound } @@ -134,17 +138,13 @@ func (client *stateSyncerClient) StateSyncClearOngoingSummary() error { } // ParseStateSummary parses [summaryBytes] to [commonEng.Summary] -func (client *stateSyncerClient) ParseStateSummary(summaryBytes []byte) (block.StateSummary, error) { +func (client *stateSyncerClient) ParseStateSummary(_ context.Context, summaryBytes []byte) (block.StateSummary, error) { return message.NewSyncSummaryFromBytes(summaryBytes, client.acceptSyncSummary) } // stateSync blockingly performs the state sync for the EVM state and the atomic state // to [client.syncSummary]. returns an error if one occurred. -func (client *stateSyncerClient) stateSync() error { - ctx, cancel := context.WithCancel(context.Background()) - client.cancel = cancel - defer cancel() - +func (client *stateSyncerClient) stateSync(ctx context.Context) error { if err := client.syncBlocks(ctx, client.syncSummary.BlockHash, client.syncSummary.BlockNumber, parentsToGet); err != nil { return err } @@ -160,7 +160,7 @@ func (client *stateSyncerClient) stateSync() error { // acceptSyncSummary returns true if sync will be performed and launches the state sync process // in a goroutine. 
-func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncSummary) (bool, error) { +func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncSummary) (block.StateSyncMode, error) { isResume := proposedSummary.BlockHash == client.resumableSummary.BlockHash if !isResume { // Skip syncing if the blockchain is not significantly ahead of local state, @@ -173,12 +173,12 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS "syncableHeight", proposedSummary.Height(), ) if err := client.StateSyncClearOngoingSummary(); err != nil { - return false, fmt.Errorf("failed to clear ongoing summary after skipping state sync: %w", err) + return block.StateSyncSkipped, fmt.Errorf("failed to clear ongoing summary after skipping state sync: %w", err) } // Initialize snapshots if we're skipping state sync, since it will not have been initialized on // startup. client.chain.BlockChain().InitializeSnapshots() - return false, nil + return block.StateSyncSkipped, nil } // Wipe the snapshot completely if we are not resuming from an existing sync, so that we do not @@ -199,15 +199,23 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS // Note: this must be performed after WipeSnapshot finishes so that we do not start a state sync // session from a partially wiped snapshot. 
if err := client.metadataDB.Put(stateSyncSummaryKey, proposedSummary.Bytes()); err != nil { - return false, fmt.Errorf("failed to write state sync summary key to disk: %w", err) + return block.StateSyncSkipped, fmt.Errorf("failed to write state sync summary key to disk: %w", err) } if err := client.db.Commit(); err != nil { - return false, fmt.Errorf("failed to commit db: %w", err) + return block.StateSyncSkipped, fmt.Errorf("failed to commit db: %w", err) } log.Info("Starting state sync", "summary", proposedSummary) + + // create a cancellable ctx for the state sync goroutine + ctx, cancel := context.WithCancel(context.Background()) + client.cancel = cancel + client.wg.Add(1) // track the state sync goroutine so we can wait for it on shutdown go func() { - if err := client.stateSync(); err != nil { + defer client.wg.Done() + defer cancel() + + if err := client.stateSync(ctx); err != nil { client.stateSyncErr = err } else { client.stateSyncErr = client.finishSync() @@ -218,7 +226,7 @@ func (client *stateSyncerClient) acceptSyncSummary(proposedSummary message.SyncS log.Info("stateSync completed, notifying engine", "err", client.stateSyncErr) client.toEngine <- commonEng.StateSyncDone }() - return true, nil + return block.StateSyncStatic, nil } // syncBlocks fetches (up to) [parentsToGet] blocks from peers @@ -311,13 +319,14 @@ func (client *stateSyncerClient) Shutdown() error { if client.cancel != nil { client.cancel() } + client.wg.Wait() // wait for the background goroutine to exit return nil } // finishSync is responsible for updating disk and memory pointers so the VM is prepared // for bootstrapping. Executes any shared memory operations from the atomic trie to shared memory. 
func (client *stateSyncerClient) finishSync() error { - stateBlock, err := client.state.GetBlock(ids.ID(client.syncSummary.BlockHash)) + stateBlock, err := client.state.GetBlock(context.TODO(), ids.ID(client.syncSummary.BlockHash)) if err != nil { return fmt.Errorf("could not get block by hash from client state: %s", client.syncSummary.BlockHash) } @@ -353,7 +362,7 @@ func (client *stateSyncerClient) finishSync() error { parentHash := block.ParentHash() client.chain.BloomIndexer().AddCheckpoint(parentHeight/params.BloomBitsBlocks, parentHash) - if err := client.chain.BlockChain().ResetState(block); err != nil { + if err := client.chain.BlockChain().ResetToStateSyncedBlock(block); err != nil { return err } diff --git a/coreth/plugin/evm/syncervm_server.go b/coreth/plugin/evm/syncervm_server.go index 17348641..3bf051bf 100644 --- a/coreth/plugin/evm/syncervm_server.go +++ b/coreth/plugin/evm/syncervm_server.go @@ -4,6 +4,7 @@ package evm import ( + "context" "fmt" "github.com/ava-labs/avalanchego/database" @@ -31,8 +32,8 @@ type stateSyncServer struct { } type StateSyncServer interface { - GetLastStateSummary() (block.StateSummary, error) - GetStateSummary(uint64) (block.StateSummary, error) + GetLastStateSummary(context.Context) (block.StateSummary, error) + GetStateSummary(context.Context, uint64) (block.StateSummary, error) } func NewStateSyncServer(config *stateSyncServerConfig) StateSyncServer { @@ -74,7 +75,7 @@ func (server *stateSyncServer) stateSummaryAtHeight(height uint64) (message.Sync // State summary is calculated by the block nearest to last accepted // that is divisible by [syncableInterval] // If no summary is available, [database.ErrNotFound] must be returned. 
-func (server *stateSyncServer) GetLastStateSummary() (block.StateSummary, error) { +func (server *stateSyncServer) GetLastStateSummary(context.Context) (block.StateSummary, error) { lastHeight := server.chain.LastAcceptedBlock().NumberU64() lastSyncSummaryNumber := lastHeight - lastHeight%server.syncableInterval @@ -90,7 +91,7 @@ func (server *stateSyncServer) GetLastStateSummary() (block.StateSummary, error) // GetStateSummary implements StateSyncableVM and returns a summary corresponding // to the provided [height] if the node can serve state sync data for that key. // If not, [database.ErrNotFound] must be returned. -func (server *stateSyncServer) GetStateSummary(height uint64) (block.StateSummary, error) { +func (server *stateSyncServer) GetStateSummary(_ context.Context, height uint64) (block.StateSummary, error) { summaryBlock := server.chain.GetBlockByNumber(height) if summaryBlock == nil || summaryBlock.NumberU64() > server.chain.LastAcceptedBlock().NumberU64() || diff --git a/coreth/plugin/evm/syncervm_test.go b/coreth/plugin/evm/syncervm_test.go index 4ca4f5a0..156e59e2 100644 --- a/coreth/plugin/evm/syncervm_test.go +++ b/coreth/plugin/evm/syncervm_test.go @@ -13,6 +13,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database/manager" @@ -20,9 +21,15 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" commonEng "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/snow/engine/snowman/block" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rlp" + 
"github.com/ava-labs/coreth/accounts/keystore" "github.com/ava-labs/coreth/consensus/dummy" "github.com/ava-labs/coreth/constants" @@ -35,9 +42,6 @@ import ( statesyncclient "github.com/ava-labs/coreth/sync/client" "github.com/ava-labs/coreth/sync/statesync" "github.com/ava-labs/coreth/trie" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rlp" ) func TestSkipStateSync(t *testing.T) { @@ -45,7 +49,7 @@ func TestSkipStateSync(t *testing.T) { test := syncTest{ syncableInterval: 256, stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync - shouldSync: false, + syncMode: block.StateSyncSkipped, } vmSetup := createSyncServerAndClientVMs(t, test) defer vmSetup.Teardown(t) @@ -58,7 +62,7 @@ func TestStateSyncFromScratch(t *testing.T) { test := syncTest{ syncableInterval: 256, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - shouldSync: true, + syncMode: block.StateSyncStatic, } vmSetup := createSyncServerAndClientVMs(t, test) defer vmSetup.Teardown(t) @@ -79,7 +83,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { test := syncTest{ syncableInterval: 256, stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync - shouldSync: true, + syncMode: block.StateSyncStatic, responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) { lock.Lock() defer lock.Unlock() @@ -87,7 +91,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { reqCount++ // Fail all requests after number 50 to interrupt the sync if reqCount > 50 { - if err := syncerVM.AppRequestFailed(nodeID, requestID); err != nil { + if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID); err != nil { panic(err) } cancel := syncerVM.StateSyncClient.(*stateSyncerClient).cancel @@ -97,7 +101,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { t.Fatal("state sync client not populated correctly") 
} } else { - syncerVM.AppResponse(nodeID, requestID, response) + syncerVM.AppResponse(context.Background(), nodeID, requestID, response) } }, expectedErr: context.Canceled, @@ -108,28 +112,30 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { // Perform sync resulting in early termination. testSyncerVM(t, vmSetup, test) - test.shouldSync = true + test.syncMode = block.StateSyncStatic test.responseIntercept = nil test.expectedErr = nil syncDisabledVM := &VM{} appSender := &commonEng.SenderTest{T: t} - appSender.SendAppGossipF = func([]byte) error { return nil } - appSender.SendAppRequestF = func(nodeSet ids.NodeIDSet, requestID uint32, request []byte) error { + appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } + appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeID, hasItem := nodeSet.Pop() if !hasItem { t.Fatal("expected nodeSet to contain at least 1 nodeID") } - go vmSetup.serverVM.AppRequest(nodeID, requestID, time.Now().Add(1*time.Second), request) + go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request) return nil } // Disable metrics to prevent duplicate registerer + stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}` if err := syncDisabledVM.Initialize( + context.Background(), vmSetup.syncerVM.ctx, vmSetup.syncerDBManager, []byte(genesisJSONLatest), nil, - nil, + []byte(stateSyncDisabledConfigJSON), vmSetup.syncerVM.toEngine, []*commonEng.Fx{}, appSender, @@ -138,7 +144,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { } defer func() { - if err := syncDisabledVM.Shutdown(); err != nil { + if err := syncDisabledVM.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -147,7 +153,7 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { t.Fatalf("Unexpected last accepted height: %d", height) } - enabled, err := syncDisabledVM.StateSyncEnabled() + enabled, err := 
syncDisabledVM.StateSyncEnabled(context.Background()) assert.NoError(t, err) assert.False(t, enabled, "sync should be disabled") @@ -161,14 +167,14 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { if err != nil { t.Fatal(err) } - blk, err := syncDisabledVM.ParseBlock(b) + blk, err := syncDisabledVM.ParseBlock(context.Background(), b) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } } @@ -183,10 +189,11 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { syncReEnabledVM := &VM{} // Enable state sync in configJSON configJSON := fmt.Sprintf( - "{\"state-sync-enabled\":true, \"state-sync-min-blocks\":%d}", + `{"state-sync-enabled":true, "state-sync-min-blocks":%d}`, test.stateSyncMinBlocks, ) if err := syncReEnabledVM.Initialize( + context.Background(), vmSetup.syncerVM.ctx, vmSetup.syncerDBManager, []byte(genesisJSONLatest), @@ -200,9 +207,9 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { } // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] - vmSetup.serverAppSender.SendAppResponseF = func(nodeID ids.NodeID, requestID uint32, response []byte) error { + vmSetup.serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { if test.responseIntercept == nil { - go syncReEnabledVM.AppResponse(nodeID, requestID, response) + go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response) } else { go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response) } @@ -211,9 +218,13 @@ func TestStateSyncToggleEnabledToDisabled(t *testing.T) { } // connect peer to [syncerVM] - assert.NoError(t, syncReEnabledVM.Connected(vmSetup.serverVM.ctx.NodeID, statesyncclient.StateSyncVersion)) + assert.NoError(t, syncReEnabledVM.Connected( + 
context.Background(), + vmSetup.serverVM.ctx.NodeID, + statesyncclient.StateSyncVersion, + )) - enabled, err = syncReEnabledVM.StateSyncEnabled() + enabled, err = syncReEnabledVM.StateSyncEnabled(context.Background()) assert.NoError(t, err) assert.True(t, enabled, "sync should be enabled") @@ -236,13 +247,13 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { // If the test already failed, shut down the VMs if they were instantiated. if serverVM != nil { log.Info("Shutting down server VM") - if err := serverVM.Shutdown(); err != nil { + if err := serverVM.Shutdown(context.Background()); err != nil { t.Fatal(err) } } if syncerVM != nil { log.Info("Shutting down syncerVM") - if err := syncerVM.Shutdown(); err != nil { + if err := syncerVM.Shutdown(context.Background()); err != nil { t.Fatal(err) } } @@ -269,7 +280,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { switch i { case 0: // spend the UTXOs from shared memory - importTx, err = serverVM.newImportTx(serverVM.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err = serverVM.newImportTx(serverVM.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -284,7 +295,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { serverVM.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, - []*crypto.PrivateKeySECP256K1R{testKeys[0]}, + []*secp256k1.PrivateKey{testKeys[0]}, ) if err != nil { t.Fatal(err) @@ -328,7 +339,7 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { if err != nil { t.Fatal(err) } - internalBlock, err := serverVM.parseBlock(blockBytes) + internalBlock, err := serverVM.parseBlock(context.Background(), blockBytes) if err != nil { t.Fatal(err) } @@ -350,10 +361,10 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { testShortIDAddrs[0]: 
importAmount, }, ) - if err := syncerVM.SetState(snow.StateSyncing); err != nil { + if err := syncerVM.SetState(context.Background(), snow.StateSyncing); err != nil { t.Fatal(err) } - enabled, err := syncerVM.StateSyncEnabled() + enabled, err := syncerVM.StateSyncEnabled(context.Background()) assert.NoError(t, err) assert.True(t, enabled) @@ -361,9 +372,9 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { syncerVM.atomicTrie.(*atomicTrie).commitInterval = test.syncableInterval // override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM] - serverAppSender.SendAppResponseF = func(nodeID ids.NodeID, requestID uint32, response []byte) error { + serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { if test.responseIntercept == nil { - go syncerVM.AppResponse(nodeID, requestID, response) + go syncerVM.AppResponse(ctx, nodeID, requestID, response) } else { go test.responseIntercept(syncerVM, nodeID, requestID, response) } @@ -372,15 +383,19 @@ func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup { } // connect peer to [syncerVM] - assert.NoError(t, syncerVM.Connected(serverVM.ctx.NodeID, statesyncclient.StateSyncVersion)) + assert.NoError(t, syncerVM.Connected( + context.Background(), + serverVM.ctx.NodeID, + statesyncclient.StateSyncVersion, + )) // override [syncerVM]'s SendAppRequest function to trigger AppRequest on [serverVM] - syncerAppSender.SendAppRequestF = func(nodeSet ids.NodeIDSet, requestID uint32, request []byte) error { + syncerAppSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error { nodeID, hasItem := nodeSet.Pop() if !hasItem { t.Fatal("expected nodeSet to contain at least 1 nodeID") } - go serverVM.AppRequest(nodeID, requestID, time.Now().Add(1*time.Second), request) + go serverVM.AppRequest(ctx, nodeID, requestID, 
time.Now().Add(1*time.Second), request) return nil } @@ -417,8 +432,8 @@ type syncVMSetup struct { // Teardown shuts down both VMs and asserts that both exit without error. // Note: assumes both serverVM and sycnerVM have been initialized. func (s *syncVMSetup) Teardown(t *testing.T) { - assert.NoError(t, s.serverVM.Shutdown()) - assert.NoError(t, s.syncerVM.Shutdown()) + assert.NoError(t, s.serverVM.Shutdown(context.Background())) + assert.NoError(t, s.syncerVM.Shutdown(context.Background())) } // syncTest contains both the actual VMs as well as the parameters with the expected output. @@ -426,7 +441,7 @@ type syncTest struct { responseIntercept func(vm *VM, nodeID ids.NodeID, requestID uint32, response []byte) stateSyncMinBlocks uint64 syncableInterval uint64 - shouldSync bool + syncMode block.StateSyncMode expectedErr error } @@ -442,28 +457,28 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { ) // get last summary and test related methods - summary, err := serverVM.GetLastStateSummary() + summary, err := serverVM.GetLastStateSummary(context.Background()) if err != nil { t.Fatal("error getting state sync last summary", "err", err) } - parsedSummary, err := syncerVM.ParseStateSummary(summary.Bytes()) + parsedSummary, err := syncerVM.ParseStateSummary(context.Background(), summary.Bytes()) if err != nil { t.Fatal("error getting state sync last summary", "err", err) } - retrievedSummary, err := serverVM.GetStateSummary(parsedSummary.Height()) + retrievedSummary, err := serverVM.GetStateSummary(context.Background(), parsedSummary.Height()) if err != nil { t.Fatal("error when checking if summary is accepted", "err", err) } assert.Equal(t, summary, retrievedSummary) - shouldSync, err := parsedSummary.Accept() + syncMode, err := parsedSummary.Accept(context.Background()) if err != nil { t.Fatal("unexpected error accepting state summary", "err", err) } - if shouldSync != test.shouldSync { - t.Fatal("unexpected value returned from accept", 
"expected", test.shouldSync, "got", shouldSync) + if syncMode != test.syncMode { + t.Fatal("unexpected value returned from accept", "expected", test.syncMode, "got", syncMode) } - if !shouldSync { + if syncMode == block.StateSyncSkipped { return } msg := <-syncerEngineChan @@ -473,6 +488,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { err = syncerVM.StateSyncClient.Error() if test.expectedErr != nil { assert.ErrorIs(t, err, test.expectedErr) + assertSyncPerformedHeights(t, syncerVM.chaindb, map[uint64]struct{}{}) return } if err != nil { @@ -481,12 +497,13 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { // set [syncerVM] to bootstrapping and verify the last accepted block has been updated correctly // and that we can bootstrap and process some blocks. - if err := syncerVM.SetState(snow.Bootstrapping); err != nil { + if err := syncerVM.SetState(context.Background(), snow.Bootstrapping); err != nil { t.Fatal(err) } assert.Equal(t, serverVM.LastAcceptedBlock().Height(), syncerVM.LastAcceptedBlock().Height(), "block height mismatch between syncer and server") assert.Equal(t, serverVM.LastAcceptedBlock().ID(), syncerVM.LastAcceptedBlock().ID(), "blockID mismatch between syncer and server") assert.True(t, syncerVM.blockChain.HasState(syncerVM.blockChain.LastAcceptedBlock().Root()), "unavailable state for last accepted block") + assertSyncPerformedHeights(t, syncerVM.chaindb, map[uint64]struct{}{retrievedSummary.Height(): {}}) blocksToBuild := 10 txsPerBlock := 10 @@ -508,7 +525,7 @@ func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) { }) // check we can transition to [NormalOp] state and continue to process blocks. 
- assert.NoError(t, syncerVM.SetState(snow.NormalOp)) + assert.NoError(t, syncerVM.SetState(context.Background(), snow.NormalOp)) assert.True(t, syncerVM.bootstrapped) // check atomic memory was synced properly @@ -566,14 +583,14 @@ func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, if err != nil { t.Fatal(err) } - vmBlock, err := vm.ParseBlock(bytes) + vmBlock, err := vm.ParseBlock(context.Background(), bytes) if err != nil { t.Fatal(err) } - if err := vmBlock.Verify(); err != nil { + if err := vmBlock.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := vmBlock.Accept(); err != nil { + if err := vmBlock.Accept(context.Background()); err != nil { t.Fatal(err) } } @@ -595,3 +612,17 @@ func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, } vm.blockChain.DrainAcceptorQueue() } + +// assertSyncPerformedHeights iterates over all heights the VM has synced to and +// verifies it matches [expected]. +func assertSyncPerformedHeights(t *testing.T, db ethdb.Iteratee, expected map[uint64]struct{}) { + it := rawdb.NewSyncPerformedIterator(db) + defer it.Release() + + found := make(map[uint64]struct{}, len(expected)) + for it.Next() { + found[rawdb.UnpackSyncPerformedKey(it.Key())] = struct{}{} + } + require.NoError(t, it.Error()) + require.Equal(t, expected, found) +} diff --git a/coreth/plugin/evm/test_tx.go b/coreth/plugin/evm/test_tx.go index 2cc7f2b4..c057c874 100644 --- a/coreth/plugin/evm/test_tx.go +++ b/coreth/plugin/evm/test_tx.go @@ -14,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/coreth/core/state" "github.com/ava-labs/coreth/params" @@ -28,7 +29,7 @@ type TestUnsignedTx struct { BurnedV uint64 `serialize:"true"` UnsignedBytesV []byte SignedBytesV []byte - InputUTXOsV ids.Set + 
InputUTXOsV set.Set[ids.ID] SemanticVerifyV error EVMStateTransferV error } @@ -62,7 +63,7 @@ func (t *TestUnsignedTx) Bytes() []byte { return t.UnsignedBytesV } func (t *TestUnsignedTx) SignedBytes() []byte { return t.SignedBytesV } // InputUTXOs implements the UnsignedAtomicTx interface -func (t *TestUnsignedTx) InputUTXOs() ids.Set { return t.InputUTXOsV } +func (t *TestUnsignedTx) InputUTXOs() set.Set[ids.ID] { return t.InputUTXOsV } // SemanticVerify implements the UnsignedAtomicTx interface func (t *TestUnsignedTx) SemanticVerify(vm *VM, stx *Tx, parent *Block, baseFee *big.Int, rules params.Rules) error { diff --git a/coreth/plugin/evm/tx.go b/coreth/plugin/evm/tx.go index 165da9ce..fc7cce74 100644 --- a/coreth/plugin/evm/tx.go +++ b/coreth/plugin/evm/tx.go @@ -20,8 +20,9 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -103,7 +104,7 @@ type UnsignedAtomicTx interface { UnsignedTx // InputUTXOs returns the UTXOs this tx consumes - InputUTXOs() ids.Set + InputUTXOs() set.Set[ids.ID] // Verify attempts to verify that the transaction is well formed Verify(ctx *snow.Context, rules params.Rules) error // Attempts to verify this transaction with the provided state. 
@@ -126,7 +127,7 @@ type Tx struct { } // Sign this transaction with the provided signers -func (tx *Tx) Sign(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R) error { +func (tx *Tx) Sign(c codec.Manager, signers [][]*secp256k1.PrivateKey) error { unsignedBytes, err := c.Marshal(codecVersion, &tx.UnsignedAtomicTx) if err != nil { return fmt.Errorf("couldn't marshal UnsignedAtomicTx: %w", err) @@ -136,7 +137,7 @@ func (tx *Tx) Sign(c codec.Manager, signers [][]*crypto.PrivateKeySECP256K1R) er hash := hashing.ComputeHash256(unsignedBytes) for _, keys := range signers { cred := &secp256k1fx.Credential{ - Sigs: make([][crypto.SECP256K1RSigLen]byte, len(keys)), + Sigs: make([][secp256k1.SignatureLen]byte, len(keys)), } for i, key := range keys { sig, err := key.SignHash(hash) // Sign hash @@ -193,7 +194,7 @@ func (tx *Tx) BlockFeeContribution(fixedFee bool, avaxAssetID ids.ID, baseFee *b // innerSortInputsAndSigners implements sort.Interface for EVMInput type innerSortInputsAndSigners struct { inputs []EVMInput - signers [][]*crypto.PrivateKeySECP256K1R + signers [][]*secp256k1.PrivateKey } func (ins *innerSortInputsAndSigners) Less(i, j int) bool { @@ -212,7 +213,7 @@ func (ins *innerSortInputsAndSigners) Swap(i, j int) { } // SortEVMInputsAndSigners sorts the list of EVMInputs based on the addresses and assetIDs -func SortEVMInputsAndSigners(inputs []EVMInput, signers [][]*crypto.PrivateKeySECP256K1R) { +func SortEVMInputsAndSigners(inputs []EVMInput, signers [][]*secp256k1.PrivateKey) { sort.Sort(&innerSortInputsAndSigners{inputs: inputs, signers: signers}) } diff --git a/coreth/plugin/evm/tx_test.go b/coreth/plugin/evm/tx_test.go index cb59f20a..91f8ab6c 100644 --- a/coreth/plugin/evm/tx_test.go +++ b/coreth/plugin/evm/tx_test.go @@ -4,6 +4,7 @@ package evm import ( + "context" "math/big" "strings" "testing" @@ -153,16 +154,16 @@ func executeTxTest(t *testing.T, test atomicTxTest) { <-issuer // If we've reached this point, we expect to be able to build and 
verify the block without any errors - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := blk.Accept(); len(test.acceptErr) == 0 && err != nil { + if err := blk.Accept(context.Background()); len(test.acceptErr) == 0 && err != nil { t.Fatalf("Accept failed unexpectedly due to: %s", err) } else if len(test.acceptErr) != 0 { if err == nil { diff --git a/coreth/plugin/evm/user.go b/coreth/plugin/evm/user.go index e1902598..51a3fa38 100644 --- a/coreth/plugin/evm/user.go +++ b/coreth/plugin/evm/user.go @@ -5,11 +5,10 @@ package evm import ( "errors" - "fmt" "github.com/ava-labs/avalanchego/database/encdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ethereum/go-ethereum/common" ) @@ -23,7 +22,7 @@ var ( ) type user struct { - secpFactory *crypto.FactorySECP256K1R + secpFactory *secp256k1.Factory // This user's database, acquired from the keystore db *encdb.Database } @@ -66,7 +65,7 @@ func (u *user) controlsAddress(address common.Address) (bool, error) { } // putAddress persists that this user controls address controlled by [privKey] -func (u *user) putAddress(privKey *crypto.PrivateKeySECP256K1R) error { +func (u *user) putAddress(privKey *secp256k1.PrivateKey) error { if privKey == nil { return errKeyNil } @@ -106,7 +105,7 @@ func (u *user) putAddress(privKey *crypto.PrivateKeySECP256K1R) error { } // Key returns the private key that controls the given address -func (u *user) getKey(address common.Address) (*crypto.PrivateKeySECP256K1R, error) { +func (u *user) getKey(address common.Address) (*secp256k1.PrivateKey, error) { if u.db == nil { return nil, errDBNil //} else if address.IsZero() { @@ -117,23 +116,16 @@ func (u *user) getKey(address common.Address) 
(*crypto.PrivateKeySECP256K1R, err if err != nil { return nil, err } - sk, err := u.secpFactory.ToPrivateKey(bytes) - if err != nil { - return nil, err - } - if sk, ok := sk.(*crypto.PrivateKeySECP256K1R); ok { - return sk, nil - } - return nil, fmt.Errorf("expected private key to be type *crypto.PrivateKeySECP256K1R but is type %T", sk) + return u.secpFactory.ToPrivateKey(bytes) } // Return all private keys controlled by this user -func (u *user) getKeys() ([]*crypto.PrivateKeySECP256K1R, error) { +func (u *user) getKeys() ([]*secp256k1.PrivateKey, error) { addrs, err := u.getAddresses() if err != nil { return nil, err } - keys := make([]*crypto.PrivateKeySECP256K1R, len(addrs)) + keys := make([]*secp256k1.PrivateKey, len(addrs)) for i, addr := range addrs { key, err := u.getKey(addr) if err != nil { diff --git a/coreth/plugin/evm/version.go b/coreth/plugin/evm/version.go index 427e6a10..5fdcddd8 100644 --- a/coreth/plugin/evm/version.go +++ b/coreth/plugin/evm/version.go @@ -11,7 +11,7 @@ var ( // GitCommit is set by the build script GitCommit string // Version is the version of Coreth - Version string = "v0.11.0" + Version string = "v0.12.0" ) func init() { diff --git a/coreth/plugin/evm/vm.go b/coreth/plugin/evm/vm.go index 4212a92c..c7ffb490 100644 --- a/coreth/plugin/evm/vm.go +++ b/coreth/plugin/evm/vm.go @@ -5,7 +5,6 @@ package evm import ( "context" - "encoding/binary" "encoding/json" "errors" "fmt" @@ -47,13 +46,15 @@ import ( // We must import this package (not referenced elsewhere) so that the native "callTracer" // is added to a map of client-accessible tracers. In geth, this is done // inside of cmd/geth. 
+ _ "github.com/ava-labs/coreth/eth/tracers/js" _ "github.com/ava-labs/coreth/eth/tracers/native" - "github.com/ava-labs/coreth/metrics" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" + "github.com/ava-labs/coreth/metrics" + avalancheRPC "github.com/gorilla/rpc/v2" "github.com/ava-labs/avalanchego/cache" @@ -69,12 +70,13 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/utils/profiler" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -135,11 +137,10 @@ const ( var ( // Set last accepted key to be longer than the keys used to store accepted block IDs. 
- lastAcceptedKey = []byte("last_accepted_key") - acceptedPrefix = []byte("snowman_accepted") - metadataPrefix = []byte("metadata") - ethDBPrefix = []byte("ethdb") - pruneRejectedBlocksKey = []byte("pruned_rejected_blocks") + lastAcceptedKey = []byte("last_accepted_key") + acceptedPrefix = []byte("snowman_accepted") + metadataPrefix = []byte("metadata") + ethDBPrefix = []byte("ethdb") // Prefixes for atomic trie atomicTrieDBPrefix = []byte("atomicTrieDB") @@ -174,7 +175,6 @@ var ( errConflictingAtomicTx = errors.New("conflicting atomic tx present") errTooManyAtomicTx = errors.New("too many atomic tx") errMissingAtomicTxs = errors.New("cannot build a block with non-empty extra data and zero atomic transactions") - errInvalidExtraStateRoot = errors.New("invalid ExtraStateRoot") errImportTxsDisabled = errors.New("import transactions are disabled") errExportTxsDisabled = errors.New("export transactions are disabled") ) @@ -267,7 +267,7 @@ type VM struct { shutdownWg sync.WaitGroup fx secp256k1fx.Fx - secpFactory crypto.FactorySECP256K1R + secpFactory secp256k1.Factory // Continuous Profiler profiler profiler.ContinuousProfiler @@ -313,7 +313,8 @@ func (vm *VM) GetActivationTime() time.Time { // Initialize implements the snowman.ChainVM interface func (vm *VM) Initialize( - ctx *snow.Context, + _ context.Context, + chainCtx *snow.Context, dbManager manager.Manager, genesisBytes []byte, upgradeBytes []byte, @@ -332,7 +333,7 @@ func (vm *VM) Initialize( return err } - vm.ctx = ctx + vm.ctx = chainCtx // Create logger alias, err := vm.ctx.BCLookup.PrimaryAlias(vm.ctx.ChainID) @@ -369,6 +370,16 @@ func (vm *VM) Initialize( vm.db = versiondb.New(baseDB) vm.acceptedBlockDB = prefixdb.New(acceptedPrefix, vm.db) vm.metadataDB = prefixdb.New(metadataPrefix, vm.db) + + if vm.config.InspectDatabase { + start := time.Now() + log.Info("Starting database inspection") + if err := rawdb.InspectDatabase(vm.chaindb, nil, nil); err != nil { + return err + } + log.Info("Completed database 
inspection", "elapsed", time.Since(start)) + } + g := new(core.Genesis) if err := json.Unmarshal(genesisBytes, g); err != nil { return err @@ -398,6 +409,10 @@ func (vm *VM) Initialize( case g.Config.ChainID.Cmp(params.AvalancheLocalChainID) == 0: g.Config = params.AvalancheLocalChainConfig } + // Set the Avalanche Context on the ChainConfig + g.Config.AvalancheContext = params.AvalancheContext{ + BlockchainID: common.Hash(chainCtx.ChainID), + } vm.syntacticBlockValidator = NewBlockValidator(extDataHashes) // Ensure that non-standard commit interval is only allowed for the local network @@ -424,28 +439,54 @@ func (vm *VM) Initialize( } vm.ethConfig.Genesis = g vm.ethConfig.NetworkId = vm.chainID.Uint64() + vm.genesisHash = vm.ethConfig.Genesis.ToBlock(nil).Hash() // must create genesis hash before [vm.readLastAccepted] + lastAcceptedHash, lastAcceptedHeight, err := vm.readLastAccepted() + if err != nil { + return err + } + log.Info(fmt.Sprintf("lastAccepted = %s", lastAcceptedHash)) // Set minimum price for mining and default gas price oracle value to the min // gas price to prevent so transactions and blocks all use the correct fees vm.ethConfig.RPCGasCap = vm.config.RPCGasCap vm.ethConfig.RPCEVMTimeout = vm.config.APIMaxDuration.Duration vm.ethConfig.RPCTxFeeCap = vm.config.RPCTxFeeCap + vm.ethConfig.TxPool.NoLocals = !vm.config.LocalTxsEnabled + vm.ethConfig.TxPool.Journal = vm.config.TxPoolJournal + vm.ethConfig.TxPool.Rejournal = vm.config.TxPoolRejournal.Duration + vm.ethConfig.TxPool.PriceLimit = vm.config.TxPoolPriceLimit + vm.ethConfig.TxPool.PriceBump = vm.config.TxPoolPriceBump + vm.ethConfig.TxPool.AccountSlots = vm.config.TxPoolAccountSlots + vm.ethConfig.TxPool.GlobalSlots = vm.config.TxPoolGlobalSlots + vm.ethConfig.TxPool.AccountQueue = vm.config.TxPoolAccountQueue + vm.ethConfig.TxPool.GlobalQueue = vm.config.TxPoolGlobalQueue + vm.ethConfig.AllowUnfinalizedQueries = vm.config.AllowUnfinalizedQueries vm.ethConfig.AllowUnprotectedTxs = 
vm.config.AllowUnprotectedTxs + vm.ethConfig.AllowUnprotectedTxHashes = vm.config.AllowUnprotectedTxHashes vm.ethConfig.Preimages = vm.config.Preimages + vm.ethConfig.TrieCleanCache = vm.config.TrieCleanCache + vm.ethConfig.TrieCleanJournal = vm.config.TrieCleanJournal + vm.ethConfig.TrieCleanRejournal = vm.config.TrieCleanRejournal.Duration + vm.ethConfig.TrieDirtyCache = vm.config.TrieDirtyCache + vm.ethConfig.TrieDirtyCommitTarget = vm.config.TrieDirtyCommitTarget + vm.ethConfig.SnapshotCache = vm.config.SnapshotCache vm.ethConfig.Pruning = vm.config.Pruning vm.ethConfig.AcceptorQueueLimit = vm.config.AcceptorQueueLimit vm.ethConfig.PopulateMissingTries = vm.config.PopulateMissingTries vm.ethConfig.PopulateMissingTriesParallelism = vm.config.PopulateMissingTriesParallelism vm.ethConfig.AllowMissingTries = vm.config.AllowMissingTries - vm.ethConfig.SnapshotDelayInit = vm.config.StateSyncEnabled + vm.ethConfig.SnapshotDelayInit = vm.stateSyncEnabled(lastAcceptedHeight) vm.ethConfig.SnapshotAsync = vm.config.SnapshotAsync vm.ethConfig.SnapshotVerify = vm.config.SnapshotVerify vm.ethConfig.OfflinePruning = vm.config.OfflinePruning vm.ethConfig.OfflinePruningBloomFilterSize = vm.config.OfflinePruningBloomFilterSize vm.ethConfig.OfflinePruningDataDirectory = vm.config.OfflinePruningDataDirectory vm.ethConfig.CommitInterval = vm.config.CommitInterval + vm.ethConfig.SkipUpgradeCheck = vm.config.SkipUpgradeCheck + vm.ethConfig.AcceptedCacheSize = vm.config.AcceptedCacheSize + vm.ethConfig.TxLookupLimit = vm.config.TxLookupLimit // Create directory for offline pruning if len(vm.ethConfig.OfflinePruningDataDirectory) != 0 { @@ -455,22 +496,18 @@ func (vm *VM) Initialize( } } - vm.genesisHash = vm.ethConfig.Genesis.ToBlock(nil).Hash() - vm.chainConfig = g.Config vm.networkID = vm.ethConfig.NetworkId - vm.secpFactory = crypto.FactorySECP256K1R{Cache: cache.LRU{Size: secpFactoryCacheSize}} + vm.secpFactory = secp256k1.Factory{ + Cache: cache.LRU[ids.ID, *secp256k1.PublicKey]{ 
+ Size: secpFactoryCacheSize, + }, + } vm.codec = Codec // TODO: read size from settings - vm.mempool = NewMempool(ctx.AVAXAssetID, defaultMempoolSize) - - lastAcceptedHash, lastAcceptedHeight, err := vm.readLastAccepted() - if err != nil { - return err - } - log.Info(fmt.Sprintf("lastAccepted = %s", lastAcceptedHash)) + vm.mempool = NewMempool(chainCtx.AVAXAssetID, defaultMempoolSize) if err := vm.initializeMetrics(); err != nil { return err @@ -478,7 +515,7 @@ func (vm *VM) Initialize( // initialize peer network vm.networkCodec = message.Codec - vm.Network = peer.NewNetwork(appSender, vm.networkCodec, ctx.NodeID, vm.config.MaxOutboundActiveRequests) + vm.Network = peer.NewNetwork(appSender, vm.networkCodec, message.CrossChainCodec, chainCtx.NodeID, vm.config.MaxOutboundActiveRequests, vm.config.MaxOutboundActiveCrossChainRequests) vm.client = peer.NewNetworkClient(vm.Network) if err := vm.initializeChain(lastAcceptedHash); err != nil { @@ -582,9 +619,10 @@ func (vm *VM) initializeChain(lastAcceptedHash common.Hash) error { // If state sync is disabled, this function will wipe any ongoing summary from // disk to ensure that we do not continue syncing from an invalid snapshot. 
func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { + stateSyncEnabled := vm.stateSyncEnabled(lastAcceptedHeight) // parse nodeIDs from state sync IDs in vm config var stateSyncIDs []ids.NodeID - if vm.config.StateSyncEnabled && len(vm.config.StateSyncIDs) > 0 { + if stateSyncEnabled && len(vm.config.StateSyncIDs) > 0 { nodeIDs := strings.Split(vm.config.StateSyncIDs, ",") stateSyncIDs = make([]ids.NodeID, len(nodeIDs)) for i, nodeIDString := range nodeIDs { @@ -609,7 +647,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { IsSongbirdCode: vm.chainConfig.IsSongbirdCode(), }, ), - enabled: vm.config.StateSyncEnabled, + enabled: stateSyncEnabled, skipResume: vm.config.StateSyncSkipResume, stateSyncMinBlocks: vm.config.StateSyncMinBlocks, lastAcceptedHeight: lastAcceptedHeight, // TODO clean up how this is passed around @@ -623,7 +661,7 @@ func (vm *VM) initializeStateSyncClient(lastAcceptedHeight uint64) error { // If StateSync is disabled, clear any ongoing summary so that we will not attempt to resume // sync using a snapshot that has been modified by the node running normal operations. 
- if !vm.config.StateSyncEnabled { + if !stateSyncEnabled { return vm.StateSyncClient.StateSyncClearOngoingSummary() } @@ -639,6 +677,7 @@ func (vm *VM) initializeStateSyncServer() { }) vm.setAppRequestHandlers() + vm.setCrossChainAppRequestHandler() } func (vm *VM) initChainState(lastAcceptedBlock *types.Block) error { @@ -725,7 +764,7 @@ func (vm *VM) preBatchOnFinalizeAndAssemble(header *types.Header, state *state.S func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state.StateDB, txs []*types.Transaction) ([]byte, *big.Int, *big.Int, error) { var ( batchAtomicTxs []*Tx - batchAtomicUTXOs ids.Set + batchAtomicUTXOs set.Set[ids.ID] batchContribution *big.Int = new(big.Int).Set(common.Big0) batchGasUsed *big.Int = new(big.Int).Set(common.Big0) rules = vm.chainConfig.AvalancheRules(header.Number, new(big.Int).SetUint64(header.Time)) @@ -794,19 +833,6 @@ func (vm *VM) postBatchOnFinalizeAndAssemble(header *types.Header, state *state. size += txSize } - // In Cortina the block header must include the atomic trie root. - if rules.IsCortina { - // Pass common.Hash{} as the current block's hash to the atomic backend, this avoids - // pinning changes to the atomic trie in memory, as we are still computing the header - // for this block and don't have its hash yet. Here we calculate the root of the atomic - // trie to store in the block header. - atomicTrieRoot, err := vm.atomicBackend.InsertTxs(common.Hash{}, header.Number.Uint64(), header.ParentHash, batchAtomicTxs) - if err != nil { - return nil, nil, nil, err - } - header.ExtraStateRoot = atomicTrieRoot - } - // If there is a non-zero number of transactions, marshal them and return the byte slice // for the block's extra data along with the contribution and gas used. if len(batchAtomicTxs) > 0 { @@ -863,18 +889,10 @@ func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big } } // Update the atomic backend with [txs] from this block. 
- atomicRoot, err := vm.atomicBackend.InsertTxs(block.Hash(), block.NumberU64(), block.ParentHash(), txs) + _, err := vm.atomicBackend.InsertTxs(block.Hash(), block.NumberU64(), block.ParentHash(), txs) if err != nil { return nil, nil, err } - if rules.IsCortina { - // In Cortina, the atomic trie root should be in ExtraStateRoot. - if header.ExtraStateRoot != atomicRoot { - return nil, nil, fmt.Errorf( - "%w: (expected %s) (got %s)", errInvalidExtraStateRoot, header.ExtraStateRoot, atomicRoot, - ) - } - } } // If there are no transactions, we can return early. @@ -909,31 +927,7 @@ func (vm *VM) onExtraStateChange(block *types.Block, state *state.StateDB) (*big return batchContribution, batchGasUsed, nil } -func (vm *VM) pruneChain() error { - if !vm.config.Pruning { - return nil - } - pruned, err := vm.db.Has(pruneRejectedBlocksKey) - if err != nil { - return fmt.Errorf("failed to check if the VM has pruned rejected blocks: %w", err) - } - if pruned { - return nil - } - - lastAcceptedHeight := vm.LastAcceptedBlock().Height() - if err := vm.blockChain.RemoveRejectedBlocks(0, lastAcceptedHeight); err != nil { - return err - } - heightBytes := make([]byte, 8) - binary.PutUvarint(heightBytes, lastAcceptedHeight) - if err := vm.db.Put(pruneRejectedBlocksKey, heightBytes); err != nil { - return err - } - return vm.db.Commit() -} - -func (vm *VM) SetState(state snow.State) error { +func (vm *VM) SetState(_ context.Context, state snow.State) error { switch state { case snow.StateSyncing: vm.bootstrapped = false @@ -978,6 +972,7 @@ func (vm *VM) setAppRequestHandlers() { ) syncRequestHandler := handlers.NewSyncHandler( vm.blockChain, + vm.chaindb, evmTrieDB, vm.atomicTrie.TrieDB(), vm.networkCodec, @@ -986,8 +981,15 @@ func (vm *VM) setAppRequestHandlers() { vm.Network.SetRequestHandler(syncRequestHandler) } +// setCrossChainAppRequestHandler sets the request handlers for the VM to serve cross chain +// requests. 
+func (vm *VM) setCrossChainAppRequestHandler() { + crossChainRequestHandler := message.NewCrossChainHandler(vm.eth.APIBackend, message.CrossChainCodec) + vm.Network.SetCrossChainRequestHandler(crossChainRequestHandler) +} + // Shutdown implements the snowman.ChainVM interface -func (vm *VM) Shutdown() error { +func (vm *VM) Shutdown(context.Context) error { if vm.ctx == nil { return nil } @@ -1002,7 +1004,7 @@ func (vm *VM) Shutdown() error { } // buildBlock builds a block to be wrapped by ChainState -func (vm *VM) buildBlock() (snowman.Block, error) { +func (vm *VM) buildBlock(_ context.Context) (snowman.Block, error) { block, err := vm.miner.GenerateBlock() vm.builder.handleGenerateBlock() if err != nil { @@ -1045,7 +1047,7 @@ func (vm *VM) buildBlock() (snowman.Block, error) { } // parseBlock parses [b] into a block to be wrapped by ChainState. -func (vm *VM) parseBlock(b []byte) (snowman.Block, error) { +func (vm *VM) parseBlock(_ context.Context, b []byte) (snowman.Block, error) { ethBlock := new(types.Block) if err := rlp.DecodeBytes(b, ethBlock); err != nil { return nil, err @@ -1065,7 +1067,7 @@ func (vm *VM) parseBlock(b []byte) (snowman.Block, error) { } func (vm *VM) ParseEthBlock(b []byte) (*types.Block, error) { - block, err := vm.parseBlock(b) + block, err := vm.parseBlock(context.TODO(), b) if err != nil { return nil, err } @@ -1075,7 +1077,7 @@ func (vm *VM) ParseEthBlock(b []byte) (*types.Block, error) { // getBlock attempts to retrieve block [id] from the VM to be wrapped // by ChainState. -func (vm *VM) getBlock(id ids.ID) (snowman.Block, error) { +func (vm *VM) getBlock(_ context.Context, id ids.ID) (snowman.Block, error) { ethBlock := vm.blockChain.GetBlockByHash(common.Hash(id)) // If [ethBlock] is nil, return [database.ErrNotFound] here // so that the miss is considered cacheable. 
@@ -1087,11 +1089,11 @@ func (vm *VM) getBlock(id ids.ID) (snowman.Block, error) { } // SetPreference sets what the current tail of the chain is -func (vm *VM) SetPreference(blkID ids.ID) error { +func (vm *VM) SetPreference(ctx context.Context, blkID ids.ID) error { // Since each internal handler used by [vm.State] always returns a block // with non-nil ethBlock value, GetBlockInternal should never return a // (*Block) with a nil ethBlock value. - block, err := vm.GetBlockInternal(blkID) + block, err := vm.GetBlockInternal(ctx, blkID) if err != nil { return fmt.Errorf("failed to set preference to %s: %w", blkID, err) } @@ -1101,7 +1103,7 @@ func (vm *VM) SetPreference(blkID ids.ID) error { // VerifyHeightIndex always returns a nil error since the index is maintained by // vm.blockChain. -func (vm *VM) VerifyHeightIndex() error { +func (vm *VM) VerifyHeightIndex(context.Context) error { return nil } @@ -1113,7 +1115,7 @@ func (vm *VM) VerifyHeightIndex() error { // Note: the engine assumes that if a block is not found at [blkHeight], then // [database.ErrNotFound] will be returned. This indicates that the VM has state synced // and does not have all historical blocks available. 
-func (vm *VM) GetBlockIDAtHeight(blkHeight uint64) (ids.ID, error) { +func (vm *VM) GetBlockIDAtHeight(_ context.Context, blkHeight uint64) (ids.ID, error) { ethBlock := vm.blockChain.GetBlockByNumber(blkHeight) if ethBlock == nil { return ids.ID{}, database.ErrNotFound @@ -1122,7 +1124,7 @@ func (vm *VM) GetBlockIDAtHeight(blkHeight uint64) (ids.ID, error) { return ids.ID(ethBlock.Hash()), nil } -func (vm *VM) Version() (string, error) { +func (vm *VM) Version(context.Context) (string, error) { return Version, nil } @@ -1149,7 +1151,7 @@ func newHandler(name string, service interface{}, lockOption ...commonEng.LockOp } // CreateHandlers makes new http handlers that can handle API calls -func (vm *VM) CreateHandlers() (map[string]*commonEng.HTTPHandler, error) { +func (vm *VM) CreateHandlers(context.Context) (map[string]*commonEng.HTTPHandler, error) { handler := rpc.NewServer(vm.config.APIMaxDuration.Duration) enabledAPIs := vm.config.EthAPIs() if err := attachEthService(handler, vm.eth.APIs(), enabledAPIs); err != nil { @@ -1203,7 +1205,7 @@ func (vm *VM) CreateHandlers() (map[string]*commonEng.HTTPHandler, error) { } // CreateStaticHandlers makes new http handlers that can handle API calls -func (vm *VM) CreateStaticHandlers() (map[string]*commonEng.HTTPHandler, error) { +func (vm *VM) CreateStaticHandlers(context.Context) (map[string]*commonEng.HTTPHandler, error) { handler := rpc.NewServer(0) if err := handler.RegisterName("static", &StaticService{}); err != nil { return nil, err @@ -1224,7 +1226,7 @@ func (vm *VM) CreateStaticHandlers() (map[string]*commonEng.HTTPHandler, error) // or any of its ancestor blocks going back to the last accepted block in its ancestry. If [ancestor] is // accepted, then nil will be returned immediately. // If the ancestry of [ancestor] cannot be fetched, then [errRejectedParent] may be returned. 
-func (vm *VM) conflicts(inputs ids.Set, ancestor *Block) error { +func (vm *VM) conflicts(inputs set.Set[ids.ID], ancestor *Block) error { for ancestor.Status() != choices.Accepted { // If any of the atomic transactions in the ancestor conflict with [inputs] // return an error. @@ -1243,7 +1245,7 @@ func (vm *VM) conflicts(inputs ids.Set, ancestor *Block) error { // will be missing. // If the ancestor is processing, then the block may have // been verified. - nextAncestorIntf, err := vm.GetBlockInternal(nextAncestorID) + nextAncestorIntf, err := vm.GetBlockInternal(context.TODO(), nextAncestorID) if err != nil { return errRejectedParent } @@ -1373,7 +1375,7 @@ func (vm *VM) verifyTxAtTip(tx *Tx) error { // for reverting to the correct snapshot after calling this function. If this function is called with a // throwaway state, then this is not necessary. func (vm *VM) verifyTx(tx *Tx, parentHash common.Hash, baseFee *big.Int, state *state.StateDB, rules params.Rules) error { - parentIntf, err := vm.GetBlockInternal(ids.ID(parentHash)) + parentIntf, err := vm.GetBlockInternal(context.TODO(), ids.ID(parentHash)) if err != nil { return fmt.Errorf("failed to get parent block: %w", err) } @@ -1400,7 +1402,7 @@ func (vm *VM) verifyTxs(txs []*Tx, parentHash common.Hash, baseFee *big.Int, hei // it was called. // If the ancestor is rejected, then this block shouldn't be inserted // into the canonical chain because the parent will be missing. - ancestorInf, err := vm.GetBlockInternal(ancestorID) + ancestorInf, err := vm.GetBlockInternal(context.TODO(), ancestorID) if err != nil { return errRejectedParent } @@ -1414,7 +1416,7 @@ func (vm *VM) verifyTxs(txs []*Tx, parentHash common.Hash, baseFee *big.Int, hei // Ensure each tx in [txs] doesn't conflict with any other atomic tx in // a processing ancestor block. 
- inputs := &ids.Set{} + inputs := set.Set[ids.ID]{} for _, atomicTx := range txs { utx := atomicTx.UnsignedAtomicTx if err := utx.SemanticVerify(vm, atomicTx, ancestor, baseFee, rules); err != nil { @@ -1433,7 +1435,7 @@ func (vm *VM) verifyTxs(txs []*Tx, parentHash common.Hash, baseFee *big.Int, hei // referenced in. func (vm *VM) GetAtomicUTXOs( chainID ids.ID, - addrs ids.ShortSet, + addrs set.Set[ids.ShortID], startAddr ids.ShortID, startUTXOID ids.ID, limit int, @@ -1480,21 +1482,21 @@ func (vm *VM) GetAtomicUTXOs( // GetSpendableFunds returns a list of EVMInputs and keys (in corresponding // order) to total [amount] of [assetID] owned by [keys]. -// Note: we return [][]*crypto.PrivateKeySECP256K1R even though each input +// Note: we return [][]*secp256k1.PrivateKey even though each input // corresponds to a single key, so that the signers can be passed in to // [tx.Sign] which supports multiple keys on a single input. func (vm *VM) GetSpendableFunds( - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, assetID ids.ID, amount uint64, -) ([]EVMInput, [][]*crypto.PrivateKeySECP256K1R, error) { +) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { // Note: current state uses the state of the preferred block. state, err := vm.blockChain.State() if err != nil { return nil, nil, err } inputs := []EVMInput{} - signers := [][]*crypto.PrivateKeySECP256K1R{} + signers := [][]*secp256k1.PrivateKey{} // Note: we assume that each key in [keys] is unique, so that iterating over // the keys will not produce duplicated nonces in the returned EVMInput slice. 
for _, key := range keys { @@ -1526,7 +1528,7 @@ func (vm *VM) GetSpendableFunds( AssetID: assetID, Nonce: nonce, }) - signers = append(signers, []*crypto.PrivateKeySECP256K1R{key}) + signers = append(signers, []*secp256k1.PrivateKey{key}) amount -= balance } @@ -1542,15 +1544,15 @@ func (vm *VM) GetSpendableFunds( // This function accounts for the added cost of the additional inputs needed to // create the transaction and makes sure to skip any keys with a balance that is // insufficient to cover the additional fee. -// Note: we return [][]*crypto.PrivateKeySECP256K1R even though each input +// Note: we return [][]*secp256k1.PrivateKey even though each input // corresponds to a single key, so that the signers can be passed in to // [tx.Sign] which supports multiple keys on a single input. func (vm *VM) GetSpendableAVAXWithFee( - keys []*crypto.PrivateKeySECP256K1R, + keys []*secp256k1.PrivateKey, amount uint64, cost uint64, baseFee *big.Int, -) ([]EVMInput, [][]*crypto.PrivateKeySECP256K1R, error) { +) ([]EVMInput, [][]*secp256k1.PrivateKey, error) { // Note: current state uses the state of the preferred block. state, err := vm.blockChain.State() if err != nil { @@ -1569,7 +1571,7 @@ func (vm *VM) GetSpendableAVAXWithFee( amount = newAmount inputs := []EVMInput{} - signers := [][]*crypto.PrivateKeySECP256K1R{} + signers := [][]*secp256k1.PrivateKey{} // Note: we assume that each key in [keys] is unique, so that iterating over // the keys will not produce duplicated nonces in the returned EVMInput slice. 
for _, key := range keys { @@ -1626,7 +1628,7 @@ func (vm *VM) GetSpendableAVAXWithFee( AssetID: vm.ctx.AVAXAssetID, Nonce: nonce, }) - signers = append(signers, []*crypto.PrivateKeySECP256K1R{key}) + signers = append(signers, []*secp256k1.PrivateKey{key}) amount -= inputAmount } @@ -1708,7 +1710,7 @@ func (vm *VM) getAtomicTxFromPreApricot5BlockByHeight(height uint64) (*Tx, error // readLastAccepted reads the last accepted hash from [acceptedBlockDB] and returns the // last accepted block hash and height by reading directly from [vm.chaindb] instead of relying // on [chain]. -// Note: assumes chaindb, ethConfig, and genesisHash have been initialized. +// Note: assumes [vm.chaindb] and [vm.genesisHash] have been initialized. func (vm *VM) readLastAccepted() (common.Hash, uint64, error) { // Attempt to load last accepted block to determine if it is necessary to // initialize state with the genesis block. @@ -1772,3 +1774,13 @@ func attachEthService(handler *rpc.Server, apis []rpc.API, names []string) error return nil } + +func (vm *VM) stateSyncEnabled(lastAcceptedHeight uint64) bool { + if vm.config.StateSyncEnabled != nil { + // if the config is set, use that + return *vm.config.StateSyncEnabled + } + + // enable state sync by default if the chain is empty. + return lastAcceptedHeight == 0 +} diff --git a/coreth/plugin/evm/vm_extra_state_root_test.go b/coreth/plugin/evm/vm_extra_state_root_test.go deleted file mode 100644 index 2bbefe97..00000000 --- a/coreth/plugin/evm/vm_extra_state_root_test.go +++ /dev/null @@ -1,267 +0,0 @@ -// (c) 2019-2022, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package evm - -import ( - "encoding/json" - "math/big" - "testing" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/utils/crypto" - "github.com/ava-labs/avalanchego/vms/components/chain" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/core/types" - "github.com/ava-labs/coreth/trie" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" -) - -var ( - // testCortinaTime is an arbitrary time used to test the VM's behavior when - // Cortina activates. - testCortinaTime = time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC) - // testCortinaJSON is a modified genesisJSONCortina to include the Cortina - // upgrade at testCortinaTime. - testCortinaJSON string -) - -func init() { - var genesis core.Genesis - if err := json.Unmarshal([]byte(genesisJSONCortina), &genesis); err != nil { - panic(err) - } - genesis.Config.CortinaBlockTimestamp = big.NewInt(testCortinaTime.Unix()) - json, err := json.Marshal(genesis) - if err != nil { - panic(err) - } - testCortinaJSON = string(json) -} - -type verifyExtraStateRootConfig struct { - genesis string - blockTime1 time.Time - blockTime2 time.Time - expectedExtraStateRoot func(atomicRoot1, atomicRoot2 common.Hash) (common.Hash, common.Hash) -} - -// testVerifyExtraState root builds 2 blocks using a vm with [test.genesis]. -// First block is built at [blockTime1] and includes an import tx. -// Second block is build at [blockTime2] and includes an export tx. -// After blocks build, [test.expectedExtraStateRoot] is called with the roots -// of the atomic trie at block1 and block2 and the ExtraStateRoot field of -// the blocks are checked against the return value of that function. 
-func testVerifyExtraStateRoot(t *testing.T, test verifyExtraStateRootConfig) { - importAmount := uint64(50000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, test.genesis, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - }() - - // issue tx for block1 - vm.clock.Set(test.blockTime1) - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { - t.Fatal(err) - } - - // build block1 - <-issuer - blk, err := vm.BuildBlock() - if err != nil { - t.Fatal(err) - } - if err := blk.Verify(); err != nil { - t.Fatal(err) - } - if status := blk.Status(); status != choices.Processing { - t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) - } - if err := vm.SetPreference(blk.ID()); err != nil { - t.Fatal(err) - } - if err := blk.Accept(); err != nil { - t.Fatal(err) - } - if status := blk.Status(); status != choices.Accepted { - t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) - } - if lastAcceptedID, err := vm.LastAccepted(); err != nil { - t.Fatal(err) - } else if lastAcceptedID != blk.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) - } - - // issue tx for block2 - vm.clock.Set(test.blockTime2) - exportAmount := importAmount / 2 - exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, exportAmount, vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - if err := vm.issueTx(exportTx, true /*=local*/); err != nil { - t.Fatal(err) - } - - // build block2 - <-issuer - blk2, err := vm.BuildBlock() - if err != nil { - t.Fatal(err) - } - if err := blk2.Verify(); err != 
nil { - t.Fatal(err) - } - if status := blk2.Status(); status != choices.Processing { - t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) - } - if err := blk2.Accept(); err != nil { - t.Fatal(err) - } - if status := blk2.Status(); status != choices.Accepted { - t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) - } - if lastAcceptedID, err := vm.LastAccepted(); err != nil { - t.Fatal(err) - } else if lastAcceptedID != blk2.ID() { - t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), lastAcceptedID) - } - - // Check that both atomic transactions were indexed as expected. - indexedImportTx, status, height, err := vm.getAtomicTx(importTx.ID()) - assert.NoError(t, err) - assert.Equal(t, Accepted, status) - assert.Equal(t, uint64(1), height, "expected height of indexed import tx to be 1") - assert.Equal(t, indexedImportTx.ID(), importTx.ID(), "expected ID of indexed import tx to match original txID") - - indexedExportTx, status, height, err := vm.getAtomicTx(exportTx.ID()) - assert.NoError(t, err) - assert.Equal(t, Accepted, status) - assert.Equal(t, uint64(2), height, "expected height of indexed export tx to be 2") - assert.Equal(t, indexedExportTx.ID(), exportTx.ID(), "expected ID of indexed import tx to match original txID") - - // Open an empty trie to re-create the expected atomic trie roots - trie, err := vm.atomicTrie.OpenTrie(common.Hash{}) - if err != nil { - t.Fatal(err) - } - assert.NoError(t, vm.atomicTrie.UpdateTrie(trie, blk.Height(), importTx.mustAtomicOps())) - atomicRootBlock1 := trie.Hash() - assert.NoError(t, vm.atomicTrie.UpdateTrie(trie, blk2.Height(), exportTx.mustAtomicOps())) - atomicRootBlock2 := trie.Hash() - assert.NotZero(t, atomicRootBlock1) - assert.NotZero(t, atomicRootBlock2) - assert.NotEqual(t, atomicRootBlock1, atomicRootBlock2) - - // verify atomic trie roots included in block header. 
- extraStateRoot := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock.Header().ExtraStateRoot - extraStateRoot2 := blk2.(*chain.BlockWrapper).Block.(*Block).ethBlock.Header().ExtraStateRoot - expectedRoot1, expectedRoot2 := test.expectedExtraStateRoot(atomicRootBlock1, atomicRootBlock2) - assert.Equal(t, expectedRoot1, extraStateRoot) - assert.Equal(t, expectedRoot2, extraStateRoot2) -} - -// Verifies the root of the atomic trie is inclued in Cortina blocks. -func TestIssueAtomicTxsCortina(t *testing.T) { - testVerifyExtraStateRoot(t, verifyExtraStateRootConfig{ - genesis: genesisJSONCortina, - blockTime1: time.Unix(0, 0), // genesis - blockTime2: time.Unix(2, 0), // a bit after, for fee purposes. - expectedExtraStateRoot: func(atomicRoot1, atomicRoot2 common.Hash) (common.Hash, common.Hash) { - return atomicRoot1, atomicRoot2 // we expect both blocks to contain the atomic trie roots respectively. - }, - }) -} - -// Verifies the root of the atomic trie is inclued in the first Cortina block. -func TestIssueAtomicTxsCortinaTransition(t *testing.T) { - testVerifyExtraStateRoot(t, verifyExtraStateRootConfig{ - genesis: testCortinaJSON, - blockTime1: testCortinaTime.Add(-2 * time.Second), // a little before Cortina, so we can test next block at the upgrade timestamp - blockTime2: testCortinaTime, // at the upgrade timestamp - expectedExtraStateRoot: func(atomicRoot1, atomicRoot2 common.Hash) (common.Hash, common.Hash) { - return common.Hash{}, atomicRoot2 // we only expect the Cortina block to include the atomic trie root. - }, - }) -} - -// Calling Verify should not succeed if the proper ExtraStateRoot is not included in a Cortina block. 
-// Calling Verify should not succeed if ExtraStateRoot is not empty pre-Cortina -func TestCortinaInvalidExtraStateRootWillNotVerify(t *testing.T) { - importAmount := uint64(50000000) - issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, testCortinaJSON, "", "", map[ids.ShortID]uint64{ - testShortIDAddrs[0]: importAmount, - }) - defer func() { - if err := vm.Shutdown(); err != nil { - t.Fatal(err) - } - }() - - // issue a tx and build a Cortina block - vm.clock.Set(testCortinaTime) - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) - if err != nil { - t.Fatal(err) - } - if err := vm.issueTx(importTx, true /*=local*/); err != nil { - t.Fatal(err) - } - - <-issuer - - // calling Verify on blk will succeed, we use it as - // a starting point to make an invalid block. - blk, err := vm.BuildBlock() - if err != nil { - t.Fatal(err) - } - validEthBlk := blk.(*chain.BlockWrapper).Block.(*Block).ethBlock - - // make a bad block by setting ExtraStateRoot to common.Hash{} - badHeader := validEthBlk.Header() - badHeader.ExtraStateRoot = common.Hash{} - ethBlkBad := types.NewBlock(badHeader, validEthBlk.Transactions(), validEthBlk.Uncles(), nil, trie.NewStackTrie(nil), validEthBlk.ExtData(), true) - - badBlk, err := vm.newBlock(ethBlkBad) - if err != nil { - t.Fatal(err) - } - err = badBlk.Verify() - assert.ErrorIs(t, err, errInvalidExtraStateRoot) - - // make a bad block by setting ExtraStateRoot to an incorrect hash - badHeader = validEthBlk.Header() - badHeader.ExtraStateRoot = common.BytesToHash([]byte("incorrect")) - ethBlkBad = types.NewBlock(badHeader, validEthBlk.Transactions(), validEthBlk.Uncles(), nil, trie.NewStackTrie(nil), validEthBlk.ExtData(), true) - - badBlk, err = vm.newBlock(ethBlkBad) - if err != nil { - t.Fatal(err) - } - err = badBlk.Verify() - assert.ErrorIs(t, err, errInvalidExtraStateRoot) - - // make a bad block by setting the timestamp before Cortina. 
- badHeader = validEthBlk.Header() - badHeader.Time = uint64(testCortinaTime.Add(-2 * time.Second).Unix()) - ethBlkBad = types.NewBlock(badHeader, validEthBlk.Transactions(), validEthBlk.Uncles(), nil, trie.NewStackTrie(nil), validEthBlk.ExtData(), true) - - badBlk, err = vm.newBlock(ethBlkBad) - if err != nil { - t.Fatal(err) - } - err = badBlk.Verify() - assert.ErrorIs(t, err, errInvalidExtraStateRoot) -} diff --git a/coreth/plugin/evm/vm_test.go b/coreth/plugin/evm/vm_test.go index 1bec4ccc..73d99b33 100644 --- a/coreth/plugin/evm/vm_test.go +++ b/coreth/plugin/evm/vm_test.go @@ -18,12 +18,18 @@ import ( "time" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" + "github.com/ava-labs/coreth/internal/ethapi" + "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/trie" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/chains/atomic" @@ -32,12 +38,14 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/snow/choices" + "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/cb58" "github.com/ava-labs/avalanchego/utils/constants" - "github.com/ava-labs/avalanchego/utils/crypto" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" @@ -53,6 +61,7 @@ import ( "github.com/ava-labs/coreth/params" 
"github.com/ava-labs/coreth/rpc" + "github.com/ava-labs/coreth/accounts/abi" accountKeystore "github.com/ava-labs/coreth/accounts/keystore" ) @@ -61,7 +70,7 @@ var ( testCChainID = ids.ID{'c', 'c', 'h', 'a', 'i', 'n', 't', 'e', 's', 't'} testXChainID = ids.ID{'t', 'e', 's', 't', 'x'} nonExistentID = ids.ID{'F'} - testKeys []*crypto.PrivateKeySECP256K1R + testKeys []*secp256k1.PrivateKey testEthAddrs []common.Address // testEthAddrs[i] corresponds to testKeys[i] testShortIDAddrs []ids.ShortID testAvaxAssetID = ids.ID{1, 2, 3} @@ -82,7 +91,7 @@ var ( genesisJSONBanff = "{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0,\"apricotPhase5BlockTimestamp\":0,\"apricotPhasePre6BlockTimestamp\":0,\"apricotPhase6BlockTimestamp\":0,\"apricotPhasePost6BlockTimestamp\":0,\"banffBlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf95
50505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" genesisJSONCortina = "{\"config\":{\"chainId\":43111,\"homesteadBlock\":0,\"daoForkBlock\":0,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"apricotPhase1BlockTimestamp\":0,\"apricotPhase2BlockTimestamp\":0,\"apricotPhase3BlockTimestamp\":0,\"apricotPhase4BlockTimestamp\":0,\"apricotPhase5BlockTimestamp\":0,\"apricotPhasePre6BlockTimestamp\":0,\"apricotPhase6BlockTimestamp\":0,\"apricotPhasePost6BlockTimestamp\":0,\"banffBlockTimestamp\":0,\"cortinaBlockTimestamp\":0},\"nonce\":\"0x0\",\"timestamp\":\"0x0\",\"extraData\":\"0x00\",\"gasLimit\":\"0x5f5e100\",\"difficulty\":\"0x0\",\"mixHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"coinbase\":\"0x0000000000000000000000000000000000000000\",\"alloc\":{\"0100000000000000000000000000000000000000\":{\"code\":\"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033\",\"balance\":\"0x0\"}},\"number\":\"0x0\",\
"gasUsed\":\"0x0\",\"parentHash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\"}" - genesisJSONLatest = genesisJSONBanff // TODO: update to Cortina + genesisJSONLatest = genesisJSONCortina apricotRulesPhase0 = params.Rules{} apricotRulesPhase1 = params.Rules{IsApricotPhase1: true} @@ -97,7 +106,7 @@ var ( func init() { var b []byte - factory := crypto.FactorySECP256K1R{} + factory := secp256k1.Factory{} for _, key := range []string{ "24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5", @@ -106,13 +115,10 @@ func init() { } { b, _ = cb58.Decode(key) pk, _ := factory.ToPrivateKey(b) - secpKey := pk.(*crypto.PrivateKeySECP256K1R) - testKeys = append(testKeys, secpKey) - testEthAddrs = append(testEthAddrs, GetEthAddress(secpKey)) + testKeys = append(testKeys, pk) + testEthAddrs = append(testEthAddrs, GetEthAddress(pk)) testShortIDAddrs = append(testShortIDAddrs, pk.PublicKey().Address()) } - - minBlockTimeAP4 = time.Millisecond } // BuildGenesisTest returns the genesis bytes for Coreth VM to be used in testing @@ -147,28 +153,22 @@ func NewContext() *snow.Context { _ = aliaser.Alias(testCChainID, testCChainID.String()) _ = aliaser.Alias(testXChainID, "X") _ = aliaser.Alias(testXChainID, testXChainID.String()) - ctx.SNLookup = &snLookup{ - chainsToSubnet: map[ids.ID]ids.ID{ - constants.PlatformChainID: constants.PrimaryNetworkID, - testXChainID: constants.PrimaryNetworkID, - testCChainID: constants.PrimaryNetworkID, + ctx.ValidatorState = &validators.TestState{ + GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { + subnetID, ok := map[ids.ID]ids.ID{ + constants.PlatformChainID: constants.PrimaryNetworkID, + testXChainID: constants.PrimaryNetworkID, + testCChainID: constants.PrimaryNetworkID, + }[chainID] + if !ok { + return ids.Empty, errors.New("unknown chain") + } + return subnetID, nil }, } return ctx } -type snLookup struct { - chainsToSubnet map[ids.ID]ids.ID -} - -func (sn *snLookup) SubnetID(chainID ids.ID) (ids.ID, 
error) { - subnetID, ok := sn.chainsToSubnet[chainID] - if !ok { - return ids.ID{}, errors.New("unknown chain") - } - return subnetID, nil -} - // setupGenesis sets up the genesis // If [genesisJSON] is empty, defaults to using [genesisJSONLatest] func setupGenesis(t *testing.T, @@ -231,8 +231,9 @@ func GenesisVM(t *testing.T, ctx, dbManager, genesisBytes, issuer, m := setupGenesis(t, genesisJSON) appSender := &engCommon.SenderTest{T: t} appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func([]byte) error { return nil } + appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } if err := vm.Initialize( + context.Background(), ctx, dbManager, genesisBytes, @@ -246,8 +247,8 @@ func GenesisVM(t *testing.T, } if finishBootstrapping { - assert.NoError(t, vm.SetState(snow.Bootstrapping)) - assert.NoError(t, vm.SetState(snow.NormalOp)) + assert.NoError(t, vm.SetState(context.Background(), snow.Bootstrapping)) + assert.NoError(t, vm.SetState(context.Background(), snow.NormalOp)) } return issuer, vm, dbManager, m, appSender @@ -313,7 +314,173 @@ func TestVMConfig(t *testing.T) { _, vm, _, _, _ := GenesisVM(t, false, "", configJSON, "") assert.Equal(t, vm.config.RPCTxFeeCap, txFeeCap, "Tx Fee Cap should be set") assert.Equal(t, vm.config.EthAPIs(), enabledEthAPIs, "EnabledEthAPIs should be set") - assert.NoError(t, vm.Shutdown()) + assert.NoError(t, vm.Shutdown(context.Background())) +} + +func TestCrossChainMessagestoVM(t *testing.T) { + crossChainCodec := message.CrossChainCodec + require := require.New(t) + + // the following is based on this contract: + // contract T { + // event received(address sender, uint amount, bytes memo); + // event receivedAddr(address sender); + // + // function receive(bytes calldata memo) external payable returns (string memory res) { + // emit received(msg.sender, msg.value, memo); + // emit receivedAddr(msg.sender); + // return "hello world"; + // } + // } + + const abiBin = 
`0x608060405234801561001057600080fd5b506102a0806100206000396000f3fe60806040526004361061003b576000357c010000000000000000000000000000000000000000000000000000000090048063a69b6ed014610040575b600080fd5b6100b76004803603602081101561005657600080fd5b810190808035906020019064010000000081111561007357600080fd5b82018360208201111561008557600080fd5b803590602001918460018302840111640100000000831117156100a757600080fd5b9091929391929390505050610132565b6040518080602001828103825283818151815260200191508051906020019080838360005b838110156100f75780820151818401526020810190506100dc565b50505050905090810190601f1680156101245780820380516001836020036101000a031916815260200191505b509250505060405180910390f35b60607f75fd880d39c1daf53b6547ab6cb59451fc6452d27caa90e5b6649dd8293b9eed33348585604051808573ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff168152602001848152602001806020018281038252848482818152602001925080828437600081840152601f19601f8201169050808301925050509550505050505060405180910390a17f46923992397eac56cf13058aced2a1871933622717e27b24eabc13bf9dd329c833604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390a16040805190810160405280600b81526020017f68656c6c6f20776f726c6400000000000000000000000000000000000000000081525090509291505056fea165627a7a72305820ff0c57dad254cfeda48c9cfb47f1353a558bccb4d1bc31da1dae69315772d29e0029` + const abiJSON = `[ { "constant": false, "inputs": [ { "name": "memo", "type": "bytes" } ], "name": "receive", "outputs": [ { "name": "res", "type": "string" } ], "payable": true, "stateMutability": "payable", "type": "function" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" }, { "indexed": false, "name": "amount", "type": "uint256" }, { "indexed": false, "name": "memo", "type": "bytes" } ], "name": "received", "type": "event" }, { "anonymous": false, "inputs": [ { "indexed": false, "name": "sender", "type": "address" } ], "name": 
"receivedAddr", "type": "event" } ]` + parsed, err := abi.JSON(strings.NewReader(abiJSON)) + require.NoErrorf(err, "could not parse abi: %v") + + calledSendCrossChainAppResponseFn := false + importAmount := uint64(5000000000) + issuer, vm, _, _, appSender := GenesisVMWithUTXOs(t, true, "", "", "", map[ids.ShortID]uint64{ + testShortIDAddrs[0]: importAmount, + }) + + defer func() { + err := vm.Shutdown(context.Background()) + require.NoError(err) + }() + + appSender.SendCrossChainAppResponseF = func(ctx context.Context, respondingChainID ids.ID, requestID uint32, responseBytes []byte) { + calledSendCrossChainAppResponseFn = true + + var response message.EthCallResponse + if _, err = crossChainCodec.Unmarshal(responseBytes, &response); err != nil { + require.NoErrorf(err, "unexpected error during unmarshal: %w") + } + + result := core.ExecutionResult{} + err = json.Unmarshal(response.ExecutionResult, &result) + require.NoError(err) + require.NotNil(result.ReturnData) + + finalResult, err := parsed.Unpack("receive", result.ReturnData) + require.NoError(err) + require.NotNil(finalResult) + require.Equal("hello world", finalResult[0]) + } + + newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) + vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) + + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + require.NoError(err) + + err = vm.issueTx(importTx, true /*=local*/) + require.NoError(err) + + <-issuer + + blk1, err := vm.BuildBlock(context.Background()) + require.NoError(err) + + err = blk1.Verify(context.Background()) + require.NoError(err) + + if status := blk1.Status(); status != choices.Processing { + t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) + } + + err = vm.SetPreference(context.Background(), blk1.ID()) + require.NoError(err) + + err = blk1.Accept(context.Background()) + require.NoError(err) + + newHead := <-newTxPoolHeadChan + if 
newHead.Head.Hash() != common.Hash(blk1.ID()) { + t.Fatalf("Expected new block to match") + } + + if status := blk1.Status(); status != choices.Accepted { + t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) + } + + lastAcceptedID, err := vm.LastAccepted(context.Background()) + require.NoError(err) + + if lastAcceptedID != blk1.ID() { + t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk1.ID(), lastAcceptedID) + } + + contractTx := types.NewContractCreation(0, common.Big0, 200000, new(big.Int).Mul(big.NewInt(3), initialBaseFee), common.FromHex(abiBin)) + contractSignedTx, err := types.SignTx(contractTx, types.NewEIP155Signer(vm.chainID), testKeys[0].ToECDSA()) + require.NoError(err) + + errs := vm.txPool.AddRemotesSync([]*types.Transaction{contractSignedTx}) + for _, err := range errs { + require.NoError(err) + } + testAddr := crypto.PubkeyToAddress(testKeys[0].ToECDSA().PublicKey) + contractAddress := crypto.CreateAddress(testAddr, 0) + + <-issuer + + blk2, err := vm.BuildBlock(context.Background()) + require.NoError(err) + + err = blk2.Verify(context.Background()) + require.NoError(err) + + if status := blk2.Status(); status != choices.Processing { + t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) + } + + err = vm.SetPreference(context.Background(), blk2.ID()) + require.NoError(err) + + err = blk2.Accept(context.Background()) + require.NoError(err) + + newHead = <-newTxPoolHeadChan + if newHead.Head.Hash() != common.Hash(blk2.ID()) { + t.Fatalf("Expected new block to match") + } + + if status := blk2.Status(); status != choices.Accepted { + t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) + } + + lastAcceptedID, err = vm.LastAccepted(context.Background()) + require.NoError(err) + + if lastAcceptedID != blk2.ID() { + t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but 
found %s", blk2.ID(), lastAcceptedID) + } + + input, err := parsed.Pack("receive", []byte("X")) + require.NoError(err) + + data := hexutil.Bytes(input) + + requestArgs, err := json.Marshal(ðapi.TransactionArgs{ + To: &contractAddress, + Data: &data, + }) + require.NoError(err) + + var ethCallRequest message.CrossChainRequest = message.EthCallRequest{ + RequestArgs: requestArgs, + } + + crossChainRequest, err := crossChainCodec.Marshal(message.Version, ðCallRequest) + require.NoError(err) + + requestingChainID := ids.ID(common.BytesToHash([]byte{1, 2, 3, 4, 5})) + + // we need all items in the acceptor queue to be processed before we process a cross chain request + vm.blockChain.DrainAcceptorQueue() + err = vm.Network.CrossChainAppRequest(context.Background(), requestingChainID, 1, time.Now().Add(60*time.Second), crossChainRequest) + require.NoError(err) + require.True(calledSendCrossChainAppResponseFn, "sendCrossChainAppResponseFn was not called") } func TestVMConfigDefaults(t *testing.T) { @@ -327,7 +494,7 @@ func TestVMConfigDefaults(t *testing.T) { vmConfig.RPCTxFeeCap = txFeeCap vmConfig.EnabledEthAPIs = enabledEthAPIs assert.Equal(t, vmConfig, vm.config, "VM Config should match default with overrides") - assert.NoError(t, vm.Shutdown()) + assert.NoError(t, vm.Shutdown(context.Background())) } func TestVMNilConfig(t *testing.T) { @@ -337,7 +504,7 @@ func TestVMNilConfig(t *testing.T) { var vmConfig Config vmConfig.SetDefaults() assert.Equal(t, vmConfig, vm.config, "VM Config should match default config") - assert.NoError(t, vm.Shutdown()) + assert.NoError(t, vm.Shutdown(context.Background())) } func TestVMContinuosProfiler(t *testing.T) { @@ -351,7 +518,7 @@ func TestVMContinuosProfiler(t *testing.T) { // Sleep for twice the frequency of the profiler to give it time // to generate the first profile. 
time.Sleep(2 * time.Second) - assert.NoError(t, vm.Shutdown()) + assert.NoError(t, vm.Shutdown(context.Background())) // Check that the first profile was generated expectedFileName := filepath.Join(profilerDir, "cpu.profile.1") @@ -431,7 +598,7 @@ func TestVMUpgrades(t *testing.T) { defer func() { shutdownChan := make(chan error, 1) shutdownFunc := func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) shutdownChan <- err } @@ -448,7 +615,7 @@ func TestVMUpgrades(t *testing.T) { } }() - lastAcceptedID, err := vm.LastAccepted() + lastAcceptedID, err := vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -457,7 +624,7 @@ func TestVMUpgrades(t *testing.T) { t.Fatal("Expected last accepted block to match the genesis block hash") } - genesisBlk, err := vm.GetBlock(lastAcceptedID) + genesisBlk, err := vm.GetBlock(context.Background(), lastAcceptedID) if err != nil { t.Fatalf("Failed to get genesis block due to %s", err) } @@ -466,7 +633,7 @@ func TestVMUpgrades(t *testing.T) { t.Fatalf("Expected height of geneiss block to be 0, found: %d", height) } - if _, err := vm.ParseBlock(genesisBlk.Bytes()); err != nil { + if _, err := vm.ParseBlock(context.Background(), genesisBlk.Bytes()); err != nil { t.Fatalf("Failed to parse genesis block due to %s", err) } @@ -478,6 +645,43 @@ func TestVMUpgrades(t *testing.T) { } } +func TestImportMissingUTXOs(t *testing.T) { + // make a VM with a shared memory that has an importable UTXO to build a block + importAmount := uint64(50000000) + issuer, vm, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase2, "", "", map[ids.ShortID]uint64{ + testShortIDAddrs[0]: importAmount, + }) + defer func() { + err := vm.Shutdown(context.Background()) + require.NoError(t, err) + }() + + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + require.NoError(t, err) + err = vm.issueTx(importTx, true /*=local*/) + require.NoError(t, err) + 
<-issuer + blk, err := vm.BuildBlock(context.Background()) + require.NoError(t, err) + + // make another VM which is missing the UTXO in shared memory + _, vm2, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase2, "", "") + defer func() { + err := vm2.Shutdown(context.Background()) + require.NoError(t, err) + }() + + vm2Blk, err := vm2.ParseBlock(context.Background(), blk.Bytes()) + require.NoError(t, err) + err = vm2Blk.Verify(context.Background()) + require.ErrorIs(t, err, errMissingUTXOs) + + // This should not result in a bad block since the missing UTXO should + // prevent InsertBlockManual from being called. + badBlocks, _ := vm2.blockChain.BadBlocks() + require.Len(t, badBlocks, 0) +} + // Simple test to ensure we can issue an import transaction followed by an export transaction // and they will be indexed correctly when accepted. func TestIssueAtomicTxs(t *testing.T) { @@ -487,12 +691,12 @@ func TestIssueAtomicTxs(t *testing.T) { }) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -503,12 +707,12 @@ func TestIssueAtomicTxs(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -516,11 +720,11 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err 
!= nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -528,13 +732,13 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - if lastAcceptedID, err := vm.LastAccepted(); err != nil { + if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { t.Fatal(err) } else if lastAcceptedID != blk.ID() { t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk.ID(), lastAcceptedID) } - exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + exportTx, err := vm.newExportTx(vm.ctx.AVAXAssetID, importAmount-(2*params.AvalancheAtomicTxFee), vm.ctx.XChainID, testShortIDAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -545,12 +749,12 @@ func TestIssueAtomicTxs(t *testing.T) { <-issuer - blk2, err := vm.BuildBlock() + blk2, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk2.Verify(); err != nil { + if err := blk2.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -558,7 +762,7 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk2.Accept(); err != nil { + if err := blk2.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -566,7 +770,7 @@ func TestIssueAtomicTxs(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - if lastAcceptedID, err := vm.LastAccepted(); err != nil { + if lastAcceptedID, err := vm.LastAccepted(context.Background()); err != nil { t.Fatal(err) } else if lastAcceptedID != blk2.ID() { t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blk2.ID(), 
lastAcceptedID) @@ -593,7 +797,7 @@ func TestBuildEthTxBlock(t *testing.T) { }) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -601,7 +805,7 @@ func TestBuildEthTxBlock(t *testing.T) { newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -612,12 +816,12 @@ func TestBuildEthTxBlock(t *testing.T) { <-issuer - blk1, err := vm.BuildBlock() + blk1, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk1.Verify(); err != nil { + if err := blk1.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -625,11 +829,11 @@ func TestBuildEthTxBlock(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(blk1.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk1.ID()); err != nil { t.Fatal(err) } - if err := blk1.Accept(); err != nil { + if err := blk1.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -656,12 +860,12 @@ func TestBuildEthTxBlock(t *testing.T) { <-issuer - blk2, err := vm.BuildBlock() + blk2, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk2.Verify(); err != nil { + if err := blk2.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -669,7 +873,7 @@ func TestBuildEthTxBlock(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk2.Accept(); err != nil { + if err := blk2.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -682,7 +886,7 @@ func 
TestBuildEthTxBlock(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted() + lastAcceptedID, err := vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -697,7 +901,7 @@ func TestBuildEthTxBlock(t *testing.T) { // Clear the cache and ensure that GetBlock returns internal blocks with the correct status vm.State.Flush() - blk2Refreshed, err := vm.GetBlockInternal(blk2.ID()) + blk2Refreshed, err := vm.GetBlockInternal(context.Background(), blk2.ID()) if err != nil { t.Fatal(err) } @@ -706,7 +910,7 @@ func TestBuildEthTxBlock(t *testing.T) { } blk1RefreshedID := blk2Refreshed.Parent() - blk1Refreshed, err := vm.GetBlockInternal(blk1RefreshedID) + blk1Refreshed, err := vm.GetBlockInternal(context.Background(), blk1RefreshedID) if err != nil { t.Fatal(err) } @@ -720,6 +924,7 @@ func TestBuildEthTxBlock(t *testing.T) { restartedVM := &VM{} if err := restartedVM.Initialize( + context.Background(), NewContext(), dbManager, []byte(genesisJSONApricotPhase2), @@ -753,7 +958,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { }) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -761,21 +966,21 @@ func testConflictingImportTxs(t *testing.T, genesis string) { importTxs := make([]*Tx, 0, 3) conflictTxs := make([]*Tx, 0, 3) for i, key := range testKeys { - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[i], initialBaseFee, []*crypto.PrivateKeySECP256K1R{key}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[i], initialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } importTxs = append(importTxs, importTx) conflictAddr := testEthAddrs[(i+1)%len(testEthAddrs)] - conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictAddr, initialBaseFee, []*crypto.PrivateKeySECP256K1R{key}) + conflictTx, err := 
vm.newImportTx(vm.ctx.XChainID, conflictAddr, initialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } conflictTxs = append(conflictTxs, conflictTx) } - expectedParentBlkID, err := vm.LastAccepted() + expectedParentBlkID, err := vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -787,12 +992,12 @@ func testConflictingImportTxs(t *testing.T, genesis string) { <-issuer vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -805,7 +1010,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { } expectedParentBlkID = blk.ID() - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } } @@ -824,7 +1029,7 @@ func testConflictingImportTxs(t *testing.T, genesis string) { <-issuer vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) - _, err = vm.BuildBlock() + _, err = vm.BuildBlock(context.Background()) // The new block is verified in BuildBlock, so // BuildBlock should fail due to an attempt to // double spend an atomic UTXO. 
@@ -842,12 +1047,12 @@ func testConflictingImportTxs(t *testing.T, genesis string) { <-issuer vm.clock.Set(vm.clock.Time().Add(2 * time.Second)) - validBlock, err := vm.BuildBlock() + validBlock, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := validBlock.Verify(); err != nil { + if err := validBlock.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -880,12 +1085,12 @@ func testConflictingImportTxs(t *testing.T, genesis string) { t.Fatal(err) } - parsedBlock, err := vm.ParseBlock(blockBytes) + parsedBlock, err := vm.ParseBlock(context.Background(), blockBytes) if err != nil { t.Fatal(err) } - if err := parsedBlock.Verify(); !errors.Is(err, errConflictingAtomicInputs) { + if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) { t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err) } @@ -916,12 +1121,12 @@ func testConflictingImportTxs(t *testing.T, genesis string) { t.Fatal(err) } - parsedBlock, err = vm.ParseBlock(blockBytes) + parsedBlock, err = vm.ParseBlock(context.Background(), blockBytes) if err != nil { t.Fatal(err) } - if err := parsedBlock.Verify(); !errors.Is(err, errConflictingAtomicInputs) { + if err := parsedBlock.Verify(context.Background()); !errors.Is(err, errConflictingAtomicInputs) { t.Fatalf("Expected to fail with err: %s, but found err: %s", errConflictingAtomicInputs, err) } } @@ -1056,6 +1261,7 @@ func TestConflictingImportTxsAcrossBlocks(t *testing.T) { "apricotPhase4": genesisJSONApricotPhase4, "apricotPhase5": genesisJSONApricotPhase5, } { + genesis := genesis t.Run(name, func(t *testing.T) { testConflictingImportTxs(t, genesis) }) @@ -1066,11 +1272,12 @@ func TestConflictingImportTxsAcrossBlocks(t *testing.T) { // then calling SetPreference on block B (when it becomes preferred) // and the head of a longer chain (block D) does not corrupt the // canonical chain. 
-// A -// / \ -// B C -// | -// D +// +// A +// / \ +// B C +// | +// D func TestSetPreferenceRace(t *testing.T) { // Create two VMs which will agree on block A and then // build the two distinct preferred chains above @@ -1083,11 +1290,11 @@ func TestSetPreferenceRace(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(); err != nil { + if err := vm1.Shutdown(context.Background()); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(); err != nil { + if err := vm2.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -1097,7 +1304,7 @@ func TestSetPreferenceRace(t *testing.T) { newTxPoolHeadChan2 := make(chan core.NewTxPoolReorgEvent, 1) vm2.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan2) - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[1], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1108,12 +1315,12 @@ func TestSetPreferenceRace(t *testing.T) { <-issuer1 - vm1BlkA, err := vm1.BuildBlock() + vm1BlkA, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(); err != nil { + if err := vm1BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -1121,28 +1328,28 @@ func TestSetPreferenceRace(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(); err != nil { + if err := 
vm2BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(); err != nil { + if err := vm1BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(); err != nil { + if err := vm2BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -1179,12 +1386,12 @@ func TestSetPreferenceRace(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock() + vm1BlkB, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(); err != nil { + if err := vm1BlkB.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -1192,7 +1399,7 @@ func TestSetPreferenceRace(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { t.Fatal(err) } @@ -1207,12 +1414,12 @@ func TestSetPreferenceRace(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock() + vm2BlkC, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err := vm2BlkC.Verify(); err != nil { + if err := vm2BlkC.Verify(context.Background()); err != nil { t.Fatalf("BlkC failed verification on VM2: %s", err) } @@ -1220,7 +1427,7 @@ func TestSetPreferenceRace(t *testing.T) { t.Fatalf("Expected status of built block C to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkC.ID()); err != nil { + if err := 
vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { t.Fatal(err) } @@ -1238,12 +1445,12 @@ func TestSetPreferenceRace(t *testing.T) { } <-issuer2 - vm2BlkD, err := vm2.BuildBlock() + vm2BlkD, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkD on VM2: %s", err) } - if err := vm2BlkD.Verify(); err != nil { + if err := vm2BlkD.Verify(context.Background()); err != nil { t.Fatalf("BlkD failed verification on VM2: %s", err) } @@ -1251,7 +1458,7 @@ func TestSetPreferenceRace(t *testing.T) { t.Fatalf("Expected status of built block D to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkD.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkD.ID()); err != nil { t.Fatal(err) } @@ -1261,43 +1468,43 @@ func TestSetPreferenceRace(t *testing.T) { // Here we parse them in reverse order to simulate receiving a chain from the tip // back to the last accepted block as would typically be the case in the consensus // engine - vm1BlkD, err := vm1.ParseBlock(vm2BlkD.Bytes()) + vm1BlkD, err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) if err != nil { t.Fatalf("VM1 errored parsing blkD: %s", err) } - vm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes()) + vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) if err != nil { t.Fatalf("VM1 errored parsing blkC: %s", err) } // The blocks must be verified in order. This invariant is maintained // in the consensus engine. 
- if err := vm1BlkC.Verify(); err != nil { + if err := vm1BlkC.Verify(context.Background()); err != nil { t.Fatalf("VM1 BlkC failed verification: %s", err) } - if err := vm1BlkD.Verify(); err != nil { + if err := vm1BlkD.Verify(context.Background()); err != nil { t.Fatalf("VM1 BlkD failed verification: %s", err) } // Set VM1's preference to blockD, skipping blockC - if err := vm1.SetPreference(vm1BlkD.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkD.ID()); err != nil { t.Fatal(err) } // Accept the longer chain on both VMs and ensure there are no errors // VM1 Accepts the blocks in order - if err := vm1BlkC.Accept(); err != nil { + if err := vm1BlkC.Accept(context.Background()); err != nil { t.Fatalf("VM1 BlkC failed on accept: %s", err) } - if err := vm1BlkD.Accept(); err != nil { + if err := vm1BlkD.Accept(context.Background()); err != nil { t.Fatalf("VM1 BlkC failed on accept: %s", err) } // VM2 Accepts the blocks in order - if err := vm2BlkC.Accept(); err != nil { + if err := vm2BlkC.Accept(context.Background()); err != nil { t.Fatalf("VM2 BlkC failed on accept: %s", err) } - if err := vm2BlkD.Accept(); err != nil { + if err := vm2BlkD.Accept(context.Background()); err != nil { t.Fatalf("VM2 BlkC failed on accept: %s", err) } @@ -1333,7 +1540,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { }) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -1341,12 +1548,12 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { newTxPoolHeadChan := make(chan core.NewTxPoolReorgEvent, 1) vm.txPool.SubscribeNewReorgEvent(newTxPoolHeadChan) - importTx0A, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{key0}) + importTx0A, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key0}) if err != nil { t.Fatal(err) } // Create a conflicting transaction 
- importTx0B, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[2], initialBaseFee, []*crypto.PrivateKeySECP256K1R{key0}) + importTx0B, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[2], initialBaseFee, []*secp256k1.PrivateKey{key0}) if err != nil { t.Fatal(err) } @@ -1357,16 +1564,16 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { <-issuer - blk0, err := vm.BuildBlock() + blk0, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := blk0.Verify(); err != nil { + if err := blk0.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification: %s", err) } - if err := vm.SetPreference(blk0.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk0.ID()); err != nil { t.Fatal(err) } @@ -1391,20 +1598,20 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { <-issuer - blk1, err := vm.BuildBlock() + blk1, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build blk1: %s", err) } - if err := blk1.Verify(); err != nil { + if err := blk1.Verify(context.Background()); err != nil { t.Fatalf("blk1 failed verification due to %s", err) } - if err := vm.SetPreference(blk1.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk1.ID()); err != nil { t.Fatal(err) } - importTx1, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{key1}) + importTx1, err := vm.newImportTx(vm.ctx.XChainID, key.Address, initialBaseFee, []*secp256k1.PrivateKey{key1}) if err != nil { t.Fatalf("Failed to issue importTx1 due to: %s", err) } @@ -1415,16 +1622,16 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { <-issuer - blk2, err := vm.BuildBlock() + blk2, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := blk2.Verify(); err != nil { + if err := 
blk2.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification: %s", err) } - if err := vm.SetPreference(blk2.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk2.ID()); err != nil { t.Fatal(err) } @@ -1437,7 +1644,7 @@ func TestConflictingTransitiveAncestryWithGap(t *testing.T) { } <-issuer - _, err = vm.BuildBlock() + _, err = vm.BuildBlock(context.Background()) if err == nil { t.Fatal("Shouldn't have been able to build an invalid block") } @@ -1447,7 +1654,7 @@ func TestBonusBlocksTxs(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -1483,7 +1690,7 @@ func TestBonusBlocksTxs(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1494,7 +1701,7 @@ func TestBonusBlocksTxs(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } @@ -1507,7 +1714,7 @@ func TestBonusBlocksTxs(t *testing.T) { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -1515,11 +1722,11 @@ func TestBonusBlocksTxs(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -1527,7 +1734,7 @@ func TestBonusBlocksTxs(t *testing.T) { 
t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted() + lastAcceptedID, err := vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -1540,9 +1747,10 @@ func TestBonusBlocksTxs(t *testing.T) { // will not attempt to orphan either when verifying blocks C and D // from another VM (which have a common ancestor under the finalized // frontier). -// A -// / \ -// B C +// +// A +// / \ +// B C // // verifies block B and C, then Accepts block B. Then we test to ensure // that the VM defends against any attempt to set the preference or to @@ -1558,11 +1766,11 @@ func TestReorgProtection(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(); err != nil { + if err := vm1.Shutdown(context.Background()); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(); err != nil { + if err := vm2.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -1575,7 +1783,7 @@ func TestReorgProtection(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1586,12 +1794,12 @@ func TestReorgProtection(t *testing.T) { <-issuer1 - vm1BlkA, err := vm1.BuildBlock() + vm1BlkA, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(); err != nil { + if err := vm1BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -1599,28 +1807,28 @@ func TestReorgProtection(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { + if err := 
vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(); err != nil { + if err := vm2BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(); err != nil { + if err := vm1BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(); err != nil { + if err := vm2BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -1657,12 +1865,12 @@ func TestReorgProtection(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock() + vm1BlkB, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(); err != nil { + if err := vm1BlkB.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -1670,7 +1878,7 @@ func TestReorgProtection(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { t.Fatal(err) } @@ -1685,29 +1893,29 @@ func TestReorgProtection(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock() + vm2BlkC, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err := vm2BlkC.Verify(); err != nil { + if err := 
vm2BlkC.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkC.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - vm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes()) + vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm1BlkC.Verify(); err != nil { + if err := vm1BlkC.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } // Accept B, such that block C should get Rejected. - if err := vm1BlkB.Accept(); err != nil { + if err := vm1BlkB.Accept(context.Background()); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } @@ -1715,20 +1923,21 @@ func TestReorgProtection(t *testing.T) { // with the preferred chain lower than the last finalized block) // should NEVER happen. However, the VM defends against this // just in case. - if err := vm1.SetPreference(vm1BlkC.ID()); !strings.Contains(err.Error(), "cannot orphan finalized block") { + if err := vm1.SetPreference(context.Background(), vm1BlkC.ID()); !strings.Contains(err.Error(), "cannot orphan finalized block") { t.Fatalf("Unexpected error when setting preference that would trigger reorg: %s", err) } - if err := vm1BlkC.Accept(); !strings.Contains(err.Error(), "expected accepted block to have parent") { + if err := vm1BlkC.Accept(context.Background()); !strings.Contains(err.Error(), "expected accepted block to have parent") { t.Fatalf("Unexpected error when setting block at finalized height: %s", err) } } // Regression test to ensure that a VM that accepts block C while preferring // block B will trigger a reorg. 
-// A -// / \ -// B C +// +// A +// / \ +// B C func TestNonCanonicalAccept(t *testing.T) { importAmount := uint64(1000000000) issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ @@ -1739,11 +1948,11 @@ func TestNonCanonicalAccept(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(); err != nil { + if err := vm1.Shutdown(context.Background()); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(); err != nil { + if err := vm2.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -1756,7 +1965,7 @@ func TestNonCanonicalAccept(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1767,12 +1976,12 @@ func TestNonCanonicalAccept(t *testing.T) { <-issuer1 - vm1BlkA, err := vm1.BuildBlock() + vm1BlkA, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(); err != nil { + if err := vm1BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -1780,28 +1989,28 @@ func TestNonCanonicalAccept(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(); err != nil { + if err := vm2BlkA.Verify(context.Background()); err != nil { 
t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(); err != nil { + if err := vm1BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(); err != nil { + if err := vm2BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -1838,12 +2047,12 @@ func TestNonCanonicalAccept(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock() + vm1BlkB, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(); err != nil { + if err := vm1BlkB.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -1851,7 +2060,7 @@ func TestNonCanonicalAccept(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { t.Fatal(err) } @@ -1871,21 +2080,21 @@ func TestNonCanonicalAccept(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock() + vm2BlkC, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - vm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes()) + vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm1BlkC.Verify(); err != nil { + if err := vm1BlkC.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm1BlkC.Accept(); err != nil { + if err := 
vm1BlkC.Accept(context.Background()); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } @@ -1898,11 +2107,12 @@ func TestNonCanonicalAccept(t *testing.T) { // Regression test to ensure that a VM that verifies block B, C, then // D (preferring block B) does not trigger a reorg through the re-verification // of block C or D. -// A -// / \ -// B C -// | -// D +// +// A +// / \ +// B C +// | +// D func TestStickyPreference(t *testing.T) { importAmount := uint64(1000000000) issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ @@ -1913,11 +2123,11 @@ func TestStickyPreference(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(); err != nil { + if err := vm1.Shutdown(context.Background()); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(); err != nil { + if err := vm2.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -1930,7 +2140,7 @@ func TestStickyPreference(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -1941,12 +2151,12 @@ func TestStickyPreference(t *testing.T) { <-issuer1 - vm1BlkA, err := vm1.BuildBlock() + vm1BlkA, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(); err != nil { + if err := vm1BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -1954,28 +2164,28 @@ func TestStickyPreference(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), 
vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(); err != nil { + if err := vm2BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(); err != nil { + if err := vm1BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(); err != nil { + if err := vm2BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -2012,12 +2222,12 @@ func TestStickyPreference(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock() + vm1BlkB, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(); err != nil { + if err := vm1BlkB.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -2025,7 +2235,7 @@ func TestStickyPreference(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { t.Fatal(err) } @@ -2045,12 +2255,12 @@ func TestStickyPreference(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock() + vm2BlkC, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err := vm2BlkC.Verify(); err != nil { + if err := vm2BlkC.Verify(context.Background()); err != nil 
{ t.Fatalf("BlkC failed verification on VM2: %s", err) } @@ -2058,7 +2268,7 @@ func TestStickyPreference(t *testing.T) { t.Fatalf("Expected status of built block C to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkC.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { t.Fatal(err) } @@ -2075,19 +2285,19 @@ func TestStickyPreference(t *testing.T) { } <-issuer2 - vm2BlkD, err := vm2.BuildBlock() + vm2BlkD, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkD on VM2: %s", err) } // Parse blocks produced in vm2 - vm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes()) + vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } blkCHash := vm1BlkC.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() - vm1BlkD, err := vm1.ParseBlock(vm2BlkD.Bytes()) + vm1BlkD, err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } @@ -2095,10 +2305,10 @@ func TestStickyPreference(t *testing.T) { blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() // Should be no-ops - if err := vm1BlkC.Verify(); err != nil { + if err := vm1BlkC.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm1BlkD.Verify(); err != nil { + if err := vm1BlkD.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { @@ -2112,10 +2322,10 @@ func TestStickyPreference(t *testing.T) { } // Should still be no-ops on re-verify - if err := vm1BlkC.Verify(); err != nil { + if err := vm1BlkC.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm1BlkD.Verify(); err != nil { + if err := 
vm1BlkD.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } if b := vm1.blockChain.GetBlockByNumber(blkBHeight); b.Hash() != blkBHash { @@ -2129,7 +2339,7 @@ func TestStickyPreference(t *testing.T) { } // Should be queryable after setting preference to side chain - if err := vm1.SetPreference(vm1BlkD.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkD.ID()); err != nil { t.Fatal(err) } @@ -2144,15 +2354,15 @@ func TestStickyPreference(t *testing.T) { } // Attempt to accept out of order - if err := vm1BlkD.Accept(); !strings.Contains(err.Error(), "expected accepted block to have parent") { + if err := vm1BlkD.Accept(context.Background()); !strings.Contains(err.Error(), "expected accepted block to have parent") { t.Fatalf("unexpected error when accepting out of order block: %s", err) } // Accept in order - if err := vm1BlkC.Accept(); err != nil { + if err := vm1BlkC.Accept(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm1BlkD.Accept(); err != nil { + if err := vm1BlkD.Accept(context.Background()); err != nil { t.Fatalf("Block failed acceptance on VM1: %s", err) } @@ -2171,11 +2381,12 @@ func TestStickyPreference(t *testing.T) { // Regression test to ensure that a VM that prefers block B is able to parse // block C but unable to parse block D because it names B as an uncle, which // are not supported. 
-// A -// / \ -// B C -// | -// D +// +// A +// / \ +// B C +// | +// D func TestUncleBlock(t *testing.T) { importAmount := uint64(1000000000) issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ @@ -2186,10 +2397,10 @@ func TestUncleBlock(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(); err != nil { + if err := vm1.Shutdown(context.Background()); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(); err != nil { + if err := vm2.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -2202,7 +2413,7 @@ func TestUncleBlock(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2213,12 +2424,12 @@ func TestUncleBlock(t *testing.T) { <-issuer1 - vm1BlkA, err := vm1.BuildBlock() + vm1BlkA, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(); err != nil { + if err := vm1BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -2226,28 +2437,28 @@ func TestUncleBlock(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(); err != nil { + if err := vm2BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed 
verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(); err != nil { + if err := vm1BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(); err != nil { + if err := vm2BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -2281,12 +2492,12 @@ func TestUncleBlock(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock() + vm1BlkB, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(); err != nil { + if err := vm1BlkB.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -2294,7 +2505,7 @@ func TestUncleBlock(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { t.Fatal(err) } @@ -2306,12 +2517,12 @@ func TestUncleBlock(t *testing.T) { } <-issuer2 - vm2BlkC, err := vm2.BuildBlock() + vm2BlkC, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err := vm2BlkC.Verify(); err != nil { + if err := vm2BlkC.Verify(context.Background()); err != nil { t.Fatalf("BlkC failed verification on VM2: %s", err) } @@ -2319,7 +2530,7 @@ func TestUncleBlock(t *testing.T) { t.Fatalf("Expected status of built block C to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkC.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { t.Fatal(err) } @@ -2336,7 +2547,7 @@ 
func TestUncleBlock(t *testing.T) { } <-issuer2 - vm2BlkD, err := vm2.BuildBlock() + vm2BlkD, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkD on VM2: %s", err) } @@ -2360,13 +2571,13 @@ func TestUncleBlock(t *testing.T) { if err != nil { t.Fatal(err) } - if err := uncleBlock.Verify(); !errors.Is(err, errUnclesUnsupported) { + if err := uncleBlock.Verify(context.Background()); !errors.Is(err, errUnclesUnsupported) { t.Fatalf("VM2 should have failed with %q but got %q", errUnclesUnsupported, err.Error()) } - if _, err := vm1.ParseBlock(vm2BlkC.Bytes()); err != nil { + if _, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()); err != nil { t.Fatalf("VM1 errored parsing blkC: %s", err) } - if _, err := vm1.ParseBlock(uncleBlock.Bytes()); !errors.Is(err, errUnclesUnsupported) { + if _, err := vm1.ParseBlock(context.Background(), uncleBlock.Bytes()); !errors.Is(err, errUnclesUnsupported) { t.Fatalf("VM1 should have failed with %q but got %q", errUnclesUnsupported, err.Error()) } } @@ -2380,12 +2591,12 @@ func TestEmptyBlock(t *testing.T) { }) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2396,7 +2607,7 @@ func TestEmptyBlock(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } @@ -2423,21 +2634,22 @@ func TestEmptyBlock(t *testing.T) { t.Fatal(err) } - if _, err := vm.ParseBlock(emptyBlock.Bytes()); !errors.Is(err, errEmptyBlock) { + if _, err := vm.ParseBlock(context.Background(), emptyBlock.Bytes()); !errors.Is(err, 
errEmptyBlock) { t.Fatalf("VM should have failed with errEmptyBlock but got %s", err.Error()) } - if err := emptyBlock.Verify(); !errors.Is(err, errEmptyBlock) { + if err := emptyBlock.Verify(context.Background()); !errors.Is(err, errEmptyBlock) { t.Fatalf("block should have failed verification with errEmptyBlock but got %s", err.Error()) } } // Regression test to ensure that a VM that verifies block B, C, then // D (preferring block B) reorgs when C and then D are accepted. -// A -// / \ -// B C -// | -// D +// +// A +// / \ +// B C +// | +// D func TestAcceptReorg(t *testing.T) { importAmount := uint64(1000000000) issuer1, vm1, _, _, _ := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase0, "", "", map[ids.ShortID]uint64{ @@ -2448,11 +2660,11 @@ func TestAcceptReorg(t *testing.T) { }) defer func() { - if err := vm1.Shutdown(); err != nil { + if err := vm1.Shutdown(context.Background()); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(); err != nil { + if err := vm2.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -2465,7 +2677,7 @@ func TestAcceptReorg(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2476,12 +2688,12 @@ func TestAcceptReorg(t *testing.T) { <-issuer1 - vm1BlkA, err := vm1.BuildBlock() + vm1BlkA, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := vm1BlkA.Verify(); err != nil { + if err := vm1BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -2489,28 +2701,28 @@ func TestAcceptReorg(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) 
} - if err := vm1.SetPreference(vm1BlkA.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkA.ID()); err != nil { t.Fatal(err) } - vm2BlkA, err := vm2.ParseBlock(vm1BlkA.Bytes()) + vm2BlkA, err := vm2.ParseBlock(context.Background(), vm1BlkA.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm2BlkA.Verify(); err != nil { + if err := vm2BlkA.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM2: %s", err) } if status := vm2BlkA.Status(); status != choices.Processing { t.Fatalf("Expected status of block on VM2 to be %s, but found %s", choices.Processing, status) } - if err := vm2.SetPreference(vm2BlkA.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkA.ID()); err != nil { t.Fatal(err) } - if err := vm1BlkA.Accept(); err != nil { + if err := vm1BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM1 failed to accept block: %s", err) } - if err := vm2BlkA.Accept(); err != nil { + if err := vm2BlkA.Accept(context.Background()); err != nil { t.Fatalf("VM2 failed to accept block: %s", err) } @@ -2546,12 +2758,12 @@ func TestAcceptReorg(t *testing.T) { <-issuer1 - vm1BlkB, err := vm1.BuildBlock() + vm1BlkB, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := vm1BlkB.Verify(); err != nil { + if err := vm1BlkB.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -2559,7 +2771,7 @@ func TestAcceptReorg(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm1.SetPreference(vm1BlkB.ID()); err != nil { + if err := vm1.SetPreference(context.Background(), vm1BlkB.ID()); err != nil { t.Fatal(err) } @@ -2572,16 +2784,16 @@ func TestAcceptReorg(t *testing.T) { <-issuer2 - vm2BlkC, err := vm2.BuildBlock() + vm2BlkC, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkC on VM2: %s", err) } - if err 
:= vm2BlkC.Verify(); err != nil { + if err := vm2BlkC.Verify(context.Background()); err != nil { t.Fatalf("BlkC failed verification on VM2: %s", err) } - if err := vm2.SetPreference(vm2BlkC.ID()); err != nil { + if err := vm2.SetPreference(context.Background(), vm2BlkC.ID()); err != nil { t.Fatal(err) } @@ -2599,26 +2811,26 @@ func TestAcceptReorg(t *testing.T) { <-issuer2 - vm2BlkD, err := vm2.BuildBlock() + vm2BlkD, err := vm2.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build BlkD on VM2: %s", err) } // Parse blocks produced in vm2 - vm1BlkC, err := vm1.ParseBlock(vm2BlkC.Bytes()) + vm1BlkC, err := vm1.ParseBlock(context.Background(), vm2BlkC.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - vm1BlkD, err := vm1.ParseBlock(vm2BlkD.Bytes()) + vm1BlkD, err := vm1.ParseBlock(context.Background(), vm2BlkD.Bytes()) if err != nil { t.Fatalf("Unexpected error parsing block from vm2: %s", err) } - if err := vm1BlkC.Verify(); err != nil { + if err := vm1BlkC.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } - if err := vm1BlkD.Verify(); err != nil { + if err := vm1BlkD.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM1: %s", err) } @@ -2627,7 +2839,7 @@ func TestAcceptReorg(t *testing.T) { t.Fatalf("expected current block to have hash %s but got %s", blkBHash.Hex(), b.Hash().Hex()) } - if err := vm1BlkC.Accept(); err != nil { + if err := vm1BlkC.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -2635,11 +2847,11 @@ func TestAcceptReorg(t *testing.T) { if b := vm1.blockChain.CurrentBlock(); b.Hash() != blkCHash { t.Fatalf("expected current block to have hash %s but got %s", blkCHash.Hex(), b.Hash().Hex()) } - if err := vm1BlkB.Reject(); err != nil { + if err := vm1BlkB.Reject(context.Background()); err != nil { t.Fatal(err) } - if err := vm1BlkD.Accept(); err != nil { + if err := vm1BlkD.Accept(context.Background()); 
err != nil { t.Fatal(err) } blkDHash := vm1BlkD.(*chain.BlockWrapper).Block.(*Block).ethBlock.Hash() @@ -2655,12 +2867,12 @@ func TestFutureBlock(t *testing.T) { }) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2671,7 +2883,7 @@ func TestFutureBlock(t *testing.T) { <-issuer - blkA, err := vm.BuildBlock() + blkA, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } @@ -2699,7 +2911,7 @@ func TestFutureBlock(t *testing.T) { t.Fatal(err) } - if err := futureBlock.Verify(); err == nil { + if err := futureBlock.Verify(context.Background()); err == nil { t.Fatal("Future block should have failed verification due to block timestamp too far in the future") } else if !strings.Contains(err.Error(), "block timestamp is too far in the future") { t.Fatalf("Expected error to be block timestamp too far in the future but found %s", err) @@ -2714,7 +2926,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { testShortIDAddrs[0]: importAmount, }) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -2725,7 +2937,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { key := testKeys[0].ToECDSA() address := testEthAddrs[0] - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2736,12 +2948,12 @@ func TestBuildApricotPhase1Block(t *testing.T) { <-issuer 
- blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -2749,11 +2961,11 @@ func TestBuildApricotPhase1Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -2788,12 +3000,12 @@ func TestBuildApricotPhase1Block(t *testing.T) { <-issuer - blk, err = vm.BuildBlock() + blk, err = vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -2801,7 +3013,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -2809,7 +3021,7 @@ func TestBuildApricotPhase1Block(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted() + lastAcceptedID, err := vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -2836,12 +3048,12 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { }) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, 
[]*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2852,12 +3064,12 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatalf("Failed to build block with import transaction: %s", err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatalf("Block failed verification on VM: %s", err) } @@ -2865,7 +3077,7 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } @@ -2890,7 +3102,7 @@ func TestLastAcceptedBlockNumberAllow(t *testing.T) { t.Fatalf("expected ErrUnfinalizedData but got %s", err.Error()) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatalf("VM failed to accept block: %s", err) } @@ -2909,17 +3121,17 @@ func TestReissueAtomicTx(t *testing.T) { }) defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() - genesisBlkID, err := vm.LastAccepted() + genesisBlkID, err := vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -2930,7 +3142,7 @@ func TestReissueAtomicTx(t *testing.T) { <-issuer - blkA, err := vm.BuildBlock() + blkA, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } @@ -2939,23 +3151,23 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but 
found %s", choices.Processing, status) } - if err := blkA.Verify(); err != nil { + if err := blkA.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := vm.SetPreference(blkA.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blkA.ID()); err != nil { t.Fatal(err) } // SetPreference to parent before rejecting (will rollback state to genesis // so that atomic transaction can be reissued, otherwise current block will // conflict with UTXO to be reissued) - if err := vm.SetPreference(genesisBlkID); err != nil { + if err := vm.SetPreference(context.Background(), genesisBlkID); err != nil { t.Fatal(err) } // Rejecting [blkA] should cause [importTx] to be re-issued into the mempool. - if err := blkA.Reject(); err != nil { + if err := blkA.Reject(context.Background()); err != nil { t.Fatal(err) } @@ -2964,7 +3176,7 @@ func TestReissueAtomicTx(t *testing.T) { // as Rejected. time.Sleep(2 * time.Second) <-issuer - blkB, err := vm.BuildBlock() + blkB, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } @@ -2979,7 +3191,7 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatalf("Expected status of blkB to be %s, but found %s", choices.Processing, status) } - if err := blkB.Verify(); err != nil { + if err := blkB.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -2987,11 +3199,11 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatalf("Expected status of blkC to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(blkB.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blkB.ID()); err != nil { t.Fatal(err) } - if err := blkB.Accept(); err != nil { + if err := blkB.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -2999,7 +3211,7 @@ func TestReissueAtomicTx(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - if lastAcceptedID, err := vm.LastAccepted(); err != nil { + if lastAcceptedID, err := 
vm.LastAccepted(context.Background()); err != nil { t.Fatal(err) } else if lastAcceptedID != blkB.ID() { t.Fatalf("Expected last accepted blockID to be the accepted block: %s, but found %s", blkB.ID(), lastAcceptedID) @@ -3018,7 +3230,7 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase1, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -3030,15 +3242,15 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { t.Fatal(err) } <-issuer - exportBlk1, err := vm.BuildBlock() + exportBlk1, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := exportBlk1.Verify(); err != nil { + if err := exportBlk1.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := vm.SetPreference(exportBlk1.ID()); err != nil { + if err := vm.SetPreference(context.Background(), exportBlk1.ID()); err != nil { t.Fatal(err) } @@ -3056,7 +3268,7 @@ func TestAtomicTxFailsEVMStateTransferBuildBlock(t *testing.T) { } <-issuer - _, err = vm.BuildBlock() + _, err = vm.BuildBlock(context.Background()) if err == nil { t.Fatal("BuildBlock should have returned an error due to invalid export transaction") } @@ -3066,7 +3278,7 @@ func TestBuildInvalidBlockHead(t *testing.T) { issuer, vm, _, _, _ := GenesisVM(t, true, genesisJSONApricotPhase0, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -3097,7 +3309,7 @@ func TestBuildInvalidBlockHead(t *testing.T) { SourceChain: vm.ctx.XChainID, } tx := &Tx{UnsignedAtomicTx: utx} - if err := tx.Sign(vm.codec, [][]*crypto.PrivateKeySECP256K1R{{key0}}); err != nil { + if err := tx.Sign(vm.codec, [][]*secp256k1.PrivateKey{{key0}}); err != nil { t.Fatal(err) } @@ -3115,7 +3327,7 @@ func TestBuildInvalidBlockHead(t *testing.T) { <-issuer - if 
_, err := vm.BuildBlock(); err == nil { + if _, err := vm.BuildBlock(context.Background()); err == nil { t.Fatalf("Unexpectedly created a block") } @@ -3154,8 +3366,9 @@ func TestConfigureLogLevel(t *testing.T) { ctx, dbManager, genesisBytes, issuer, _ := setupGenesis(t, test.genesisJSON) appSender := &engCommon.SenderTest{T: t} appSender.CantSendAppGossip = true - appSender.SendAppGossipF = func([]byte) error { return nil } + appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } err := vm.Initialize( + context.Background(), ctx, dbManager, genesisBytes, @@ -3179,7 +3392,7 @@ func TestConfigureLogLevel(t *testing.T) { if err == nil { shutdownChan := make(chan error, 1) shutdownFunc := func() { - err := vm.Shutdown() + err := vm.Shutdown(context.Background()) shutdownChan <- err } go shutdownFunc() @@ -3205,7 +3418,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase4, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -3247,7 +3460,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -3258,12 +3471,12 @@ func TestBuildApricotPhase4Block(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -3271,11 +3484,11 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(blk.ID()); err != 
nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -3325,12 +3538,12 @@ func TestBuildApricotPhase4Block(t *testing.T) { <-issuer - blk, err = vm.BuildBlock() + blk, err = vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -3338,7 +3551,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -3361,7 +3574,7 @@ func TestBuildApricotPhase4Block(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted() + lastAcceptedID, err := vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -3387,7 +3600,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -3429,7 +3642,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatal(err) } - importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, address, initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -3440,12 +3653,12 @@ func TestBuildApricotPhase5Block(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := 
blk.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -3453,11 +3666,11 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -3499,12 +3712,12 @@ func TestBuildApricotPhase5Block(t *testing.T) { <-issuer - blk, err = vm.BuildBlock() + blk, err = vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -3512,7 +3725,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -3535,7 +3748,7 @@ func TestBuildApricotPhase5Block(t *testing.T) { t.Fatalf("Expected status of accepted block to be %s, but found %s", choices.Accepted, status) } - lastAcceptedID, err := vm.LastAccepted() + lastAcceptedID, err := vm.LastAccepted(context.Background()) if err != nil { t.Fatal(err) } @@ -3562,7 +3775,7 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase1, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -3580,12 +3793,12 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { 
t.Fatal(err) } @@ -3593,11 +3806,11 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { t.Fatalf("Expected status of built block to be %s, but found %s", choices.Processing, status) } - if err := vm.SetPreference(blk.ID()); err != nil { + if err := vm.SetPreference(context.Background(), blk.ID()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } @@ -3611,7 +3824,7 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) { vm.mempool.AddTx(importTxs[1]) vm.mempool.AddTx(importTxs[2]) - if _, err := vm.BuildBlock(); err == nil { + if _, err := vm.BuildBlock(context.Background()); err == nil { t.Fatal("Expected build block to fail due to empty block") } } @@ -3628,15 +3841,15 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { t.Fatal(err) } defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() // Create a conflict set for each pair of transactions - conflictSets := make([]ids.Set, len(testKeys)) + conflictSets := make([]set.Set[ids.ID], len(testKeys)) for index, key := range testKeys { - importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[index], initialBaseFee, []*crypto.PrivateKeySECP256K1R{key}) + importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[index], initialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -3644,7 +3857,7 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { t.Fatal(err) } conflictSets[index].Add(importTx.ID()) - conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictKey.Address, initialBaseFee, []*crypto.PrivateKeySECP256K1R{key}) + conflictTx, err := vm.newImportTx(vm.ctx.XChainID, conflictKey.Address, initialBaseFee, []*secp256k1.PrivateKey{key}) if err != nil { t.Fatal(err) } @@ -3658,13 +3871,13 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { <-issuer // Note: 
this only checks the path through OnFinalizeAndAssemble, we should make sure to add a test // that verifies blocks received from the network will also fail verification - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } atomicTxs := blk.(*chain.BlockWrapper).Block.(*Block).atomicTxs assert.True(t, len(atomicTxs) == len(testKeys), "Conflict transactions should be out of the batch") - atomicTxIDs := ids.Set{} + atomicTxIDs := set.Set[ids.ID]{} for _, tx := range atomicTxs { atomicTxIDs.Add(tx.ID()) } @@ -3677,10 +3890,10 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) { assert.Equal(t, 1, conflictSet.Len()) } - if err := blk.Verify(); err != nil { + if err := blk.Verify(context.Background()); err != nil { t.Fatal(err) } - if err := blk.Accept(); err != nil { + if err := blk.Accept(context.Background()); err != nil { t.Fatal(err) } } @@ -3690,7 +3903,7 @@ func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { issuer, vm, _, sharedMemory, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") defer func() { - if err := vm.Shutdown(); err != nil { + if err := vm.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -3715,7 +3928,7 @@ func TestBuildBlockDoesNotExceedAtomicGasLimit(t *testing.T) { } <-issuer - blk, err := vm.BuildBlock() + blk, err := vm.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } @@ -3739,10 +3952,10 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { _, vm2, _, sharedMemory2, _ := GenesisVM(t, true, genesisJSONApricotPhase5, "", "") defer func() { - if err := vm1.Shutdown(); err != nil { + if err := vm1.Shutdown(context.Background()); err != nil { t.Fatal(err) } - if err := vm2.Shutdown(); err != nil { + if err := vm2.Shutdown(context.Background()); err != nil { t.Fatal(err) } }() @@ -3764,7 +3977,7 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { // Double the initial base fee used when estimating the cost 
of this transaction to ensure that when it is // used in ApricotPhase5 it still pays a sufficient fee with the fixed fee per atomic transaction. - importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), []*crypto.PrivateKeySECP256K1R{testKeys[0]}) + importTx, err := vm1.newImportTx(vm1.ctx.XChainID, testEthAddrs[0], new(big.Int).Mul(common.Big2, initialBaseFee), []*secp256k1.PrivateKey{testKeys[0]}) if err != nil { t.Fatal(err) } @@ -3773,11 +3986,11 @@ func TestExtraStateChangeAtomicGasLimitExceeded(t *testing.T) { } <-issuer - blk1, err := vm1.BuildBlock() + blk1, err := vm1.BuildBlock(context.Background()) if err != nil { t.Fatal(err) } - if err := blk1.Verify(); err != nil { + if err := blk1.Verify(context.Background()); err != nil { t.Fatal(err) } @@ -3816,3 +4029,47 @@ func TestGetAtomicRepositoryRepairHeights(t *testing.T) { sorted := sort.SliceIsSorted(mainnetHeights, func(i, j int) bool { return mainnetHeights[i] < mainnetHeights[j] }) assert.True(t, sorted) } + +func TestSkipChainConfigCheckCompatible(t *testing.T) { + // Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice. + metrics.Enabled = false + defer func() { metrics.Enabled = true }() + + importAmount := uint64(50000000) + issuer, vm, dbManager, _, appSender := GenesisVMWithUTXOs(t, true, genesisJSONApricotPhase1, "", "", map[ids.ShortID]uint64{ + testShortIDAddrs[0]: importAmount, + }) + defer func() { require.NoError(t, vm.Shutdown(context.Background())) }() + + // Since rewinding is permitted for last accepted height of 0, we must + // accept one block to test the SkipUpgradeCheck functionality. 
+ importTx, err := vm.newImportTx(vm.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*secp256k1.PrivateKey{testKeys[0]}) + require.NoError(t, err) + require.NoError(t, vm.issueTx(importTx, true /*=local*/)) + <-issuer + + blk, err := vm.BuildBlock(context.Background()) + require.NoError(t, err) + require.NoError(t, blk.Verify(context.Background())) + require.NoError(t, vm.SetPreference(context.Background(), blk.ID())) + require.NoError(t, blk.Accept(context.Background())) + + reinitVM := &VM{} + // use the block's timestamp instead of 0 since rewind to genesis + // is hardcoded to be allowed in core/genesis.go. + genesisWithUpgrade := &core.Genesis{} + require.NoError(t, json.Unmarshal([]byte(genesisJSONApricotPhase1), genesisWithUpgrade)) + genesisWithUpgrade.Config.ApricotPhase2BlockTimestamp = big.NewInt(blk.Timestamp().Unix()) + genesisWithUpgradeBytes, err := json.Marshal(genesisWithUpgrade) + require.NoError(t, err) + + // this will not be allowed + err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, []byte{}, issuer, []*engCommon.Fx{}, appSender) + require.ErrorContains(t, err, "mismatching ApricotPhase2 fork block timestamp in database") + + // try again with skip-upgrade-check + config := []byte("{\"skip-upgrade-check\": true}") + err = reinitVM.Initialize(context.Background(), vm.ctx, dbManager, genesisWithUpgradeBytes, []byte{}, config, issuer, []*engCommon.Fx{}, appSender) + require.NoError(t, err) + require.NoError(t, reinitVM.Shutdown(context.Background())) +} diff --git a/coreth/plugin/main.go b/coreth/plugin/main.go index bfd6c965..4080476d 100644 --- a/coreth/plugin/main.go +++ b/coreth/plugin/main.go @@ -4,6 +4,7 @@ package main import ( + "context" "fmt" "os" @@ -29,5 +30,5 @@ func main() { os.Exit(1) } - rpcchainvm.Serve(&evm.VM{IsPlugin: true}) + rpcchainvm.Serve(context.Background(), &evm.VM{IsPlugin: true}) } diff --git a/coreth/rpc/client.go b/coreth/rpc/client.go index 5407bb5b..68a68390 
100644 --- a/coreth/rpc/client.go +++ b/coreth/rpc/client.go @@ -41,6 +41,7 @@ import ( ) var ( + ErrBadResult = errors.New("bad result in JSON-RPC response") ErrClientQuit = errors.New("client is closed") ErrNoResult = errors.New("no result in JSON-RPC response") ErrSubscriptionQueueOverflow = errors.New("subscription queue overflow") @@ -109,7 +110,7 @@ type Client struct { reqTimeout chan *requestOp // removes response IDs when call timeout expires } -type reconnectFunc func(ctx context.Context) (ServerCodec, error) +type reconnectFunc func(context.Context) (ServerCodec, error) type clientContextKey struct{} @@ -168,14 +169,16 @@ func (op *requestOp) wait(ctx context.Context, c *Client) (*jsonrpcMessage, erro // // The currently supported URL schemes are "http", "https", "ws" and "wss". If rawurl is a // file name with no URL scheme, a local socket connection is established using UNIX -// domain sockets on supported platforms and named pipes on Windows. If you want to -// configure transport options, use DialHTTP, DialWebsocket or DialIPC instead. +// domain sockets on supported platforms and named pipes on Windows. +// +// If you want to further configure the transport, use DialOptions instead of this +// function. // // For websocket connections, the origin is set to the local host name. // -// The client reconnects automatically if the connection is lost. +// The client reconnects automatically when the connection is lost. func Dial(rawurl string) (*Client, error) { - return DialContext(context.Background(), rawurl) + return DialOptions(context.Background(), rawurl) } // DialContext creates a new RPC client, just like Dial. @@ -183,22 +186,46 @@ func Dial(rawurl string) (*Client, error) { // The context is used to cancel or time out the initial connection establishment. It does // not affect subsequent interactions with the client. 
func DialContext(ctx context.Context, rawurl string) (*Client, error) { + return DialOptions(ctx, rawurl) +} + +// DialOptions creates a new RPC client for the given URL. You can supply any of the +// pre-defined client options to configure the underlying transport. +// +// The context is used to cancel or time out the initial connection establishment. It does +// not affect subsequent interactions with the client. +// +// The client reconnects automatically when the connection is lost. +func DialOptions(ctx context.Context, rawurl string, options ...ClientOption) (*Client, error) { u, err := url.Parse(rawurl) if err != nil { return nil, err } + + cfg := new(clientConfig) + for _, opt := range options { + opt.applyOption(cfg) + } + + var reconnect reconnectFunc switch u.Scheme { case "http", "https": - return DialHTTP(rawurl) + reconnect = newClientTransportHTTP(rawurl, cfg) case "ws", "wss": - return DialWebsocket(ctx, rawurl, "") + rc, err := newClientTransportWS(rawurl, cfg) + if err != nil { + return nil, err + } + reconnect = rc //case "stdio": - // return DialStdIO(ctx) + //reconnect = newClientTransportIO(os.Stdin, os.Stdout) //case "": - // return DialIPC(ctx, rawurl) + //reconnect = newClientTransportIPC(rawurl) default: return nil, fmt.Errorf("no known transport for URL scheme %q", u.Scheme) } + + return newClient(ctx, reconnect) } // ClientFromContext retrieves the client from the context, if any. This can be used to perform diff --git a/coreth/rpc/client_opt.go b/coreth/rpc/client_opt.go new file mode 100644 index 00000000..c1b99312 --- /dev/null +++ b/coreth/rpc/client_opt.go @@ -0,0 +1,116 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. 
+// ********** +// Copyright 2022 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package rpc + +import ( + "net/http" + + "github.com/gorilla/websocket" +) + +// ClientOption is a configuration option for the RPC client. +type ClientOption interface { + applyOption(*clientConfig) +} + +type clientConfig struct { + httpClient *http.Client + httpHeaders http.Header + httpAuth HTTPAuth + + wsDialer *websocket.Dialer +} + +func (cfg *clientConfig) initHeaders() { + if cfg.httpHeaders == nil { + cfg.httpHeaders = make(http.Header) + } +} + +func (cfg *clientConfig) setHeader(key, value string) { + cfg.initHeaders() + cfg.httpHeaders.Set(key, value) +} + +type optionFunc func(*clientConfig) + +func (fn optionFunc) applyOption(opt *clientConfig) { + fn(opt) +} + +// WithWebsocketDialer configures the websocket.Dialer used by the RPC client. +func WithWebsocketDialer(dialer websocket.Dialer) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.wsDialer = &dialer + }) +} + +// WithHeader configures HTTP headers set by the RPC client. Headers set using this option +// will be used for both HTTP and WebSocket connections. 
+func WithHeader(key, value string) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.initHeaders() + cfg.httpHeaders.Set(key, value) + }) +} + +// WithHeaders configures HTTP headers set by the RPC client. Headers set using this +// option will be used for both HTTP and WebSocket connections. +func WithHeaders(headers http.Header) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.initHeaders() + for k, vs := range headers { + cfg.httpHeaders[k] = vs + } + }) +} + +// WithHTTPClient configures the http.Client used by the RPC client. +func WithHTTPClient(c *http.Client) ClientOption { + return optionFunc(func(cfg *clientConfig) { + cfg.httpClient = c + }) +} + +// WithHTTPAuth configures HTTP request authentication. The given provider will be called +// whenever a request is made. Note that only one authentication provider can be active at +// any time. +func WithHTTPAuth(a HTTPAuth) ClientOption { + if a == nil { + panic("nil auth") + } + return optionFunc(func(cfg *clientConfig) { + cfg.httpAuth = a + }) +} + +// A HTTPAuth function is called by the client whenever a HTTP request is sent. +// The function must be safe for concurrent use. +// +// Usually, HTTPAuth functions will call h.Set("authorization", "...") to add +// auth information to the request. +type HTTPAuth func(h http.Header) error diff --git a/coreth/rpc/client_opt_test.go b/coreth/rpc/client_opt_test.go new file mode 100644 index 00000000..272d73cb --- /dev/null +++ b/coreth/rpc/client_opt_test.go @@ -0,0 +1,34 @@ +// (c) 2023, Ava Labs, Inc. +// +// This file is a derived work, based on the go-ethereum library whose original +// notices appear below. +// +// It is distributed under a license compatible with the licensing terms of the +// original code from which it is derived. +// +// Much love to the original authors for their work. 
+package rpc_test + +import ( + "context" + "net/http" + "time" + + "github.com/ava-labs/coreth/rpc" +) + +// This example configures a HTTP-based RPC client with two options - one setting the +// overall request timeout, the other adding a custom HTTP header to all requests. +func ExampleDialOptions() { + tokenHeader := rpc.WithHeader("x-token", "foo") + httpClient := rpc.WithHTTPClient(&http.Client{ + Timeout: 10 * time.Second, + }) + + ctx := context.Background() + c, err := rpc.DialOptions(ctx, "http://rpc.example.com", httpClient, tokenHeader) + if err != nil { + panic(err) + } + c.Close() +} diff --git a/coreth/rpc/client_test.go b/coreth/rpc/client_test.go index 5dc92b9a..5bac4762 100644 --- a/coreth/rpc/client_test.go +++ b/coreth/rpc/client_test.go @@ -90,11 +90,15 @@ func TestClientErrorData(t *testing.T) { } // Check code. + // The method handler returns an error value which implements the rpc.Error + // interface, i.e. it has a custom error code. The server returns this error code. + expectedCode := testError{}.ErrorCode() if e, ok := err.(Error); !ok { t.Fatalf("client did not return rpc.Error, got %#v", e) - } else if e.ErrorCode() != (testError{}.ErrorCode()) { - t.Fatalf("wrong error code %d, want %d", e.ErrorCode(), testError{}.ErrorCode()) + } else if e.ErrorCode() != expectedCode { + t.Fatalf("wrong error code %d, want %d", e.ErrorCode(), expectedCode) } + // Check data. if e, ok := err.(DataError); !ok { t.Fatalf("client did not return rpc.DataError, got %#v", e) diff --git a/coreth/rpc/doc.go b/coreth/rpc/doc.go index 5df9d113..49240c6f 100644 --- a/coreth/rpc/doc.go +++ b/coreth/rpc/doc.go @@ -25,7 +25,6 @@ // along with the go-ethereum library. If not, see . /* - Package rpc implements bi-directional JSON-RPC 2.0 on multiple transports. It provides access to the exported methods of an object across a network or other I/O @@ -33,16 +32,16 @@ connection. 
After creating a server or client instance, objects can be registere them visible as 'services'. Exported methods that follow specific conventions can be called remotely. It also has support for the publish/subscribe pattern. -RPC Methods +# RPC Methods Methods that satisfy the following criteria are made available for remote access: - - method must be exported - - method returns 0, 1 (response or error) or 2 (response and error) values + - method must be exported + - method returns 0, 1 (response or error) or 2 (response and error) values An example method: - func (s *CalcService) Add(a, b int) (int, error) + func (s *CalcService) Add(a, b int) (int, error) When the returned error isn't nil the returned integer is ignored and the error is sent back to the client. Otherwise the returned integer is sent back to the client. @@ -51,7 +50,7 @@ Optional arguments are supported by accepting pointer values as arguments. E.g. to do the addition in an optional finite field we can accept a mod argument as pointer value. - func (s *CalcService) Add(a, b int, mod *int) (int, error) + func (s *CalcService) Add(a, b int, mod *int) (int, error) This RPC method can be called with 2 integers and a null value as third argument. In that case the mod argument will be nil. Or it can be called with 3 integers, in that case mod @@ -66,40 +65,40 @@ to the client out of order. 
An example server which uses the JSON codec: - type CalculatorService struct {} + type CalculatorService struct {} - func (s *CalculatorService) Add(a, b int) int { - return a + b - } + func (s *CalculatorService) Add(a, b int) int { + return a + b + } - func (s *CalculatorService) Div(a, b int) (int, error) { - if b == 0 { - return 0, errors.New("divide by zero") - } - return a/b, nil - } + func (s *CalculatorService) Div(a, b int) (int, error) { + if b == 0 { + return 0, errors.New("divide by zero") + } + return a/b, nil + } - calculator := new(CalculatorService) - server := NewServer() - server.RegisterName("calculator", calculator) - l, _ := net.ListenUnix("unix", &net.UnixAddr{Net: "unix", Name: "/tmp/calculator.sock"}) - server.ServeListener(l) + calculator := new(CalculatorService) + server := NewServer() + server.RegisterName("calculator", calculator) + l, _ := net.ListenUnix("unix", &net.UnixAddr{Net: "unix", Name: "/tmp/calculator.sock"}) + server.ServeListener(l) -Subscriptions +# Subscriptions The package also supports the publish subscribe pattern through the use of subscriptions. A method that is considered eligible for notifications must satisfy the following criteria: - - method must be exported - - first method argument type must be context.Context - - method must have return types (rpc.Subscription, error) + - method must be exported + - first method argument type must be context.Context + - method must have return types (rpc.Subscription, error) An example method: - func (s *BlockChainService) NewBlocks(ctx context.Context) (rpc.Subscription, error) { - ... - } + func (s *BlockChainService) NewBlocks(ctx context.Context) (rpc.Subscription, error) { + ... + } When the service containing the subscription method is registered to the server, for example under the "blockchain" namespace, a subscription is created by calling the @@ -111,7 +110,7 @@ the client and server. The server will close the connection for any write error. 
For more information about subscriptions, see https://github.com/ethereum/go-ethereum/wiki/RPC-PUB-SUB. -Reverse Calls +# Reverse Calls In any method handler, an instance of rpc.Client can be accessed through the ClientFromContext method. Using this client instance, server-to-client method calls can be diff --git a/coreth/rpc/errors.go b/coreth/rpc/errors.go index a43fa9bd..7db25ffa 100644 --- a/coreth/rpc/errors.go +++ b/coreth/rpc/errors.go @@ -64,9 +64,15 @@ var ( _ Error = new(invalidRequestError) _ Error = new(invalidMessageError) _ Error = new(invalidParamsError) + _ Error = new(internalServerError) ) -const defaultErrorCode = -32000 +const ( + errcodeDefault = -32000 + errcodeNotificationsUnsupported = -32001 + errcodePanic = -32603 + errcodeMarshalError = -32603 +) type methodNotFoundError struct{ method string } @@ -111,3 +117,13 @@ type invalidParamsError struct{ message string } func (e *invalidParamsError) ErrorCode() int { return -32602 } func (e *invalidParamsError) Error() string { return e.message } + +// internalServerError is used for server errors during request processing. 
+type internalServerError struct { + code int + message string +} + +func (e *internalServerError) ErrorCode() int { return e.code } + +func (e *internalServerError) Error() string { return e.message } diff --git a/coreth/rpc/handler.go b/coreth/rpc/handler.go index d45d4765..e43e6028 100644 --- a/coreth/rpc/handler.go +++ b/coreth/rpc/handler.go @@ -29,6 +29,7 @@ package rpc import ( "context" "encoding/json" + "os" "reflect" "strconv" "strings" @@ -40,27 +41,62 @@ import ( "golang.org/x/time/rate" ) +const ( + errcodeTimeout = -32002 + errcodeResponseTooLarge = -32003 +) + +const ( + errMsgTimeout = "request timed out" + errMsgResponseTooLarge = "response too large" + errMsgBatchTooLarge = "batch too large" +) + +var ( + batchRequestLimit = 0 // limit on total number of requests in a batch + batchResponseMaxSize = 0 // limit on the size of a batch response +) + +func init() { + // Read batchRequestLimit and batchResponseMaxSize from environment variables + // RPC_BATCH_REQUEST_LIMIT and RPC_BATCH_RESPONSE_MAX_SIZE. + // If their values are invalid integers, panic. + if batchRequestLimitStr := os.Getenv("RPC_BATCH_REQUEST_LIMIT"); batchRequestLimitStr != "" { + var err error + batchRequestLimit, err = strconv.Atoi(batchRequestLimitStr) + if err != nil || batchRequestLimit < 0 { + panic("RPC_BATCH_REQUEST_LIMIT must be a non-negative integer") + } + } + if batchResponseMaxSizeStr := os.Getenv("RPC_BATCH_RESPONSE_MAX_SIZE"); batchResponseMaxSizeStr != "" { + var err error + batchResponseMaxSize, err = strconv.Atoi(batchResponseMaxSizeStr) + if err != nil || batchResponseMaxSize < 0 { + panic("RPC_BATCH_RESPONSE_MAX_SIZE must be a non-negative integer") + } + } +} + // handler handles JSON-RPC messages. There is one handler per connection. Note that // handler is not safe for concurrent use. Message handling never blocks indefinitely // because RPCs are processed on background goroutines launched by handler. 
// // The entry points for incoming messages are: // -// h.handleMsg(message) -// h.handleBatch(message) +// h.handleMsg(message) +// h.handleBatch(message) // // Outgoing calls use the requestOp struct. Register the request before sending it // on the connection: // -// op := &requestOp{ids: ...} -// h.addRequestOp(op) +// op := &requestOp{ids: ...} +// h.addRequestOp(op) // // Now send the request, then wait for the reply to be delivered through handleMsg: // -// if err := op.wait(...); err != nil { -// h.removeRequestOp(op) // timeout, etc. -// } -// +// if err := op.wait(...); err != nil { +// h.removeRequestOp(op) // timeout, etc. +// } type handler struct { reg *serviceRegistry unsubscribeCb *callback @@ -109,6 +145,75 @@ func newHandler(connCtx context.Context, conn jsonWriter, idgen func() ID, reg * return h } +// batchCallBuffer manages in progress call messages and their responses during a batch +// call. Calls need to be synchronized between the processing and timeout-triggering +// goroutines. +type batchCallBuffer struct { + mutex sync.Mutex + calls []*jsonrpcMessage + resp []*jsonrpcMessage + wrote bool +} + +// nextCall returns the next unprocessed message. +func (b *batchCallBuffer) nextCall() *jsonrpcMessage { + b.mutex.Lock() + defer b.mutex.Unlock() + + if len(b.calls) == 0 { + return nil + } + // The popping happens in `pushResponse`. The in progress call is kept + // so we can return an error for it in case of timeout. + msg := b.calls[0] + return msg +} + +// pushResponse adds the response to the last call returned by nextCall. +func (b *batchCallBuffer) pushResponse(answer *jsonrpcMessage) { + b.mutex.Lock() + defer b.mutex.Unlock() + + if answer != nil { + b.resp = append(b.resp, answer) + } + b.calls = b.calls[1:] +} + +// write sends the responses. +func (b *batchCallBuffer) write(ctx context.Context, conn jsonWriter) { + b.mutex.Lock() + defer b.mutex.Unlock() + + b.doWrite(ctx, conn) +} + +// respondWithError sends the responses added so far.
For the remaining unanswered call +// messages, it responds with the given error. +func (b *batchCallBuffer) respondWithError(ctx context.Context, conn jsonWriter, err error) { + b.mutex.Lock() + defer b.mutex.Unlock() + + for _, msg := range b.calls { + if !msg.isNotification() { + b.resp = append(b.resp, msg.errorResponse(err)) + } + } + b.doWrite(ctx, conn) +} + +// doWrite actually writes the response. +// This assumes b.mutex is held. +func (b *batchCallBuffer) doWrite(ctx context.Context, conn jsonWriter) { + if b.wrote { + return + } + b.wrote = true // can only write once + if len(b.resp) > 0 { + conn.writeJSONSkipDeadline(ctx, b.resp, true) + } +} + // addLimiter adds a rate limiter to the handler that will allow at most // [refillRate] cpu to be used per second. At most [maxStored] cpu time will be // stored for this limiter. @@ -130,6 +235,13 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { }) return } + // Apply limit on total number of requests. + if batchRequestLimit != 0 && len(msgs) > batchRequestLimit { + h.startCallProc(func(cp *callProc) { + h.respondWithBatchTooLarge(cp, msgs) + }) + return + } // Handle non-call messages first: calls := make([]*jsonrpcMessage, 0, len(msgs)) @@ -141,24 +253,76 @@ func (h *handler) handleBatch(msgs []*jsonrpcMessage) { if len(calls) == 0 { return } + // Process calls on a goroutine because they may block indefinitely: h.startCallProc(func(cp *callProc) { - answers := make([]*jsonrpcMessage, 0, len(msgs)) - for _, msg := range calls { - if answer := h.handleCallMsg(cp, msg); answer != nil { - answers = append(answers, answer) + var ( + timer *time.Timer + cancel context.CancelFunc + callBuffer = &batchCallBuffer{calls: calls, resp: make([]*jsonrpcMessage, 0, len(calls))} + ) + + cp.ctx, cancel = context.WithCancel(cp.ctx) + defer cancel() + + // Cancel the request context after timeout and send an error response. 
Since the + // currently-running method might not return immediately on timeout, we must wait + // for the timeout concurrently with processing the request. + if timeout, ok := ContextRequestTimeout(cp.ctx); ok { + timer = time.AfterFunc(timeout, func() { + cancel() + err := &internalServerError{errcodeTimeout, errMsgTimeout} + callBuffer.respondWithError(cp.ctx, h.conn, err) + }) + } + + responseBytes := 0 + for { + // No need to handle rest of calls if timed out. + if cp.ctx.Err() != nil { + break + } + msg := callBuffer.nextCall() + if msg == nil { + break + } + resp := h.handleCallMsg(cp, msg) + callBuffer.pushResponse(resp) + if resp != nil && batchResponseMaxSize != 0 { + responseBytes += len(resp.Result) + if responseBytes > batchResponseMaxSize { + err := &internalServerError{errcodeResponseTooLarge, errMsgResponseTooLarge} + callBuffer.respondWithError(cp.ctx, h.conn, err) + break + } } } - h.addSubscriptions(cp.notifiers) - if len(answers) > 0 { - h.conn.writeJSONSkipDeadline(cp.ctx, answers, h.deadlineContext > 0) + if timer != nil { + timer.Stop() } + + h.addSubscriptions(cp.notifiers) + callBuffer.write(cp.ctx, h.conn) for _, n := range cp.notifiers { n.activate() } }) } +func (h *handler) respondWithBatchTooLarge(cp *callProc, batch []*jsonrpcMessage) { + resp := errorMessage(&invalidRequestError{errMsgBatchTooLarge}) + // Find the first call and add its "id" field to the error. + // This is the best we can do, given that the protocol doesn't have a way + // of reporting an error for the entire batch. + for _, msg := range batch { + if msg.isCall() { + resp.ID = msg.ID + break + } + } + h.conn.writeJSONSkipDeadline(cp.ctx, []*jsonrpcMessage{resp}, h.deadlineContext > 0) +} + // handleMsg handles a single message. 
func (h *handler) handleMsg(msg *jsonrpcMessage) { if ok := h.handleImmediate(msg); ok { @@ -454,7 +618,10 @@ func (h *handler) handleCall(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage // handleSubscribe processes *_subscribe method calls. func (h *handler) handleSubscribe(cp *callProc, msg *jsonrpcMessage) *jsonrpcMessage { if !h.allowSubscribe { - return msg.errorResponse(ErrNotificationsUnsupported) + return msg.errorResponse(&internalServerError{ + code: errcodeNotificationsUnsupported, + message: ErrNotificationsUnsupported.Error(), + }) } // Subscription method name is first argument. diff --git a/coreth/rpc/http.go b/coreth/rpc/http.go index 528fd8be..a146f1b7 100644 --- a/coreth/rpc/http.go +++ b/coreth/rpc/http.go @@ -33,6 +33,7 @@ import ( "errors" "fmt" "io" + "math" "mime" "net/http" "net/url" @@ -55,6 +56,7 @@ type httpConn struct { closeCh chan interface{} mu sync.Mutex // protects headers headers http.Header + auth HTTPAuth } // httpConn implements ServerCodec, but it is treated specially by Client @@ -131,8 +133,15 @@ var DefaultHTTPTimeouts = HTTPTimeouts{ IdleTimeout: 120 * time.Second, } +// DialHTTP creates a new RPC client that connects to an RPC server over HTTP. +func DialHTTP(endpoint string) (*Client, error) { + return DialHTTPWithClient(endpoint, new(http.Client)) +} + // DialHTTPWithClient creates a new RPC client that connects to an RPC server over HTTP // using the provided HTTP Client. +// +// Deprecated: use DialOptions and the WithHTTPClient option. func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) { // Sanity check URL so we don't end up with a client that will fail every request. 
_, err := url.Parse(endpoint) @@ -140,24 +149,35 @@ func DialHTTPWithClient(endpoint string, client *http.Client) (*Client, error) { return nil, err } - initctx := context.Background() - headers := make(http.Header, 2) + var cfg clientConfig + fn := newClientTransportHTTP(endpoint, &cfg) + return newClient(context.Background(), fn) +} + +func newClientTransportHTTP(endpoint string, cfg *clientConfig) reconnectFunc { + headers := make(http.Header, 2+len(cfg.httpHeaders)) headers.Set("accept", contentType) headers.Set("content-type", contentType) - return newClient(initctx, func(context.Context) (ServerCodec, error) { - hc := &httpConn{ - client: client, - headers: headers, - url: endpoint, - closeCh: make(chan interface{}), - } - return hc, nil - }) -} + for key, values := range cfg.httpHeaders { + headers[key] = values + } -// DialHTTP creates a new RPC client that connects to an RPC server over HTTP. -func DialHTTP(endpoint string) (*Client, error) { - return DialHTTPWithClient(endpoint, new(http.Client)) + client := cfg.httpClient + if client == nil { + client = new(http.Client) + } + + hc := &httpConn{ + client: client, + headers: headers, + url: endpoint, + auth: cfg.httpAuth, + closeCh: make(chan interface{}), + } + + return func(ctx context.Context) (ServerCodec, error) { + return hc, nil + } } func (c *Client) sendHTTP(ctx context.Context, op *requestOp, msg interface{}) error { @@ -187,6 +207,9 @@ func (c *Client) sendBatchHTTP(ctx context.Context, op *requestOp, msgs []*jsonr if err := json.NewDecoder(respBody).Decode(&respmsgs); err != nil { return err } + if len(respmsgs) != len(msgs) { + return fmt.Errorf("batch has %d requests but response has %d: %w", len(msgs), len(respmsgs), ErrBadResult) + } for i := 0; i < len(respmsgs); i++ { op.resp <- &respmsgs[i] } @@ -209,6 +232,11 @@ func (hc *httpConn) doRequest(ctx context.Context, msg interface{}) (io.ReadClos hc.mu.Lock() req.Header = hc.headers.Clone() hc.mu.Unlock() + if hc.auth != nil { + if err := 
hc.auth(req.Header); err != nil { + return nil, err + } + } // do request resp, err := hc.client.Do(req) @@ -311,3 +339,35 @@ func validateRequest(r *http.Request) (int, error) { err := fmt.Errorf("invalid content type, only %s is supported", contentType) return http.StatusUnsupportedMediaType, err } + +// ContextRequestTimeout returns the request timeout derived from the given context. +func ContextRequestTimeout(ctx context.Context) (time.Duration, bool) { + timeout := time.Duration(math.MaxInt64) + hasTimeout := false + setTimeout := func(d time.Duration) { + if d < timeout { + timeout = d + hasTimeout = true + } + } + + if deadline, ok := ctx.Deadline(); ok { + setTimeout(time.Until(deadline)) + } + + // If the context is an HTTP request context, use the server's WriteTimeout. + httpSrv, ok := ctx.Value(http.ServerContextKey).(*http.Server) + if ok && httpSrv.WriteTimeout > 0 { + wt := httpSrv.WriteTimeout + // When a write timeout is configured, we need to send the response message before + // the HTTP server cuts connection. So our internal timeout must be earlier than + // the server's true timeout. + // + // Note: Timeouts are sanitized to be a minimum of 1 second. 
+ // Also see issue: https://github.com/golang/go/issues/47229 + wt -= 100 * time.Millisecond + setTimeout(wt) + } + + return timeout, hasTimeout +} diff --git a/coreth/rpc/json.go b/coreth/rpc/json.go index 7e1c6a7f..84b9fede 100644 --- a/coreth/rpc/json.go +++ b/coreth/rpc/json.go @@ -68,21 +68,25 @@ type jsonrpcMessage struct { } func (msg *jsonrpcMessage) isNotification() bool { - return msg.ID == nil && msg.Method != "" + return msg.hasValidVersion() && msg.ID == nil && msg.Method != "" } func (msg *jsonrpcMessage) isCall() bool { - return msg.hasValidID() && msg.Method != "" + return msg.hasValidVersion() && msg.hasValidID() && msg.Method != "" } func (msg *jsonrpcMessage) isResponse() bool { - return msg.hasValidID() && msg.Method == "" && msg.Params == nil && (msg.Result != nil || msg.Error != nil) + return msg.hasValidVersion() && msg.hasValidID() && msg.Method == "" && msg.Params == nil && (msg.Result != nil || msg.Error != nil) } func (msg *jsonrpcMessage) hasValidID() bool { return len(msg.ID) > 0 && msg.ID[0] != '{' && msg.ID[0] != '[' } +func (msg *jsonrpcMessage) hasValidVersion() bool { + return msg.Version == vsn +} + func (msg *jsonrpcMessage) isSubscribe() bool { return strings.HasSuffix(msg.Method, subscribeMethodSuffix) } @@ -110,15 +114,14 @@ func (msg *jsonrpcMessage) errorResponse(err error) *jsonrpcMessage { func (msg *jsonrpcMessage) response(result interface{}) *jsonrpcMessage { enc, err := json.Marshal(result) if err != nil { - // TODO: wrap with 'internal server error' - return msg.errorResponse(err) + return msg.errorResponse(&internalServerError{errcodeMarshalError, err.Error()}) } return &jsonrpcMessage{Version: vsn, ID: msg.ID, Result: enc} } func errorMessage(err error) *jsonrpcMessage { msg := &jsonrpcMessage{Version: vsn, ID: null, Error: &jsonError{ - Code: defaultErrorCode, + Code: errcodeDefault, Message: err.Error(), }} ec, ok := err.(Error) diff --git a/coreth/rpc/server_test.go b/coreth/rpc/server_test.go index 
a7da64e5..ac70eb1c 100644 --- a/coreth/rpc/server_test.go +++ b/coreth/rpc/server_test.go @@ -54,7 +54,7 @@ func TestServerRegisterName(t *testing.T) { t.Fatalf("Expected service calc to be registered") } - wantCallbacks := 10 + wantCallbacks := 12 if len(svc.callbacks) != wantCallbacks { t.Errorf("Expected %d callbacks for service 'service', got %d", wantCallbacks, len(svc.callbacks)) } diff --git a/coreth/rpc/service.go b/coreth/rpc/service.go index 86ec93fd..cbdb8e30 100644 --- a/coreth/rpc/service.go +++ b/coreth/rpc/service.go @@ -28,7 +28,6 @@ package rpc import ( "context" - "errors" "fmt" "reflect" "runtime" @@ -209,7 +208,7 @@ func (c *callback) call(ctx context.Context, method string, args []reflect.Value buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] log.Error("RPC method " + method + " crashed: " + fmt.Sprintf("%v\n%s", err, buf)) - errRes = errors.New("method handler crashed") + errRes = &internalServerError{errcodePanic, "method handler crashed"} } }() // Run the callback. diff --git a/coreth/rpc/subscription_test.go b/coreth/rpc/subscription_test.go index 72caeb85..d09815a6 100644 --- a/coreth/rpc/subscription_test.go +++ b/coreth/rpc/subscription_test.go @@ -89,7 +89,7 @@ func TestSubscriptions(t *testing.T) { request := map[string]interface{}{ "id": i, "method": fmt.Sprintf("%s_subscribe", namespace), - "version": "2.0", + "jsonrpc": "2.0", "params": []interface{}{"someSubscription", notificationCount, i}, } if err := out.Encode(&request); err != nil { diff --git a/coreth/rpc/testdata/internal-error.js b/coreth/rpc/testdata/internal-error.js new file mode 100644 index 00000000..2ba38740 --- /dev/null +++ b/coreth/rpc/testdata/internal-error.js @@ -0,0 +1,7 @@ +// These tests trigger various 'internal error' conditions. 
+ +--> {"jsonrpc":"2.0","id":1,"method":"test_marshalError","params": []} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32603,"message":"json: error calling MarshalText for type *rpc.MarshalErrObj: marshal error"}} + +--> {"jsonrpc":"2.0","id":2,"method":"test_panic","params": []} +<-- {"jsonrpc":"2.0","id":2,"error":{"code":-32603,"message":"method handler crashed"}} diff --git a/coreth/rpc/testdata/invalid-badversion.js b/coreth/rpc/testdata/invalid-badversion.js new file mode 100644 index 00000000..75b5291d --- /dev/null +++ b/coreth/rpc/testdata/invalid-badversion.js @@ -0,0 +1,19 @@ +// This test checks processing of messages with invalid Version. + +--> {"jsonrpc":"2.0","id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"result":{"String":"x","Int":3,"Args":null}} + +--> {"jsonrpc":"2.1","id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} + +--> {"jsonrpc":"go-ethereum","id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} + +--> {"jsonrpc":1,"id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} + +--> {"jsonrpc":2.0,"id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} + +--> {"id":1,"method":"test_echo","params":["x", 3]} +<-- {"jsonrpc":"2.0","id":1,"error":{"code":-32600,"message":"invalid request"}} diff --git a/coreth/rpc/testservice_test.go b/coreth/rpc/testservice_test.go index e3c8d74c..c8d0d36e 100644 --- a/coreth/rpc/testservice_test.go +++ b/coreth/rpc/testservice_test.go @@ -80,6 +80,12 @@ func (testError) Error() string { return "testError" } func (testError) ErrorCode() int { return 444 } func (testError) ErrorData() interface{} { return "testError data" } +type MarshalErrObj struct{} + +func (o *MarshalErrObj) 
MarshalText() ([]byte, error) { + return nil, errors.New("marshal error") +} + func (s *testService) NoArgsRets() {} func (s *testService) Echo(str string, i int, args *echoArgs) echoResult { @@ -124,6 +130,14 @@ func (s *testService) ReturnError() error { return testError{} } +func (s *testService) MarshalError() *MarshalErrObj { + return &MarshalErrObj{} +} + +func (s *testService) Panic() string { + panic("service panic") +} + func (s *testService) CallMeBack(ctx context.Context, method string, args []interface{}) (interface{}, error) { c, ok := ClientFromContext(ctx) if !ok { diff --git a/coreth/rpc/websocket.go b/coreth/rpc/websocket.go index 9e75a0b4..a2031315 100644 --- a/coreth/rpc/websocket.go +++ b/coreth/rpc/websocket.go @@ -195,24 +195,23 @@ func parseOriginURL(origin string) (string, string, string, error) { return scheme, hostname, port, nil } -// DialWebsocketWithDialer creates a new RPC client that communicates with a JSON-RPC server -// that is listening on the given endpoint using the provided dialer. +// DialWebsocketWithDialer creates a new RPC client using WebSocket. +// +// The context is used for the initial connection establishment. It does not +// affect subsequent interactions with the client. +// +// Deprecated: use DialOptions and the WithWebsocketDialer option. 
func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, dialer websocket.Dialer) (*Client, error) { - endpoint, header, err := wsClientHeaders(endpoint, origin) + cfg := new(clientConfig) + cfg.wsDialer = &dialer + if origin != "" { + cfg.setHeader("origin", origin) + } + connect, err := newClientTransportWS(endpoint, cfg) if err != nil { return nil, err } - return newClient(ctx, func(ctx context.Context) (ServerCodec, error) { - conn, resp, err := dialer.DialContext(ctx, endpoint, header) - if err != nil { - hErr := wsHandshakeError{err: err} - if resp != nil { - hErr.status = resp.Status - } - return nil, hErr - } - return newWebsocketCodec(conn, endpoint, header), nil - }) + return newClient(ctx, connect) } // DialWebsocket creates a new RPC client that communicates with a JSON-RPC server @@ -221,12 +220,53 @@ func DialWebsocketWithDialer(ctx context.Context, endpoint, origin string, diale // The context is used for the initial connection establishment. It does not // affect subsequent interactions with the client. 
func DialWebsocket(ctx context.Context, endpoint, origin string) (*Client, error) { - dialer := websocket.Dialer{ - ReadBufferSize: wsReadBuffer, - WriteBufferSize: wsWriteBuffer, - WriteBufferPool: wsBufferPool, + cfg := new(clientConfig) + if origin != "" { + cfg.setHeader("origin", origin) + } + connect, err := newClientTransportWS(endpoint, cfg) + if err != nil { + return nil, err + } + return newClient(ctx, connect) +} + +func newClientTransportWS(endpoint string, cfg *clientConfig) (reconnectFunc, error) { + dialer := cfg.wsDialer + if dialer == nil { + dialer = &websocket.Dialer{ + ReadBufferSize: wsReadBuffer, + WriteBufferSize: wsWriteBuffer, + WriteBufferPool: wsBufferPool, + } + } + + dialURL, header, err := wsClientHeaders(endpoint, "") + if err != nil { + return nil, err + } + for key, values := range cfg.httpHeaders { + header[key] = values + } + + connect := func(ctx context.Context) (ServerCodec, error) { + header := header.Clone() + if cfg.httpAuth != nil { + if err := cfg.httpAuth(header); err != nil { + return nil, err + } + } + conn, resp, err := dialer.DialContext(ctx, dialURL, header) + if err != nil { + hErr := wsHandshakeError{err: err} + if resp != nil { + hErr.status = resp.Status + } + return nil, hErr + } + return newWebsocketCodec(conn, dialURL, header), nil } - return DialWebsocketWithDialer(ctx, endpoint, origin, dialer) + return connect, nil } func wsClientHeaders(endpoint, origin string) (string, http.Header, error) { diff --git a/coreth/rpc/websocket_test.go b/coreth/rpc/websocket_test.go index 55372a28..31e9847a 100644 --- a/coreth/rpc/websocket_test.go +++ b/coreth/rpc/websocket_test.go @@ -35,6 +35,7 @@ import ( "net/http/httptest" "net/http/httputil" "net/url" + "os" "strings" "sync/atomic" "testing" @@ -240,7 +241,9 @@ func TestClientWebsocketLargeMessage(t *testing.T) { } func TestClientWebsocketSevered(t *testing.T) { - t.Skip("FLAKY") + if os.Getenv("RUN_FLAKY_TESTS") != "true" { + t.Skip("FLAKY") + } t.Parallel() var ( 
diff --git a/coreth/scripts/build.sh b/coreth/scripts/build.sh index 4c51c62e..3a3c8279 100755 --- a/coreth/scripts/build.sh +++ b/coreth/scripts/build.sh @@ -27,7 +27,7 @@ if version_lt "$(go_version)" "$go_version_minimum"; then exit 1 fi -# Avalanche root directory +# Coreth root directory CORETH_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Load the versions diff --git a/coreth/scripts/build_test.sh b/coreth/scripts/build_test.sh index ff6f797b..848d538a 100755 --- a/coreth/scripts/build_test.sh +++ b/coreth/scripts/build_test.sh @@ -6,6 +6,15 @@ set -o pipefail export GOGC=25 +# Coreth root directory +CORETH_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) + +# Load the versions +source "$CORETH_PATH"/scripts/versions.sh + +# Load the constants +source "$CORETH_PATH"/scripts/constants.sh + # We pass in the arguments to this script directly to enable easily passing parameters such as enabling race detection, # parallelism, and test coverage. go test -coverprofile=coverage.out -covermode=atomic -timeout="30m" ./... $@ diff --git a/coreth/scripts/constants.sh b/coreth/scripts/constants.sh index 262f1a48..7eb87f37 100644 --- a/coreth/scripts/constants.sh +++ b/coreth/scripts/constants.sh @@ -21,3 +21,9 @@ coreth_commit="$(git --git-dir="$CORETH_PATH/.git" rev-parse HEAD)" coreth_commit_id="${coreth_commit::8}" build_image_id=${BUILD_IMAGE_ID:-"$avalanche_version-$coreth_commit_id"} + +# Set the CGO flags to use the portable version of BLST +# +# We use "export" here instead of just setting a bash variable because we need +# to pass this flag to all child processes spawned by the shell.
+export CGO_CFLAGS="-O -D__BLST_PORTABLE__" diff --git a/coreth/scripts/versions.sh b/coreth/scripts/versions.sh index c7d6945d..f2a7c6cf 100644 --- a/coreth/scripts/versions.sh +++ b/coreth/scripts/versions.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash # Set up the versions to be used -coreth_version=${CORETH_VERSION:-'v0.11.0'} +coreth_version=${CORETH_VERSION:-'v0.12.0'} # Don't export them as they're used in the context of other calls -avalanche_version=${AVALANCHE_VERSION:-'v1.8.5'} +avalanche_version=${AVALANCHE_VERSION:-'v1.9.16'} diff --git a/coreth/signer/core/apitypes/types.go b/coreth/signer/core/apitypes/types.go index cab7f9cf..2d5cb7f7 100644 --- a/coreth/signer/core/apitypes/types.go +++ b/coreth/signer/core/apitypes/types.go @@ -61,7 +61,7 @@ func (vs *ValidationMessages) Info(msg string) { vs.Messages = append(vs.Messages, ValidationInfo{INFO, msg}) } -/// getWarnings returns an error with all messages of type WARN of above, or nil if no warnings were present +// getWarnings returns an error with all messages of type WARN of above, or nil if no warnings were present func (v *ValidationMessages) GetWarnings() error { var messages []string for _, msg := range v.Messages { diff --git a/coreth/sync/client/client.go b/coreth/sync/client/client.go index 6202fbde..d77913d0 100644 --- a/coreth/sync/client/client.go +++ b/coreth/sync/client/client.go @@ -337,14 +337,14 @@ func (c *client) get(ctx context.Context, request message.Request, parseFn parse if c.isSongbirdCode { minVersion = StateSyncVersionSgb } - response, nodeID, err = c.networkClient.RequestAny(minVersion, requestBytes) + response, nodeID, err = c.networkClient.SendAppRequestAny(minVersion, requestBytes) } else { // get the next nodeID using the nodeIdx offset. If we're out of nodes, loop back to 0 // we do this every attempt to ensure we get a different node each time if possible. 
nodeIdx := atomic.AddUint32(&c.stateSyncNodeIdx, 1) nodeID = c.stateSyncNodes[nodeIdx%uint32(len(c.stateSyncNodes))] - response, err = c.networkClient.Request(nodeID, requestBytes) + response, err = c.networkClient.SendAppRequest(nodeID, requestBytes) } metric.UpdateRequestLatency(time.Since(start)) diff --git a/coreth/sync/client/leaf_syncer.go b/coreth/sync/client/leaf_syncer.go index cc2dd363..754c559e 100644 --- a/coreth/sync/client/leaf_syncer.go +++ b/coreth/sync/client/leaf_syncer.go @@ -9,11 +9,12 @@ import ( "errors" "fmt" - "github.com/ava-labs/coreth/plugin/evm/message" - "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "golang.org/x/sync/errgroup" + + "github.com/ava-labs/coreth/plugin/evm/message" + "github.com/ava-labs/coreth/utils" ) var ( @@ -28,14 +29,14 @@ const defaultLeafRequestLimit = 1024 // the same value for Root, Account, Start, and NodeType throughout the sync. // The value returned by End can change between calls to OnLeafs. 
type LeafSyncTask interface { - Root() common.Hash // Root of the trie to sync - Account() common.Hash // Account hash of the trie to sync (only applicable to storage tries) - Start() []byte // Starting key to request new leaves - End() []byte // End key to request new leaves - NodeType() message.NodeType // Specifies the message type (atomic/state trie) for the leaf syncer to send - OnStart() (bool, error) // Callback when tasks begins, returns true if work can be skipped - OnLeafs(keys, vals [][]byte) error // Callback when new leaves are received from the network - OnFinish() error // Callback when there are no more leaves in the trie to sync or when we reach End() + Root() common.Hash // Root of the trie to sync + Account() common.Hash // Account hash of the trie to sync (only applicable to storage tries) + Start() []byte // Starting key to request new leaves + End() []byte // End key to request new leaves + NodeType() message.NodeType // Specifies the message type (atomic/state trie) for the leaf syncer to send + OnStart() (bool, error) // Callback when tasks begins, returns true if work can be skipped + OnLeafs(keys, vals [][]byte) error // Callback when new leaves are received from the network + OnFinish(ctx context.Context) error // Callback when there are no more leaves in the trie to sync or when we reach End() } type CallbackLeafSyncer struct { @@ -133,7 +134,7 @@ func (c *CallbackLeafSyncer) syncTask(ctx context.Context, task LeafSyncTask) er // If we have completed syncing this task, invoke [OnFinish] and mark the task // as complete. 
if done || !leafsResponse.More { - return task.OnFinish() + return task.OnFinish(ctx) } if len(leafsResponse.Keys) == 0 { diff --git a/coreth/sync/client/mock_network.go b/coreth/sync/client/mock_network.go index 19ab6c80..43152cd4 100644 --- a/coreth/sync/client/mock_network.go +++ b/coreth/sync/client/mock_network.go @@ -28,7 +28,7 @@ type mockNetwork struct { nodesRequested []ids.NodeID } -func (t *mockNetwork) RequestAny(minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) { +func (t *mockNetwork) SendAppRequestAny(minVersion *version.Application, request []byte) ([]byte, ids.NodeID, error) { if len(t.response) == 0 { return nil, ids.EmptyNodeID, errors.New("no mocked response to return in mockNetwork") } @@ -39,7 +39,7 @@ func (t *mockNetwork) RequestAny(minVersion *version.Application, request []byte return response, ids.EmptyNodeID, err } -func (t *mockNetwork) Request(nodeID ids.NodeID, request []byte) ([]byte, error) { +func (t *mockNetwork) SendAppRequest(nodeID ids.NodeID, request []byte) ([]byte, error) { if len(t.response) == 0 { return nil, errors.New("no mocked response to return in mockNetwork") } @@ -77,6 +77,10 @@ func (t *mockNetwork) Gossip([]byte) error { panic("not implemented") // we don't care about this function for this test } +func (t *mockNetwork) SendCrossChainRequest(chainID ids.ID, request []byte) ([]byte, error) { + panic("not implemented") // we don't care about this function for this test +} + func (t *mockNetwork) mockResponse(times uint8, callback func(), response []byte) { t.response = make([][]byte, times) for i := uint8(0); i < times; i++ { diff --git a/coreth/sync/handlers/block_request.go b/coreth/sync/handlers/block_request.go index db99d9b8..a631c61c 100644 --- a/coreth/sync/handlers/block_request.go +++ b/coreth/sync/handlers/block_request.go @@ -11,7 +11,6 @@ import ( "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/coreth/peer" 
"github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ethereum/go-ethereum/common" @@ -26,7 +25,6 @@ const parentLimit = uint16(64) // serving requested blocks starting at specified hash type BlockRequestHandler struct { stats stats.BlockRequestHandlerStats - network peer.Network blockProvider BlockProvider codec codec.Manager } diff --git a/coreth/sync/handlers/handler.go b/coreth/sync/handlers/handler.go index bc872e1c..6254801c 100644 --- a/coreth/sync/handlers/handler.go +++ b/coreth/sync/handlers/handler.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/plugin/evm/message" "github.com/ava-labs/coreth/sync/handlers/stats" "github.com/ava-labs/coreth/trie" @@ -41,6 +42,7 @@ type syncHandler struct { // NewSyncHandler constructs the handler for serving state sync. 
func NewSyncHandler( provider SyncDataProvider, + diskDB ethdb.KeyValueReader, evmTrieDB *trie.Database, atomicTrieDB *trie.Database, networkCodec codec.Manager, @@ -50,7 +52,7 @@ func NewSyncHandler( stateTrieLeafsRequestHandler: NewLeafsRequestHandler(evmTrieDB, provider, networkCodec, stats), atomicTrieLeafsRequestHandler: NewLeafsRequestHandler(atomicTrieDB, nil, networkCodec, stats), blockRequestHandler: NewBlockRequestHandler(provider, networkCodec, stats), - codeRequestHandler: NewCodeRequestHandler(evmTrieDB.DiskDB(), networkCodec, stats), + codeRequestHandler: NewCodeRequestHandler(diskDB, networkCodec, stats), } } diff --git a/coreth/sync/statesync/code_syncer.go b/coreth/sync/statesync/code_syncer.go index 7631ac97..28f47044 100644 --- a/coreth/sync/statesync/code_syncer.go +++ b/coreth/sync/statesync/code_syncer.go @@ -10,6 +10,7 @@ import ( "sync" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/params" @@ -45,7 +46,7 @@ type codeSyncer struct { CodeSyncerConfig - outstandingCodeHashes ids.Set // Set of code hashes that we need to fetch from the network. + outstandingCodeHashes set.Set[ids.ID] // Set of code hashes that we need to fetch from the network. codeHashes chan common.Hash // Channel of incoming code hash requests // Used to set terminal error or pass nil to [errChan] if successful. 
@@ -62,7 +63,7 @@ func newCodeSyncer(config CodeSyncerConfig) *codeSyncer { return &codeSyncer{ CodeSyncerConfig: config, codeHashes: make(chan common.Hash, config.MaxOutstandingCodeHashes), - outstandingCodeHashes: ids.NewSet(0), + outstandingCodeHashes: set.NewSet[ids.ID](0), errChan: make(chan error, 1), } } diff --git a/coreth/sync/statesync/sync_test.go b/coreth/sync/statesync/sync_test.go index 13d58c0c..5f6942ec 100644 --- a/coreth/sync/statesync/sync_test.go +++ b/coreth/sync/statesync/sync_test.go @@ -35,7 +35,7 @@ var errInterrupted = errors.New("interrupted sync") type syncTest struct { ctx context.Context - prepareForTest func(t *testing.T) (clientDB ethdb.Database, serverTrieDB *trie.Database, syncRoot common.Hash) + prepareForTest func(t *testing.T) (clientDB ethdb.Database, serverDB ethdb.Database, serverTrieDB *trie.Database, syncRoot common.Hash) expectedError error GetLeafsIntercept func(message.LeafsRequest, message.LeafsResponse) (message.LeafsResponse, error) GetCodeIntercept func([]common.Hash, [][]byte) ([][]byte, error) @@ -47,9 +47,9 @@ func testSync(t *testing.T, test syncTest) { if test.ctx != nil { ctx = test.ctx } - clientDB, serverTrieDB, root := test.prepareForTest(t) + clientDB, serverDB, serverTrieDB, root := test.prepareForTest(t) leafsRequestHandler := handlers.NewLeafsRequestHandler(serverTrieDB, nil, message.Codec, handlerstats.NewNoopHandlerStats()) - codeRequestHandler := handlers.NewCodeRequestHandler(serverTrieDB.DiskDB(), message.Codec, handlerstats.NewNoopHandlerStats()) + codeRequestHandler := handlers.NewCodeRequestHandler(serverDB, message.Codec, handlerstats.NewNoopHandlerStats()) mockClient := statesyncclient.NewMockClient(message.Codec, leafsRequestHandler, codeRequestHandler, nil) // Set intercept functions for the mock client mockClient.GetLeafsIntercept = test.GetLeafsIntercept @@ -73,7 +73,7 @@ func testSync(t *testing.T, test syncTest) { return } - assertDBConsistency(t, root, serverTrieDB, 
trie.NewDatabase(clientDB)) + assertDBConsistency(t, root, clientDB, serverTrieDB, trie.NewDatabase(clientDB)) } // testSyncResumes tests a series of syncTests work as expected, invoking a callback function after each @@ -117,15 +117,17 @@ func TestSimpleSyncCases(t *testing.T) { ) tests := map[string]syncTest{ "accounts": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, nil) - return memorydb.New(), serverTrieDB, root + return memorydb.New(), serverDB, serverTrieDB, root }, }, "accounts with code": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { if index%3 == 0 { codeBytes := make([]byte, 256) @@ -135,24 +137,26 @@ func TestSimpleSyncCases(t *testing.T) { } codeHash := crypto.Keccak256Hash(codeBytes) - rawdb.WriteCode(serverTrieDB.DiskDB(), codeHash, codeBytes) + rawdb.WriteCode(serverDB, codeHash, codeBytes) account.CodeHash = codeHash[:] } return account }) - return memorydb.New(), serverTrieDB, root + return memorydb.New(), serverDB, serverTrieDB, root }, }, "accounts with code and storage": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) - root := fillAccountsWithStorage(t, serverTrieDB, common.Hash{}, numAccounts) - return 
memorydb.New(), serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) + root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, numAccounts) + return memorydb.New(), serverDB, serverTrieDB, root }, }, "accounts with storage": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccounts, func(t *testing.T, i int, account types.StateAccount) types.StateAccount { if i%5 == 0 { account.Root, _, _ = trie.GenerateTrie(t, serverTrieDB, 16, common.HashLength) @@ -160,21 +164,23 @@ func TestSimpleSyncCases(t *testing.T) { return account }) - return memorydb.New(), serverTrieDB, root + return memorydb.New(), serverDB, serverTrieDB, root }, }, "accounts with overlapping storage": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, common.Hash{}, numAccounts, 3) - return memorydb.New(), serverTrieDB, root + return memorydb.New(), serverDB, serverTrieDB, root }, }, "failed to fetch leafs": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := 
trie.NewDatabase(serverDB) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, numAccountsSmall, nil) - return memorydb.New(), serverTrieDB, root + return memorydb.New(), serverDB, serverTrieDB, root }, GetLeafsIntercept: func(_ message.LeafsRequest, _ message.LeafsResponse) (message.LeafsResponse, error) { return message.LeafsResponse{}, clientErr @@ -182,10 +188,11 @@ func TestSimpleSyncCases(t *testing.T) { expectedError: clientErr, }, "failed to fetch code": { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - serverTrieDB := trie.NewDatabase(memorydb.New()) - root := fillAccountsWithStorage(t, serverTrieDB, common.Hash{}, numAccountsSmall) - return memorydb.New(), serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) + root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, numAccountsSmall) + return memorydb.New(), serverDB, serverTrieDB, root }, GetCodeIntercept: func(_ []common.Hash, _ [][]byte) ([][]byte, error) { return nil, clientErr @@ -202,15 +209,16 @@ func TestSimpleSyncCases(t *testing.T) { } func TestCancelSync(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) // Create trie with 2000 accounts (more than one leaf request) - root := fillAccountsWithStorage(t, serverTrieDB, common.Hash{}, 2000) + root := fillAccountsWithStorage(t, serverDB, serverTrieDB, common.Hash{}, 2000) ctx, cancel := context.WithCancel(context.Background()) defer cancel() testSync(t, syncTest{ ctx: ctx, - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return memorydb.New(), serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return memorydb.New(), serverDB, serverTrieDB, root }, 
expectedError: context.Canceled, GetLeafsIntercept: func(_ message.LeafsRequest, lr message.LeafsResponse) (message.LeafsResponse, error) { @@ -242,7 +250,8 @@ func (i *interruptLeafsIntercept) getLeafsIntercept(request message.LeafsRequest } func TestResumeSyncAccountsTrieInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, common.Hash{}, 2000, 3) clientDB := memorydb.New() intercept := &interruptLeafsIntercept{ @@ -250,8 +259,8 @@ func TestResumeSyncAccountsTrieInterrupted(t *testing.T) { interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, @@ -260,14 +269,15 @@ func TestResumeSyncAccountsTrieInterrupted(t *testing.T) { assert.EqualValues(t, 2, intercept.numRequests) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, }) } func TestResumeSyncLargeStorageTrieInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 2000, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { @@ -283,22 +293,23 @@ func TestResumeSyncLargeStorageTrieInterrupted(t *testing.T) { 
interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, }) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, }) } func TestResumeSyncToNewRootAfterLargeStorageTrieInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) largeStorageRoot1, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) largeStorageRoot2, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) @@ -321,8 +332,8 @@ func TestResumeSyncToNewRootAfterLargeStorageTrieInterrupted(t *testing.T) { interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root1 + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root1 }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, @@ -331,14 +342,15 @@ func TestResumeSyncToNewRootAfterLargeStorageTrieInterrupted(t *testing.T) { <-snapshot.WipeSnapshot(clientDB, false) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root2 + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, 
serverTrieDB, root2 }, }) } func TestResumeSyncLargeStorageTrieWithConsecutiveDuplicatesInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { @@ -354,22 +366,23 @@ func TestResumeSyncLargeStorageTrieWithConsecutiveDuplicatesInterrupted(t *testi interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, }) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, }) } func TestResumeSyncLargeStorageTrieWithSpreadOutDuplicatesInterrupted(t *testing.T) { - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) largeStorageRoot, _, _ := trie.GenerateTrie(t, serverTrieDB, 2000, common.HashLength) root, _ := trie.FillAccounts(t, serverTrieDB, common.Hash{}, 100, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { @@ -384,35 +397,34 @@ func TestResumeSyncLargeStorageTrieWithSpreadOutDuplicatesInterrupted(t *testing interruptAfter: 1, } testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + 
prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, expectedError: errInterrupted, GetLeafsIntercept: intercept.getLeafsIntercept, }) testSync(t, syncTest{ - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root }, }) } func TestResyncNewRootAfterDeletes(t *testing.T) { for name, test := range map[string]struct { - deleteBetweenSyncs func(*testing.T, common.Hash, *trie.Database) + deleteBetweenSyncs func(*testing.T, common.Hash, ethdb.Database) }{ "delete code": { - deleteBetweenSyncs: func(t *testing.T, _ common.Hash, clientTrieDB *trie.Database) { - db := clientTrieDB.DiskDB() + deleteBetweenSyncs: func(t *testing.T, _ common.Hash, clientDB ethdb.Database) { // delete code - it := db.NewIterator(rawdb.CodePrefix, nil) + it := clientDB.NewIterator(rawdb.CodePrefix, nil) defer it.Release() for it.Next() { if len(it.Key()) != len(rawdb.CodePrefix)+common.HashLength { continue } - if err := db.Delete(it.Key()); err != nil { + if err := clientDB.Delete(it.Key()); err != nil { t.Fatal(err) } } @@ -422,7 +434,8 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { }, }, "delete intermediate storage nodes": { - deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientTrieDB *trie.Database) { + deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientDB ethdb.Database) { + clientTrieDB := trie.NewDatabase(clientDB) tr, err := trie.New(common.Hash{}, root, clientTrieDB) if err != nil { t.Fatal(err) @@ -459,7 +472,8 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { }, }, "delete intermediate account trie nodes": { - deleteBetweenSyncs: func(t *testing.T, root common.Hash, clientTrieDB *trie.Database) { + deleteBetweenSyncs: func(t *testing.T, root 
common.Hash, clientDB ethdb.Database) { + clientTrieDB := trie.NewDatabase(clientDB) trie.CorruptTrie(t, clientTrieDB, root, 5) }, }, @@ -470,10 +484,11 @@ func TestResyncNewRootAfterDeletes(t *testing.T) { } } -func testSyncerSyncsToNewRoot(t *testing.T, deleteBetweenSyncs func(*testing.T, common.Hash, *trie.Database)) { +func testSyncerSyncsToNewRoot(t *testing.T, deleteBetweenSyncs func(*testing.T, common.Hash, ethdb.Database)) { rand.Seed(1) clientDB := memorydb.New() - serverTrieDB := trie.NewDatabase(memorydb.New()) + serverDB := memorydb.New() + serverTrieDB := trie.NewDatabase(serverDB) root1, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, common.Hash{}, 1000, 3) root2, _ := FillAccountsWithOverlappingStorage(t, serverTrieDB, root1, 1000, 3) @@ -482,13 +497,13 @@ func testSyncerSyncsToNewRoot(t *testing.T, deleteBetweenSyncs func(*testing.T, testSyncResumes(t, []syncTest{ { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root1 + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root1 }, }, { - prepareForTest: func(t *testing.T) (ethdb.Database, *trie.Database, common.Hash) { - return clientDB, serverTrieDB, root2 + prepareForTest: func(t *testing.T) (ethdb.Database, ethdb.Database, *trie.Database, common.Hash) { + return clientDB, serverDB, serverTrieDB, root2 }, }, }, func() { @@ -500,6 +515,6 @@ func testSyncerSyncsToNewRoot(t *testing.T, deleteBetweenSyncs func(*testing.T, // delete snapshot first since this is not the responsibility of the EVM State Syncer <-snapshot.WipeSnapshot(clientDB, false) - deleteBetweenSyncs(t, root1, trie.NewDatabase(clientDB)) + deleteBetweenSyncs(t, root1, clientDB) }) } diff --git a/coreth/sync/statesync/test_sync.go b/coreth/sync/statesync/test_sync.go index 425507a8..74a3686a 100644 --- a/coreth/sync/statesync/test_sync.go +++ 
b/coreth/sync/statesync/test_sync.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/state/snapshot" "github.com/ava-labs/coreth/core/types" + "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/trie" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" @@ -22,8 +23,7 @@ import ( // assertDBConsistency checks [serverTrieDB] and [clientTrieDB] have the same EVM state trie at [root], // and that [clientTrieDB.DiskDB] has corresponding account & snapshot values. // Also verifies any code referenced by the EVM state is present in [clientTrieDB] and the hash is correct. -func assertDBConsistency(t testing.TB, root common.Hash, serverTrieDB, clientTrieDB *trie.Database) { - clientDB := clientTrieDB.DiskDB() +func assertDBConsistency(t testing.TB, root common.Hash, clientDB ethdb.Database, serverTrieDB, clientTrieDB *trie.Database) { numSnapshotAccounts := 0 accountIt := rawdb.IterateAccountSnapshots(clientDB) defer accountIt.Release() @@ -46,14 +46,14 @@ func assertDBConsistency(t testing.TB, root common.Hash, serverTrieDB, clientTri return err } // check snapshot consistency - snapshotVal := rawdb.ReadAccountSnapshot(clientTrieDB.DiskDB(), accHash) + snapshotVal := rawdb.ReadAccountSnapshot(clientDB, accHash) expectedSnapshotVal := snapshot.SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash, acc.IsMultiCoin) assert.Equal(t, expectedSnapshotVal, snapshotVal) // check code consistency if !bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) { codeHash := common.BytesToHash(acc.CodeHash) - code := rawdb.ReadCode(clientTrieDB.DiskDB(), codeHash) + code := rawdb.ReadCode(clientDB, codeHash) actualHash := crypto.Keccak256Hash(code) assert.NotZero(t, len(code)) assert.Equal(t, codeHash, actualHash) @@ -75,7 +75,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, serverTrieDB, clientTri // check storage trie and storage snapshot consistency trie.AssertTrieConsistency(t, 
acc.Root, serverTrieDB, clientTrieDB, func(key, val []byte) error { storageTrieLeavesCount++ - snapshotVal := rawdb.ReadStorageSnapshot(clientTrieDB.DiskDB(), accHash, common.BytesToHash(key)) + snapshotVal := rawdb.ReadStorageSnapshot(clientDB, accHash, common.BytesToHash(key)) assert.Equal(t, val, snapshotVal) return nil }) @@ -88,7 +88,7 @@ func assertDBConsistency(t testing.TB, root common.Hash, serverTrieDB, clientTri assert.Equal(t, trieAccountLeaves, numSnapshotAccounts) } -func fillAccountsWithStorage(t *testing.T, serverTrieDB *trie.Database, root common.Hash, numAccounts int) common.Hash { +func fillAccountsWithStorage(t *testing.T, serverDB ethdb.Database, serverTrieDB *trie.Database, root common.Hash, numAccounts int) common.Hash { newRoot, _ := trie.FillAccounts(t, serverTrieDB, root, numAccounts, func(t *testing.T, index int, account types.StateAccount) types.StateAccount { codeBytes := make([]byte, 256) _, err := rand.Read(codeBytes) @@ -97,7 +97,7 @@ func fillAccountsWithStorage(t *testing.T, serverTrieDB *trie.Database, root com } codeHash := crypto.Keccak256Hash(codeBytes) - rawdb.WriteCode(serverTrieDB.DiskDB(), codeHash, codeBytes) + rawdb.WriteCode(serverDB, codeHash, codeBytes) account.CodeHash = codeHash[:] // now create state trie diff --git a/coreth/sync/statesync/trie_segments.go b/coreth/sync/statesync/trie_segments.go index d0feda61..48afb6ec 100644 --- a/coreth/sync/statesync/trie_segments.go +++ b/coreth/sync/statesync/trie_segments.go @@ -5,6 +5,7 @@ package statesync import ( "bytes" + "context" "encoding/binary" "fmt" "sync" @@ -158,7 +159,7 @@ func (t *trieToSync) addSegment(start, end []byte) *trieSegment { // segmentFinished is called when one the trie segment with index [idx] finishes syncing. // creates intermediary hash nodes for the trie up to the last contiguous segment received from start. 
-func (t *trieToSync) segmentFinished(idx int) error { +func (t *trieToSync) segmentFinished(ctx context.Context, idx int) error { t.lock.Lock() defer t.lock.Unlock() @@ -183,6 +184,10 @@ func (t *trieToSync) segmentFinished(idx int) error { defer it.Release() for it.Next() { + if err := ctx.Err(); err != nil { + return err + } + if len(segment.end) > 0 && bytes.Compare(it.Key(), segment.end) > 0 { // don't go past the end of the segment. (data belongs to the next segment) break @@ -337,12 +342,12 @@ func (t *trieSegment) String() string { } // these functions implement the LeafSyncTask interface. -func (t *trieSegment) Root() common.Hash { return t.trie.root } -func (t *trieSegment) Account() common.Hash { return t.trie.account } -func (t *trieSegment) End() []byte { return t.end } -func (t *trieSegment) NodeType() message.NodeType { return message.StateTrieNode } -func (t *trieSegment) OnStart() (bool, error) { return t.trie.task.OnStart() } -func (t *trieSegment) OnFinish() error { return t.trie.segmentFinished(t.idx) } +func (t *trieSegment) Root() common.Hash { return t.trie.root } +func (t *trieSegment) Account() common.Hash { return t.trie.account } +func (t *trieSegment) End() []byte { return t.end } +func (t *trieSegment) NodeType() message.NodeType { return message.StateTrieNode } +func (t *trieSegment) OnStart() (bool, error) { return t.trie.task.OnStart() } +func (t *trieSegment) OnFinish(ctx context.Context) error { return t.trie.segmentFinished(ctx, t.idx) } func (t *trieSegment) Start() []byte { if t.pos != nil { diff --git a/coreth/tests/init.go b/coreth/tests/init.go index 6ae8d059..eeb50050 100644 --- a/coreth/tests/init.go +++ b/coreth/tests/init.go @@ -259,6 +259,25 @@ var Forks = map[string]*params.ChainConfig{ BanffBlockTimestamp: big.NewInt(0), CortinaBlockTimestamp: big.NewInt(0), }, + "DUpgrade": { + ChainID: big.NewInt(1), + HomesteadBlock: big.NewInt(0), + EIP150Block: big.NewInt(0), + EIP155Block: big.NewInt(0), + EIP158Block: 
big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + IstanbulBlock: big.NewInt(0), + ApricotPhase1BlockTimestamp: big.NewInt(0), + ApricotPhase2BlockTimestamp: big.NewInt(0), + ApricotPhase3BlockTimestamp: big.NewInt(0), + ApricotPhase4BlockTimestamp: big.NewInt(0), + ApricotPhase5BlockTimestamp: big.NewInt(0), + BanffBlockTimestamp: big.NewInt(0), + CortinaBlockTimestamp: big.NewInt(0), + DUpgradeBlockTimestamp: big.NewInt(0), + }, } // Returns the set of defined fork names diff --git a/coreth/tests/init_test.go b/coreth/tests/init_test.go deleted file mode 100644 index b41f32dc..00000000 --- a/coreth/tests/init_test.go +++ /dev/null @@ -1,287 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. -// -// This file is a derived work, based on the go-ethereum library whose original -// notices appear below. -// -// It is distributed under a license compatible with the licensing terms of the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********** -// Copyright 2017 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . 
- -package tests - -import ( - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "reflect" - "regexp" - "runtime" - "sort" - "strings" - "testing" - - "github.com/ava-labs/coreth/params" -) - -func readJSON(reader io.Reader, value interface{}) error { - data, err := io.ReadAll(reader) - if err != nil { - return fmt.Errorf("error reading JSON file: %v", err) - } - if err = json.Unmarshal(data, &value); err != nil { - if syntaxerr, ok := err.(*json.SyntaxError); ok { - line := findLine(data, syntaxerr.Offset) - return fmt.Errorf("JSON syntax error at line %v: %v", line, err) - } - return err - } - return nil -} - -func readJSONFile(fn string, value interface{}) error { - file, err := os.Open(fn) - if err != nil { - return err - } - defer file.Close() - - err = readJSON(file, value) - if err != nil { - return fmt.Errorf("%s in file %s", err.Error(), fn) - } - return nil -} - -// findLine returns the line number for the given offset into data. -func findLine(data []byte, offset int64) (line int) { - line = 1 - for i, r := range string(data) { - if int64(i) >= offset { - return - } - if r == '\n' { - line++ - } - } - return -} - -// testMatcher controls skipping and chain config assignment to tests. -type testMatcher struct { - configpat []testConfig - failpat []testFailure - skiploadpat []*regexp.Regexp - slowpat []*regexp.Regexp - runonlylistpat *regexp.Regexp -} - -type testConfig struct { - p *regexp.Regexp - config params.ChainConfig -} - -type testFailure struct { - p *regexp.Regexp - reason string -} - -// skipShortMode skips tests matching when the -short flag is used. -func (tm *testMatcher) slow(pattern string) { - tm.slowpat = append(tm.slowpat, regexp.MustCompile(pattern)) -} - -// skipLoad skips JSON loading of tests matching the pattern. -func (tm *testMatcher) skipLoad(pattern string) { - tm.skiploadpat = append(tm.skiploadpat, regexp.MustCompile(pattern)) -} - -// fails adds an expected failure for tests matching the pattern. 
-//nolint:unused -func (tm *testMatcher) fails(pattern string, reason string) { - if reason == "" { - panic("empty fail reason") - } - tm.failpat = append(tm.failpat, testFailure{regexp.MustCompile(pattern), reason}) -} - -func (tm *testMatcher) runonly(pattern string) { - tm.runonlylistpat = regexp.MustCompile(pattern) -} - -// config defines chain config for tests matching the pattern. -func (tm *testMatcher) config(pattern string, cfg params.ChainConfig) { - tm.configpat = append(tm.configpat, testConfig{regexp.MustCompile(pattern), cfg}) -} - -// findSkip matches name against test skip patterns. -func (tm *testMatcher) findSkip(name string) (reason string, skipload bool) { - isWin32 := runtime.GOARCH == "386" && runtime.GOOS == "windows" - for _, re := range tm.slowpat { - if re.MatchString(name) { - if testing.Short() { - return "skipped in -short mode", false - } - if isWin32 { - return "skipped on 32bit windows", false - } - } - } - for _, re := range tm.skiploadpat { - if re.MatchString(name) { - return "skipped by skipLoad", true - } - } - return "", false -} - -// findConfig returns the chain config matching defined patterns. -func (tm *testMatcher) findConfig(t *testing.T) *params.ChainConfig { - for _, m := range tm.configpat { - if m.p.MatchString(t.Name()) { - return &m.config - } - } - return new(params.ChainConfig) -} - -// checkFailure checks whether a failure is expected. -func (tm *testMatcher) checkFailure(t *testing.T, name string, err error) error { - failReason := "" - for _, m := range tm.failpat { - if m.p.MatchString(name) { - failReason = m.reason - break - } - } - if failReason != "" { - t.Logf("expected failure: %s", failReason) - if err != nil { - t.Logf("error: %v", err) - return nil - } - return fmt.Errorf("test succeeded unexpectedly") - } - return err -} - -// walk invokes its runTest argument for all subtests in the given directory. 
-// -// runTest should be a function of type func(t *testing.T, name string, x ), -// where TestType is the type of the test contained in test files. -func (tm *testMatcher) walk(t *testing.T, dir string, runTest interface{}) { - // Walk the directory. - dirinfo, err := os.Stat(dir) - if os.IsNotExist(err) || !dirinfo.IsDir() { - fmt.Fprintf(os.Stderr, "can't find test files in %s, did you clone the tests submodule?\n", dir) - t.Skip("missing test files") - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - name := filepath.ToSlash(strings.TrimPrefix(path, dir+string(filepath.Separator))) - if info.IsDir() { - if _, skipload := tm.findSkip(name + "/"); skipload { - return filepath.SkipDir - } - return nil - } - if filepath.Ext(path) == ".json" { - t.Run(name, func(t *testing.T) { tm.runTestFile(t, path, name, runTest) }) - } - return nil - }) - if err != nil { - t.Fatal(err) - } -} - -func (tm *testMatcher) runTestFile(t *testing.T, path, name string, runTest interface{}) { - if r, _ := tm.findSkip(name); r != "" { - t.Skip(r) - } - if tm.runonlylistpat != nil { - if !tm.runonlylistpat.MatchString(name) { - t.Skip("Skipped by runonly") - } - } - t.Parallel() - - // Load the file as map[string]. - m := makeMapFromTestFunc(runTest) - if err := readJSONFile(path, m.Addr().Interface()); err != nil { - t.Fatal(err) - } - - // Run all tests from the map. Don't wrap in a subtest if there is only one test in the file. 
- keys := sortedMapKeys(m) - if len(keys) == 1 { - runTestFunc(runTest, t, name, m, keys[0]) - } else { - for _, key := range keys { - name := name + "/" + key - t.Run(key, func(t *testing.T) { - if r, _ := tm.findSkip(name); r != "" { - t.Skip(r) - } - runTestFunc(runTest, t, name, m, key) - }) - } - } -} - -func makeMapFromTestFunc(f interface{}) reflect.Value { - stringT := reflect.TypeOf("") - testingT := reflect.TypeOf((*testing.T)(nil)) - ftyp := reflect.TypeOf(f) - if ftyp.Kind() != reflect.Func || ftyp.NumIn() != 3 || ftyp.NumOut() != 0 || ftyp.In(0) != testingT || ftyp.In(1) != stringT { - panic(fmt.Sprintf("bad test function type: want func(*testing.T, string, ), have %s", ftyp)) - } - testType := ftyp.In(2) - mp := reflect.New(reflect.MapOf(stringT, testType)) - return mp.Elem() -} - -func sortedMapKeys(m reflect.Value) []string { - keys := make([]string, m.Len()) - for i, k := range m.MapKeys() { - keys[i] = k.String() - } - sort.Strings(keys) - return keys -} - -func runTestFunc(runTest interface{}, t *testing.T, name string, m reflect.Value, key string) { - reflect.ValueOf(runTest).Call([]reflect.Value{ - reflect.ValueOf(t), - reflect.ValueOf(name), - m.MapIndex(reflect.ValueOf(key)), - }) -} - -// func TestMatcherRunonlylist(t *testing.T) { -// t.Parallel() -// tm := new(testMatcher) -// tm.runonly("invalid*") -// tm.walk(t, rlpTestDir, func(t *testing.T, name string, test *RLPTest) { -// if name[:len("invalidRLPTest.json")] != "invalidRLPTest.json" { -// t.Fatalf("invalid test found: %s != invalidRLPTest.json", name) -// } -// }) -// } diff --git a/coreth/tests/state_test_util.go b/coreth/tests/state_test_util.go index 22e6c15e..abe50d67 100644 --- a/coreth/tests/state_test_util.go +++ b/coreth/tests/state_test_util.go @@ -27,7 +27,6 @@ package tests import ( - "encoding/hex" "encoding/json" "fmt" "math/big" @@ -43,8 +42,6 @@ import ( "github.com/ava-labs/coreth/params" "github.com/ethereum/go-ethereum/common" 
"github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/common/math" - "github.com/ethereum/go-ethereum/crypto" ) // StateTest checks transaction processing without block context. @@ -144,10 +141,6 @@ func (t *StateTest) Subtests() []StateSubtest { return sub } -func (t *StateTest) gasLimit(subtest StateSubtest) uint64 { - return t.json.Tx.GasLimit[t.json.Post[subtest.Fork][subtest.Index].Indexes.Gas] -} - func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter bool) (*snapshot.Tree, *state.StateDB) { sdb := state.NewDatabase(db) statedb, _ := state.New(common.Hash{}, sdb, nil) @@ -169,88 +162,3 @@ func MakePreState(db ethdb.Database, accounts core.GenesisAlloc, snapshotter boo statedb, _ = state.New(root, sdb, snaps) return snaps, statedb } - -func (t *StateTest) genesis(config *params.ChainConfig) *core.Genesis { - return &core.Genesis{ - Config: config, - Coinbase: t.json.Env.Coinbase, - Difficulty: t.json.Env.Difficulty, - GasLimit: t.json.Env.GasLimit, - Number: t.json.Env.Number, - Timestamp: t.json.Env.Timestamp, - Alloc: t.json.Pre, - } -} - -func (tx *stTransaction) toMessage(ps stPostState, baseFee *big.Int) (core.Message, error) { - // Derive sender from private key if present. - var from common.Address - if len(tx.PrivateKey) > 0 { - key, err := crypto.ToECDSA(tx.PrivateKey) - if err != nil { - return nil, fmt.Errorf("invalid private key: %v", err) - } - from = crypto.PubkeyToAddress(key.PublicKey) - } - // Parse recipient if present. - var to *common.Address - if tx.To != "" { - to = new(common.Address) - if err := to.UnmarshalText([]byte(tx.To)); err != nil { - return nil, fmt.Errorf("invalid to address: %v", err) - } - } - - // Get values specific to this post state. 
- if ps.Indexes.Data > len(tx.Data) { - return nil, fmt.Errorf("tx data index %d out of bounds", ps.Indexes.Data) - } - if ps.Indexes.Value > len(tx.Value) { - return nil, fmt.Errorf("tx value index %d out of bounds", ps.Indexes.Value) - } - if ps.Indexes.Gas > len(tx.GasLimit) { - return nil, fmt.Errorf("tx gas limit index %d out of bounds", ps.Indexes.Gas) - } - dataHex := tx.Data[ps.Indexes.Data] - valueHex := tx.Value[ps.Indexes.Value] - gasLimit := tx.GasLimit[ps.Indexes.Gas] - // Value, Data hex encoding is messy: https://github.com/ethereum/tests/issues/203 - value := new(big.Int) - if valueHex != "0x" { - v, ok := math.ParseBig256(valueHex) - if !ok { - return nil, fmt.Errorf("invalid tx value %q", valueHex) - } - value = v - } - data, err := hex.DecodeString(strings.TrimPrefix(dataHex, "0x")) - if err != nil { - return nil, fmt.Errorf("invalid tx data %q", dataHex) - } - var accessList types.AccessList - if tx.AccessLists != nil && tx.AccessLists[ps.Indexes.Data] != nil { - accessList = *tx.AccessLists[ps.Indexes.Data] - } - // If baseFee provided, set gasPrice to effectiveGasPrice. 
- gasPrice := tx.GasPrice - if baseFee != nil { - if tx.MaxFeePerGas == nil { - tx.MaxFeePerGas = gasPrice - } - if tx.MaxFeePerGas == nil { - tx.MaxFeePerGas = new(big.Int) - } - if tx.MaxPriorityFeePerGas == nil { - tx.MaxPriorityFeePerGas = tx.MaxFeePerGas - } - gasPrice = math.BigMin(new(big.Int).Add(tx.MaxPriorityFeePerGas, baseFee), - tx.MaxFeePerGas) - } - if gasPrice == nil { - return nil, fmt.Errorf("no gas price provided") - } - - msg := types.NewMessage(from, to, tx.Nonce, value, gasLimit, gasPrice, - tx.MaxFeePerGas, tx.MaxPriorityFeePerGas, data, accessList, false) - return msg, nil -} diff --git a/coreth/trie/committer.go b/coreth/trie/committer.go index 535f2cf0..a430834d 100644 --- a/coreth/trie/committer.go +++ b/coreth/trie/committer.go @@ -54,7 +54,8 @@ func newCommitter(owner common.Hash, collectLeaf bool) *committer { } } -// Commit collapses a node down into a hash node and inserts it into the database +// Commit collapses a node down into a hash node and returns it along with +// the modified nodeset. func (c *committer) Commit(n node) (hashNode, *NodeSet, error) { h, err := c.commit(nil, n) if err != nil { @@ -63,7 +64,7 @@ func (c *committer) Commit(n node) (hashNode, *NodeSet, error) { return h.(hashNode), c.nodes, nil } -// commit collapses a node down into a hash node and inserts it into the database +// commit collapses a node down into a hash node and returns it. func (c *committer) commit(path []byte, n node) (node, error) { // if this path is clean, use available cached data hash, dirty := n.cache() @@ -85,7 +86,8 @@ func (c *committer) commit(path []byte, n node) (node, error) { } collapsed.Val = childV } - // The key needs to be copied, since we're delivering it to database + // The key needs to be copied, since we're adding it to the + // modified nodeset. 
collapsed.Key = hexToCompact(cn.Key) hashedNode := c.store(path, collapsed) if hn, ok := hashedNode.(hashNode); ok { @@ -144,16 +146,16 @@ func (c *committer) commitChildren(path []byte, n *fullNode) ([17]node, error) { return children, nil } -// store hashes the node n and if we have a storage layer specified, it writes -// the key/value pair to it and tracks any node->child references as well as any -// node->external trie references. +// store hashes the node n and adds it to the modified nodeset. If leaf collection +// is enabled, leaf nodes will be tracked in the modified nodeset as well. func (c *committer) store(path []byte, n node) node { // Larger nodes are replaced by their hash and stored in the database. var hash, _ = n.cache() + // This was not generated - must be a small node stored in the parent. // In theory, we should check if the node is leaf here (embedded node - // usually is leaf node). But small value(less than 32bytes) is not - // our target(leaves in account trie only). + // usually is leaf node). But small value (less than 32bytes) is not + // our target (leaves in account trie only). 
if hash == nil { return n } diff --git a/coreth/trie/database.go b/coreth/trie/database.go index b98acd92..8d2e4cbf 100644 --- a/coreth/trie/database.go +++ b/coreth/trie/database.go @@ -34,16 +34,20 @@ import ( "sync" "time" - "github.com/VictoriaMetrics/fastcache" "github.com/ava-labs/coreth/core/rawdb" "github.com/ava-labs/coreth/core/types" "github.com/ava-labs/coreth/ethdb" "github.com/ava-labs/coreth/metrics" + "github.com/ava-labs/coreth/utils" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/rlp" ) +const ( + cacheStatsUpdateFrequency = 1000 // update trie cache stats once per 1000 ops +) + var ( memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) @@ -84,7 +88,7 @@ var ( type Database struct { diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes - cleans *fastcache.Cache // GC friendly memory cache of clean node RLPs + cleans *utils.MeteredCache // GC friendly memory cache of clean node RLPs dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes oldest common.Hash // Oldest tracked node, flush-list head newest common.Hash // Newest tracked node, flush-list tail @@ -279,8 +283,10 @@ func expandNode(hash hashNode, n node) node { // Config defines all necessary options for database. 
type Config struct { - Cache int // Memory allowance (MB) to use for caching trie nodes in memory - Preimages bool // Flag whether the preimage of trie key is recorded + Cache int // Memory allowance (MB) to use for caching trie nodes in memory + Preimages bool // Flag whether the preimage of trie key is recorded + Journal string // File location to load trie clean cache from + StatsPrefix string // Prefix for cache stats (disabled if empty) } // NewDatabase creates a new trie database to store ephemeral trie content before @@ -294,9 +300,9 @@ func NewDatabase(diskdb ethdb.KeyValueStore) *Database { // before its written out to disk or garbage collected. It also acts as a read cache // for nodes loaded from disk. func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database { - var cleans *fastcache.Cache + var cleans *utils.MeteredCache if config != nil && config.Cache > 0 { - cleans = fastcache.New(config.Cache * 1024 * 1024) + cleans = utils.NewMeteredCache(config.Cache*1024*1024, config.Journal, config.StatsPrefix, cacheStatsUpdateFrequency) } var preimage *preimageStore if config != nil && config.Preimages { @@ -313,11 +319,6 @@ func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database return db } -// DiskDB retrieves the persistent storage backing the trie database. -func (db *Database) DiskDB() ethdb.KeyValueStore { - return db.diskdb -} - // insert inserts a simplified trie node into the memory database. // All nodes inserted by this function will be reference tracked // and in theory should only used for **trie nodes** insertion. 
@@ -388,10 +389,21 @@ func (db *Database) EncodedNode(h common.Hash) node { func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) { // Retrieve the node from the clean cache if available if db.cleans != nil { - if enc := db.cleans.Get(nil, hash[:]); enc != nil { - memcacheCleanHitMeter.Mark(1) - memcacheCleanReadMeter.Mark(int64(len(enc))) - return enc, nil, nil + k := hash[:] + enc, found := db.cleans.HasGet(nil, k) + if found { + if len(enc) > 0 { + memcacheCleanHitMeter.Mark(1) + memcacheCleanReadMeter.Mark(int64(len(enc))) + return enc, nil, nil + } else { + // Delete anything from cache that may have been added incorrectly + // + // This will prevent a panic as callers of this function assume the raw + // or cached node is populated. + log.Debug("removing empty value found in cleans cache", "k", k) + db.cleans.Del(k) + } } } // Retrieve the node from the dirty cache if available @@ -408,7 +420,7 @@ func (db *Database) node(hash common.Hash) ([]byte, *cachedNode, error) { // Content unavailable in memory, attempt to retrieve from disk enc := rawdb.ReadTrieNode(db.diskdb, hash) - if len(enc) != 0 { + if len(enc) > 0 { if db.cleans != nil { db.cleans.Set(hash[:], enc) memcacheCleanMissMeter.Mark(1) @@ -555,7 +567,7 @@ type flushItem struct { // writeFlushItems writes all items in [toFlush] to disk in batches of // [ethdb.IdealBatchSize]. This function does not access any variables inside // of [Database] and does not need to be synchronized. 
-func (db *Database) writeFlushItems(toFlush []flushItem) error { +func (db *Database) writeFlushItems(toFlush []*flushItem) error { batch := db.diskdb.NewBatch() for _, item := range toFlush { rlp := item.node.rlp() @@ -612,12 +624,12 @@ func (db *Database) Cap(limit common.StorageSize) error { } // Keep removing nodes from the flush-list until we're below allowance - toFlush := make([]flushItem, 0, 128) + toFlush := make([]*flushItem, 0, 128) oldest := db.oldest for pendingSize > limit && oldest != (common.Hash{}) { // Fetch the oldest referenced node and push into the batch node := db.dirties[oldest] - toFlush = append(toFlush, flushItem{oldest, node, nil}) + toFlush = append(toFlush, &flushItem{oldest, node, nil}) // Iterate to the next flush item, or abort if the size cap was achieved. Size // is the total size, including the useful cached data (hash -> blob), the @@ -683,8 +695,9 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H db.lock.RLock() lockStart := time.Now() nodes, storage := len(db.dirties), db.dirtiesSize - toFlush, err := db.commit(node, make([]flushItem, 0, 128), callback) + toFlush, err := db.commit(node, make([]*flushItem, 0, 128), callback) if err != nil { + db.lock.RUnlock() log.Error("Failed to commit trie from trie database", "err", err) return err } @@ -733,7 +746,7 @@ func (db *Database) Commit(node common.Hash, report bool, callback func(common.H // // [callback] will be invoked as soon as it is determined a trie node will be // flushed to disk (before it is actually written). 
-func (db *Database) commit(hash common.Hash, toFlush []flushItem, callback func(common.Hash)) ([]flushItem, error) { +func (db *Database) commit(hash common.Hash, toFlush []*flushItem, callback func(common.Hash)) ([]*flushItem, error) { // If the node does not exist, it's a previously committed node node, ok := db.dirties[hash] if !ok { @@ -751,7 +764,7 @@ func (db *Database) commit(hash common.Hash, toFlush []flushItem, callback func( // By processing the children of each node before the node itself, we ensure // that children are committed before their parents (an invariant of this // package). - toFlush = append(toFlush, flushItem{hash, node, nil}) + toFlush = append(toFlush, &flushItem{hash, node, nil}) if callback != nil { callback(hash) } @@ -895,3 +908,37 @@ func (db *Database) CommitPreimages() error { } return db.preimages.commit(true) } + +// saveCache saves clean state cache to given directory path +// using specified CPU cores. +func (db *Database) saveCache(dir string, threads int) error { + if db.cleans == nil { + return nil + } + log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) + + start := time.Now() + err := db.cleans.SaveToFileConcurrent(dir, threads) + if err != nil { + log.Error("Failed to persist clean trie cache", "error", err) + return err + } + log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) + return nil +} + +// SaveCachePeriodically atomically saves fast cache data to the given dir with +// the specified interval. All dump operation will only use a single CPU core. 
+func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + db.saveCache(dir, 1) + case <-stopCh: + return + } + } +} diff --git a/coreth/trie/hasher.go b/coreth/trie/hasher.go index 69d4f6d6..2737d8a8 100644 --- a/coreth/trie/hasher.go +++ b/coreth/trie/hasher.go @@ -180,8 +180,8 @@ func (h *hasher) fullnodeToHash(n *fullNode, force bool) node { // // All node encoding must be done like this: // -// node.encode(h.encbuf) -// enc := h.encodedBytes() +// node.encode(h.encbuf) +// enc := h.encodedBytes() // // This convention exists because node.encode can only be inlined/escape-analyzed when // called on a concrete receiver type. diff --git a/coreth/trie/proof.go b/coreth/trie/proof.go index 329b9afe..a864b05b 100644 --- a/coreth/trie/proof.go +++ b/coreth/trie/proof.go @@ -73,7 +73,7 @@ func (t *Trie) Prove(key []byte, fromLevel uint, proofDb ethdb.KeyValueWriter) e var err error tn, err = t.resolveHash(n, prefix) if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in Trie.Prove", "err", err) return err } default: @@ -349,9 +349,9 @@ findFork: // unset removes all internal node references either the left most or right most. 
// It can meet these scenarios: // -// - The given path is existent in the trie, unset the associated nodes with the -// specific direction -// - The given path is non-existent in the trie +// - The given path is existent in the trie, unset the associated nodes with the +// specific direction +// - The given path is non-existent in the trie // - the fork point is a fullnode, the corresponding child pointed by path // is nil, return // - the fork point is a shortnode, the shortnode is included in the range, @@ -468,15 +468,15 @@ func hasRightElement(node node, key []byte) bool { // Expect the normal case, this function can also be used to verify the following // range proofs: // -// - All elements proof. In this case the proof can be nil, but the range should -// be all the leaves in the trie. +// - All elements proof. In this case the proof can be nil, but the range should +// be all the leaves in the trie. // -// - One element proof. In this case no matter the edge proof is a non-existent -// proof or not, we can always verify the correctness of the proof. +// - One element proof. In this case no matter the edge proof is a non-existent +// proof or not, we can always verify the correctness of the proof. // -// - Zero element proof. In this case a single non-existent proof is enough to prove. -// Besides, if there are still some other leaves available on the right side, then -// an error will be returned. +// - Zero element proof. In this case a single non-existent proof is enough to prove. +// Besides, if there are still some other leaves available on the right side, then +// an error will be returned. // // Except returning the error to indicate the proof is valid or not, the function will // also return a flag to indicate whether there exists more accounts/slots in the trie. 
diff --git a/coreth/trie/secure_trie.go b/coreth/trie/secure_trie.go index 68bcf22b..9927ada3 100644 --- a/coreth/trie/secure_trie.go +++ b/coreth/trie/secure_trie.go @@ -27,8 +27,6 @@ package trie import ( - "fmt" - "github.com/ava-labs/coreth/core/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" @@ -45,14 +43,14 @@ func NewSecure(owner common.Hash, root common.Hash, db *Database) (*SecureTrie, return NewStateTrie(owner, root, db) } -// StateTrie wraps a trie with key hashing. In a secure trie, all +// StateTrie wraps a trie with key hashing. In a stateTrie trie, all // access operations hash the key using keccak256. This prevents // calling code from creating long chains of nodes that // increase the access time. // // Contrary to a regular trie, a StateTrie can only be created with // New and must have an attached database. The database also stores -// the preimage of each key. +// the preimage of each key if preimage recording is enabled. // // StateTrie is not safe for concurrent use. type StateTrie struct { @@ -63,20 +61,14 @@ type StateTrie struct { secKeyCacheOwner *StateTrie // Pointer to self, replace the key cache on mismatch } -// NewStateTrie creates a trie with an existing root node from a backing database -// and optional intermediate in-memory node pool. +// NewStateTrie creates a trie with an existing root node from a backing database. // // If root is the zero hash or the sha3 hash of an empty string, the // trie is initially empty. Otherwise, New will panic if db is nil // and returns MissingNodeError if the root node cannot be found. -// -// Accessing the trie loads nodes from the database or node pool on demand. -// Loaded nodes are kept around until their 'cache generation' expires. -// A new cache generation is created by each call to Commit. -// cachelimit sets the number of past cache generations to keep. 
func NewStateTrie(owner common.Hash, root common.Hash, db *Database) (*StateTrie, error) { if db == nil { - panic("trie.NewSecure called without a database") + panic("trie.NewStateTrie called without a database") } trie, err := New(owner, root, db) if err != nil { @@ -90,70 +82,53 @@ func NewStateTrie(owner common.Hash, root common.Hash, db *Database) (*StateTrie func (t *StateTrie) Get(key []byte) []byte { res, err := t.TryGet(key) if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in StateTrie.Get", "err", err) } return res } // TryGet returns the value for key stored in the trie. // The value bytes must not be modified by the caller. -// If a node was not found in the database, a MissingNodeError is returned. +// If the specified node is not in the trie, nil will be returned. +// If a trie node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) TryGet(key []byte) ([]byte, error) { return t.trie.TryGet(t.hashKey(key)) } +// TryGetAccount attempts to retrieve an account with provided trie path. +// If the specified account is not in the trie, nil will be returned. +// If a trie node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) TryGetAccount(key []byte) (*types.StateAccount, error) { - var ret types.StateAccount res, err := t.trie.TryGet(t.hashKey(key)) - if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - return &ret, err - } - if res == nil { - return nil, nil + if res == nil || err != nil { + return nil, err } - err = rlp.DecodeBytes(res, &ret) - return &ret, err + ret := new(types.StateAccount) + err = rlp.DecodeBytes(res, ret) + return ret, err } // TryGetAccountWithPreHashedKey does the same thing as TryGetAccount, however // it expects a key that is already hashed. This constitutes an abstraction leak, // since the client code needs to know the key format. 
func (t *StateTrie) TryGetAccountWithPreHashedKey(key []byte) (*types.StateAccount, error) { - var ret types.StateAccount res, err := t.trie.TryGet(key) - if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) - return &ret, err - } - if res == nil { - return nil, nil + if res == nil || err != nil { + return nil, err } - err = rlp.DecodeBytes(res, &ret) - return &ret, err + ret := new(types.StateAccount) + err = rlp.DecodeBytes(res, ret) + return ret, err } // TryGetNode attempts to retrieve a trie node by compact-encoded path. It is not // possible to use keybyte-encoding as the path might contain odd nibbles. +// If the specified trie node is not in the trie, nil will be returned. +// If a trie node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) TryGetNode(path []byte) ([]byte, int, error) { return t.trie.TryGetNode(path) } -// TryUpdateAccount account will abstract the write of an account to the -// secure trie. -func (t *StateTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error { - hk := t.hashKey(key) - data, err := rlp.EncodeToBytes(acc) - if err != nil { - return err - } - if err := t.trie.TryUpdate(hk, data); err != nil { - return err - } - t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) - return nil -} - // Update associates key with value in the trie. Subsequent calls to // Get will return value. If value has length zero, any existing value // is deleted from the trie and calls to Get will return nil. @@ -162,7 +137,7 @@ func (t *StateTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error // stored in the trie. 
func (t *StateTrie) Update(key, value []byte) { if err := t.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in StateTrie.Update", "err", err) } } @@ -173,7 +148,7 @@ func (t *StateTrie) Update(key, value []byte) { // The value bytes must not be modified by the caller while they are // stored in the trie. // -// If a node was not found in the database, a MissingNodeError is returned. +// If a node is not found in the database, a MissingNodeError is returned. func (t *StateTrie) TryUpdate(key, value []byte) error { hk := t.hashKey(key) err := t.trie.TryUpdate(hk, value) @@ -184,15 +159,31 @@ func (t *StateTrie) TryUpdate(key, value []byte) error { return nil } +// TryUpdateAccount account will abstract the write of an account to the +// secure trie. +func (t *StateTrie) TryUpdateAccount(key []byte, acc *types.StateAccount) error { + hk := t.hashKey(key) + data, err := rlp.EncodeToBytes(acc) + if err != nil { + return err + } + if err := t.trie.TryUpdate(hk, data); err != nil { + return err + } + t.getSecKeyCache()[string(hk)] = common.CopyBytes(key) + return nil +} + // Delete removes any existing value for key from the trie. func (t *StateTrie) Delete(key []byte) { if err := t.TryDelete(key); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in StateTrie.Delete", "err", err) } } // TryDelete removes any existing value for key from the trie. -// If a node was not found in the database, a MissingNodeError is returned. +// If the specified trie node is not in the trie, nothing will be changed. +// If a node is not found in the database, a MissingNodeError is returned. 
func (t *StateTrie) TryDelete(key []byte) error { hk := t.hashKey(key) delete(t.getSecKeyCache(), string(hk)) @@ -218,10 +209,10 @@ func (t *StateTrie) GetKey(shaKey []byte) []byte { return t.preimages.preimage(common.BytesToHash(shaKey)) } -// Commit collects all dirty nodes in the trie and replace them with the -// corresponding node hash. All collected nodes(including dirty leaves if +// Commit collects all dirty nodes in the trie and replaces them with the +// corresponding node hash. All collected nodes (including dirty leaves if // collectLeaf is true) will be encapsulated into a nodeset for return. -// The returned nodeset can be nil if the trie is clean(nothing to commit). +// The returned nodeset can be nil if the trie is clean (nothing to commit). // All cached preimages will be also flushed if preimages recording is enabled. // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage @@ -237,17 +228,17 @@ func (t *StateTrie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) { } t.secKeyCache = make(map[string][]byte) } - // Commit the trie to its intermediate node database + // Commit the trie and return its modified nodeset. return t.trie.Commit(collectLeaf) } -// Hash returns the root hash of SecureTrie. It does not write to the +// Hash returns the root hash of StateTrie. It does not write to the // database and can be used even if the trie doesn't have one. func (t *StateTrie) Hash() common.Hash { return t.trie.Hash() } -// Copy returns a copy of SecureTrie. +// Copy returns a copy of StateTrie. 
func (t *StateTrie) Copy() *StateTrie { return &StateTrie{ trie: *t.trie.Copy(), diff --git a/coreth/trie/stacktrie.go b/coreth/trie/stacktrie.go index 98777ba6..99773e11 100644 --- a/coreth/trie/stacktrie.go +++ b/coreth/trie/stacktrie.go @@ -31,7 +31,6 @@ import ( "bytes" "encoding/gob" "errors" - "fmt" "io" "sync" @@ -217,7 +216,7 @@ func (st *StackTrie) TryUpdate(key, value []byte) error { func (st *StackTrie) Update(key, value []byte) { if err := st.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in StackTrie.Update", "err", err) } } @@ -386,11 +385,13 @@ func (st *StackTrie) insert(key, value []byte) { // hash converts st into a 'hashedNode', if possible. Possible outcomes: // // 1. The rlp-encoded value was >= 32 bytes: -// - Then the 32-byte `hash` will be accessible in `st.val`. -// - And the 'st.type' will be 'hashedNode' +// - Then the 32-byte `hash` will be accessible in `st.val`. +// - And the 'st.type' will be 'hashedNode' +// // 2. The rlp-encoded value was < 32 bytes -// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'. -// - And the 'st.type' will be 'hashedNode' AGAIN +// - Then the <32 byte rlp-encoded value will be accessible in 'st.val'. +// - And the 'st.type' will be 'hashedNode' AGAIN +// // This method also sets 'st.type' to hashedNode, and clears 'st.key'. func (st *StackTrie) hash() { h := newHasher(false) diff --git a/coreth/trie/test_trie.go b/coreth/trie/test_trie.go index 4013d92a..580ba154 100644 --- a/coreth/trie/test_trie.go +++ b/coreth/trie/test_trie.go @@ -105,7 +105,7 @@ func AssertTrieConsistency(t testing.TB, root common.Hash, a, b *Database, onLea // CorruptTrie deletes every [n]th trie node from the trie given by [root] from the trieDB. // Assumes that the trie given by root can be iterated without issue. 
func CorruptTrie(t *testing.T, trieDB *Database, root common.Hash, n int) { - batch := trieDB.DiskDB().NewBatch() + batch := trieDB.diskdb.NewBatch() // next delete some trie nodes tr, err := New(common.Hash{}, root, trieDB) if err != nil { diff --git a/coreth/trie/trie.go b/coreth/trie/trie.go index c6433bd6..5539f755 100644 --- a/coreth/trie/trie.go +++ b/coreth/trie/trie.go @@ -138,7 +138,7 @@ func (t *Trie) NodeIterator(start []byte) NodeIterator { func (t *Trie) Get(key []byte) []byte { res, err := t.TryGet(key) if err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in Trie.Get", "err", err) } return res } @@ -275,7 +275,7 @@ func (t *Trie) tryGetNode(origNode node, path []byte, pos int) (item []byte, new // stored in the trie. func (t *Trie) Update(key, value []byte) { if err := t.TryUpdate(key, value); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in Trie.Update", "err", err) } } @@ -394,7 +394,7 @@ func (t *Trie) insert(n node, prefix, key []byte, value node) (bool, node, error // Delete removes any existing value for key from the trie. func (t *Trie) Delete(key []byte) { if err := t.TryDelete(key); err != nil { - log.Error(fmt.Sprintf("Unhandled trie error: %v", err)) + log.Error("Unhandled trie error in Trie.Delete", "err", err) } } @@ -588,10 +588,10 @@ func (t *Trie) Hash() common.Hash { return common.BytesToHash(hash.(hashNode)) } -// Commit collects all dirty nodes in the trie and replace them with the -// corresponding node hash. All collected nodes(including dirty leaves if +// Commit collects all dirty nodes in the trie and replaces them with the +// corresponding node hash. All collected nodes (including dirty leaves if // collectLeaf is true) will be encapsulated into a nodeset for return. -// The returned nodeset can be nil if the trie is clean(nothing to commit). 
+// The returned nodeset can be nil if the trie is clean (nothing to commit). // Once the trie is committed, it's not usable anymore. A new trie must // be created with new root and updated trie database for following usage func (t *Trie) Commit(collectLeaf bool) (common.Hash, *NodeSet, error) { diff --git a/coreth/trie/util_test.go b/coreth/trie/util_test.go index ad8a0966..95103747 100644 --- a/coreth/trie/util_test.go +++ b/coreth/trie/util_test.go @@ -79,7 +79,9 @@ func TestTrieTracer(t *testing.T) { // Commit the changes and re-create with new root root, nodes, _ := trie.Commit(false) - db.Update(NewWithNodeSet(nodes)) + if err := db.Update(NewWithNodeSet(nodes)); err != nil { + t.Fatal(err) + } trie, _ = New(common.Hash{}, root, db) trie.tracer = newTracer() diff --git a/coreth/utils/metered_cache.go b/coreth/utils/metered_cache.go new file mode 100644 index 00000000..4f5ce04a --- /dev/null +++ b/coreth/utils/metered_cache.go @@ -0,0 +1,140 @@ +// (c) 2022, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +import ( + "fmt" + "os" + "path/filepath" + "sync/atomic" + "time" + + "github.com/VictoriaMetrics/fastcache" + "github.com/ava-labs/coreth/metrics" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/log" +) + +// MeteredCache wraps *fastcache.Cache and periodically pulls stats from it. 
+type MeteredCache struct { + *fastcache.Cache + namespace string + + // stats to be surfaced + entriesCount metrics.Gauge + bytesSize metrics.Gauge + collisions metrics.Gauge + gets metrics.Gauge + sets metrics.Gauge + misses metrics.Gauge + statsTime metrics.Gauge + + // count all operations to decide when to update stats + ops uint64 + updateFrequency uint64 +} + +func dirSize(path string) (int64, error) { + var size int64 + err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + size += info.Size() + } + return nil + }) + return size, err +} + +// NewMeteredCache returns a new MeteredCache that will update stats to the +// provided namespace once per each [updateFrequency] operations. +// Note: if [updateFrequency] is passed as 0, it will be treated as 1. +func NewMeteredCache(size int, journal string, namespace string, updateFrequency uint64) *MeteredCache { + var cache *fastcache.Cache + if journal == "" { + cache = fastcache.New(size) + } else { + dirSize, err := dirSize(journal) + log.Info("attempting to load cache from disk", "path", journal, "dirSize", common.StorageSize(dirSize), "err", err) + cache = fastcache.LoadFromFileOrNew(journal, size) + } + if updateFrequency == 0 { + updateFrequency = 1 // avoid division by zero + } + mc := &MeteredCache{ + Cache: cache, + namespace: namespace, + updateFrequency: updateFrequency, + } + if namespace != "" { + // only register stats if a namespace is provided. 
+ mc.entriesCount = metrics.GetOrRegisterGauge(fmt.Sprintf("%s/entriesCount", namespace), nil) + mc.bytesSize = metrics.GetOrRegisterGauge(fmt.Sprintf("%s/bytesSize", namespace), nil) + mc.collisions = metrics.GetOrRegisterGauge(fmt.Sprintf("%s/collisions", namespace), nil) + mc.gets = metrics.GetOrRegisterGauge(fmt.Sprintf("%s/gets", namespace), nil) + mc.sets = metrics.GetOrRegisterGauge(fmt.Sprintf("%s/sets", namespace), nil) + mc.misses = metrics.GetOrRegisterGauge(fmt.Sprintf("%s/misses", namespace), nil) + mc.statsTime = metrics.GetOrRegisterGauge(fmt.Sprintf("%s/statsTime", namespace), nil) + } + return mc +} + +// updateStats updates metrics from fastcache +func (mc *MeteredCache) updateStatsIfNeeded() { + if mc.namespace == "" { + return + } + ops := atomic.AddUint64(&mc.ops, 1) + if ops%mc.updateFrequency != 0 { + return + } + + start := time.Now() + s := fastcache.Stats{} + mc.UpdateStats(&s) + mc.entriesCount.Update(int64(s.EntriesCount)) + mc.bytesSize.Update(int64(s.BytesSize)) + mc.collisions.Update(int64(s.Collisions)) + mc.gets.Update(int64(s.GetCalls)) + mc.sets.Update(int64(s.SetCalls)) + mc.misses.Update(int64(s.Misses)) + mc.statsTime.Inc(int64(time.Since(start))) // cumulative metric +} + +func (mc *MeteredCache) Del(k []byte) { + mc.updateStatsIfNeeded() + mc.Cache.Del(k) +} + +func (mc *MeteredCache) Get(dst, k []byte) []byte { + mc.updateStatsIfNeeded() + return mc.Cache.Get(dst, k) +} + +func (mc *MeteredCache) GetBig(dst, k []byte) []byte { + mc.updateStatsIfNeeded() + return mc.Cache.GetBig(dst, k) +} + +func (mc *MeteredCache) Has(k []byte) bool { + mc.updateStatsIfNeeded() + return mc.Cache.Has(k) +} + +func (mc *MeteredCache) HasGet(dst, k []byte) ([]byte, bool) { + mc.updateStatsIfNeeded() + return mc.Cache.HasGet(dst, k) +} + +func (mc *MeteredCache) Set(k, v []byte) { + mc.updateStatsIfNeeded() + mc.Cache.Set(k, v) +} + +func (mc *MeteredCache) SetBig(k, v []byte) { + mc.updateStatsIfNeeded() + mc.Cache.SetBig(k, v) +} diff --git 
a/coreth/vmerrs/vmerrs.go b/coreth/vmerrs/vmerrs.go index d7b160b0..4a7afcfd 100644 --- a/coreth/vmerrs/vmerrs.go +++ b/coreth/vmerrs/vmerrs.go @@ -32,21 +32,19 @@ import ( // List evm execution errors var ( - ErrOutOfGas = errors.New("out of gas") - ErrCodeStoreOutOfGas = errors.New("contract creation code storage out of gas") - ErrDepth = errors.New("max call depth exceeded") - ErrInsufficientBalance = errors.New("insufficient balance for transfer") - ErrContractAddressCollision = errors.New("contract address collision") - ErrExecutionReverted = errors.New("execution reverted") - ErrMaxCodeSizeExceeded = errors.New("max code size exceeded") - ErrInvalidJump = errors.New("invalid jump destination") - ErrWriteProtection = errors.New("write protection") - ErrReturnDataOutOfBounds = errors.New("return data out of bounds") - ErrGasUintOverflow = errors.New("gas uint64 overflow") - ErrInvalidCode = errors.New("invalid code: must not begin with 0xef") - ErrNonceUintOverflow = errors.New("nonce uint64 overflow") - ErrAddrProhibited = errors.New("prohibited address cannot be sender or created contract address") - ErrNativeAssetCallDeprecated = errors.New("nativeAssetCall is deprecated") - ErrToAddrProhibited6 = errors.New("prohibited address cannot be called") - ErrToAddrProhibitedSoft = errors.New("prohibited address cannot be called") + ErrOutOfGas = errors.New("out of gas") + ErrCodeStoreOutOfGas = errors.New("contract creation code storage out of gas") + ErrDepth = errors.New("max call depth exceeded") + ErrInsufficientBalance = errors.New("insufficient balance for transfer") + ErrContractAddressCollision = errors.New("contract address collision") + ErrExecutionReverted = errors.New("execution reverted") + ErrMaxInitCodeSizeExceeded = errors.New("max initcode size exceeded") + ErrMaxCodeSizeExceeded = errors.New("max code size exceeded") + ErrInvalidJump = errors.New("invalid jump destination") + ErrWriteProtection = errors.New("write protection") + 
ErrReturnDataOutOfBounds = errors.New("return data out of bounds") + ErrGasUintOverflow = errors.New("gas uint64 overflow") + ErrInvalidCode = errors.New("invalid code: must not begin with 0xef") + ErrNonceUintOverflow = errors.New("nonce uint64 overflow") + ErrAddrProhibited = errors.New("prohibited address cannot be sender or created contract address") )